76 files changed, 474 insertions, 428 deletions
diff --git a/arch/s390/include/asm/eadm.h b/arch/s390/include/asm/eadm.h index 67026300c88e..144809a3f4f6 100644 --- a/arch/s390/include/asm/eadm.h +++ b/arch/s390/include/asm/eadm.h | |||
@@ -3,6 +3,7 @@ | |||
3 | 3 | ||
4 | #include <linux/types.h> | 4 | #include <linux/types.h> |
5 | #include <linux/device.h> | 5 | #include <linux/device.h> |
6 | #include <linux/blkdev.h> | ||
6 | 7 | ||
7 | struct arqb { | 8 | struct arqb { |
8 | u64 data; | 9 | u64 data; |
@@ -105,13 +106,14 @@ struct scm_driver { | |||
105 | int (*probe) (struct scm_device *scmdev); | 106 | int (*probe) (struct scm_device *scmdev); |
106 | int (*remove) (struct scm_device *scmdev); | 107 | int (*remove) (struct scm_device *scmdev); |
107 | void (*notify) (struct scm_device *scmdev, enum scm_event event); | 108 | void (*notify) (struct scm_device *scmdev, enum scm_event event); |
108 | void (*handler) (struct scm_device *scmdev, void *data, int error); | 109 | void (*handler) (struct scm_device *scmdev, void *data, |
110 | blk_status_t error); | ||
109 | }; | 111 | }; |
110 | 112 | ||
111 | int scm_driver_register(struct scm_driver *scmdrv); | 113 | int scm_driver_register(struct scm_driver *scmdrv); |
112 | void scm_driver_unregister(struct scm_driver *scmdrv); | 114 | void scm_driver_unregister(struct scm_driver *scmdrv); |
113 | 115 | ||
114 | int eadm_start_aob(struct aob *aob); | 116 | int eadm_start_aob(struct aob *aob); |
115 | void scm_irq_handler(struct aob *aob, int error); | 117 | void scm_irq_handler(struct aob *aob, blk_status_t error); |
116 | 118 | ||
117 | #endif /* _ASM_S390_EADM_H */ | 119 | #endif /* _ASM_S390_EADM_H */ |
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c index 85410279beab..b55fe9bf5d3e 100644 --- a/arch/um/drivers/ubd_kern.c +++ b/arch/um/drivers/ubd_kern.c | |||
@@ -534,7 +534,7 @@ static void ubd_handler(void) | |||
534 | for (count = 0; count < n/sizeof(struct io_thread_req *); count++) { | 534 | for (count = 0; count < n/sizeof(struct io_thread_req *); count++) { |
535 | blk_end_request( | 535 | blk_end_request( |
536 | (*irq_req_buffer)[count]->req, | 536 | (*irq_req_buffer)[count]->req, |
537 | 0, | 537 | BLK_STS_OK, |
538 | (*irq_req_buffer)[count]->length | 538 | (*irq_req_buffer)[count]->length |
539 | ); | 539 | ); |
540 | kfree((*irq_req_buffer)[count]); | 540 | kfree((*irq_req_buffer)[count]); |
diff --git a/block/blk-core.c b/block/blk-core.c index c7068520794b..e942a9f814c7 100644 --- a/block/blk-core.c +++ b/block/blk-core.c | |||
@@ -129,11 +129,66 @@ void blk_rq_init(struct request_queue *q, struct request *rq) | |||
129 | } | 129 | } |
130 | EXPORT_SYMBOL(blk_rq_init); | 130 | EXPORT_SYMBOL(blk_rq_init); |
131 | 131 | ||
132 | static const struct { | ||
133 | int errno; | ||
134 | const char *name; | ||
135 | } blk_errors[] = { | ||
136 | [BLK_STS_OK] = { 0, "" }, | ||
137 | [BLK_STS_NOTSUPP] = { -EOPNOTSUPP, "operation not supported" }, | ||
138 | [BLK_STS_TIMEOUT] = { -ETIMEDOUT, "timeout" }, | ||
139 | [BLK_STS_NOSPC] = { -ENOSPC, "critical space allocation" }, | ||
140 | [BLK_STS_TRANSPORT] = { -ENOLINK, "recoverable transport" }, | ||
141 | [BLK_STS_TARGET] = { -EREMOTEIO, "critical target" }, | ||
142 | [BLK_STS_NEXUS] = { -EBADE, "critical nexus" }, | ||
143 | [BLK_STS_MEDIUM] = { -ENODATA, "critical medium" }, | ||
144 | [BLK_STS_PROTECTION] = { -EILSEQ, "protection" }, | ||
145 | [BLK_STS_RESOURCE] = { -ENOMEM, "kernel resource" }, | ||
146 | |||
147 | /* everything else not covered above: */ | ||
148 | [BLK_STS_IOERR] = { -EIO, "I/O" }, | ||
149 | }; | ||
150 | |||
151 | blk_status_t errno_to_blk_status(int errno) | ||
152 | { | ||
153 | int i; | ||
154 | |||
155 | for (i = 0; i < ARRAY_SIZE(blk_errors); i++) { | ||
156 | if (blk_errors[i].errno == errno) | ||
157 | return (__force blk_status_t)i; | ||
158 | } | ||
159 | |||
160 | return BLK_STS_IOERR; | ||
161 | } | ||
162 | EXPORT_SYMBOL_GPL(errno_to_blk_status); | ||
163 | |||
164 | int blk_status_to_errno(blk_status_t status) | ||
165 | { | ||
166 | int idx = (__force int)status; | ||
167 | |||
168 | if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors))) | ||
169 | return -EIO; | ||
170 | return blk_errors[idx].errno; | ||
171 | } | ||
172 | EXPORT_SYMBOL_GPL(blk_status_to_errno); | ||
173 | |||
174 | static void print_req_error(struct request *req, blk_status_t status) | ||
175 | { | ||
176 | int idx = (__force int)status; | ||
177 | |||
178 | if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors))) | ||
179 | return; | ||
180 | |||
181 | printk_ratelimited(KERN_ERR "%s: %s error, dev %s, sector %llu\n", | ||
182 | __func__, blk_errors[idx].name, req->rq_disk ? | ||
183 | req->rq_disk->disk_name : "?", | ||
184 | (unsigned long long)blk_rq_pos(req)); | ||
185 | } | ||
186 | |||
132 | static void req_bio_endio(struct request *rq, struct bio *bio, | 187 | static void req_bio_endio(struct request *rq, struct bio *bio, |
133 | unsigned int nbytes, int error) | 188 | unsigned int nbytes, blk_status_t error) |
134 | { | 189 | { |
135 | if (error) | 190 | if (error) |
136 | bio->bi_error = error; | 191 | bio->bi_error = blk_status_to_errno(error); |
137 | 192 | ||
138 | if (unlikely(rq->rq_flags & RQF_QUIET)) | 193 | if (unlikely(rq->rq_flags & RQF_QUIET)) |
139 | bio_set_flag(bio, BIO_QUIET); | 194 | bio_set_flag(bio, BIO_QUIET); |
@@ -2177,29 +2232,29 @@ static int blk_cloned_rq_check_limits(struct request_queue *q, | |||
2177 | * @q: the queue to submit the request | 2232 | * @q: the queue to submit the request |
2178 | * @rq: the request being queued | 2233 | * @rq: the request being queued |
2179 | */ | 2234 | */ |
2180 | int blk_insert_cloned_request(struct request_queue *q, struct request *rq) | 2235 | blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq) |
2181 | { | 2236 | { |
2182 | unsigned long flags; | 2237 | unsigned long flags; |
2183 | int where = ELEVATOR_INSERT_BACK; | 2238 | int where = ELEVATOR_INSERT_BACK; |
2184 | 2239 | ||
2185 | if (blk_cloned_rq_check_limits(q, rq)) | 2240 | if (blk_cloned_rq_check_limits(q, rq)) |
2186 | return -EIO; | 2241 | return BLK_STS_IOERR; |
2187 | 2242 | ||
2188 | if (rq->rq_disk && | 2243 | if (rq->rq_disk && |
2189 | should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq))) | 2244 | should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq))) |
2190 | return -EIO; | 2245 | return BLK_STS_IOERR; |
2191 | 2246 | ||
2192 | if (q->mq_ops) { | 2247 | if (q->mq_ops) { |
2193 | if (blk_queue_io_stat(q)) | 2248 | if (blk_queue_io_stat(q)) |
2194 | blk_account_io_start(rq, true); | 2249 | blk_account_io_start(rq, true); |
2195 | blk_mq_sched_insert_request(rq, false, true, false, false); | 2250 | blk_mq_sched_insert_request(rq, false, true, false, false); |
2196 | return 0; | 2251 | return BLK_STS_OK; |
2197 | } | 2252 | } |
2198 | 2253 | ||
2199 | spin_lock_irqsave(q->queue_lock, flags); | 2254 | spin_lock_irqsave(q->queue_lock, flags); |
2200 | if (unlikely(blk_queue_dying(q))) { | 2255 | if (unlikely(blk_queue_dying(q))) { |
2201 | spin_unlock_irqrestore(q->queue_lock, flags); | 2256 | spin_unlock_irqrestore(q->queue_lock, flags); |
2202 | return -ENODEV; | 2257 | return BLK_STS_IOERR; |
2203 | } | 2258 | } |
2204 | 2259 | ||
2205 | /* | 2260 | /* |
@@ -2216,7 +2271,7 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq) | |||
2216 | __blk_run_queue(q); | 2271 | __blk_run_queue(q); |
2217 | spin_unlock_irqrestore(q->queue_lock, flags); | 2272 | spin_unlock_irqrestore(q->queue_lock, flags); |
2218 | 2273 | ||
2219 | return 0; | 2274 | return BLK_STS_OK; |
2220 | } | 2275 | } |
2221 | EXPORT_SYMBOL_GPL(blk_insert_cloned_request); | 2276 | EXPORT_SYMBOL_GPL(blk_insert_cloned_request); |
2222 | 2277 | ||
@@ -2450,15 +2505,14 @@ struct request *blk_peek_request(struct request_queue *q) | |||
2450 | rq = NULL; | 2505 | rq = NULL; |
2451 | break; | 2506 | break; |
2452 | } else if (ret == BLKPREP_KILL || ret == BLKPREP_INVALID) { | 2507 | } else if (ret == BLKPREP_KILL || ret == BLKPREP_INVALID) { |
2453 | int err = (ret == BLKPREP_INVALID) ? -EREMOTEIO : -EIO; | ||
2454 | |||
2455 | rq->rq_flags |= RQF_QUIET; | 2508 | rq->rq_flags |= RQF_QUIET; |
2456 | /* | 2509 | /* |
2457 | * Mark this request as started so we don't trigger | 2510 | * Mark this request as started so we don't trigger |
2458 | * any debug logic in the end I/O path. | 2511 | * any debug logic in the end I/O path. |
2459 | */ | 2512 | */ |
2460 | blk_start_request(rq); | 2513 | blk_start_request(rq); |
2461 | __blk_end_request_all(rq, err); | 2514 | __blk_end_request_all(rq, ret == BLKPREP_INVALID ? |
2515 | BLK_STS_TARGET : BLK_STS_IOERR); | ||
2462 | } else { | 2516 | } else { |
2463 | printk(KERN_ERR "%s: bad return=%d\n", __func__, ret); | 2517 | printk(KERN_ERR "%s: bad return=%d\n", __func__, ret); |
2464 | break; | 2518 | break; |
@@ -2547,7 +2601,7 @@ EXPORT_SYMBOL(blk_fetch_request); | |||
2547 | /** | 2601 | /** |
2548 | * blk_update_request - Special helper function for request stacking drivers | 2602 | * blk_update_request - Special helper function for request stacking drivers |
2549 | * @req: the request being processed | 2603 | * @req: the request being processed |
2550 | * @error: %0 for success, < %0 for error | 2604 | * @error: block status code |
2551 | * @nr_bytes: number of bytes to complete @req | 2605 | * @nr_bytes: number of bytes to complete @req |
2552 | * | 2606 | * |
2553 | * Description: | 2607 | * Description: |
@@ -2566,49 +2620,19 @@ EXPORT_SYMBOL(blk_fetch_request); | |||
2566 | * %false - this request doesn't have any more data | 2620 | * %false - this request doesn't have any more data |
2567 | * %true - this request has more data | 2621 | * %true - this request has more data |
2568 | **/ | 2622 | **/ |
2569 | bool blk_update_request(struct request *req, int error, unsigned int nr_bytes) | 2623 | bool blk_update_request(struct request *req, blk_status_t error, |
2624 | unsigned int nr_bytes) | ||
2570 | { | 2625 | { |
2571 | int total_bytes; | 2626 | int total_bytes; |
2572 | 2627 | ||
2573 | trace_block_rq_complete(req, error, nr_bytes); | 2628 | trace_block_rq_complete(req, blk_status_to_errno(error), nr_bytes); |
2574 | 2629 | ||
2575 | if (!req->bio) | 2630 | if (!req->bio) |
2576 | return false; | 2631 | return false; |
2577 | 2632 | ||
2578 | if (error && !blk_rq_is_passthrough(req) && | 2633 | if (unlikely(error && !blk_rq_is_passthrough(req) && |
2579 | !(req->rq_flags & RQF_QUIET)) { | 2634 | !(req->rq_flags & RQF_QUIET))) |
2580 | char *error_type; | 2635 | print_req_error(req, error); |
2581 | |||
2582 | switch (error) { | ||
2583 | case -ENOLINK: | ||
2584 | error_type = "recoverable transport"; | ||
2585 | break; | ||
2586 | case -EREMOTEIO: | ||
2587 | error_type = "critical target"; | ||
2588 | break; | ||
2589 | case -EBADE: | ||
2590 | error_type = "critical nexus"; | ||
2591 | break; | ||
2592 | case -ETIMEDOUT: | ||
2593 | error_type = "timeout"; | ||
2594 | break; | ||
2595 | case -ENOSPC: | ||
2596 | error_type = "critical space allocation"; | ||
2597 | break; | ||
2598 | case -ENODATA: | ||
2599 | error_type = "critical medium"; | ||
2600 | break; | ||
2601 | case -EIO: | ||
2602 | default: | ||
2603 | error_type = "I/O"; | ||
2604 | break; | ||
2605 | } | ||
2606 | printk_ratelimited(KERN_ERR "%s: %s error, dev %s, sector %llu\n", | ||
2607 | __func__, error_type, req->rq_disk ? | ||
2608 | req->rq_disk->disk_name : "?", | ||
2609 | (unsigned long long)blk_rq_pos(req)); | ||
2610 | |||
2611 | } | ||
2612 | 2636 | ||
2613 | blk_account_io_completion(req, nr_bytes); | 2637 | blk_account_io_completion(req, nr_bytes); |
2614 | 2638 | ||
@@ -2674,7 +2698,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes) | |||
2674 | } | 2698 | } |
2675 | EXPORT_SYMBOL_GPL(blk_update_request); | 2699 | EXPORT_SYMBOL_GPL(blk_update_request); |
2676 | 2700 | ||
2677 | static bool blk_update_bidi_request(struct request *rq, int error, | 2701 | static bool blk_update_bidi_request(struct request *rq, blk_status_t error, |
2678 | unsigned int nr_bytes, | 2702 | unsigned int nr_bytes, |
2679 | unsigned int bidi_bytes) | 2703 | unsigned int bidi_bytes) |
2680 | { | 2704 | { |
@@ -2715,7 +2739,7 @@ EXPORT_SYMBOL_GPL(blk_unprep_request); | |||
2715 | /* | 2739 | /* |
2716 | * queue lock must be held | 2740 | * queue lock must be held |
2717 | */ | 2741 | */ |
2718 | void blk_finish_request(struct request *req, int error) | 2742 | void blk_finish_request(struct request *req, blk_status_t error) |
2719 | { | 2743 | { |
2720 | struct request_queue *q = req->q; | 2744 | struct request_queue *q = req->q; |
2721 | 2745 | ||
@@ -2752,7 +2776,7 @@ EXPORT_SYMBOL(blk_finish_request); | |||
2752 | /** | 2776 | /** |
2753 | * blk_end_bidi_request - Complete a bidi request | 2777 | * blk_end_bidi_request - Complete a bidi request |
2754 | * @rq: the request to complete | 2778 | * @rq: the request to complete |
2755 | * @error: %0 for success, < %0 for error | 2779 | * @error: block status code |
2756 | * @nr_bytes: number of bytes to complete @rq | 2780 | * @nr_bytes: number of bytes to complete @rq |
2757 | * @bidi_bytes: number of bytes to complete @rq->next_rq | 2781 | * @bidi_bytes: number of bytes to complete @rq->next_rq |
2758 | * | 2782 | * |
@@ -2766,7 +2790,7 @@ EXPORT_SYMBOL(blk_finish_request); | |||
2766 | * %false - we are done with this request | 2790 | * %false - we are done with this request |
2767 | * %true - still buffers pending for this request | 2791 | * %true - still buffers pending for this request |
2768 | **/ | 2792 | **/ |
2769 | static bool blk_end_bidi_request(struct request *rq, int error, | 2793 | static bool blk_end_bidi_request(struct request *rq, blk_status_t error, |
2770 | unsigned int nr_bytes, unsigned int bidi_bytes) | 2794 | unsigned int nr_bytes, unsigned int bidi_bytes) |
2771 | { | 2795 | { |
2772 | struct request_queue *q = rq->q; | 2796 | struct request_queue *q = rq->q; |
@@ -2785,7 +2809,7 @@ static bool blk_end_bidi_request(struct request *rq, int error, | |||
2785 | /** | 2809 | /** |
2786 | * __blk_end_bidi_request - Complete a bidi request with queue lock held | 2810 | * __blk_end_bidi_request - Complete a bidi request with queue lock held |
2787 | * @rq: the request to complete | 2811 | * @rq: the request to complete |
2788 | * @error: %0 for success, < %0 for error | 2812 | * @error: block status code |
2789 | * @nr_bytes: number of bytes to complete @rq | 2813 | * @nr_bytes: number of bytes to complete @rq |
2790 | * @bidi_bytes: number of bytes to complete @rq->next_rq | 2814 | * @bidi_bytes: number of bytes to complete @rq->next_rq |
2791 | * | 2815 | * |
@@ -2797,7 +2821,7 @@ static bool blk_end_bidi_request(struct request *rq, int error, | |||
2797 | * %false - we are done with this request | 2821 | * %false - we are done with this request |
2798 | * %true - still buffers pending for this request | 2822 | * %true - still buffers pending for this request |
2799 | **/ | 2823 | **/ |
2800 | static bool __blk_end_bidi_request(struct request *rq, int error, | 2824 | static bool __blk_end_bidi_request(struct request *rq, blk_status_t error, |
2801 | unsigned int nr_bytes, unsigned int bidi_bytes) | 2825 | unsigned int nr_bytes, unsigned int bidi_bytes) |
2802 | { | 2826 | { |
2803 | if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes)) | 2827 | if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes)) |
@@ -2811,7 +2835,7 @@ static bool __blk_end_bidi_request(struct request *rq, int error, | |||
2811 | /** | 2835 | /** |
2812 | * blk_end_request - Helper function for drivers to complete the request. | 2836 | * blk_end_request - Helper function for drivers to complete the request. |
2813 | * @rq: the request being processed | 2837 | * @rq: the request being processed |
2814 | * @error: %0 for success, < %0 for error | 2838 | * @error: block status code |
2815 | * @nr_bytes: number of bytes to complete | 2839 | * @nr_bytes: number of bytes to complete |
2816 | * | 2840 | * |
2817 | * Description: | 2841 | * Description: |
@@ -2822,7 +2846,8 @@ static bool __blk_end_bidi_request(struct request *rq, int error, | |||
2822 | * %false - we are done with this request | 2846 | * %false - we are done with this request |
2823 | * %true - still buffers pending for this request | 2847 | * %true - still buffers pending for this request |
2824 | **/ | 2848 | **/ |
2825 | bool blk_end_request(struct request *rq, int error, unsigned int nr_bytes) | 2849 | bool blk_end_request(struct request *rq, blk_status_t error, |
2850 | unsigned int nr_bytes) | ||
2826 | { | 2851 | { |
2827 | return blk_end_bidi_request(rq, error, nr_bytes, 0); | 2852 | return blk_end_bidi_request(rq, error, nr_bytes, 0); |
2828 | } | 2853 | } |
@@ -2831,12 +2856,12 @@ EXPORT_SYMBOL(blk_end_request); | |||
2831 | /** | 2856 | /** |
2832 | * blk_end_request_all - Helper function for drives to finish the request. | 2857 | * blk_end_request_all - Helper function for drives to finish the request. |
2833 | * @rq: the request to finish | 2858 | * @rq: the request to finish |
2834 | * @error: %0 for success, < %0 for error | 2859 | * @error: block status code |
2835 | * | 2860 | * |
2836 | * Description: | 2861 | * Description: |
2837 | * Completely finish @rq. | 2862 | * Completely finish @rq. |
2838 | */ | 2863 | */ |
2839 | void blk_end_request_all(struct request *rq, int error) | 2864 | void blk_end_request_all(struct request *rq, blk_status_t error) |
2840 | { | 2865 | { |
2841 | bool pending; | 2866 | bool pending; |
2842 | unsigned int bidi_bytes = 0; | 2867 | unsigned int bidi_bytes = 0; |
@@ -2852,7 +2877,7 @@ EXPORT_SYMBOL(blk_end_request_all); | |||
2852 | /** | 2877 | /** |
2853 | * __blk_end_request - Helper function for drivers to complete the request. | 2878 | * __blk_end_request - Helper function for drivers to complete the request. |
2854 | * @rq: the request being processed | 2879 | * @rq: the request being processed |
2855 | * @error: %0 for success, < %0 for error | 2880 | * @error: block status code |
2856 | * @nr_bytes: number of bytes to complete | 2881 | * @nr_bytes: number of bytes to complete |
2857 | * | 2882 | * |
2858 | * Description: | 2883 | * Description: |
@@ -2862,7 +2887,8 @@ EXPORT_SYMBOL(blk_end_request_all); | |||
2862 | * %false - we are done with this request | 2887 | * %false - we are done with this request |
2863 | * %true - still buffers pending for this request | 2888 | * %true - still buffers pending for this request |
2864 | **/ | 2889 | **/ |
2865 | bool __blk_end_request(struct request *rq, int error, unsigned int nr_bytes) | 2890 | bool __blk_end_request(struct request *rq, blk_status_t error, |
2891 | unsigned int nr_bytes) | ||
2866 | { | 2892 | { |
2867 | return __blk_end_bidi_request(rq, error, nr_bytes, 0); | 2893 | return __blk_end_bidi_request(rq, error, nr_bytes, 0); |
2868 | } | 2894 | } |
@@ -2871,12 +2897,12 @@ EXPORT_SYMBOL(__blk_end_request); | |||
2871 | /** | 2897 | /** |
2872 | * __blk_end_request_all - Helper function for drives to finish the request. | 2898 | * __blk_end_request_all - Helper function for drives to finish the request. |
2873 | * @rq: the request to finish | 2899 | * @rq: the request to finish |
2874 | * @error: %0 for success, < %0 for error | 2900 | * @error: block status code |
2875 | * | 2901 | * |
2876 | * Description: | 2902 | * Description: |
2877 | * Completely finish @rq. Must be called with queue lock held. | 2903 | * Completely finish @rq. Must be called with queue lock held. |
2878 | */ | 2904 | */ |
2879 | void __blk_end_request_all(struct request *rq, int error) | 2905 | void __blk_end_request_all(struct request *rq, blk_status_t error) |
2880 | { | 2906 | { |
2881 | bool pending; | 2907 | bool pending; |
2882 | unsigned int bidi_bytes = 0; | 2908 | unsigned int bidi_bytes = 0; |
@@ -2892,7 +2918,7 @@ EXPORT_SYMBOL(__blk_end_request_all); | |||
2892 | /** | 2918 | /** |
2893 | * __blk_end_request_cur - Helper function to finish the current request chunk. | 2919 | * __blk_end_request_cur - Helper function to finish the current request chunk. |
2894 | * @rq: the request to finish the current chunk for | 2920 | * @rq: the request to finish the current chunk for |
2895 | * @error: %0 for success, < %0 for error | 2921 | * @error: block status code |
2896 | * | 2922 | * |
2897 | * Description: | 2923 | * Description: |
2898 | * Complete the current consecutively mapped chunk from @rq. Must | 2924 | * Complete the current consecutively mapped chunk from @rq. Must |
@@ -2902,7 +2928,7 @@ EXPORT_SYMBOL(__blk_end_request_all); | |||
2902 | * %false - we are done with this request | 2928 | * %false - we are done with this request |
2903 | * %true - still buffers pending for this request | 2929 | * %true - still buffers pending for this request |
2904 | */ | 2930 | */ |
2905 | bool __blk_end_request_cur(struct request *rq, int error) | 2931 | bool __blk_end_request_cur(struct request *rq, blk_status_t error) |
2906 | { | 2932 | { |
2907 | return __blk_end_request(rq, error, blk_rq_cur_bytes(rq)); | 2933 | return __blk_end_request(rq, error, blk_rq_cur_bytes(rq)); |
2908 | } | 2934 | } |
@@ -3243,7 +3269,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule) | |||
3243 | * Short-circuit if @q is dead | 3269 | * Short-circuit if @q is dead |
3244 | */ | 3270 | */ |
3245 | if (unlikely(blk_queue_dying(q))) { | 3271 | if (unlikely(blk_queue_dying(q))) { |
3246 | __blk_end_request_all(rq, -ENODEV); | 3272 | __blk_end_request_all(rq, BLK_STS_IOERR); |
3247 | continue; | 3273 | continue; |
3248 | } | 3274 | } |
3249 | 3275 | ||
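
The hunks above introduce blk_errors[], errno_to_blk_status() and blk_status_to_errno(), so the errno and blk_status_t domains now meet in exactly one place on each side: drivers and stacking layers pass blk_status_t around, and only req_bio_endio() converts back to a negative errno for the bio. A minimal sketch of a driver completion path built on these helpers; the foo_* names and the hw_result field are hypothetical, used only to illustrate the intended usage, and are not part of this series:

#include <linux/blkdev.h>
#include <linux/blk-mq.h>

/* Hypothetical per-command state for a blk-mq driver (illustration only). */
struct foo_cmd {
	struct request *rq;
	int hw_result;		/* negative errno reported by the hardware layer */
};

/* Translate the driver's errno into a blk_status_t once, then complete. */
static void foo_complete(struct foo_cmd *cmd)
{
	blk_status_t status = errno_to_blk_status(cmd->hw_result);

	blk_mq_end_request(cmd->rq, status);	/* now takes blk_status_t */
}

Any errno that has no entry in blk_errors[] collapses to BLK_STS_IOERR, which is what the catch-all entry at the end of the table is for.
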
diff --git a/block/blk-exec.c b/block/blk-exec.c index a9451e3b8587..5c0f3dc446dc 100644 --- a/block/blk-exec.c +++ b/block/blk-exec.c | |||
@@ -16,7 +16,7 @@ | |||
16 | * @rq: request to complete | 16 | * @rq: request to complete |
17 | * @error: end I/O status of the request | 17 | * @error: end I/O status of the request |
18 | */ | 18 | */ |
19 | static void blk_end_sync_rq(struct request *rq, int error) | 19 | static void blk_end_sync_rq(struct request *rq, blk_status_t error) |
20 | { | 20 | { |
21 | struct completion *waiting = rq->end_io_data; | 21 | struct completion *waiting = rq->end_io_data; |
22 | 22 | ||
@@ -69,7 +69,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk, | |||
69 | 69 | ||
70 | if (unlikely(blk_queue_dying(q))) { | 70 | if (unlikely(blk_queue_dying(q))) { |
71 | rq->rq_flags |= RQF_QUIET; | 71 | rq->rq_flags |= RQF_QUIET; |
72 | __blk_end_request_all(rq, -ENXIO); | 72 | __blk_end_request_all(rq, BLK_STS_IOERR); |
73 | spin_unlock_irq(q->queue_lock); | 73 | spin_unlock_irq(q->queue_lock); |
74 | return; | 74 | return; |
75 | } | 75 | } |
diff --git a/block/blk-flush.c b/block/blk-flush.c index c4e0880b54bb..a572b47fa059 100644 --- a/block/blk-flush.c +++ b/block/blk-flush.c | |||
@@ -164,7 +164,7 @@ static bool blk_flush_queue_rq(struct request *rq, bool add_front) | |||
164 | */ | 164 | */ |
165 | static bool blk_flush_complete_seq(struct request *rq, | 165 | static bool blk_flush_complete_seq(struct request *rq, |
166 | struct blk_flush_queue *fq, | 166 | struct blk_flush_queue *fq, |
167 | unsigned int seq, int error) | 167 | unsigned int seq, blk_status_t error) |
168 | { | 168 | { |
169 | struct request_queue *q = rq->q; | 169 | struct request_queue *q = rq->q; |
170 | struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx]; | 170 | struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx]; |
@@ -216,7 +216,7 @@ static bool blk_flush_complete_seq(struct request *rq, | |||
216 | return kicked | queued; | 216 | return kicked | queued; |
217 | } | 217 | } |
218 | 218 | ||
219 | static void flush_end_io(struct request *flush_rq, int error) | 219 | static void flush_end_io(struct request *flush_rq, blk_status_t error) |
220 | { | 220 | { |
221 | struct request_queue *q = flush_rq->q; | 221 | struct request_queue *q = flush_rq->q; |
222 | struct list_head *running; | 222 | struct list_head *running; |
@@ -341,7 +341,7 @@ static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq) | |||
341 | return blk_flush_queue_rq(flush_rq, false); | 341 | return blk_flush_queue_rq(flush_rq, false); |
342 | } | 342 | } |
343 | 343 | ||
344 | static void flush_data_end_io(struct request *rq, int error) | 344 | static void flush_data_end_io(struct request *rq, blk_status_t error) |
345 | { | 345 | { |
346 | struct request_queue *q = rq->q; | 346 | struct request_queue *q = rq->q; |
347 | struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL); | 347 | struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL); |
@@ -382,7 +382,7 @@ static void flush_data_end_io(struct request *rq, int error) | |||
382 | blk_run_queue_async(q); | 382 | blk_run_queue_async(q); |
383 | } | 383 | } |
384 | 384 | ||
385 | static void mq_flush_data_end_io(struct request *rq, int error) | 385 | static void mq_flush_data_end_io(struct request *rq, blk_status_t error) |
386 | { | 386 | { |
387 | struct request_queue *q = rq->q; | 387 | struct request_queue *q = rq->q; |
388 | struct blk_mq_hw_ctx *hctx; | 388 | struct blk_mq_hw_ctx *hctx; |
diff --git a/block/blk-mq.c b/block/blk-mq.c index 22438d5036a3..adcc1c0dce6e 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c | |||
@@ -394,7 +394,7 @@ void blk_mq_free_request(struct request *rq) | |||
394 | } | 394 | } |
395 | EXPORT_SYMBOL_GPL(blk_mq_free_request); | 395 | EXPORT_SYMBOL_GPL(blk_mq_free_request); |
396 | 396 | ||
397 | inline void __blk_mq_end_request(struct request *rq, int error) | 397 | inline void __blk_mq_end_request(struct request *rq, blk_status_t error) |
398 | { | 398 | { |
399 | blk_account_io_done(rq); | 399 | blk_account_io_done(rq); |
400 | 400 | ||
@@ -409,7 +409,7 @@ inline void __blk_mq_end_request(struct request *rq, int error) | |||
409 | } | 409 | } |
410 | EXPORT_SYMBOL(__blk_mq_end_request); | 410 | EXPORT_SYMBOL(__blk_mq_end_request); |
411 | 411 | ||
412 | void blk_mq_end_request(struct request *rq, int error) | 412 | void blk_mq_end_request(struct request *rq, blk_status_t error) |
413 | { | 413 | { |
414 | if (blk_update_request(rq, error, blk_rq_bytes(rq))) | 414 | if (blk_update_request(rq, error, blk_rq_bytes(rq))) |
415 | BUG(); | 415 | BUG(); |
@@ -988,7 +988,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list) | |||
988 | pr_err("blk-mq: bad return on queue: %d\n", ret); | 988 | pr_err("blk-mq: bad return on queue: %d\n", ret); |
989 | case BLK_MQ_RQ_QUEUE_ERROR: | 989 | case BLK_MQ_RQ_QUEUE_ERROR: |
990 | errors++; | 990 | errors++; |
991 | blk_mq_end_request(rq, -EIO); | 991 | blk_mq_end_request(rq, BLK_STS_IOERR); |
992 | break; | 992 | break; |
993 | } | 993 | } |
994 | 994 | ||
@@ -1433,7 +1433,7 @@ static void __blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie, | |||
1433 | 1433 | ||
1434 | if (ret == BLK_MQ_RQ_QUEUE_ERROR) { | 1434 | if (ret == BLK_MQ_RQ_QUEUE_ERROR) { |
1435 | *cookie = BLK_QC_T_NONE; | 1435 | *cookie = BLK_QC_T_NONE; |
1436 | blk_mq_end_request(rq, -EIO); | 1436 | blk_mq_end_request(rq, BLK_STS_IOERR); |
1437 | return; | 1437 | return; |
1438 | } | 1438 | } |
1439 | 1439 | ||
diff --git a/block/bsg-lib.c b/block/bsg-lib.c index 9b91daefcd9b..c4513b23f57a 100644 --- a/block/bsg-lib.c +++ b/block/bsg-lib.c | |||
@@ -37,7 +37,7 @@ static void bsg_destroy_job(struct kref *kref) | |||
37 | struct bsg_job *job = container_of(kref, struct bsg_job, kref); | 37 | struct bsg_job *job = container_of(kref, struct bsg_job, kref); |
38 | struct request *rq = job->req; | 38 | struct request *rq = job->req; |
39 | 39 | ||
40 | blk_end_request_all(rq, scsi_req(rq)->result); | 40 | blk_end_request_all(rq, BLK_STS_OK); |
41 | 41 | ||
42 | put_device(job->dev); /* release reference for the request */ | 42 | put_device(job->dev); /* release reference for the request */ |
43 | 43 | ||
@@ -202,7 +202,7 @@ static void bsg_request_fn(struct request_queue *q) | |||
202 | ret = bsg_create_job(dev, req); | 202 | ret = bsg_create_job(dev, req); |
203 | if (ret) { | 203 | if (ret) { |
204 | scsi_req(req)->result = ret; | 204 | scsi_req(req)->result = ret; |
205 | blk_end_request_all(req, ret); | 205 | blk_end_request_all(req, BLK_STS_OK); |
206 | spin_lock_irq(q->queue_lock); | 206 | spin_lock_irq(q->queue_lock); |
207 | continue; | 207 | continue; |
208 | } | 208 | } |
diff --git a/block/bsg.c b/block/bsg.c index 40db8ff4c618..59d02dd31b0c 100644 --- a/block/bsg.c +++ b/block/bsg.c | |||
@@ -294,14 +294,14 @@ out: | |||
294 | * async completion call-back from the block layer, when scsi/ide/whatever | 294 | * async completion call-back from the block layer, when scsi/ide/whatever |
295 | * calls end_that_request_last() on a request | 295 | * calls end_that_request_last() on a request |
296 | */ | 296 | */ |
297 | static void bsg_rq_end_io(struct request *rq, int uptodate) | 297 | static void bsg_rq_end_io(struct request *rq, blk_status_t status) |
298 | { | 298 | { |
299 | struct bsg_command *bc = rq->end_io_data; | 299 | struct bsg_command *bc = rq->end_io_data; |
300 | struct bsg_device *bd = bc->bd; | 300 | struct bsg_device *bd = bc->bd; |
301 | unsigned long flags; | 301 | unsigned long flags; |
302 | 302 | ||
303 | dprintk("%s: finished rq %p bc %p, bio %p stat %d\n", | 303 | dprintk("%s: finished rq %p bc %p, bio %p\n", |
304 | bd->name, rq, bc, bc->bio, uptodate); | 304 | bd->name, rq, bc, bc->bio); |
305 | 305 | ||
306 | bc->hdr.duration = jiffies_to_msecs(jiffies - bc->hdr.duration); | 306 | bc->hdr.duration = jiffies_to_msecs(jiffies - bc->hdr.duration); |
307 | 307 | ||
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c index 26a51be77227..245a879b036e 100644 --- a/drivers/block/DAC960.c +++ b/drivers/block/DAC960.c | |||
@@ -3464,7 +3464,7 @@ static inline bool DAC960_ProcessCompletedRequest(DAC960_Command_T *Command, | |||
3464 | bool SuccessfulIO) | 3464 | bool SuccessfulIO) |
3465 | { | 3465 | { |
3466 | struct request *Request = Command->Request; | 3466 | struct request *Request = Command->Request; |
3467 | int Error = SuccessfulIO ? 0 : -EIO; | 3467 | blk_status_t Error = SuccessfulIO ? BLK_STS_OK : BLK_STS_IOERR; |
3468 | 3468 | ||
3469 | pci_unmap_sg(Command->Controller->PCIDevice, Command->cmd_sglist, | 3469 | pci_unmap_sg(Command->Controller->PCIDevice, Command->cmd_sglist, |
3470 | Command->SegmentCount, Command->DmaDirection); | 3470 | Command->SegmentCount, Command->DmaDirection); |
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c index a328f673adfe..49908c74bfcb 100644 --- a/drivers/block/amiflop.c +++ b/drivers/block/amiflop.c | |||
@@ -1378,7 +1378,7 @@ static void redo_fd_request(void) | |||
1378 | struct amiga_floppy_struct *floppy; | 1378 | struct amiga_floppy_struct *floppy; |
1379 | char *data; | 1379 | char *data; |
1380 | unsigned long flags; | 1380 | unsigned long flags; |
1381 | int err; | 1381 | blk_status_t err; |
1382 | 1382 | ||
1383 | next_req: | 1383 | next_req: |
1384 | rq = set_next_request(); | 1384 | rq = set_next_request(); |
@@ -1392,7 +1392,7 @@ next_req: | |||
1392 | 1392 | ||
1393 | next_segment: | 1393 | next_segment: |
1394 | /* Here someone could investigate to be more efficient */ | 1394 | /* Here someone could investigate to be more efficient */ |
1395 | for (cnt = 0, err = 0; cnt < blk_rq_cur_sectors(rq); cnt++) { | 1395 | for (cnt = 0, err = BLK_STS_OK; cnt < blk_rq_cur_sectors(rq); cnt++) { |
1396 | #ifdef DEBUG | 1396 | #ifdef DEBUG |
1397 | printk("fd: sector %ld + %d requested for %s\n", | 1397 | printk("fd: sector %ld + %d requested for %s\n", |
1398 | blk_rq_pos(rq), cnt, | 1398 | blk_rq_pos(rq), cnt, |
@@ -1400,7 +1400,7 @@ next_segment: | |||
1400 | #endif | 1400 | #endif |
1401 | block = blk_rq_pos(rq) + cnt; | 1401 | block = blk_rq_pos(rq) + cnt; |
1402 | if ((int)block > floppy->blocks) { | 1402 | if ((int)block > floppy->blocks) { |
1403 | err = -EIO; | 1403 | err = BLK_STS_IOERR; |
1404 | break; | 1404 | break; |
1405 | } | 1405 | } |
1406 | 1406 | ||
@@ -1413,7 +1413,7 @@ next_segment: | |||
1413 | #endif | 1413 | #endif |
1414 | 1414 | ||
1415 | if (get_track(drive, track) == -1) { | 1415 | if (get_track(drive, track) == -1) { |
1416 | err = -EIO; | 1416 | err = BLK_STS_IOERR; |
1417 | break; | 1417 | break; |
1418 | } | 1418 | } |
1419 | 1419 | ||
@@ -1424,7 +1424,7 @@ next_segment: | |||
1424 | 1424 | ||
1425 | /* keep the drive spinning while writes are scheduled */ | 1425 | /* keep the drive spinning while writes are scheduled */ |
1426 | if (!fd_motor_on(drive)) { | 1426 | if (!fd_motor_on(drive)) { |
1427 | err = -EIO; | 1427 | err = BLK_STS_IOERR; |
1428 | break; | 1428 | break; |
1429 | } | 1429 | } |
1430 | /* | 1430 | /* |
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c index 3c606c09fd5a..5bf0c9d21fc1 100644 --- a/drivers/block/aoe/aoecmd.c +++ b/drivers/block/aoe/aoecmd.c | |||
@@ -1071,7 +1071,7 @@ aoe_end_request(struct aoedev *d, struct request *rq, int fastfail) | |||
1071 | do { | 1071 | do { |
1072 | bio = rq->bio; | 1072 | bio = rq->bio; |
1073 | bok = !fastfail && !bio->bi_error; | 1073 | bok = !fastfail && !bio->bi_error; |
1074 | } while (__blk_end_request(rq, bok ? 0 : -EIO, bio->bi_iter.bi_size)); | 1074 | } while (__blk_end_request(rq, bok ? BLK_STS_OK : BLK_STS_IOERR, bio->bi_iter.bi_size)); |
1075 | 1075 | ||
1076 | /* cf. http://lkml.org/lkml/2006/10/31/28 */ | 1076 | /* cf. http://lkml.org/lkml/2006/10/31/28 */ |
1077 | if (!fastfail) | 1077 | if (!fastfail) |
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c index fa69ecd52cb5..92da886180aa 100644 --- a/drivers/block/ataflop.c +++ b/drivers/block/ataflop.c | |||
@@ -378,7 +378,7 @@ static DEFINE_TIMER(readtrack_timer, fd_readtrack_check, 0, 0); | |||
378 | static DEFINE_TIMER(timeout_timer, fd_times_out, 0, 0); | 378 | static DEFINE_TIMER(timeout_timer, fd_times_out, 0, 0); |
379 | static DEFINE_TIMER(fd_timer, check_change, 0, 0); | 379 | static DEFINE_TIMER(fd_timer, check_change, 0, 0); |
380 | 380 | ||
381 | static void fd_end_request_cur(int err) | 381 | static void fd_end_request_cur(blk_status_t err) |
382 | { | 382 | { |
383 | if (!__blk_end_request_cur(fd_request, err)) | 383 | if (!__blk_end_request_cur(fd_request, err)) |
384 | fd_request = NULL; | 384 | fd_request = NULL; |
@@ -620,7 +620,7 @@ static void fd_error( void ) | |||
620 | fd_request->error_count++; | 620 | fd_request->error_count++; |
621 | if (fd_request->error_count >= MAX_ERRORS) { | 621 | if (fd_request->error_count >= MAX_ERRORS) { |
622 | printk(KERN_ERR "fd%d: too many errors.\n", SelectedDrive ); | 622 | printk(KERN_ERR "fd%d: too many errors.\n", SelectedDrive ); |
623 | fd_end_request_cur(-EIO); | 623 | fd_end_request_cur(BLK_STS_IOERR); |
624 | } | 624 | } |
625 | else if (fd_request->error_count == RECALIBRATE_ERRORS) { | 625 | else if (fd_request->error_count == RECALIBRATE_ERRORS) { |
626 | printk(KERN_WARNING "fd%d: recalibrating\n", SelectedDrive ); | 626 | printk(KERN_WARNING "fd%d: recalibrating\n", SelectedDrive ); |
@@ -739,7 +739,7 @@ static void do_fd_action( int drive ) | |||
739 | } | 739 | } |
740 | else { | 740 | else { |
741 | /* all sectors finished */ | 741 | /* all sectors finished */ |
742 | fd_end_request_cur(0); | 742 | fd_end_request_cur(BLK_STS_OK); |
743 | redo_fd_request(); | 743 | redo_fd_request(); |
744 | return; | 744 | return; |
745 | } | 745 | } |
@@ -1144,7 +1144,7 @@ static void fd_rwsec_done1(int status) | |||
1144 | } | 1144 | } |
1145 | else { | 1145 | else { |
1146 | /* all sectors finished */ | 1146 | /* all sectors finished */ |
1147 | fd_end_request_cur(0); | 1147 | fd_end_request_cur(BLK_STS_OK); |
1148 | redo_fd_request(); | 1148 | redo_fd_request(); |
1149 | } | 1149 | } |
1150 | return; | 1150 | return; |
@@ -1445,7 +1445,7 @@ repeat: | |||
1445 | if (!UD.connected) { | 1445 | if (!UD.connected) { |
1446 | /* drive not connected */ | 1446 | /* drive not connected */ |
1447 | printk(KERN_ERR "Unknown Device: fd%d\n", drive ); | 1447 | printk(KERN_ERR "Unknown Device: fd%d\n", drive ); |
1448 | fd_end_request_cur(-EIO); | 1448 | fd_end_request_cur(BLK_STS_IOERR); |
1449 | goto repeat; | 1449 | goto repeat; |
1450 | } | 1450 | } |
1451 | 1451 | ||
@@ -1461,12 +1461,12 @@ repeat: | |||
1461 | /* user supplied disk type */ | 1461 | /* user supplied disk type */ |
1462 | if (--type >= NUM_DISK_MINORS) { | 1462 | if (--type >= NUM_DISK_MINORS) { |
1463 | printk(KERN_WARNING "fd%d: invalid disk format", drive ); | 1463 | printk(KERN_WARNING "fd%d: invalid disk format", drive ); |
1464 | fd_end_request_cur(-EIO); | 1464 | fd_end_request_cur(BLK_STS_IOERR); |
1465 | goto repeat; | 1465 | goto repeat; |
1466 | } | 1466 | } |
1467 | if (minor2disktype[type].drive_types > DriveType) { | 1467 | if (minor2disktype[type].drive_types > DriveType) { |
1468 | printk(KERN_WARNING "fd%d: unsupported disk format", drive ); | 1468 | printk(KERN_WARNING "fd%d: unsupported disk format", drive ); |
1469 | fd_end_request_cur(-EIO); | 1469 | fd_end_request_cur(BLK_STS_IOERR); |
1470 | goto repeat; | 1470 | goto repeat; |
1471 | } | 1471 | } |
1472 | type = minor2disktype[type].index; | 1472 | type = minor2disktype[type].index; |
@@ -1476,7 +1476,7 @@ repeat: | |||
1476 | } | 1476 | } |
1477 | 1477 | ||
1478 | if (blk_rq_pos(fd_request) + 1 > UDT->blocks) { | 1478 | if (blk_rq_pos(fd_request) + 1 > UDT->blocks) { |
1479 | fd_end_request_cur(-EIO); | 1479 | fd_end_request_cur(BLK_STS_IOERR); |
1480 | goto repeat; | 1480 | goto repeat; |
1481 | } | 1481 | } |
1482 | 1482 | ||
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c index 3761066fe89d..02a611993bb4 100644 --- a/drivers/block/cciss.c +++ b/drivers/block/cciss.c | |||
@@ -1864,7 +1864,8 @@ static void cciss_softirq_done(struct request *rq) | |||
1864 | /* set the residual count for pc requests */ | 1864 | /* set the residual count for pc requests */ |
1865 | if (blk_rq_is_passthrough(rq)) | 1865 | if (blk_rq_is_passthrough(rq)) |
1866 | scsi_req(rq)->resid_len = c->err_info->ResidualCnt; | 1866 | scsi_req(rq)->resid_len = c->err_info->ResidualCnt; |
1867 | blk_end_request_all(rq, scsi_req(rq)->result ? -EIO : 0); | 1867 | blk_end_request_all(rq, scsi_req(rq)->result ? |
1868 | BLK_STS_IOERR : BLK_STS_OK); | ||
1868 | 1869 | ||
1869 | spin_lock_irqsave(&h->lock, flags); | 1870 | spin_lock_irqsave(&h->lock, flags); |
1870 | cmd_free(h, c); | 1871 | cmd_free(h, c); |
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index 60d4c7653178..cc75a5176057 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c | |||
@@ -2202,7 +2202,7 @@ static int do_format(int drive, struct format_descr *tmp_format_req) | |||
2202 | * ============================= | 2202 | * ============================= |
2203 | */ | 2203 | */ |
2204 | 2204 | ||
2205 | static void floppy_end_request(struct request *req, int error) | 2205 | static void floppy_end_request(struct request *req, blk_status_t error) |
2206 | { | 2206 | { |
2207 | unsigned int nr_sectors = current_count_sectors; | 2207 | unsigned int nr_sectors = current_count_sectors; |
2208 | unsigned int drive = (unsigned long)req->rq_disk->private_data; | 2208 | unsigned int drive = (unsigned long)req->rq_disk->private_data; |
@@ -2263,7 +2263,7 @@ static void request_done(int uptodate) | |||
2263 | DRWE->last_error_generation = DRS->generation; | 2263 | DRWE->last_error_generation = DRS->generation; |
2264 | } | 2264 | } |
2265 | spin_lock_irqsave(q->queue_lock, flags); | 2265 | spin_lock_irqsave(q->queue_lock, flags); |
2266 | floppy_end_request(req, -EIO); | 2266 | floppy_end_request(req, BLK_STS_IOERR); |
2267 | spin_unlock_irqrestore(q->queue_lock, flags); | 2267 | spin_unlock_irqrestore(q->queue_lock, flags); |
2268 | } | 2268 | } |
2269 | } | 2269 | } |
diff --git a/drivers/block/loop.c b/drivers/block/loop.c index e288fb30100f..4caf6338c012 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c | |||
@@ -464,7 +464,7 @@ static void lo_complete_rq(struct request *rq) | |||
464 | zero_fill_bio(bio); | 464 | zero_fill_bio(bio); |
465 | } | 465 | } |
466 | 466 | ||
467 | blk_mq_end_request(rq, cmd->ret < 0 ? -EIO : 0); | 467 | blk_mq_end_request(rq, cmd->ret < 0 ? BLK_STS_IOERR : BLK_STS_OK); |
468 | } | 468 | } |
469 | 469 | ||
470 | static void lo_rw_aio_complete(struct kiocb *iocb, long ret, long ret2) | 470 | static void lo_rw_aio_complete(struct kiocb *iocb, long ret, long ret2) |
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c index 3a779a4f5653..ee6f66bb50c7 100644 --- a/drivers/block/mtip32xx/mtip32xx.c +++ b/drivers/block/mtip32xx/mtip32xx.c | |||
@@ -532,7 +532,7 @@ static int mtip_read_log_page(struct mtip_port *port, u8 page, u16 *buffer, | |||
532 | static int mtip_get_smart_attr(struct mtip_port *port, unsigned int id, | 532 | static int mtip_get_smart_attr(struct mtip_port *port, unsigned int id, |
533 | struct smart_attr *attrib); | 533 | struct smart_attr *attrib); |
534 | 534 | ||
535 | static void mtip_complete_command(struct mtip_cmd *cmd, int status) | 535 | static void mtip_complete_command(struct mtip_cmd *cmd, blk_status_t status) |
536 | { | 536 | { |
537 | struct request *req = blk_mq_rq_from_pdu(cmd); | 537 | struct request *req = blk_mq_rq_from_pdu(cmd); |
538 | 538 | ||
@@ -568,7 +568,7 @@ static void mtip_handle_tfe(struct driver_data *dd) | |||
568 | if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) { | 568 | if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) { |
569 | cmd = mtip_cmd_from_tag(dd, MTIP_TAG_INTERNAL); | 569 | cmd = mtip_cmd_from_tag(dd, MTIP_TAG_INTERNAL); |
570 | dbg_printk(MTIP_DRV_NAME " TFE for the internal command\n"); | 570 | dbg_printk(MTIP_DRV_NAME " TFE for the internal command\n"); |
571 | mtip_complete_command(cmd, -EIO); | 571 | mtip_complete_command(cmd, BLK_STS_IOERR); |
572 | return; | 572 | return; |
573 | } | 573 | } |
574 | 574 | ||
@@ -667,7 +667,7 @@ static void mtip_handle_tfe(struct driver_data *dd) | |||
667 | tag, | 667 | tag, |
668 | fail_reason != NULL ? | 668 | fail_reason != NULL ? |
669 | fail_reason : "unknown"); | 669 | fail_reason : "unknown"); |
670 | mtip_complete_command(cmd, -ENODATA); | 670 | mtip_complete_command(cmd, BLK_STS_MEDIUM); |
671 | continue; | 671 | continue; |
672 | } | 672 | } |
673 | } | 673 | } |
@@ -690,7 +690,7 @@ static void mtip_handle_tfe(struct driver_data *dd) | |||
690 | dev_warn(&port->dd->pdev->dev, | 690 | dev_warn(&port->dd->pdev->dev, |
691 | "retiring tag %d\n", tag); | 691 | "retiring tag %d\n", tag); |
692 | 692 | ||
693 | mtip_complete_command(cmd, -EIO); | 693 | mtip_complete_command(cmd, BLK_STS_IOERR); |
694 | } | 694 | } |
695 | } | 695 | } |
696 | print_tags(dd, "reissued (TFE)", tagaccum, cmd_cnt); | 696 | print_tags(dd, "reissued (TFE)", tagaccum, cmd_cnt); |
@@ -2753,7 +2753,7 @@ static void mtip_abort_cmd(struct request *req, void *data, | |||
2753 | dbg_printk(MTIP_DRV_NAME " Aborting request, tag = %d\n", req->tag); | 2753 | dbg_printk(MTIP_DRV_NAME " Aborting request, tag = %d\n", req->tag); |
2754 | 2754 | ||
2755 | clear_bit(req->tag, dd->port->cmds_to_issue); | 2755 | clear_bit(req->tag, dd->port->cmds_to_issue); |
2756 | cmd->status = -EIO; | 2756 | cmd->status = BLK_STS_IOERR; |
2757 | mtip_softirq_done_fn(req); | 2757 | mtip_softirq_done_fn(req); |
2758 | } | 2758 | } |
2759 | 2759 | ||
@@ -3597,7 +3597,7 @@ static int mtip_submit_request(struct blk_mq_hw_ctx *hctx, struct request *rq) | |||
3597 | int err; | 3597 | int err; |
3598 | 3598 | ||
3599 | err = mtip_send_trim(dd, blk_rq_pos(rq), blk_rq_sectors(rq)); | 3599 | err = mtip_send_trim(dd, blk_rq_pos(rq), blk_rq_sectors(rq)); |
3600 | blk_mq_end_request(rq, err); | 3600 | blk_mq_end_request(rq, err ? BLK_STS_IOERR : BLK_STS_OK); |
3601 | return 0; | 3601 | return 0; |
3602 | } | 3602 | } |
3603 | 3603 | ||
@@ -3730,7 +3730,7 @@ static enum blk_eh_timer_return mtip_cmd_timeout(struct request *req, | |||
3730 | if (reserved) { | 3730 | if (reserved) { |
3731 | struct mtip_cmd *cmd = blk_mq_rq_to_pdu(req); | 3731 | struct mtip_cmd *cmd = blk_mq_rq_to_pdu(req); |
3732 | 3732 | ||
3733 | cmd->status = -ETIME; | 3733 | cmd->status = BLK_STS_TIMEOUT; |
3734 | return BLK_EH_HANDLED; | 3734 | return BLK_EH_HANDLED; |
3735 | } | 3735 | } |
3736 | 3736 | ||
@@ -3961,7 +3961,7 @@ static void mtip_no_dev_cleanup(struct request *rq, void *data, bool reserv) | |||
3961 | { | 3961 | { |
3962 | struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq); | 3962 | struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq); |
3963 | 3963 | ||
3964 | cmd->status = -ENODEV; | 3964 | cmd->status = BLK_STS_IOERR; |
3965 | blk_mq_complete_request(rq); | 3965 | blk_mq_complete_request(rq); |
3966 | } | 3966 | } |
3967 | 3967 | ||
diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h index 37b8e3e0bb78..e8286af50e16 100644 --- a/drivers/block/mtip32xx/mtip32xx.h +++ b/drivers/block/mtip32xx/mtip32xx.h | |||
@@ -342,7 +342,7 @@ struct mtip_cmd { | |||
342 | int retries; /* The number of retries left for this command. */ | 342 | int retries; /* The number of retries left for this command. */ |
343 | 343 | ||
344 | int direction; /* Data transfer direction */ | 344 | int direction; /* Data transfer direction */ |
345 | int status; | 345 | blk_status_t status; |
346 | }; | 346 | }; |
347 | 347 | ||
348 | /* Structure used to describe a port. */ | 348 | /* Structure used to describe a port. */ |
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 6de9f9943a0e..978d2d2d08d6 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c | |||
@@ -116,7 +116,7 @@ struct nbd_cmd { | |||
116 | int index; | 116 | int index; |
117 | int cookie; | 117 | int cookie; |
118 | struct completion send_complete; | 118 | struct completion send_complete; |
119 | int status; | 119 | blk_status_t status; |
120 | }; | 120 | }; |
121 | 121 | ||
122 | #if IS_ENABLED(CONFIG_DEBUG_FS) | 122 | #if IS_ENABLED(CONFIG_DEBUG_FS) |
@@ -286,7 +286,7 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req, | |||
286 | struct nbd_config *config; | 286 | struct nbd_config *config; |
287 | 287 | ||
288 | if (!refcount_inc_not_zero(&nbd->config_refs)) { | 288 | if (!refcount_inc_not_zero(&nbd->config_refs)) { |
289 | cmd->status = -EIO; | 289 | cmd->status = BLK_STS_TIMEOUT; |
290 | return BLK_EH_HANDLED; | 290 | return BLK_EH_HANDLED; |
291 | } | 291 | } |
292 | 292 | ||
@@ -331,7 +331,7 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req, | |||
331 | "Connection timed out\n"); | 331 | "Connection timed out\n"); |
332 | } | 332 | } |
333 | set_bit(NBD_TIMEDOUT, &config->runtime_flags); | 333 | set_bit(NBD_TIMEDOUT, &config->runtime_flags); |
334 | cmd->status = -EIO; | 334 | cmd->status = BLK_STS_IOERR; |
335 | sock_shutdown(nbd); | 335 | sock_shutdown(nbd); |
336 | nbd_config_put(nbd); | 336 | nbd_config_put(nbd); |
337 | 337 | ||
@@ -578,7 +578,7 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index) | |||
578 | if (ntohl(reply.error)) { | 578 | if (ntohl(reply.error)) { |
579 | dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n", | 579 | dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n", |
580 | ntohl(reply.error)); | 580 | ntohl(reply.error)); |
581 | cmd->status = -EIO; | 581 | cmd->status = BLK_STS_IOERR; |
582 | return cmd; | 582 | return cmd; |
583 | } | 583 | } |
584 | 584 | ||
@@ -603,7 +603,7 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index) | |||
603 | */ | 603 | */ |
604 | if (nbd_disconnected(config) || | 604 | if (nbd_disconnected(config) || |
605 | config->num_connections <= 1) { | 605 | config->num_connections <= 1) { |
606 | cmd->status = -EIO; | 606 | cmd->status = BLK_STS_IOERR; |
607 | return cmd; | 607 | return cmd; |
608 | } | 608 | } |
609 | return ERR_PTR(-EIO); | 609 | return ERR_PTR(-EIO); |
@@ -655,7 +655,7 @@ static void nbd_clear_req(struct request *req, void *data, bool reserved) | |||
655 | if (!blk_mq_request_started(req)) | 655 | if (!blk_mq_request_started(req)) |
656 | return; | 656 | return; |
657 | cmd = blk_mq_rq_to_pdu(req); | 657 | cmd = blk_mq_rq_to_pdu(req); |
658 | cmd->status = -EIO; | 658 | cmd->status = BLK_STS_IOERR; |
659 | blk_mq_complete_request(req); | 659 | blk_mq_complete_request(req); |
660 | } | 660 | } |
661 | 661 | ||
@@ -744,7 +744,7 @@ static int nbd_handle_cmd(struct nbd_cmd *cmd, int index) | |||
744 | nbd_config_put(nbd); | 744 | nbd_config_put(nbd); |
745 | return -EINVAL; | 745 | return -EINVAL; |
746 | } | 746 | } |
747 | cmd->status = 0; | 747 | cmd->status = BLK_STS_OK; |
748 | again: | 748 | again: |
749 | nsock = config->socks[index]; | 749 | nsock = config->socks[index]; |
750 | mutex_lock(&nsock->tx_lock); | 750 | mutex_lock(&nsock->tx_lock); |
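
The nbd and mtip32xx changes above follow the same pattern for blk-mq drivers that complete out of line: the per-command status field changes type from int to blk_status_t, is recorded wherever the error is detected, and is consumed only in the softirq completion handler. A rough sketch of that pattern, assuming a similar driver-private command structure; the foo_* names are made up for illustration:

#include <linux/blk-mq.h>
#include <linux/blk_types.h>

/* Hypothetical driver-private command data. */
struct foo_cmd {
	blk_status_t status;	/* was: int status */
};

/* Error detection context: record the status and kick completion. */
static void foo_abort(struct request *rq)
{
	struct foo_cmd *cmd = blk_mq_rq_to_pdu(rq);

	cmd->status = BLK_STS_IOERR;
	blk_mq_complete_request(rq);
}

/* Softirq completion handler: end the request with the recorded status. */
static void foo_softirq_done(struct request *rq)
{
	struct foo_cmd *cmd = blk_mq_rq_to_pdu(rq);

	blk_mq_end_request(rq, cmd->status);
}
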
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index d946e1eeac8e..e6b81d370882 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c | |||
@@ -229,11 +229,11 @@ static void end_cmd(struct nullb_cmd *cmd) | |||
229 | 229 | ||
230 | switch (queue_mode) { | 230 | switch (queue_mode) { |
231 | case NULL_Q_MQ: | 231 | case NULL_Q_MQ: |
232 | blk_mq_end_request(cmd->rq, 0); | 232 | blk_mq_end_request(cmd->rq, BLK_STS_OK); |
233 | return; | 233 | return; |
234 | case NULL_Q_RQ: | 234 | case NULL_Q_RQ: |
235 | INIT_LIST_HEAD(&cmd->rq->queuelist); | 235 | INIT_LIST_HEAD(&cmd->rq->queuelist); |
236 | blk_end_request_all(cmd->rq, 0); | 236 | blk_end_request_all(cmd->rq, BLK_STS_OK); |
237 | break; | 237 | break; |
238 | case NULL_Q_BIO: | 238 | case NULL_Q_BIO: |
239 | bio_endio(cmd->bio); | 239 | bio_endio(cmd->bio); |
@@ -422,11 +422,12 @@ static void cleanup_queues(struct nullb *nullb) | |||
422 | 422 | ||
423 | #ifdef CONFIG_NVM | 423 | #ifdef CONFIG_NVM |
424 | 424 | ||
425 | static void null_lnvm_end_io(struct request *rq, int error) | 425 | static void null_lnvm_end_io(struct request *rq, blk_status_t status) |
426 | { | 426 | { |
427 | struct nvm_rq *rqd = rq->end_io_data; | 427 | struct nvm_rq *rqd = rq->end_io_data; |
428 | 428 | ||
429 | rqd->error = error; | 429 | /* XXX: lighnvm core seems to expect NVM_RSP_* values here.. */ |
430 | rqd->error = status ? -EIO : 0; | ||
430 | nvm_end_io(rqd); | 431 | nvm_end_io(rqd); |
431 | 432 | ||
432 | blk_put_request(rq); | 433 | blk_put_request(rq); |
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c index b1267ef34d5a..cffe42d80ce9 100644 --- a/drivers/block/paride/pcd.c +++ b/drivers/block/paride/pcd.c | |||
@@ -783,7 +783,7 @@ static void pcd_request(void) | |||
783 | ps_set_intr(do_pcd_read, NULL, 0, nice); | 783 | ps_set_intr(do_pcd_read, NULL, 0, nice); |
784 | return; | 784 | return; |
785 | } else { | 785 | } else { |
786 | __blk_end_request_all(pcd_req, -EIO); | 786 | __blk_end_request_all(pcd_req, BLK_STS_IOERR); |
787 | pcd_req = NULL; | 787 | pcd_req = NULL; |
788 | } | 788 | } |
789 | } | 789 | } |
@@ -794,7 +794,7 @@ static void do_pcd_request(struct request_queue *q) | |||
794 | pcd_request(); | 794 | pcd_request(); |
795 | } | 795 | } |
796 | 796 | ||
797 | static inline void next_request(int err) | 797 | static inline void next_request(blk_status_t err) |
798 | { | 798 | { |
799 | unsigned long saved_flags; | 799 | unsigned long saved_flags; |
800 | 800 | ||
@@ -837,7 +837,7 @@ static void pcd_start(void) | |||
837 | 837 | ||
838 | if (pcd_command(pcd_current, rd_cmd, 2048, "read block")) { | 838 | if (pcd_command(pcd_current, rd_cmd, 2048, "read block")) { |
839 | pcd_bufblk = -1; | 839 | pcd_bufblk = -1; |
840 | next_request(-EIO); | 840 | next_request(BLK_STS_IOERR); |
841 | return; | 841 | return; |
842 | } | 842 | } |
843 | 843 | ||
@@ -871,7 +871,7 @@ static void do_pcd_read_drq(void) | |||
871 | return; | 871 | return; |
872 | } | 872 | } |
873 | pcd_bufblk = -1; | 873 | pcd_bufblk = -1; |
874 | next_request(-EIO); | 874 | next_request(BLK_STS_IOERR); |
875 | return; | 875 | return; |
876 | } | 876 | } |
877 | 877 | ||
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c index 7d2402f90978..c98983be4f9c 100644 --- a/drivers/block/paride/pd.c +++ b/drivers/block/paride/pd.c | |||
@@ -438,7 +438,7 @@ static void run_fsm(void) | |||
438 | phase = NULL; | 438 | phase = NULL; |
439 | spin_lock_irqsave(&pd_lock, saved_flags); | 439 | spin_lock_irqsave(&pd_lock, saved_flags); |
440 | if (!__blk_end_request_cur(pd_req, | 440 | if (!__blk_end_request_cur(pd_req, |
441 | res == Ok ? 0 : -EIO)) { | 441 | res == Ok ? 0 : BLK_STS_IOERR)) { |
442 | if (!set_next_request()) | 442 | if (!set_next_request()) |
443 | stop = 1; | 443 | stop = 1; |
444 | } | 444 | } |
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c index f24ca7315ddc..5f46da8d05cd 100644 --- a/drivers/block/paride/pf.c +++ b/drivers/block/paride/pf.c | |||
@@ -801,7 +801,7 @@ static int set_next_request(void) | |||
801 | return pf_req != NULL; | 801 | return pf_req != NULL; |
802 | } | 802 | } |
803 | 803 | ||
804 | static void pf_end_request(int err) | 804 | static void pf_end_request(blk_status_t err) |
805 | { | 805 | { |
806 | if (pf_req && !__blk_end_request_cur(pf_req, err)) | 806 | if (pf_req && !__blk_end_request_cur(pf_req, err)) |
807 | pf_req = NULL; | 807 | pf_req = NULL; |
@@ -821,7 +821,7 @@ repeat: | |||
821 | pf_count = blk_rq_cur_sectors(pf_req); | 821 | pf_count = blk_rq_cur_sectors(pf_req); |
822 | 822 | ||
823 | if (pf_block + pf_count > get_capacity(pf_req->rq_disk)) { | 823 | if (pf_block + pf_count > get_capacity(pf_req->rq_disk)) { |
824 | pf_end_request(-EIO); | 824 | pf_end_request(BLK_STS_IOERR); |
825 | goto repeat; | 825 | goto repeat; |
826 | } | 826 | } |
827 | 827 | ||
@@ -836,7 +836,7 @@ repeat: | |||
836 | pi_do_claimed(pf_current->pi, do_pf_write); | 836 | pi_do_claimed(pf_current->pi, do_pf_write); |
837 | else { | 837 | else { |
838 | pf_busy = 0; | 838 | pf_busy = 0; |
839 | pf_end_request(-EIO); | 839 | pf_end_request(BLK_STS_IOERR); |
840 | goto repeat; | 840 | goto repeat; |
841 | } | 841 | } |
842 | } | 842 | } |
@@ -868,7 +868,7 @@ static int pf_next_buf(void) | |||
868 | return 0; | 868 | return 0; |
869 | } | 869 | } |
870 | 870 | ||
871 | static inline void next_request(int err) | 871 | static inline void next_request(blk_status_t err) |
872 | { | 872 | { |
873 | unsigned long saved_flags; | 873 | unsigned long saved_flags; |
874 | 874 | ||
@@ -896,7 +896,7 @@ static void do_pf_read_start(void) | |||
896 | pi_do_claimed(pf_current->pi, do_pf_read_start); | 896 | pi_do_claimed(pf_current->pi, do_pf_read_start); |
897 | return; | 897 | return; |
898 | } | 898 | } |
899 | next_request(-EIO); | 899 | next_request(BLK_STS_IOERR); |
900 | return; | 900 | return; |
901 | } | 901 | } |
902 | pf_mask = STAT_DRQ; | 902 | pf_mask = STAT_DRQ; |
@@ -915,7 +915,7 @@ static void do_pf_read_drq(void) | |||
915 | pi_do_claimed(pf_current->pi, do_pf_read_start); | 915 | pi_do_claimed(pf_current->pi, do_pf_read_start); |
916 | return; | 916 | return; |
917 | } | 917 | } |
918 | next_request(-EIO); | 918 | next_request(BLK_STS_IOERR); |
919 | return; | 919 | return; |
920 | } | 920 | } |
921 | pi_read_block(pf_current->pi, pf_buf, 512); | 921 | pi_read_block(pf_current->pi, pf_buf, 512); |
@@ -942,7 +942,7 @@ static void do_pf_write_start(void) | |||
942 | pi_do_claimed(pf_current->pi, do_pf_write_start); | 942 | pi_do_claimed(pf_current->pi, do_pf_write_start); |
943 | return; | 943 | return; |
944 | } | 944 | } |
945 | next_request(-EIO); | 945 | next_request(BLK_STS_IOERR); |
946 | return; | 946 | return; |
947 | } | 947 | } |
948 | 948 | ||
@@ -955,7 +955,7 @@ static void do_pf_write_start(void) | |||
955 | pi_do_claimed(pf_current->pi, do_pf_write_start); | 955 | pi_do_claimed(pf_current->pi, do_pf_write_start); |
956 | return; | 956 | return; |
957 | } | 957 | } |
958 | next_request(-EIO); | 958 | next_request(BLK_STS_IOERR); |
959 | return; | 959 | return; |
960 | } | 960 | } |
961 | pi_write_block(pf_current->pi, pf_buf, 512); | 961 | pi_write_block(pf_current->pi, pf_buf, 512); |
@@ -975,7 +975,7 @@ static void do_pf_write_done(void) | |||
975 | pi_do_claimed(pf_current->pi, do_pf_write_start); | 975 | pi_do_claimed(pf_current->pi, do_pf_write_start); |
976 | return; | 976 | return; |
977 | } | 977 | } |
978 | next_request(-EIO); | 978 | next_request(BLK_STS_IOERR); |
979 | return; | 979 | return; |
980 | } | 980 | } |
981 | pi_disconnect(pf_current->pi); | 981 | pi_disconnect(pf_current->pi); |
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c index a809e3e9feb8..075662f2cf46 100644 --- a/drivers/block/ps3disk.c +++ b/drivers/block/ps3disk.c | |||
@@ -158,7 +158,7 @@ static int ps3disk_submit_request_sg(struct ps3_storage_device *dev, | |||
158 | if (res) { | 158 | if (res) { |
159 | dev_err(&dev->sbd.core, "%s:%u: %s failed %d\n", __func__, | 159 | dev_err(&dev->sbd.core, "%s:%u: %s failed %d\n", __func__, |
160 | __LINE__, op, res); | 160 | __LINE__, op, res); |
161 | __blk_end_request_all(req, -EIO); | 161 | __blk_end_request_all(req, BLK_STS_IOERR); |
162 | return 0; | 162 | return 0; |
163 | } | 163 | } |
164 | 164 | ||
@@ -180,7 +180,7 @@ static int ps3disk_submit_flush_request(struct ps3_storage_device *dev, | |||
180 | if (res) { | 180 | if (res) { |
181 | dev_err(&dev->sbd.core, "%s:%u: sync cache failed 0x%llx\n", | 181 | dev_err(&dev->sbd.core, "%s:%u: sync cache failed 0x%llx\n", |
182 | __func__, __LINE__, res); | 182 | __func__, __LINE__, res); |
183 | __blk_end_request_all(req, -EIO); | 183 | __blk_end_request_all(req, BLK_STS_IOERR); |
184 | return 0; | 184 | return 0; |
185 | } | 185 | } |
186 | 186 | ||
@@ -208,7 +208,7 @@ static void ps3disk_do_request(struct ps3_storage_device *dev, | |||
208 | break; | 208 | break; |
209 | default: | 209 | default: |
210 | blk_dump_rq_flags(req, DEVICE_NAME " bad request"); | 210 | blk_dump_rq_flags(req, DEVICE_NAME " bad request"); |
211 | __blk_end_request_all(req, -EIO); | 211 | __blk_end_request_all(req, BLK_STS_IOERR); |
212 | } | 212 | } |
213 | } | 213 | } |
214 | } | 214 | } |
@@ -231,7 +231,8 @@ static irqreturn_t ps3disk_interrupt(int irq, void *data) | |||
231 | struct ps3_storage_device *dev = data; | 231 | struct ps3_storage_device *dev = data; |
232 | struct ps3disk_private *priv; | 232 | struct ps3disk_private *priv; |
233 | struct request *req; | 233 | struct request *req; |
234 | int res, read, error; | 234 | int res, read; |
235 | blk_status_t error; | ||
235 | u64 tag, status; | 236 | u64 tag, status; |
236 | const char *op; | 237 | const char *op; |
237 | 238 | ||
@@ -269,7 +270,7 @@ static irqreturn_t ps3disk_interrupt(int irq, void *data) | |||
269 | if (status) { | 270 | if (status) { |
270 | dev_dbg(&dev->sbd.core, "%s:%u: %s failed 0x%llx\n", __func__, | 271 | dev_dbg(&dev->sbd.core, "%s:%u: %s failed 0x%llx\n", __func__, |
271 | __LINE__, op, status); | 272 | __LINE__, op, status); |
272 | error = -EIO; | 273 | error = BLK_STS_IOERR; |
273 | } else { | 274 | } else { |
274 | dev_dbg(&dev->sbd.core, "%s:%u: %s completed\n", __func__, | 275 | dev_dbg(&dev->sbd.core, "%s:%u: %s completed\n", __func__, |
275 | __LINE__, op); | 276 | __LINE__, op); |
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 454bf9c34882..3e8b43d792c2 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c | |||
@@ -2293,11 +2293,13 @@ static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request) | |||
2293 | rbd_assert(img_request->obj_request != NULL); | 2293 | rbd_assert(img_request->obj_request != NULL); |
2294 | more = obj_request->which < img_request->obj_request_count - 1; | 2294 | more = obj_request->which < img_request->obj_request_count - 1; |
2295 | } else { | 2295 | } else { |
2296 | blk_status_t status = errno_to_blk_status(result); | ||
2297 | |||
2296 | rbd_assert(img_request->rq != NULL); | 2298 | rbd_assert(img_request->rq != NULL); |
2297 | 2299 | ||
2298 | more = blk_update_request(img_request->rq, result, xferred); | 2300 | more = blk_update_request(img_request->rq, status, xferred); |
2299 | if (!more) | 2301 | if (!more) |
2300 | __blk_mq_end_request(img_request->rq, result); | 2302 | __blk_mq_end_request(img_request->rq, status); |
2301 | } | 2303 | } |
2302 | 2304 | ||
2303 | return more; | 2305 | return more; |
@@ -4149,7 +4151,7 @@ err_rq: | |||
4149 | obj_op_name(op_type), length, offset, result); | 4151 | obj_op_name(op_type), length, offset, result); |
4150 | ceph_put_snap_context(snapc); | 4152 | ceph_put_snap_context(snapc); |
4151 | err: | 4153 | err: |
4152 | blk_mq_end_request(rq, result); | 4154 | blk_mq_end_request(rq, errno_to_blk_status(result)); |
4153 | } | 4155 | } |
4154 | 4156 | ||
4155 | static int rbd_queue_rq(struct blk_mq_hw_ctx *hctx, | 4157 | static int rbd_queue_rq(struct blk_mq_hw_ctx *hctx, |
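Note: rbd keeps its internal result as a negative errno and converts only at the block-layer boundary via errno_to_blk_status(), as the hunks above show. A hedged sketch of that boundary, assuming an int result produced by the driver's own I/O path (names are illustrative):

    #include <linux/blkdev.h>
    #include <linux/blk-mq.h>

    /* Sketch: driver-internal errnos are translated exactly once, at the
     * point where the request is handed back to blk-mq.
     */
    static void demo_complete_rq(struct request *rq, int result,
                                 unsigned int xferred)
    {
            blk_status_t status = errno_to_blk_status(result);

            /* blk_update_request() returns false once nothing is left,
             * at which point the request itself can be ended.
             */
            if (!blk_update_request(rq, status, xferred))
                    __blk_mq_end_request(rq, status);
    }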
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c index 27833e4dae2a..e6c526861703 100644 --- a/drivers/block/skd_main.c +++ b/drivers/block/skd_main.c | |||
@@ -451,8 +451,8 @@ static void skd_send_special_fitmsg(struct skd_device *skdev, | |||
451 | struct skd_special_context *skspcl); | 451 | struct skd_special_context *skspcl); |
452 | static void skd_request_fn(struct request_queue *rq); | 452 | static void skd_request_fn(struct request_queue *rq); |
453 | static void skd_end_request(struct skd_device *skdev, | 453 | static void skd_end_request(struct skd_device *skdev, |
454 | struct skd_request_context *skreq, int error); | 454 | struct skd_request_context *skreq, blk_status_t status); |
455 | static int skd_preop_sg_list(struct skd_device *skdev, | 455 | static bool skd_preop_sg_list(struct skd_device *skdev, |
456 | struct skd_request_context *skreq); | 456 | struct skd_request_context *skreq); |
457 | static void skd_postop_sg_list(struct skd_device *skdev, | 457 | static void skd_postop_sg_list(struct skd_device *skdev, |
458 | struct skd_request_context *skreq); | 458 | struct skd_request_context *skreq); |
@@ -491,7 +491,7 @@ static void skd_fail_all_pending(struct skd_device *skdev) | |||
491 | if (req == NULL) | 491 | if (req == NULL) |
492 | break; | 492 | break; |
493 | blk_start_request(req); | 493 | blk_start_request(req); |
494 | __blk_end_request_all(req, -EIO); | 494 | __blk_end_request_all(req, BLK_STS_IOERR); |
495 | } | 495 | } |
496 | } | 496 | } |
497 | 497 | ||
@@ -545,7 +545,6 @@ static void skd_request_fn(struct request_queue *q) | |||
545 | struct request *req = NULL; | 545 | struct request *req = NULL; |
546 | struct skd_scsi_request *scsi_req; | 546 | struct skd_scsi_request *scsi_req; |
547 | unsigned long io_flags; | 547 | unsigned long io_flags; |
548 | int error; | ||
549 | u32 lba; | 548 | u32 lba; |
550 | u32 count; | 549 | u32 count; |
551 | int data_dir; | 550 | int data_dir; |
@@ -716,9 +715,7 @@ static void skd_request_fn(struct request_queue *q) | |||
716 | if (!req->bio) | 715 | if (!req->bio) |
717 | goto skip_sg; | 716 | goto skip_sg; |
718 | 717 | ||
719 | error = skd_preop_sg_list(skdev, skreq); | 718 | if (!skd_preop_sg_list(skdev, skreq)) { |
720 | |||
721 | if (error != 0) { | ||
722 | /* | 719 | /* |
723 | * Complete the native request with error. | 720 | * Complete the native request with error. |
724 | * Note that the request context is still at the | 721 | * Note that the request context is still at the |
@@ -730,7 +727,7 @@ static void skd_request_fn(struct request_queue *q) | |||
730 | */ | 727 | */ |
731 | pr_debug("%s:%s:%d error Out\n", | 728 | pr_debug("%s:%s:%d error Out\n", |
732 | skdev->name, __func__, __LINE__); | 729 | skdev->name, __func__, __LINE__); |
733 | skd_end_request(skdev, skreq, error); | 730 | skd_end_request(skdev, skreq, BLK_STS_RESOURCE); |
734 | continue; | 731 | continue; |
735 | } | 732 | } |
736 | 733 | ||
@@ -805,7 +802,7 @@ skip_sg: | |||
805 | } | 802 | } |
806 | 803 | ||
807 | static void skd_end_request(struct skd_device *skdev, | 804 | static void skd_end_request(struct skd_device *skdev, |
808 | struct skd_request_context *skreq, int error) | 805 | struct skd_request_context *skreq, blk_status_t error) |
809 | { | 806 | { |
810 | if (unlikely(error)) { | 807 | if (unlikely(error)) { |
811 | struct request *req = skreq->req; | 808 | struct request *req = skreq->req; |
@@ -822,7 +819,7 @@ static void skd_end_request(struct skd_device *skdev, | |||
822 | __blk_end_request_all(skreq->req, error); | 819 | __blk_end_request_all(skreq->req, error); |
823 | } | 820 | } |
824 | 821 | ||
825 | static int skd_preop_sg_list(struct skd_device *skdev, | 822 | static bool skd_preop_sg_list(struct skd_device *skdev, |
826 | struct skd_request_context *skreq) | 823 | struct skd_request_context *skreq) |
827 | { | 824 | { |
828 | struct request *req = skreq->req; | 825 | struct request *req = skreq->req; |
@@ -839,7 +836,7 @@ static int skd_preop_sg_list(struct skd_device *skdev, | |||
839 | 836 | ||
840 | n_sg = blk_rq_map_sg(skdev->queue, req, sg); | 837 | n_sg = blk_rq_map_sg(skdev->queue, req, sg); |
841 | if (n_sg <= 0) | 838 | if (n_sg <= 0) |
842 | return -EINVAL; | 839 | return false; |
843 | 840 | ||
844 | /* | 841 | /* |
845 | * Map scatterlist to PCI bus addresses. | 842 | * Map scatterlist to PCI bus addresses. |
@@ -847,7 +844,7 @@ static int skd_preop_sg_list(struct skd_device *skdev, | |||
847 | */ | 844 | */ |
848 | n_sg = pci_map_sg(skdev->pdev, sg, n_sg, pci_dir); | 845 | n_sg = pci_map_sg(skdev->pdev, sg, n_sg, pci_dir); |
849 | if (n_sg <= 0) | 846 | if (n_sg <= 0) |
850 | return -EINVAL; | 847 | return false; |
851 | 848 | ||
852 | SKD_ASSERT(n_sg <= skdev->sgs_per_request); | 849 | SKD_ASSERT(n_sg <= skdev->sgs_per_request); |
853 | 850 | ||
@@ -882,7 +879,7 @@ static int skd_preop_sg_list(struct skd_device *skdev, | |||
882 | } | 879 | } |
883 | } | 880 | } |
884 | 881 | ||
885 | return 0; | 882 | return true; |
886 | } | 883 | } |
887 | 884 | ||
888 | static void skd_postop_sg_list(struct skd_device *skdev, | 885 | static void skd_postop_sg_list(struct skd_device *skdev, |
@@ -2333,7 +2330,7 @@ static void skd_resolve_req_exception(struct skd_device *skdev, | |||
2333 | switch (skd_check_status(skdev, cmp_status, &skreq->err_info)) { | 2330 | switch (skd_check_status(skdev, cmp_status, &skreq->err_info)) { |
2334 | case SKD_CHECK_STATUS_REPORT_GOOD: | 2331 | case SKD_CHECK_STATUS_REPORT_GOOD: |
2335 | case SKD_CHECK_STATUS_REPORT_SMART_ALERT: | 2332 | case SKD_CHECK_STATUS_REPORT_SMART_ALERT: |
2336 | skd_end_request(skdev, skreq, 0); | 2333 | skd_end_request(skdev, skreq, BLK_STS_OK); |
2337 | break; | 2334 | break; |
2338 | 2335 | ||
2339 | case SKD_CHECK_STATUS_BUSY_IMMINENT: | 2336 | case SKD_CHECK_STATUS_BUSY_IMMINENT: |
@@ -2355,7 +2352,7 @@ static void skd_resolve_req_exception(struct skd_device *skdev, | |||
2355 | 2352 | ||
2356 | case SKD_CHECK_STATUS_REPORT_ERROR: | 2353 | case SKD_CHECK_STATUS_REPORT_ERROR: |
2357 | default: | 2354 | default: |
2358 | skd_end_request(skdev, skreq, -EIO); | 2355 | skd_end_request(skdev, skreq, BLK_STS_IOERR); |
2359 | break; | 2356 | break; |
2360 | } | 2357 | } |
2361 | } | 2358 | } |
@@ -2748,7 +2745,7 @@ static int skd_isr_completion_posted(struct skd_device *skdev, | |||
2748 | * native request. | 2745 | * native request. |
2749 | */ | 2746 | */ |
2750 | if (likely(cmp_status == SAM_STAT_GOOD)) | 2747 | if (likely(cmp_status == SAM_STAT_GOOD)) |
2751 | skd_end_request(skdev, skreq, 0); | 2748 | skd_end_request(skdev, skreq, BLK_STS_OK); |
2752 | else | 2749 | else |
2753 | skd_resolve_req_exception(skdev, skreq); | 2750 | skd_resolve_req_exception(skdev, skreq); |
2754 | } | 2751 | } |
@@ -3190,7 +3187,7 @@ static void skd_recover_requests(struct skd_device *skdev, int requeue) | |||
3190 | SKD_MAX_RETRIES) | 3187 | SKD_MAX_RETRIES) |
3191 | blk_requeue_request(skdev->queue, skreq->req); | 3188 | blk_requeue_request(skdev->queue, skreq->req); |
3192 | else | 3189 | else |
3193 | skd_end_request(skdev, skreq, -EIO); | 3190 | skd_end_request(skdev, skreq, BLK_STS_IOERR); |
3194 | 3191 | ||
3195 | skreq->req = NULL; | 3192 | skreq->req = NULL; |
3196 | 3193 | ||
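Note: two separate things change in skd above: skd_preop_sg_list() now reports success or failure as a bool instead of an errno, and the caller picks the block-layer status itself, using BLK_STS_RESOURCE for a failed scatterlist/DMA mapping. A simplified sketch of that division of labour, with hypothetical names:

    #include <linux/blkdev.h>
    #include <linux/scatterlist.h>

    /* Sketch: the prep helper only says whether mapping worked ... */
    static bool demo_prep_sg(struct request_queue *q, struct request *req,
                             struct scatterlist *sg)
    {
            int n_sg = blk_rq_map_sg(q, req, sg);

            return n_sg > 0;   /* the real driver also performs the DMA mapping */
    }

    /* ... and the caller chooses the block-layer status on failure. */
    static void demo_issue(struct request_queue *q, struct request *req,
                           struct scatterlist *sg)
    {
            if (!demo_prep_sg(q, req, sg)) {
                    __blk_end_request_all(req, BLK_STS_RESOURCE);
                    return;
            }
            /* hand the mapped SG list to the hardware here */
    }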
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c index 3f3a3ab3d50a..6b16ead1da58 100644 --- a/drivers/block/sunvdc.c +++ b/drivers/block/sunvdc.c | |||
@@ -316,7 +316,7 @@ static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr, | |||
316 | 316 | ||
317 | rqe->req = NULL; | 317 | rqe->req = NULL; |
318 | 318 | ||
319 | __blk_end_request(req, (desc->status ? -EIO : 0), desc->size); | 319 | __blk_end_request(req, (desc->status ? BLK_STS_IOERR : 0), desc->size); |
320 | 320 | ||
321 | vdc_blk_queue_start(port); | 321 | vdc_blk_queue_start(port); |
322 | } | 322 | } |
@@ -1023,7 +1023,7 @@ static void vdc_queue_drain(struct vdc_port *port) | |||
1023 | struct request *req; | 1023 | struct request *req; |
1024 | 1024 | ||
1025 | while ((req = blk_fetch_request(port->disk->queue)) != NULL) | 1025 | while ((req = blk_fetch_request(port->disk->queue)) != NULL) |
1026 | __blk_end_request_all(req, -EIO); | 1026 | __blk_end_request_all(req, BLK_STS_IOERR); |
1027 | } | 1027 | } |
1028 | 1028 | ||
1029 | static void vdc_ldc_reset_timer(unsigned long _arg) | 1029 | static void vdc_ldc_reset_timer(unsigned long _arg) |
diff --git a/drivers/block/swim.c b/drivers/block/swim.c index 3064be6cf375..1633aaf24060 100644 --- a/drivers/block/swim.c +++ b/drivers/block/swim.c | |||
@@ -493,7 +493,7 @@ static inline int swim_read_sector(struct floppy_state *fs, | |||
493 | return ret; | 493 | return ret; |
494 | } | 494 | } |
495 | 495 | ||
496 | static int floppy_read_sectors(struct floppy_state *fs, | 496 | static blk_status_t floppy_read_sectors(struct floppy_state *fs, |
497 | int req_sector, int sectors_nb, | 497 | int req_sector, int sectors_nb, |
498 | unsigned char *buffer) | 498 | unsigned char *buffer) |
499 | { | 499 | { |
@@ -516,7 +516,7 @@ static int floppy_read_sectors(struct floppy_state *fs, | |||
516 | ret = swim_read_sector(fs, side, track, sector, | 516 | ret = swim_read_sector(fs, side, track, sector, |
517 | buffer); | 517 | buffer); |
518 | if (try-- == 0) | 518 | if (try-- == 0) |
519 | return -EIO; | 519 | return BLK_STS_IOERR; |
520 | } while (ret != 512); | 520 | } while (ret != 512); |
521 | 521 | ||
522 | buffer += ret; | 522 | buffer += ret; |
@@ -553,7 +553,7 @@ static void do_fd_request(struct request_queue *q) | |||
553 | 553 | ||
554 | req = swim_next_request(swd); | 554 | req = swim_next_request(swd); |
555 | while (req) { | 555 | while (req) { |
556 | int err = -EIO; | 556 | blk_status_t err = BLK_STS_IOERR; |
557 | 557 | ||
558 | fs = req->rq_disk->private_data; | 558 | fs = req->rq_disk->private_data; |
559 | if (blk_rq_pos(req) >= fs->total_secs) | 559 | if (blk_rq_pos(req) >= fs->total_secs) |
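Note: swim goes one step further and has its sector-read helper return a blk_status_t directly, so the request loop can pass the value straight through to the completion call. A rough sketch of that propagation (both functions are invented for illustration):

    #include <linux/blkdev.h>

    /* Sketch: the low-level helper already speaks blk_status_t ... */
    static blk_status_t demo_read_sectors(bool media_ok)
    {
            return media_ok ? BLK_STS_OK : BLK_STS_IOERR;
    }

    /* ... which the request loop hands straight to the completion helper. */
    static void demo_service_request(struct request *req, bool media_ok)
    {
            blk_status_t err = demo_read_sectors(media_ok);

            __blk_end_request_cur(req, err);
    }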
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c index ba4809c9bdba..c7953860ce91 100644 --- a/drivers/block/swim3.c +++ b/drivers/block/swim3.c | |||
@@ -257,7 +257,7 @@ static unsigned int floppy_check_events(struct gendisk *disk, | |||
257 | unsigned int clearing); | 257 | unsigned int clearing); |
258 | static int floppy_revalidate(struct gendisk *disk); | 258 | static int floppy_revalidate(struct gendisk *disk); |
259 | 259 | ||
260 | static bool swim3_end_request(struct floppy_state *fs, int err, unsigned int nr_bytes) | 260 | static bool swim3_end_request(struct floppy_state *fs, blk_status_t err, unsigned int nr_bytes) |
261 | { | 261 | { |
262 | struct request *req = fs->cur_req; | 262 | struct request *req = fs->cur_req; |
263 | int rc; | 263 | int rc; |
@@ -334,7 +334,7 @@ static void start_request(struct floppy_state *fs) | |||
334 | if (fs->mdev->media_bay && | 334 | if (fs->mdev->media_bay && |
335 | check_media_bay(fs->mdev->media_bay) != MB_FD) { | 335 | check_media_bay(fs->mdev->media_bay) != MB_FD) { |
336 | swim3_dbg("%s", " media bay absent, dropping req\n"); | 336 | swim3_dbg("%s", " media bay absent, dropping req\n"); |
337 | swim3_end_request(fs, -ENODEV, 0); | 337 | swim3_end_request(fs, BLK_STS_IOERR, 0); |
338 | continue; | 338 | continue; |
339 | } | 339 | } |
340 | 340 | ||
@@ -350,12 +350,12 @@ static void start_request(struct floppy_state *fs) | |||
350 | if (blk_rq_pos(req) >= fs->total_secs) { | 350 | if (blk_rq_pos(req) >= fs->total_secs) { |
351 | swim3_dbg(" pos out of bounds (%ld, max is %ld)\n", | 351 | swim3_dbg(" pos out of bounds (%ld, max is %ld)\n", |
352 | (long)blk_rq_pos(req), (long)fs->total_secs); | 352 | (long)blk_rq_pos(req), (long)fs->total_secs); |
353 | swim3_end_request(fs, -EIO, 0); | 353 | swim3_end_request(fs, BLK_STS_IOERR, 0); |
354 | continue; | 354 | continue; |
355 | } | 355 | } |
356 | if (fs->ejected) { | 356 | if (fs->ejected) { |
357 | swim3_dbg("%s", " disk ejected\n"); | 357 | swim3_dbg("%s", " disk ejected\n"); |
358 | swim3_end_request(fs, -EIO, 0); | 358 | swim3_end_request(fs, BLK_STS_IOERR, 0); |
359 | continue; | 359 | continue; |
360 | } | 360 | } |
361 | 361 | ||
@@ -364,7 +364,7 @@ static void start_request(struct floppy_state *fs) | |||
364 | fs->write_prot = swim3_readbit(fs, WRITE_PROT); | 364 | fs->write_prot = swim3_readbit(fs, WRITE_PROT); |
365 | if (fs->write_prot) { | 365 | if (fs->write_prot) { |
366 | swim3_dbg("%s", " try to write, disk write protected\n"); | 366 | swim3_dbg("%s", " try to write, disk write protected\n"); |
367 | swim3_end_request(fs, -EIO, 0); | 367 | swim3_end_request(fs, BLK_STS_IOERR, 0); |
368 | continue; | 368 | continue; |
369 | } | 369 | } |
370 | } | 370 | } |
@@ -548,7 +548,7 @@ static void act(struct floppy_state *fs) | |||
548 | if (fs->retries > 5) { | 548 | if (fs->retries > 5) { |
549 | swim3_err("Wrong cylinder in transfer, want: %d got %d\n", | 549 | swim3_err("Wrong cylinder in transfer, want: %d got %d\n", |
550 | fs->req_cyl, fs->cur_cyl); | 550 | fs->req_cyl, fs->cur_cyl); |
551 | swim3_end_request(fs, -EIO, 0); | 551 | swim3_end_request(fs, BLK_STS_IOERR, 0); |
552 | fs->state = idle; | 552 | fs->state = idle; |
553 | return; | 553 | return; |
554 | } | 554 | } |
@@ -584,7 +584,7 @@ static void scan_timeout(unsigned long data) | |||
584 | out_8(&sw->intr_enable, 0); | 584 | out_8(&sw->intr_enable, 0); |
585 | fs->cur_cyl = -1; | 585 | fs->cur_cyl = -1; |
586 | if (fs->retries > 5) { | 586 | if (fs->retries > 5) { |
587 | swim3_end_request(fs, -EIO, 0); | 587 | swim3_end_request(fs, BLK_STS_IOERR, 0); |
588 | fs->state = idle; | 588 | fs->state = idle; |
589 | start_request(fs); | 589 | start_request(fs); |
590 | } else { | 590 | } else { |
@@ -608,7 +608,7 @@ static void seek_timeout(unsigned long data) | |||
608 | out_8(&sw->select, RELAX); | 608 | out_8(&sw->select, RELAX); |
609 | out_8(&sw->intr_enable, 0); | 609 | out_8(&sw->intr_enable, 0); |
610 | swim3_err("%s", "Seek timeout\n"); | 610 | swim3_err("%s", "Seek timeout\n"); |
611 | swim3_end_request(fs, -EIO, 0); | 611 | swim3_end_request(fs, BLK_STS_IOERR, 0); |
612 | fs->state = idle; | 612 | fs->state = idle; |
613 | start_request(fs); | 613 | start_request(fs); |
614 | spin_unlock_irqrestore(&swim3_lock, flags); | 614 | spin_unlock_irqrestore(&swim3_lock, flags); |
@@ -637,7 +637,7 @@ static void settle_timeout(unsigned long data) | |||
637 | goto unlock; | 637 | goto unlock; |
638 | } | 638 | } |
639 | swim3_err("%s", "Seek settle timeout\n"); | 639 | swim3_err("%s", "Seek settle timeout\n"); |
640 | swim3_end_request(fs, -EIO, 0); | 640 | swim3_end_request(fs, BLK_STS_IOERR, 0); |
641 | fs->state = idle; | 641 | fs->state = idle; |
642 | start_request(fs); | 642 | start_request(fs); |
643 | unlock: | 643 | unlock: |
@@ -666,7 +666,7 @@ static void xfer_timeout(unsigned long data) | |||
666 | swim3_err("Timeout %sing sector %ld\n", | 666 | swim3_err("Timeout %sing sector %ld\n", |
667 | (rq_data_dir(fs->cur_req)==WRITE? "writ": "read"), | 667 | (rq_data_dir(fs->cur_req)==WRITE? "writ": "read"), |
668 | (long)blk_rq_pos(fs->cur_req)); | 668 | (long)blk_rq_pos(fs->cur_req)); |
669 | swim3_end_request(fs, -EIO, 0); | 669 | swim3_end_request(fs, BLK_STS_IOERR, 0); |
670 | fs->state = idle; | 670 | fs->state = idle; |
671 | start_request(fs); | 671 | start_request(fs); |
672 | spin_unlock_irqrestore(&swim3_lock, flags); | 672 | spin_unlock_irqrestore(&swim3_lock, flags); |
@@ -703,7 +703,7 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id) | |||
703 | swim3_err("%s", "Seen sector but cyl=ff?\n"); | 703 | swim3_err("%s", "Seen sector but cyl=ff?\n"); |
704 | fs->cur_cyl = -1; | 704 | fs->cur_cyl = -1; |
705 | if (fs->retries > 5) { | 705 | if (fs->retries > 5) { |
706 | swim3_end_request(fs, -EIO, 0); | 706 | swim3_end_request(fs, BLK_STS_IOERR, 0); |
707 | fs->state = idle; | 707 | fs->state = idle; |
708 | start_request(fs); | 708 | start_request(fs); |
709 | } else { | 709 | } else { |
@@ -786,7 +786,7 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id) | |||
786 | swim3_err("Error %sing block %ld (err=%x)\n", | 786 | swim3_err("Error %sing block %ld (err=%x)\n", |
787 | rq_data_dir(req) == WRITE? "writ": "read", | 787 | rq_data_dir(req) == WRITE? "writ": "read", |
788 | (long)blk_rq_pos(req), err); | 788 | (long)blk_rq_pos(req), err); |
789 | swim3_end_request(fs, -EIO, 0); | 789 | swim3_end_request(fs, BLK_STS_IOERR, 0); |
790 | fs->state = idle; | 790 | fs->state = idle; |
791 | } | 791 | } |
792 | } else { | 792 | } else { |
@@ -795,7 +795,7 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id) | |||
795 | swim3_err("fd dma error: stat=%x resid=%d\n", stat, resid); | 795 | swim3_err("fd dma error: stat=%x resid=%d\n", stat, resid); |
796 | swim3_err(" state=%d, dir=%x, intr=%x, err=%x\n", | 796 | swim3_err(" state=%d, dir=%x, intr=%x, err=%x\n", |
797 | fs->state, rq_data_dir(req), intr, err); | 797 | fs->state, rq_data_dir(req), intr, err); |
798 | swim3_end_request(fs, -EIO, 0); | 798 | swim3_end_request(fs, BLK_STS_IOERR, 0); |
799 | fs->state = idle; | 799 | fs->state = idle; |
800 | start_request(fs); | 800 | start_request(fs); |
801 | break; | 801 | break; |
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c index c8e072caf56f..08586dc14e85 100644 --- a/drivers/block/sx8.c +++ b/drivers/block/sx8.c | |||
@@ -745,7 +745,7 @@ static unsigned int carm_fill_get_fw_ver(struct carm_host *host, | |||
745 | 745 | ||
746 | static inline void carm_end_request_queued(struct carm_host *host, | 746 | static inline void carm_end_request_queued(struct carm_host *host, |
747 | struct carm_request *crq, | 747 | struct carm_request *crq, |
748 | int error) | 748 | blk_status_t error) |
749 | { | 749 | { |
750 | struct request *req = crq->rq; | 750 | struct request *req = crq->rq; |
751 | int rc; | 751 | int rc; |
@@ -791,7 +791,7 @@ static inline void carm_round_robin(struct carm_host *host) | |||
791 | } | 791 | } |
792 | 792 | ||
793 | static inline void carm_end_rq(struct carm_host *host, struct carm_request *crq, | 793 | static inline void carm_end_rq(struct carm_host *host, struct carm_request *crq, |
794 | int error) | 794 | blk_status_t error) |
795 | { | 795 | { |
796 | carm_end_request_queued(host, crq, error); | 796 | carm_end_request_queued(host, crq, error); |
797 | if (max_queue == 1) | 797 | if (max_queue == 1) |
@@ -869,14 +869,14 @@ queue_one_request: | |||
869 | sg = &crq->sg[0]; | 869 | sg = &crq->sg[0]; |
870 | n_elem = blk_rq_map_sg(q, rq, sg); | 870 | n_elem = blk_rq_map_sg(q, rq, sg); |
871 | if (n_elem <= 0) { | 871 | if (n_elem <= 0) { |
872 | carm_end_rq(host, crq, -EIO); | 872 | carm_end_rq(host, crq, BLK_STS_IOERR); |
873 | return; /* request with no s/g entries? */ | 873 | return; /* request with no s/g entries? */ |
874 | } | 874 | } |
875 | 875 | ||
876 | /* map scatterlist to PCI bus addresses */ | 876 | /* map scatterlist to PCI bus addresses */ |
877 | n_elem = pci_map_sg(host->pdev, sg, n_elem, pci_dir); | 877 | n_elem = pci_map_sg(host->pdev, sg, n_elem, pci_dir); |
878 | if (n_elem <= 0) { | 878 | if (n_elem <= 0) { |
879 | carm_end_rq(host, crq, -EIO); | 879 | carm_end_rq(host, crq, BLK_STS_IOERR); |
880 | return; /* request with no s/g entries? */ | 880 | return; /* request with no s/g entries? */ |
881 | } | 881 | } |
882 | crq->n_elem = n_elem; | 882 | crq->n_elem = n_elem; |
@@ -937,7 +937,7 @@ queue_one_request: | |||
937 | 937 | ||
938 | static void carm_handle_array_info(struct carm_host *host, | 938 | static void carm_handle_array_info(struct carm_host *host, |
939 | struct carm_request *crq, u8 *mem, | 939 | struct carm_request *crq, u8 *mem, |
940 | int error) | 940 | blk_status_t error) |
941 | { | 941 | { |
942 | struct carm_port *port; | 942 | struct carm_port *port; |
943 | u8 *msg_data = mem + sizeof(struct carm_array_info); | 943 | u8 *msg_data = mem + sizeof(struct carm_array_info); |
@@ -997,7 +997,7 @@ out: | |||
997 | 997 | ||
998 | static void carm_handle_scan_chan(struct carm_host *host, | 998 | static void carm_handle_scan_chan(struct carm_host *host, |
999 | struct carm_request *crq, u8 *mem, | 999 | struct carm_request *crq, u8 *mem, |
1000 | int error) | 1000 | blk_status_t error) |
1001 | { | 1001 | { |
1002 | u8 *msg_data = mem + IOC_SCAN_CHAN_OFFSET; | 1002 | u8 *msg_data = mem + IOC_SCAN_CHAN_OFFSET; |
1003 | unsigned int i, dev_count = 0; | 1003 | unsigned int i, dev_count = 0; |
@@ -1029,7 +1029,7 @@ out: | |||
1029 | } | 1029 | } |
1030 | 1030 | ||
1031 | static void carm_handle_generic(struct carm_host *host, | 1031 | static void carm_handle_generic(struct carm_host *host, |
1032 | struct carm_request *crq, int error, | 1032 | struct carm_request *crq, blk_status_t error, |
1033 | int cur_state, int next_state) | 1033 | int cur_state, int next_state) |
1034 | { | 1034 | { |
1035 | DPRINTK("ENTER\n"); | 1035 | DPRINTK("ENTER\n"); |
@@ -1045,7 +1045,7 @@ static void carm_handle_generic(struct carm_host *host, | |||
1045 | } | 1045 | } |
1046 | 1046 | ||
1047 | static inline void carm_handle_rw(struct carm_host *host, | 1047 | static inline void carm_handle_rw(struct carm_host *host, |
1048 | struct carm_request *crq, int error) | 1048 | struct carm_request *crq, blk_status_t error) |
1049 | { | 1049 | { |
1050 | int pci_dir; | 1050 | int pci_dir; |
1051 | 1051 | ||
@@ -1067,7 +1067,7 @@ static inline void carm_handle_resp(struct carm_host *host, | |||
1067 | u32 handle = le32_to_cpu(ret_handle_le); | 1067 | u32 handle = le32_to_cpu(ret_handle_le); |
1068 | unsigned int msg_idx; | 1068 | unsigned int msg_idx; |
1069 | struct carm_request *crq; | 1069 | struct carm_request *crq; |
1070 | int error = (status == RMSG_OK) ? 0 : -EIO; | 1070 | blk_status_t error = (status == RMSG_OK) ? 0 : BLK_STS_IOERR; |
1071 | u8 *mem; | 1071 | u8 *mem; |
1072 | 1072 | ||
1073 | VPRINTK("ENTER, handle == 0x%x\n", handle); | 1073 | VPRINTK("ENTER, handle == 0x%x\n", handle); |
@@ -1155,7 +1155,7 @@ static inline void carm_handle_resp(struct carm_host *host, | |||
1155 | err_out: | 1155 | err_out: |
1156 | printk(KERN_WARNING DRV_NAME "(%s): BUG: unhandled message type %d/%d\n", | 1156 | printk(KERN_WARNING DRV_NAME "(%s): BUG: unhandled message type %d/%d\n", |
1157 | pci_name(host->pdev), crq->msg_type, crq->msg_subtype); | 1157 | pci_name(host->pdev), crq->msg_type, crq->msg_subtype); |
1158 | carm_end_rq(host, crq, -EIO); | 1158 | carm_end_rq(host, crq, BLK_STS_IOERR); |
1159 | } | 1159 | } |
1160 | 1160 | ||
1161 | static inline void carm_handle_responses(struct carm_host *host) | 1161 | static inline void carm_handle_responses(struct carm_host *host) |
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 553cc4c542b4..205b74d70efc 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c | |||
@@ -64,15 +64,15 @@ struct virtblk_req { | |||
64 | struct scatterlist sg[]; | 64 | struct scatterlist sg[]; |
65 | }; | 65 | }; |
66 | 66 | ||
67 | static inline int virtblk_result(struct virtblk_req *vbr) | 67 | static inline blk_status_t virtblk_result(struct virtblk_req *vbr) |
68 | { | 68 | { |
69 | switch (vbr->status) { | 69 | switch (vbr->status) { |
70 | case VIRTIO_BLK_S_OK: | 70 | case VIRTIO_BLK_S_OK: |
71 | return 0; | 71 | return BLK_STS_OK; |
72 | case VIRTIO_BLK_S_UNSUPP: | 72 | case VIRTIO_BLK_S_UNSUPP: |
73 | return -ENOTTY; | 73 | return BLK_STS_NOTSUPP; |
74 | default: | 74 | default: |
75 | return -EIO; | 75 | return BLK_STS_IOERR; |
76 | } | 76 | } |
77 | } | 77 | } |
78 | 78 | ||
@@ -307,7 +307,7 @@ static int virtblk_get_id(struct gendisk *disk, char *id_str) | |||
307 | goto out; | 307 | goto out; |
308 | 308 | ||
309 | blk_execute_rq(vblk->disk->queue, vblk->disk, req, false); | 309 | blk_execute_rq(vblk->disk->queue, vblk->disk, req, false); |
310 | err = virtblk_result(blk_mq_rq_to_pdu(req)); | 310 | err = blk_status_to_errno(virtblk_result(blk_mq_rq_to_pdu(req))); |
311 | out: | 311 | out: |
312 | blk_put_request(req); | 312 | blk_put_request(req); |
313 | return err; | 313 | return err; |
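Note: virtio_blk illustrates both directions of the conversion: the device status byte maps onto a blk_status_t for request completion, and blk_status_to_errno() turns the result back into an errno where a plain int is still expected (the get-id path above). A compact sketch of the two mappings, assuming the standard VIRTIO_BLK_S_* codes:

    #include <linux/blkdev.h>
    #include <linux/virtio_blk.h>

    /* Device status byte -> block-layer status (mirrors the hunk above). */
    static blk_status_t demo_virtblk_result(u8 status)
    {
            switch (status) {
            case VIRTIO_BLK_S_OK:
                    return BLK_STS_OK;
            case VIRTIO_BLK_S_UNSUPP:
                    return BLK_STS_NOTSUPP;
            default:
                    return BLK_STS_IOERR;
            }
    }

    /* Callers that still need an errno convert at the very edge. */
    static int demo_virtblk_errno(u8 status)
    {
            return blk_status_to_errno(demo_virtblk_result(status));
    }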
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 39459631667c..aedc3c759273 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c | |||
@@ -1601,14 +1601,18 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id) | |||
1601 | continue; | 1601 | continue; |
1602 | } | 1602 | } |
1603 | 1603 | ||
1604 | blkif_req(req)->error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO; | 1604 | if (bret->status == BLKIF_RSP_OKAY) |
1605 | blkif_req(req)->error = BLK_STS_OK; | ||
1606 | else | ||
1607 | blkif_req(req)->error = BLK_STS_IOERR; | ||
1608 | |||
1605 | switch (bret->operation) { | 1609 | switch (bret->operation) { |
1606 | case BLKIF_OP_DISCARD: | 1610 | case BLKIF_OP_DISCARD: |
1607 | if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) { | 1611 | if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) { |
1608 | struct request_queue *rq = info->rq; | 1612 | struct request_queue *rq = info->rq; |
1609 | printk(KERN_WARNING "blkfront: %s: %s op failed\n", | 1613 | printk(KERN_WARNING "blkfront: %s: %s op failed\n", |
1610 | info->gd->disk_name, op_name(bret->operation)); | 1614 | info->gd->disk_name, op_name(bret->operation)); |
1611 | blkif_req(req)->error = -EOPNOTSUPP; | 1615 | blkif_req(req)->error = BLK_STS_NOTSUPP; |
1612 | info->feature_discard = 0; | 1616 | info->feature_discard = 0; |
1613 | info->feature_secdiscard = 0; | 1617 | info->feature_secdiscard = 0; |
1614 | queue_flag_clear(QUEUE_FLAG_DISCARD, rq); | 1618 | queue_flag_clear(QUEUE_FLAG_DISCARD, rq); |
@@ -1626,11 +1630,11 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id) | |||
1626 | rinfo->shadow[id].req.u.rw.nr_segments == 0)) { | 1630 | rinfo->shadow[id].req.u.rw.nr_segments == 0)) { |
1627 | printk(KERN_WARNING "blkfront: %s: empty %s op failed\n", | 1631 | printk(KERN_WARNING "blkfront: %s: empty %s op failed\n", |
1628 | info->gd->disk_name, op_name(bret->operation)); | 1632 | info->gd->disk_name, op_name(bret->operation)); |
1629 | blkif_req(req)->error = -EOPNOTSUPP; | 1633 | blkif_req(req)->error = BLK_STS_NOTSUPP; |
1630 | } | 1634 | } |
1631 | if (unlikely(blkif_req(req)->error)) { | 1635 | if (unlikely(blkif_req(req)->error)) { |
1632 | if (blkif_req(req)->error == -EOPNOTSUPP) | 1636 | if (blkif_req(req)->error == BLK_STS_NOTSUPP) |
1633 | blkif_req(req)->error = 0; | 1637 | blkif_req(req)->error = BLK_STS_OK; |
1634 | info->feature_fua = 0; | 1638 | info->feature_fua = 0; |
1635 | info->feature_flush = 0; | 1639 | info->feature_flush = 0; |
1636 | xlvbd_flush(info); | 1640 | xlvbd_flush(info); |
@@ -2137,7 +2141,7 @@ static int blkfront_resume(struct xenbus_device *dev) | |||
2137 | merge_bio.tail = shadow[j].request->biotail; | 2141 | merge_bio.tail = shadow[j].request->biotail; |
2138 | bio_list_merge(&info->bio_list, &merge_bio); | 2142 | bio_list_merge(&info->bio_list, &merge_bio); |
2139 | shadow[j].request->bio = NULL; | 2143 | shadow[j].request->bio = NULL; |
2140 | blk_mq_end_request(shadow[j].request, 0); | 2144 | blk_mq_end_request(shadow[j].request, BLK_STS_OK); |
2141 | } | 2145 | } |
2142 | } | 2146 | } |
2143 | 2147 | ||
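Note: blkfront maps the backend's BLKIF_RSP codes onto blk_status_t and, as in the flush/barrier hunk above, downgrades an "unsupported" result back to BLK_STS_OK once the optional feature has been switched off, rather than failing the request. A hedged sketch of that fallback for the flush case, with an invented feature flag:

    #include <linux/blkdev.h>
    #include <xen/interface/io/blkif.h>

    /* Sketch (flush path): an unsupported cache flush disables the feature
     * and completes the request successfully instead of failing it.
     */
    static blk_status_t demo_flush_response(int rsp_status, bool *feature_flush)
    {
            blk_status_t error;

            error = (rsp_status == BLKIF_RSP_OKAY) ? BLK_STS_OK : BLK_STS_IOERR;
            if (rsp_status == BLKIF_RSP_EOPNOTSUPP) {
                    *feature_flush = false;   /* stop issuing flushes */
                    error = BLK_STS_OK;       /* do not fail the request */
            }
            return error;
    }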
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c index 757dce2147e0..977fdf066017 100644 --- a/drivers/block/xsysace.c +++ b/drivers/block/xsysace.c | |||
@@ -471,7 +471,7 @@ static struct request *ace_get_next_request(struct request_queue *q) | |||
471 | if (!blk_rq_is_passthrough(req)) | 471 | if (!blk_rq_is_passthrough(req)) |
472 | break; | 472 | break; |
473 | blk_start_request(req); | 473 | blk_start_request(req); |
474 | __blk_end_request_all(req, -EIO); | 474 | __blk_end_request_all(req, BLK_STS_IOERR); |
475 | } | 475 | } |
476 | return req; | 476 | return req; |
477 | } | 477 | } |
@@ -499,11 +499,11 @@ static void ace_fsm_dostate(struct ace_device *ace) | |||
499 | 499 | ||
500 | /* Drop all in-flight and pending requests */ | 500 | /* Drop all in-flight and pending requests */ |
501 | if (ace->req) { | 501 | if (ace->req) { |
502 | __blk_end_request_all(ace->req, -EIO); | 502 | __blk_end_request_all(ace->req, BLK_STS_IOERR); |
503 | ace->req = NULL; | 503 | ace->req = NULL; |
504 | } | 504 | } |
505 | while ((req = blk_fetch_request(ace->queue)) != NULL) | 505 | while ((req = blk_fetch_request(ace->queue)) != NULL) |
506 | __blk_end_request_all(req, -EIO); | 506 | __blk_end_request_all(req, BLK_STS_IOERR); |
507 | 507 | ||
508 | /* Drop back to IDLE state and notify waiters */ | 508 | /* Drop back to IDLE state and notify waiters */ |
509 | ace->fsm_state = ACE_FSM_STATE_IDLE; | 509 | ace->fsm_state = ACE_FSM_STATE_IDLE; |
@@ -728,7 +728,7 @@ static void ace_fsm_dostate(struct ace_device *ace) | |||
728 | } | 728 | } |
729 | 729 | ||
730 | /* bio finished; is there another one? */ | 730 | /* bio finished; is there another one? */ |
731 | if (__blk_end_request_cur(ace->req, 0)) { | 731 | if (__blk_end_request_cur(ace->req, BLK_STS_OK)) { |
732 | /* dev_dbg(ace->dev, "next block; h=%u c=%u\n", | 732 | /* dev_dbg(ace->dev, "next block; h=%u c=%u\n", |
733 | * blk_rq_sectors(ace->req), | 733 | * blk_rq_sectors(ace->req), |
734 | * blk_rq_cur_sectors(ace->req)); | 734 | * blk_rq_cur_sectors(ace->req)); |
diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c index 968f9e52effa..41c95c9b2ab4 100644 --- a/drivers/block/z2ram.c +++ b/drivers/block/z2ram.c | |||
@@ -74,14 +74,14 @@ static void do_z2_request(struct request_queue *q) | |||
74 | while (req) { | 74 | while (req) { |
75 | unsigned long start = blk_rq_pos(req) << 9; | 75 | unsigned long start = blk_rq_pos(req) << 9; |
76 | unsigned long len = blk_rq_cur_bytes(req); | 76 | unsigned long len = blk_rq_cur_bytes(req); |
77 | int err = 0; | 77 | blk_status_t err = BLK_STS_OK; |
78 | 78 | ||
79 | if (start + len > z2ram_size) { | 79 | if (start + len > z2ram_size) { |
80 | pr_err(DEVICE_NAME ": bad access: block=%llu, " | 80 | pr_err(DEVICE_NAME ": bad access: block=%llu, " |
81 | "count=%u\n", | 81 | "count=%u\n", |
82 | (unsigned long long)blk_rq_pos(req), | 82 | (unsigned long long)blk_rq_pos(req), |
83 | blk_rq_cur_sectors(req)); | 83 | blk_rq_cur_sectors(req)); |
84 | err = -EIO; | 84 | err = BLK_STS_IOERR; |
85 | goto done; | 85 | goto done; |
86 | } | 86 | } |
87 | while (len) { | 87 | while (len) { |
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c index 1372763a948f..53f8278e66f7 100644 --- a/drivers/cdrom/gdrom.c +++ b/drivers/cdrom/gdrom.c | |||
@@ -583,7 +583,8 @@ static int gdrom_set_interrupt_handlers(void) | |||
583 | */ | 583 | */ |
584 | static void gdrom_readdisk_dma(struct work_struct *work) | 584 | static void gdrom_readdisk_dma(struct work_struct *work) |
585 | { | 585 | { |
586 | int err, block, block_cnt; | 586 | int block, block_cnt; |
587 | blk_status_t err; | ||
587 | struct packet_command *read_command; | 588 | struct packet_command *read_command; |
588 | struct list_head *elem, *next; | 589 | struct list_head *elem, *next; |
589 | struct request *req; | 590 | struct request *req; |
@@ -641,7 +642,7 @@ static void gdrom_readdisk_dma(struct work_struct *work) | |||
641 | __raw_writeb(1, GDROM_DMA_STATUS_REG); | 642 | __raw_writeb(1, GDROM_DMA_STATUS_REG); |
642 | wait_event_interruptible_timeout(request_queue, | 643 | wait_event_interruptible_timeout(request_queue, |
643 | gd.transfer == 0, GDROM_DEFAULT_TIMEOUT); | 644 | gd.transfer == 0, GDROM_DEFAULT_TIMEOUT); |
644 | err = gd.transfer ? -EIO : 0; | 645 | err = gd.transfer ? BLK_STS_IOERR : BLK_STS_OK; |
645 | gd.transfer = 0; | 646 | gd.transfer = 0; |
646 | gd.pending = 0; | 647 | gd.pending = 0; |
647 | /* now seek to take the request spinlock | 648 | /* now seek to take the request spinlock |
@@ -670,11 +671,11 @@ static void gdrom_request(struct request_queue *rq) | |||
670 | break; | 671 | break; |
671 | case REQ_OP_WRITE: | 672 | case REQ_OP_WRITE: |
672 | pr_notice("Read only device - write request ignored\n"); | 673 | pr_notice("Read only device - write request ignored\n"); |
673 | __blk_end_request_all(req, -EIO); | 674 | __blk_end_request_all(req, BLK_STS_IOERR); |
674 | break; | 675 | break; |
675 | default: | 676 | default: |
676 | printk(KERN_DEBUG "gdrom: Non-fs request ignored\n"); | 677 | printk(KERN_DEBUG "gdrom: Non-fs request ignored\n"); |
677 | __blk_end_request_all(req, -EIO); | 678 | __blk_end_request_all(req, BLK_STS_IOERR); |
678 | break; | 679 | break; |
679 | } | 680 | } |
680 | } | 681 | } |
diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c index 5901937284e7..d7a49dcfa85e 100644 --- a/drivers/ide/ide-atapi.c +++ b/drivers/ide/ide-atapi.c | |||
@@ -273,7 +273,7 @@ void ide_retry_pc(ide_drive_t *drive) | |||
273 | ide_requeue_and_plug(drive, failed_rq); | 273 | ide_requeue_and_plug(drive, failed_rq); |
274 | if (ide_queue_sense_rq(drive, pc)) { | 274 | if (ide_queue_sense_rq(drive, pc)) { |
275 | blk_start_request(failed_rq); | 275 | blk_start_request(failed_rq); |
276 | ide_complete_rq(drive, -EIO, blk_rq_bytes(failed_rq)); | 276 | ide_complete_rq(drive, BLK_STS_IOERR, blk_rq_bytes(failed_rq)); |
277 | } | 277 | } |
278 | } | 278 | } |
279 | EXPORT_SYMBOL_GPL(ide_retry_pc); | 279 | EXPORT_SYMBOL_GPL(ide_retry_pc); |
@@ -437,7 +437,8 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive) | |||
437 | 437 | ||
438 | /* No more interrupts */ | 438 | /* No more interrupts */ |
439 | if ((stat & ATA_DRQ) == 0) { | 439 | if ((stat & ATA_DRQ) == 0) { |
440 | int uptodate, error; | 440 | int uptodate; |
441 | blk_status_t error; | ||
441 | 442 | ||
442 | debug_log("Packet command completed, %d bytes transferred\n", | 443 | debug_log("Packet command completed, %d bytes transferred\n", |
443 | blk_rq_bytes(rq)); | 444 | blk_rq_bytes(rq)); |
@@ -490,7 +491,7 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive) | |||
490 | 491 | ||
491 | if (ata_misc_request(rq)) { | 492 | if (ata_misc_request(rq)) { |
492 | scsi_req(rq)->result = 0; | 493 | scsi_req(rq)->result = 0; |
493 | error = 0; | 494 | error = BLK_STS_OK; |
494 | } else { | 495 | } else { |
495 | 496 | ||
496 | if (blk_rq_is_passthrough(rq) && uptodate <= 0) { | 497 | if (blk_rq_is_passthrough(rq) && uptodate <= 0) { |
@@ -498,7 +499,7 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive) | |||
498 | scsi_req(rq)->result = -EIO; | 499 | scsi_req(rq)->result = -EIO; |
499 | } | 500 | } |
500 | 501 | ||
501 | error = uptodate ? 0 : -EIO; | 502 | error = uptodate ? BLK_STS_OK : BLK_STS_IOERR; |
502 | } | 503 | } |
503 | 504 | ||
504 | ide_complete_rq(drive, error, blk_rq_bytes(rq)); | 505 | ide_complete_rq(drive, error, blk_rq_bytes(rq)); |
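Note: the IDE conversions keep two separate error domains: scsi_req(rq)->result continues to carry a negative errno for the submitter, while ide_complete_rq()/ide_end_rq() now take a blk_status_t for the block layer. A hedged sketch of an end-of-command path that sets both, using generic block helpers and illustrative names rather than the ide core's own entry points:

    #include <linux/blkdev.h>
    #include <scsi/scsi_request.h>

    /* Sketch: the passthrough result stays an errno; the completion status
     * handed to the block layer is a blk_status_t.
     */
    static void demo_finish_packet_cmd(struct request *rq, bool uptodate)
    {
            if (!uptodate && blk_rq_is_passthrough(rq) && !scsi_req(rq)->result)
                    scsi_req(rq)->result = -EIO;            /* errno domain */

            blk_end_request_all(rq, uptodate ? BLK_STS_OK : BLK_STS_IOERR);
    }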
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c index 07e5ff3a64c3..d55e44ed82b5 100644 --- a/drivers/ide/ide-cd.c +++ b/drivers/ide/ide-cd.c | |||
@@ -228,7 +228,7 @@ static void ide_cd_complete_failed_rq(ide_drive_t *drive, struct request *rq) | |||
228 | scsi_req(failed)->sense_len = scsi_req(rq)->sense_len; | 228 | scsi_req(failed)->sense_len = scsi_req(rq)->sense_len; |
229 | cdrom_analyze_sense_data(drive, failed); | 229 | cdrom_analyze_sense_data(drive, failed); |
230 | 230 | ||
231 | if (ide_end_rq(drive, failed, -EIO, blk_rq_bytes(failed))) | 231 | if (ide_end_rq(drive, failed, BLK_STS_IOERR, blk_rq_bytes(failed))) |
232 | BUG(); | 232 | BUG(); |
233 | } else | 233 | } else |
234 | cdrom_analyze_sense_data(drive, NULL); | 234 | cdrom_analyze_sense_data(drive, NULL); |
@@ -508,7 +508,7 @@ static bool ide_cd_error_cmd(ide_drive_t *drive, struct ide_cmd *cmd) | |||
508 | nr_bytes -= cmd->last_xfer_len; | 508 | nr_bytes -= cmd->last_xfer_len; |
509 | 509 | ||
510 | if (nr_bytes > 0) { | 510 | if (nr_bytes > 0) { |
511 | ide_complete_rq(drive, 0, nr_bytes); | 511 | ide_complete_rq(drive, BLK_STS_OK, nr_bytes); |
512 | return true; | 512 | return true; |
513 | } | 513 | } |
514 | 514 | ||
@@ -674,7 +674,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive) | |||
674 | out_end: | 674 | out_end: |
675 | if (blk_rq_is_scsi(rq) && rc == 0) { | 675 | if (blk_rq_is_scsi(rq) && rc == 0) { |
676 | scsi_req(rq)->resid_len = 0; | 676 | scsi_req(rq)->resid_len = 0; |
677 | blk_end_request_all(rq, 0); | 677 | blk_end_request_all(rq, BLK_STS_OK); |
678 | hwif->rq = NULL; | 678 | hwif->rq = NULL; |
679 | } else { | 679 | } else { |
680 | if (sense && uptodate) | 680 | if (sense && uptodate) |
@@ -699,7 +699,7 @@ out_end: | |||
699 | scsi_req(rq)->resid_len += cmd->last_xfer_len; | 699 | scsi_req(rq)->resid_len += cmd->last_xfer_len; |
700 | } | 700 | } |
701 | 701 | ||
702 | ide_complete_rq(drive, uptodate ? 0 : -EIO, blk_rq_bytes(rq)); | 702 | ide_complete_rq(drive, uptodate ? BLK_STS_OK : BLK_STS_IOERR, blk_rq_bytes(rq)); |
703 | 703 | ||
704 | if (sense && rc == 2) | 704 | if (sense && rc == 2) |
705 | ide_error(drive, "request sense failure", stat); | 705 | ide_error(drive, "request sense failure", stat); |
@@ -844,7 +844,7 @@ out_end: | |||
844 | if (nsectors == 0) | 844 | if (nsectors == 0) |
845 | nsectors = 1; | 845 | nsectors = 1; |
846 | 846 | ||
847 | ide_complete_rq(drive, uptodate ? 0 : -EIO, nsectors << 9); | 847 | ide_complete_rq(drive, uptodate ? BLK_STS_OK : BLK_STS_IOERR, nsectors << 9); |
848 | 848 | ||
849 | return ide_stopped; | 849 | return ide_stopped; |
850 | } | 850 | } |
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c index 51c81223e56d..54d4d78ca46a 100644 --- a/drivers/ide/ide-dma.c +++ b/drivers/ide/ide-dma.c | |||
@@ -104,7 +104,7 @@ ide_startstop_t ide_dma_intr(ide_drive_t *drive) | |||
104 | if ((cmd->tf_flags & IDE_TFLAG_FS) == 0) | 104 | if ((cmd->tf_flags & IDE_TFLAG_FS) == 0) |
105 | ide_finish_cmd(drive, cmd, stat); | 105 | ide_finish_cmd(drive, cmd, stat); |
106 | else | 106 | else |
107 | ide_complete_rq(drive, 0, | 107 | ide_complete_rq(drive, BLK_STS_OK, |
108 | blk_rq_sectors(cmd->rq) << 9); | 108 | blk_rq_sectors(cmd->rq) << 9); |
109 | return ide_stopped; | 109 | return ide_stopped; |
110 | } | 110 | } |
diff --git a/drivers/ide/ide-eh.c b/drivers/ide/ide-eh.c index 4b7ffd7d158d..47d5f3379748 100644 --- a/drivers/ide/ide-eh.c +++ b/drivers/ide/ide-eh.c | |||
@@ -135,7 +135,7 @@ ide_startstop_t ide_error(ide_drive_t *drive, const char *msg, u8 stat) | |||
135 | return ide_stopped; | 135 | return ide_stopped; |
136 | } | 136 | } |
137 | scsi_req(rq)->result = err; | 137 | scsi_req(rq)->result = err; |
138 | ide_complete_rq(drive, err ? -EIO : 0, blk_rq_bytes(rq)); | 138 | ide_complete_rq(drive, err ? BLK_STS_IOERR : BLK_STS_OK, blk_rq_bytes(rq)); |
139 | return ide_stopped; | 139 | return ide_stopped; |
140 | } | 140 | } |
141 | 141 | ||
@@ -143,7 +143,7 @@ ide_startstop_t ide_error(ide_drive_t *drive, const char *msg, u8 stat) | |||
143 | } | 143 | } |
144 | EXPORT_SYMBOL_GPL(ide_error); | 144 | EXPORT_SYMBOL_GPL(ide_error); |
145 | 145 | ||
146 | static inline void ide_complete_drive_reset(ide_drive_t *drive, int err) | 146 | static inline void ide_complete_drive_reset(ide_drive_t *drive, blk_status_t err) |
147 | { | 147 | { |
148 | struct request *rq = drive->hwif->rq; | 148 | struct request *rq = drive->hwif->rq; |
149 | 149 | ||
@@ -151,7 +151,7 @@ static inline void ide_complete_drive_reset(ide_drive_t *drive, int err) | |||
151 | scsi_req(rq)->cmd[0] == REQ_DRIVE_RESET) { | 151 | scsi_req(rq)->cmd[0] == REQ_DRIVE_RESET) { |
152 | if (err <= 0 && scsi_req(rq)->result == 0) | 152 | if (err <= 0 && scsi_req(rq)->result == 0) |
153 | scsi_req(rq)->result = -EIO; | 153 | scsi_req(rq)->result = -EIO; |
154 | ide_complete_rq(drive, err ? err : 0, blk_rq_bytes(rq)); | 154 | ide_complete_rq(drive, err, blk_rq_bytes(rq)); |
155 | } | 155 | } |
156 | } | 156 | } |
157 | 157 | ||
@@ -191,7 +191,7 @@ static ide_startstop_t atapi_reset_pollfunc(ide_drive_t *drive) | |||
191 | } | 191 | } |
192 | /* done polling */ | 192 | /* done polling */ |
193 | hwif->polling = 0; | 193 | hwif->polling = 0; |
194 | ide_complete_drive_reset(drive, 0); | 194 | ide_complete_drive_reset(drive, BLK_STS_OK); |
195 | return ide_stopped; | 195 | return ide_stopped; |
196 | } | 196 | } |
197 | 197 | ||
@@ -225,7 +225,7 @@ static ide_startstop_t reset_pollfunc(ide_drive_t *drive) | |||
225 | ide_hwif_t *hwif = drive->hwif; | 225 | ide_hwif_t *hwif = drive->hwif; |
226 | const struct ide_port_ops *port_ops = hwif->port_ops; | 226 | const struct ide_port_ops *port_ops = hwif->port_ops; |
227 | u8 tmp; | 227 | u8 tmp; |
228 | int err = 0; | 228 | blk_status_t err = BLK_STS_OK; |
229 | 229 | ||
230 | if (port_ops && port_ops->reset_poll) { | 230 | if (port_ops && port_ops->reset_poll) { |
231 | err = port_ops->reset_poll(drive); | 231 | err = port_ops->reset_poll(drive); |
@@ -247,7 +247,7 @@ static ide_startstop_t reset_pollfunc(ide_drive_t *drive) | |||
247 | printk(KERN_ERR "%s: reset timed-out, status=0x%02x\n", | 247 | printk(KERN_ERR "%s: reset timed-out, status=0x%02x\n", |
248 | hwif->name, tmp); | 248 | hwif->name, tmp); |
249 | drive->failures++; | 249 | drive->failures++; |
250 | err = -EIO; | 250 | err = BLK_STS_IOERR; |
251 | } else { | 251 | } else { |
252 | tmp = ide_read_error(drive); | 252 | tmp = ide_read_error(drive); |
253 | 253 | ||
@@ -257,7 +257,7 @@ static ide_startstop_t reset_pollfunc(ide_drive_t *drive) | |||
257 | } else { | 257 | } else { |
258 | ide_reset_report_error(hwif, tmp); | 258 | ide_reset_report_error(hwif, tmp); |
259 | drive->failures++; | 259 | drive->failures++; |
260 | err = -EIO; | 260 | err = BLK_STS_IOERR; |
261 | } | 261 | } |
262 | } | 262 | } |
263 | out: | 263 | out: |
@@ -392,7 +392,7 @@ static ide_startstop_t do_reset1(ide_drive_t *drive, int do_not_try_atapi) | |||
392 | 392 | ||
393 | if (io_ports->ctl_addr == 0) { | 393 | if (io_ports->ctl_addr == 0) { |
394 | spin_unlock_irqrestore(&hwif->lock, flags); | 394 | spin_unlock_irqrestore(&hwif->lock, flags); |
395 | ide_complete_drive_reset(drive, -ENXIO); | 395 | ide_complete_drive_reset(drive, BLK_STS_IOERR); |
396 | return ide_stopped; | 396 | return ide_stopped; |
397 | } | 397 | } |
398 | 398 | ||
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c index 8ac6048cd2df..627b1f62a749 100644 --- a/drivers/ide/ide-floppy.c +++ b/drivers/ide/ide-floppy.c | |||
@@ -143,7 +143,7 @@ static ide_startstop_t ide_floppy_issue_pc(ide_drive_t *drive, | |||
143 | 143 | ||
144 | drive->failed_pc = NULL; | 144 | drive->failed_pc = NULL; |
145 | drive->pc_callback(drive, 0); | 145 | drive->pc_callback(drive, 0); |
146 | ide_complete_rq(drive, -EIO, done); | 146 | ide_complete_rq(drive, BLK_STS_IOERR, done); |
147 | return ide_stopped; | 147 | return ide_stopped; |
148 | } | 148 | } |
149 | 149 | ||
@@ -248,7 +248,7 @@ static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive, | |||
248 | 248 | ||
249 | if (ata_misc_request(rq)) { | 249 | if (ata_misc_request(rq)) { |
250 | scsi_req(rq)->result = 0; | 250 | scsi_req(rq)->result = 0; |
251 | ide_complete_rq(drive, 0, blk_rq_bytes(rq)); | 251 | ide_complete_rq(drive, BLK_STS_OK, blk_rq_bytes(rq)); |
252 | return ide_stopped; | 252 | return ide_stopped; |
253 | } else | 253 | } else |
254 | goto out_end; | 254 | goto out_end; |
@@ -303,7 +303,7 @@ out_end: | |||
303 | drive->failed_pc = NULL; | 303 | drive->failed_pc = NULL; |
304 | if (blk_rq_is_passthrough(rq) && scsi_req(rq)->result == 0) | 304 | if (blk_rq_is_passthrough(rq) && scsi_req(rq)->result == 0) |
305 | scsi_req(rq)->result = -EIO; | 305 | scsi_req(rq)->result = -EIO; |
306 | ide_complete_rq(drive, -EIO, blk_rq_bytes(rq)); | 306 | ide_complete_rq(drive, BLK_STS_IOERR, blk_rq_bytes(rq)); |
307 | return ide_stopped; | 307 | return ide_stopped; |
308 | } | 308 | } |
309 | 309 | ||
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c index 323af721f8cb..3a234701d92c 100644 --- a/drivers/ide/ide-io.c +++ b/drivers/ide/ide-io.c | |||
@@ -54,7 +54,7 @@ | |||
54 | #include <linux/uaccess.h> | 54 | #include <linux/uaccess.h> |
55 | #include <asm/io.h> | 55 | #include <asm/io.h> |
56 | 56 | ||
57 | int ide_end_rq(ide_drive_t *drive, struct request *rq, int error, | 57 | int ide_end_rq(ide_drive_t *drive, struct request *rq, blk_status_t error, |
58 | unsigned int nr_bytes) | 58 | unsigned int nr_bytes) |
59 | { | 59 | { |
60 | /* | 60 | /* |
@@ -112,7 +112,7 @@ void ide_complete_cmd(ide_drive_t *drive, struct ide_cmd *cmd, u8 stat, u8 err) | |||
112 | } | 112 | } |
113 | } | 113 | } |
114 | 114 | ||
115 | int ide_complete_rq(ide_drive_t *drive, int error, unsigned int nr_bytes) | 115 | int ide_complete_rq(ide_drive_t *drive, blk_status_t error, unsigned int nr_bytes) |
116 | { | 116 | { |
117 | ide_hwif_t *hwif = drive->hwif; | 117 | ide_hwif_t *hwif = drive->hwif; |
118 | struct request *rq = hwif->rq; | 118 | struct request *rq = hwif->rq; |
@@ -122,7 +122,7 @@ int ide_complete_rq(ide_drive_t *drive, int error, unsigned int nr_bytes) | |||
122 | * if failfast is set on a request, override number of sectors | 122 | * if failfast is set on a request, override number of sectors |
123 | * and complete the whole request right now | 123 | * and complete the whole request right now |
124 | */ | 124 | */ |
125 | if (blk_noretry_request(rq) && error <= 0) | 125 | if (blk_noretry_request(rq) && error) |
126 | nr_bytes = blk_rq_sectors(rq) << 9; | 126 | nr_bytes = blk_rq_sectors(rq) << 9; |
127 | 127 | ||
128 | rc = ide_end_rq(drive, rq, error, nr_bytes); | 128 | rc = ide_end_rq(drive, rq, error, nr_bytes); |
@@ -149,7 +149,7 @@ void ide_kill_rq(ide_drive_t *drive, struct request *rq) | |||
149 | scsi_req(rq)->result = -EIO; | 149 | scsi_req(rq)->result = -EIO; |
150 | } | 150 | } |
151 | 151 | ||
152 | ide_complete_rq(drive, -EIO, blk_rq_bytes(rq)); | 152 | ide_complete_rq(drive, BLK_STS_IOERR, blk_rq_bytes(rq)); |
153 | } | 153 | } |
154 | 154 | ||
155 | static void ide_tf_set_specify_cmd(ide_drive_t *drive, struct ide_taskfile *tf) | 155 | static void ide_tf_set_specify_cmd(ide_drive_t *drive, struct ide_taskfile *tf) |
@@ -272,7 +272,7 @@ static ide_startstop_t execute_drive_cmd (ide_drive_t *drive, | |||
272 | printk("%s: DRIVE_CMD (null)\n", drive->name); | 272 | printk("%s: DRIVE_CMD (null)\n", drive->name); |
273 | #endif | 273 | #endif |
274 | scsi_req(rq)->result = 0; | 274 | scsi_req(rq)->result = 0; |
275 | ide_complete_rq(drive, 0, blk_rq_bytes(rq)); | 275 | ide_complete_rq(drive, BLK_STS_OK, blk_rq_bytes(rq)); |
276 | 276 | ||
277 | return ide_stopped; | 277 | return ide_stopped; |
278 | } | 278 | } |
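Note: one subtle consequence shows up in ide_complete_rq() above: blk_status_t is an unsigned, non-negative code, so the old signed comparison for a failed fast-fail request becomes a plain truth test. A small sketch of that check, assuming nothing beyond what the hunk shows:

    #include <linux/blkdev.h>

    /* Sketch: with blk_status_t, zero means success and any nonzero value
     * is a failure, so failfast handling only needs a truth test.
     */
    static unsigned int demo_bytes_to_complete(struct request *rq,
                                               blk_status_t error,
                                               unsigned int nr_bytes)
    {
            if (blk_noretry_request(rq) && error)
                    nr_bytes = blk_rq_sectors(rq) << 9;  /* fail the whole request */
            return nr_bytes;
    }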
diff --git a/drivers/ide/ide-pm.c b/drivers/ide/ide-pm.c index 0977fc1f40ce..08b54bb3b705 100644 --- a/drivers/ide/ide-pm.c +++ b/drivers/ide/ide-pm.c | |||
@@ -40,7 +40,7 @@ int generic_ide_suspend(struct device *dev, pm_message_t mesg) | |||
40 | return ret; | 40 | return ret; |
41 | } | 41 | } |
42 | 42 | ||
43 | static void ide_end_sync_rq(struct request *rq, int error) | 43 | static void ide_end_sync_rq(struct request *rq, blk_status_t error) |
44 | { | 44 | { |
45 | complete(rq->end_io_data); | 45 | complete(rq->end_io_data); |
46 | } | 46 | } |
@@ -57,7 +57,7 @@ static int ide_pm_execute_rq(struct request *rq) | |||
57 | if (unlikely(blk_queue_dying(q))) { | 57 | if (unlikely(blk_queue_dying(q))) { |
58 | rq->rq_flags |= RQF_QUIET; | 58 | rq->rq_flags |= RQF_QUIET; |
59 | scsi_req(rq)->result = -ENXIO; | 59 | scsi_req(rq)->result = -ENXIO; |
60 | __blk_end_request_all(rq, 0); | 60 | __blk_end_request_all(rq, BLK_STS_OK); |
61 | spin_unlock_irq(q->queue_lock); | 61 | spin_unlock_irq(q->queue_lock); |
62 | return -ENXIO; | 62 | return -ENXIO; |
63 | } | 63 | } |
@@ -235,7 +235,7 @@ void ide_complete_pm_rq(ide_drive_t *drive, struct request *rq) | |||
235 | 235 | ||
236 | drive->hwif->rq = NULL; | 236 | drive->hwif->rq = NULL; |
237 | 237 | ||
238 | if (blk_end_request(rq, 0, 0)) | 238 | if (blk_end_request(rq, BLK_STS_OK, 0)) |
239 | BUG(); | 239 | BUG(); |
240 | } | 240 | } |
241 | 241 | ||
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c index a0651f948b76..4d062c568777 100644 --- a/drivers/ide/ide-tape.c +++ b/drivers/ide/ide-tape.c | |||
@@ -474,7 +474,7 @@ static ide_startstop_t ide_tape_issue_pc(ide_drive_t *drive, | |||
474 | 474 | ||
475 | drive->failed_pc = NULL; | 475 | drive->failed_pc = NULL; |
476 | drive->pc_callback(drive, 0); | 476 | drive->pc_callback(drive, 0); |
477 | ide_complete_rq(drive, -EIO, blk_rq_bytes(rq)); | 477 | ide_complete_rq(drive, BLK_STS_IOERR, blk_rq_bytes(rq)); |
478 | return ide_stopped; | 478 | return ide_stopped; |
479 | } | 479 | } |
480 | ide_debug_log(IDE_DBG_SENSE, "retry #%d, cmd: 0x%02x", pc->retries, | 480 | ide_debug_log(IDE_DBG_SENSE, "retry #%d, cmd: 0x%02x", pc->retries, |
diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c index d71199d23c9e..ab1a32cdcb0a 100644 --- a/drivers/ide/ide-taskfile.c +++ b/drivers/ide/ide-taskfile.c | |||
@@ -318,7 +318,7 @@ static void ide_error_cmd(ide_drive_t *drive, struct ide_cmd *cmd) | |||
318 | } | 318 | } |
319 | 319 | ||
320 | if (nr_bytes > 0) | 320 | if (nr_bytes > 0) |
321 | ide_complete_rq(drive, 0, nr_bytes); | 321 | ide_complete_rq(drive, BLK_STS_OK, nr_bytes); |
322 | } | 322 | } |
323 | } | 323 | } |
324 | 324 | ||
@@ -336,7 +336,7 @@ void ide_finish_cmd(ide_drive_t *drive, struct ide_cmd *cmd, u8 stat) | |||
336 | ide_driveid_update(drive); | 336 | ide_driveid_update(drive); |
337 | } | 337 | } |
338 | 338 | ||
339 | ide_complete_rq(drive, err ? -EIO : 0, blk_rq_bytes(rq)); | 339 | ide_complete_rq(drive, err ? BLK_STS_IOERR : BLK_STS_OK, blk_rq_bytes(rq)); |
340 | } | 340 | } |
341 | 341 | ||
342 | /* | 342 | /* |
@@ -394,7 +394,7 @@ out_end: | |||
394 | if ((cmd->tf_flags & IDE_TFLAG_FS) == 0) | 394 | if ((cmd->tf_flags & IDE_TFLAG_FS) == 0) |
395 | ide_finish_cmd(drive, cmd, stat); | 395 | ide_finish_cmd(drive, cmd, stat); |
396 | else | 396 | else |
397 | ide_complete_rq(drive, 0, blk_rq_sectors(cmd->rq) << 9); | 397 | ide_complete_rq(drive, BLK_STS_OK, blk_rq_sectors(cmd->rq) << 9); |
398 | return ide_stopped; | 398 | return ide_stopped; |
399 | out_err: | 399 | out_err: |
400 | ide_error_cmd(drive, cmd); | 400 | ide_error_cmd(drive, cmd); |
diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c index 6a1849bb476c..57eea5a9047f 100644 --- a/drivers/ide/siimage.c +++ b/drivers/ide/siimage.c | |||
@@ -406,7 +406,7 @@ static int siimage_dma_test_irq(ide_drive_t *drive) | |||
406 | * yet. | 406 | * yet. |
407 | */ | 407 | */ |
408 | 408 | ||
409 | static int sil_sata_reset_poll(ide_drive_t *drive) | 409 | static blk_status_t sil_sata_reset_poll(ide_drive_t *drive) |
410 | { | 410 | { |
411 | ide_hwif_t *hwif = drive->hwif; | 411 | ide_hwif_t *hwif = drive->hwif; |
412 | void __iomem *sata_status_addr | 412 | void __iomem *sata_status_addr |
@@ -419,11 +419,11 @@ static int sil_sata_reset_poll(ide_drive_t *drive) | |||
419 | if ((sata_stat & 0x03) != 0x03) { | 419 | if ((sata_stat & 0x03) != 0x03) { |
420 | printk(KERN_WARNING "%s: reset phy dead, status=0x%08x\n", | 420 | printk(KERN_WARNING "%s: reset phy dead, status=0x%08x\n", |
421 | hwif->name, sata_stat); | 421 | hwif->name, sata_stat); |
422 | return -ENXIO; | 422 | return BLK_STS_IOERR; |
423 | } | 423 | } |
424 | } | 424 | } |
425 | 425 | ||
426 | return 0; | 426 | return BLK_STS_OK; |
427 | } | 427 | } |
428 | 428 | ||
429 | /** | 429 | /** |
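Note: the signature change ripples into port-op callbacks as well: siimage's ->reset_poll() now reports a blk_status_t, with a dead PHY surfacing as BLK_STS_IOERR rather than -ENXIO. A minimal sketch of such a poll callback, with an invented status check:

    #include <linux/blkdev.h>

    /* Sketch: poll callbacks in the reset path now return a block status. */
    static blk_status_t demo_reset_poll(u32 phy_status)
    {
            if ((phy_status & 0x03) != 0x03)
                    return BLK_STS_IOERR;   /* PHY not ready */
            return BLK_STS_OK;
    }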
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index ceeeb495d01c..39262e344ae1 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c | |||
@@ -1449,22 +1449,15 @@ static void activate_path_work(struct work_struct *work) | |||
1449 | activate_or_offline_path(pgpath); | 1449 | activate_or_offline_path(pgpath); |
1450 | } | 1450 | } |
1451 | 1451 | ||
1452 | static int noretry_error(int error) | 1452 | static int noretry_error(blk_status_t error) |
1453 | { | 1453 | { |
1454 | switch (error) { | 1454 | switch (error) { |
1455 | case -EBADE: | 1455 | case BLK_STS_NOTSUPP: |
1456 | /* | 1456 | case BLK_STS_NOSPC: |
1457 | * EBADE signals an reservation conflict. | 1457 | case BLK_STS_TARGET: |
1458 | * We shouldn't fail the path here as we can communicate with | 1458 | case BLK_STS_NEXUS: |
1459 | * the target. We should failover to the next path, but in | 1459 | case BLK_STS_MEDIUM: |
1460 | * doing so we might be causing a ping-pong between paths. | 1460 | case BLK_STS_RESOURCE: |
1461 | * So just return the reservation conflict error. | ||
1462 | */ | ||
1463 | case -EOPNOTSUPP: | ||
1464 | case -EREMOTEIO: | ||
1465 | case -EILSEQ: | ||
1466 | case -ENODATA: | ||
1467 | case -ENOSPC: | ||
1468 | return 1; | 1461 | return 1; |
1469 | } | 1462 | } |
1470 | 1463 | ||
@@ -1473,7 +1466,7 @@ static int noretry_error(int error) | |||
1473 | } | 1466 | } |
1474 | 1467 | ||
1475 | static int multipath_end_io(struct dm_target *ti, struct request *clone, | 1468 | static int multipath_end_io(struct dm_target *ti, struct request *clone, |
1476 | int error, union map_info *map_context) | 1469 | blk_status_t error, union map_info *map_context) |
1477 | { | 1470 | { |
1478 | struct dm_mpath_io *mpio = get_mpio(map_context); | 1471 | struct dm_mpath_io *mpio = get_mpio(map_context); |
1479 | struct pgpath *pgpath = mpio->pgpath; | 1472 | struct pgpath *pgpath = mpio->pgpath; |
@@ -1500,7 +1493,7 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone, | |||
1500 | 1493 | ||
1501 | if (atomic_read(&m->nr_valid_paths) == 0 && | 1494 | if (atomic_read(&m->nr_valid_paths) == 0 && |
1502 | !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) { | 1495 | !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) { |
1503 | if (error == -EIO) | 1496 | if (error == BLK_STS_IOERR) |
1504 | dm_report_EIO(m); | 1497 | dm_report_EIO(m); |
1505 | /* complete with the original error */ | 1498 | /* complete with the original error */ |
1506 | r = DM_ENDIO_DONE; | 1499 | r = DM_ENDIO_DONE; |
@@ -1525,7 +1518,7 @@ static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone, int *er | |||
1525 | unsigned long flags; | 1518 | unsigned long flags; |
1526 | int r = DM_ENDIO_DONE; | 1519 | int r = DM_ENDIO_DONE; |
1527 | 1520 | ||
1528 | if (!*error || noretry_error(*error)) | 1521 | if (!*error || noretry_error(errno_to_blk_status(*error))) |
1529 | goto done; | 1522 | goto done; |
1530 | 1523 | ||
1531 | if (pgpath) | 1524 | if (pgpath) |
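The dm-mpath hunk above replaces the errno switch in noretry_error() with BLK_STS_* cases and has the bio completion path translate bio->bi_error through errno_to_blk_status() before deciding whether to retry. A minimal sketch of that decision pattern under the same assumptions; my_status_is_noretry() and my_bio_error_is_noretry() are illustrative names, not part of the patch:

#include <linux/blk_types.h>
#include <linux/blkdev.h>

/*
 * Sketch only: mirrors the retry decision from the reworked
 * noretry_error().  Returns true when retrying on another path
 * cannot help, so the I/O should simply be completed.
 */
static bool my_status_is_noretry(blk_status_t status)
{
        switch (status) {
        case BLK_STS_NOTSUPP:
        case BLK_STS_NOSPC:
        case BLK_STS_TARGET:
        case BLK_STS_NEXUS:
        case BLK_STS_MEDIUM:
        case BLK_STS_RESOURCE:
                return true;
        default:
                return false;
        }
}

/* Bio-based completion still sees an errno at this point, so the
 * check goes through errno_to_blk_status() first. */
static bool my_bio_error_is_noretry(int bi_error)
{
        return my_status_is_noretry(errno_to_blk_status(bi_error));
}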
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c index b639fa7246ee..bee334389173 100644 --- a/drivers/md/dm-rq.c +++ b/drivers/md/dm-rq.c | |||
@@ -119,7 +119,7 @@ static void end_clone_bio(struct bio *clone) | |||
119 | struct dm_rq_target_io *tio = info->tio; | 119 | struct dm_rq_target_io *tio = info->tio; |
120 | struct bio *bio = info->orig; | 120 | struct bio *bio = info->orig; |
121 | unsigned int nr_bytes = info->orig->bi_iter.bi_size; | 121 | unsigned int nr_bytes = info->orig->bi_iter.bi_size; |
122 | int error = clone->bi_error; | 122 | blk_status_t error = errno_to_blk_status(clone->bi_error); |
123 | 123 | ||
124 | bio_put(clone); | 124 | bio_put(clone); |
125 | 125 | ||
@@ -158,7 +158,7 @@ static void end_clone_bio(struct bio *clone) | |||
158 | * Do not use blk_end_request() here, because it may complete | 158 | * Do not use blk_end_request() here, because it may complete |
159 | * the original request before the clone, and break the ordering. | 159 | * the original request before the clone, and break the ordering. |
160 | */ | 160 | */ |
161 | blk_update_request(tio->orig, 0, nr_bytes); | 161 | blk_update_request(tio->orig, BLK_STS_OK, nr_bytes); |
162 | } | 162 | } |
163 | 163 | ||
164 | static struct dm_rq_target_io *tio_from_request(struct request *rq) | 164 | static struct dm_rq_target_io *tio_from_request(struct request *rq) |
@@ -216,7 +216,7 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue) | |||
216 | * Must be called without clone's queue lock held, | 216 | * Must be called without clone's queue lock held, |
217 | * see end_clone_request() for more details. | 217 | * see end_clone_request() for more details. |
218 | */ | 218 | */ |
219 | static void dm_end_request(struct request *clone, int error) | 219 | static void dm_end_request(struct request *clone, blk_status_t error) |
220 | { | 220 | { |
221 | int rw = rq_data_dir(clone); | 221 | int rw = rq_data_dir(clone); |
222 | struct dm_rq_target_io *tio = clone->end_io_data; | 222 | struct dm_rq_target_io *tio = clone->end_io_data; |
@@ -285,7 +285,7 @@ static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_ | |||
285 | rq_completed(md, rw, false); | 285 | rq_completed(md, rw, false); |
286 | } | 286 | } |
287 | 287 | ||
288 | static void dm_done(struct request *clone, int error, bool mapped) | 288 | static void dm_done(struct request *clone, blk_status_t error, bool mapped) |
289 | { | 289 | { |
290 | int r = DM_ENDIO_DONE; | 290 | int r = DM_ENDIO_DONE; |
291 | struct dm_rq_target_io *tio = clone->end_io_data; | 291 | struct dm_rq_target_io *tio = clone->end_io_data; |
@@ -298,7 +298,7 @@ static void dm_done(struct request *clone, int error, bool mapped) | |||
298 | r = rq_end_io(tio->ti, clone, error, &tio->info); | 298 | r = rq_end_io(tio->ti, clone, error, &tio->info); |
299 | } | 299 | } |
300 | 300 | ||
301 | if (unlikely(error == -EREMOTEIO)) { | 301 | if (unlikely(error == BLK_STS_TARGET)) { |
302 | if (req_op(clone) == REQ_OP_WRITE_SAME && | 302 | if (req_op(clone) == REQ_OP_WRITE_SAME && |
303 | !clone->q->limits.max_write_same_sectors) | 303 | !clone->q->limits.max_write_same_sectors) |
304 | disable_write_same(tio->md); | 304 | disable_write_same(tio->md); |
@@ -358,7 +358,7 @@ static void dm_softirq_done(struct request *rq) | |||
358 | * Complete the clone and the original request with the error status | 358 | * Complete the clone and the original request with the error status |
359 | * through softirq context. | 359 | * through softirq context. |
360 | */ | 360 | */ |
361 | static void dm_complete_request(struct request *rq, int error) | 361 | static void dm_complete_request(struct request *rq, blk_status_t error) |
362 | { | 362 | { |
363 | struct dm_rq_target_io *tio = tio_from_request(rq); | 363 | struct dm_rq_target_io *tio = tio_from_request(rq); |
364 | 364 | ||
@@ -375,7 +375,7 @@ static void dm_complete_request(struct request *rq, int error) | |||
375 | * Target's rq_end_io() function isn't called. | 375 | * Target's rq_end_io() function isn't called. |
376 | * This may be used when the target's map_rq() or clone_and_map_rq() functions fail. | 376 | * This may be used when the target's map_rq() or clone_and_map_rq() functions fail. |
377 | */ | 377 | */ |
378 | static void dm_kill_unmapped_request(struct request *rq, int error) | 378 | static void dm_kill_unmapped_request(struct request *rq, blk_status_t error) |
379 | { | 379 | { |
380 | rq->rq_flags |= RQF_FAILED; | 380 | rq->rq_flags |= RQF_FAILED; |
381 | dm_complete_request(rq, error); | 381 | dm_complete_request(rq, error); |
@@ -384,7 +384,7 @@ static void dm_kill_unmapped_request(struct request *rq, int error) | |||
384 | /* | 384 | /* |
385 | * Called with the clone's queue lock held (in the case of .request_fn) | 385 | * Called with the clone's queue lock held (in the case of .request_fn) |
386 | */ | 386 | */ |
387 | static void end_clone_request(struct request *clone, int error) | 387 | static void end_clone_request(struct request *clone, blk_status_t error) |
388 | { | 388 | { |
389 | struct dm_rq_target_io *tio = clone->end_io_data; | 389 | struct dm_rq_target_io *tio = clone->end_io_data; |
390 | 390 | ||
@@ -401,7 +401,7 @@ static void end_clone_request(struct request *clone, int error) | |||
401 | 401 | ||
402 | static void dm_dispatch_clone_request(struct request *clone, struct request *rq) | 402 | static void dm_dispatch_clone_request(struct request *clone, struct request *rq) |
403 | { | 403 | { |
404 | int r; | 404 | blk_status_t r; |
405 | 405 | ||
406 | if (blk_queue_io_stat(clone->q)) | 406 | if (blk_queue_io_stat(clone->q)) |
407 | clone->rq_flags |= RQF_IO_STAT; | 407 | clone->rq_flags |= RQF_IO_STAT; |
@@ -506,7 +506,7 @@ static int map_request(struct dm_rq_target_io *tio) | |||
506 | break; | 506 | break; |
507 | case DM_MAPIO_KILL: | 507 | case DM_MAPIO_KILL: |
508 | /* The target wants to complete the I/O */ | 508 | /* The target wants to complete the I/O */ |
509 | dm_kill_unmapped_request(rq, -EIO); | 509 | dm_kill_unmapped_request(rq, BLK_STS_IOERR); |
510 | break; | 510 | break; |
511 | default: | 511 | default: |
512 | DMWARN("unimplemented target map return value: %d", r); | 512 | DMWARN("unimplemented target map return value: %d", r); |
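In dm-rq the per-clone error is now stored as a blk_status_t in struct dm_rq_target_io, and dm_done() tests BLK_STS_TARGET rather than -EREMOTEIO before disabling WRITE SAME. A hedged sketch of a request-based target completion hook with the converted signature; "my_target" is hypothetical, only the prototype shape follows the multipath_end_io() hunk above:

#include <linux/blkdev.h>
#include <linux/device-mapper.h>

/*
 * Sketch of a request-based dm target completion hook after the
 * conversion; names are illustrative, not from the patch.
 */
static int my_target_end_io(struct dm_target *ti, struct request *clone,
                            blk_status_t error, union map_info *map_context)
{
        if (error == BLK_STS_TARGET)
                return DM_ENDIO_DONE;      /* permanent target failure, no retry */
        if (error == BLK_STS_RESOURCE)
                return DM_ENDIO_REQUEUE;   /* transient, ask core to requeue */
        return DM_ENDIO_DONE;
}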
diff --git a/drivers/md/dm-rq.h b/drivers/md/dm-rq.h index f0020d21b95f..9813922e4fe5 100644 --- a/drivers/md/dm-rq.h +++ b/drivers/md/dm-rq.h | |||
@@ -24,7 +24,7 @@ struct dm_rq_target_io { | |||
24 | struct dm_target *ti; | 24 | struct dm_target *ti; |
25 | struct request *orig, *clone; | 25 | struct request *orig, *clone; |
26 | struct kthread_work work; | 26 | struct kthread_work work; |
27 | int error; | 27 | blk_status_t error; |
28 | union map_info info; | 28 | union map_info info; |
29 | struct dm_stats_aux stats_aux; | 29 | struct dm_stats_aux stats_aux; |
30 | unsigned long duration_jiffies; | 30 | unsigned long duration_jiffies; |
diff --git a/drivers/memstick/core/ms_block.c b/drivers/memstick/core/ms_block.c index 99e651c27fb7..22de7f5ed032 100644 --- a/drivers/memstick/core/ms_block.c +++ b/drivers/memstick/core/ms_block.c | |||
@@ -1921,12 +1921,13 @@ static void msb_io_work(struct work_struct *work) | |||
1921 | spin_lock_irqsave(&msb->q_lock, flags); | 1921 | spin_lock_irqsave(&msb->q_lock, flags); |
1922 | 1922 | ||
1923 | if (len) | 1923 | if (len) |
1924 | if (!__blk_end_request(msb->req, 0, len)) | 1924 | if (!__blk_end_request(msb->req, BLK_STS_OK, len)) |
1925 | msb->req = NULL; | 1925 | msb->req = NULL; |
1926 | 1926 | ||
1927 | if (error && msb->req) { | 1927 | if (error && msb->req) { |
1928 | blk_status_t ret = errno_to_blk_status(error); | ||
1928 | dbg_verbose("IO: ending one sector of the request with error"); | 1929 | dbg_verbose("IO: ending one sector of the request with error"); |
1929 | if (!__blk_end_request(msb->req, error, msb->page_size)) | 1930 | if (!__blk_end_request(msb->req, ret, msb->page_size)) |
1930 | msb->req = NULL; | 1931 | msb->req = NULL; |
1931 | } | 1932 | } |
1932 | 1933 | ||
@@ -2014,7 +2015,7 @@ static void msb_submit_req(struct request_queue *q) | |||
2014 | WARN_ON(!msb->io_queue_stopped); | 2015 | WARN_ON(!msb->io_queue_stopped); |
2015 | 2016 | ||
2016 | while ((req = blk_fetch_request(q)) != NULL) | 2017 | while ((req = blk_fetch_request(q)) != NULL) |
2017 | __blk_end_request_all(req, -ENODEV); | 2018 | __blk_end_request_all(req, BLK_STS_IOERR); |
2018 | return; | 2019 | return; |
2019 | } | 2020 | } |
2020 | 2021 | ||
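ms_block keeps its internal int error codes and converts only at the completion boundary with errno_to_blk_status(). A small sketch of that boundary pattern for a legacy (non-mq) driver, with an illustrative helper name:

#include <linux/blkdev.h>

/*
 * Sketch of the conversion-at-the-boundary pattern from the memstick
 * hunks: the driver keeps errno internally and translates to
 * blk_status_t only when completing.  Returns true while the request
 * still has unfinished chunks, like __blk_end_request() itself.
 */
static bool my_complete_chunk(struct request *req, int error,
                              unsigned int bytes)
{
        return __blk_end_request(req, errno_to_blk_status(error), bytes);
}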
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c index c00d8a266878..8897962781bb 100644 --- a/drivers/memstick/core/mspro_block.c +++ b/drivers/memstick/core/mspro_block.c | |||
@@ -709,7 +709,8 @@ try_again: | |||
709 | msb->req_sg); | 709 | msb->req_sg); |
710 | 710 | ||
711 | if (!msb->seg_count) { | 711 | if (!msb->seg_count) { |
712 | chunk = __blk_end_request_cur(msb->block_req, -ENOMEM); | 712 | chunk = __blk_end_request_cur(msb->block_req, |
713 | BLK_STS_RESOURCE); | ||
713 | continue; | 714 | continue; |
714 | } | 715 | } |
715 | 716 | ||
@@ -776,7 +777,8 @@ static int mspro_block_complete_req(struct memstick_dev *card, int error) | |||
776 | if (error && !t_len) | 777 | if (error && !t_len) |
777 | t_len = blk_rq_cur_bytes(msb->block_req); | 778 | t_len = blk_rq_cur_bytes(msb->block_req); |
778 | 779 | ||
779 | chunk = __blk_end_request(msb->block_req, error, t_len); | 780 | chunk = __blk_end_request(msb->block_req, |
781 | errno_to_blk_status(error), t_len); | ||
780 | 782 | ||
781 | error = mspro_block_issue_req(card, chunk); | 783 | error = mspro_block_issue_req(card, chunk); |
782 | 784 | ||
@@ -838,7 +840,7 @@ static void mspro_block_submit_req(struct request_queue *q) | |||
838 | 840 | ||
839 | if (msb->eject) { | 841 | if (msb->eject) { |
840 | while ((req = blk_fetch_request(q)) != NULL) | 842 | while ((req = blk_fetch_request(q)) != NULL) |
841 | __blk_end_request_all(req, -ENODEV); | 843 | __blk_end_request_all(req, BLK_STS_IOERR); |
842 | 844 | ||
843 | return; | 845 | return; |
844 | } | 846 | } |
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index 8273b078686d..6ff94a948a4b 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c | |||
@@ -1184,9 +1184,10 @@ static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req) | |||
1184 | struct mmc_card *card = md->queue.card; | 1184 | struct mmc_card *card = md->queue.card; |
1185 | unsigned int from, nr, arg; | 1185 | unsigned int from, nr, arg; |
1186 | int err = 0, type = MMC_BLK_DISCARD; | 1186 | int err = 0, type = MMC_BLK_DISCARD; |
1187 | blk_status_t status = BLK_STS_OK; | ||
1187 | 1188 | ||
1188 | if (!mmc_can_erase(card)) { | 1189 | if (!mmc_can_erase(card)) { |
1189 | err = -EOPNOTSUPP; | 1190 | status = BLK_STS_NOTSUPP; |
1190 | goto fail; | 1191 | goto fail; |
1191 | } | 1192 | } |
1192 | 1193 | ||
@@ -1212,10 +1213,12 @@ static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req) | |||
1212 | if (!err) | 1213 | if (!err) |
1213 | err = mmc_erase(card, from, nr, arg); | 1214 | err = mmc_erase(card, from, nr, arg); |
1214 | } while (err == -EIO && !mmc_blk_reset(md, card->host, type)); | 1215 | } while (err == -EIO && !mmc_blk_reset(md, card->host, type)); |
1215 | if (!err) | 1216 | if (err) |
1217 | status = BLK_STS_IOERR; | ||
1218 | else | ||
1216 | mmc_blk_reset_success(md, type); | 1219 | mmc_blk_reset_success(md, type); |
1217 | fail: | 1220 | fail: |
1218 | blk_end_request(req, err, blk_rq_bytes(req)); | 1221 | blk_end_request(req, status, blk_rq_bytes(req)); |
1219 | } | 1222 | } |
1220 | 1223 | ||
1221 | static void mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq, | 1224 | static void mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq, |
@@ -1225,9 +1228,10 @@ static void mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq, | |||
1225 | struct mmc_card *card = md->queue.card; | 1228 | struct mmc_card *card = md->queue.card; |
1226 | unsigned int from, nr, arg; | 1229 | unsigned int from, nr, arg; |
1227 | int err = 0, type = MMC_BLK_SECDISCARD; | 1230 | int err = 0, type = MMC_BLK_SECDISCARD; |
1231 | blk_status_t status = BLK_STS_OK; | ||
1228 | 1232 | ||
1229 | if (!(mmc_can_secure_erase_trim(card))) { | 1233 | if (!(mmc_can_secure_erase_trim(card))) { |
1230 | err = -EOPNOTSUPP; | 1234 | status = BLK_STS_NOTSUPP; |
1231 | goto out; | 1235 | goto out; |
1232 | } | 1236 | } |
1233 | 1237 | ||
@@ -1254,8 +1258,10 @@ retry: | |||
1254 | err = mmc_erase(card, from, nr, arg); | 1258 | err = mmc_erase(card, from, nr, arg); |
1255 | if (err == -EIO) | 1259 | if (err == -EIO) |
1256 | goto out_retry; | 1260 | goto out_retry; |
1257 | if (err) | 1261 | if (err) { |
1262 | status = BLK_STS_IOERR; | ||
1258 | goto out; | 1263 | goto out; |
1264 | } | ||
1259 | 1265 | ||
1260 | if (arg == MMC_SECURE_TRIM1_ARG) { | 1266 | if (arg == MMC_SECURE_TRIM1_ARG) { |
1261 | if (card->quirks & MMC_QUIRK_INAND_CMD38) { | 1267 | if (card->quirks & MMC_QUIRK_INAND_CMD38) { |
@@ -1270,8 +1276,10 @@ retry: | |||
1270 | err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG); | 1276 | err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG); |
1271 | if (err == -EIO) | 1277 | if (err == -EIO) |
1272 | goto out_retry; | 1278 | goto out_retry; |
1273 | if (err) | 1279 | if (err) { |
1280 | status = BLK_STS_IOERR; | ||
1274 | goto out; | 1281 | goto out; |
1282 | } | ||
1275 | } | 1283 | } |
1276 | 1284 | ||
1277 | out_retry: | 1285 | out_retry: |
@@ -1280,7 +1288,7 @@ out_retry: | |||
1280 | if (!err) | 1288 | if (!err) |
1281 | mmc_blk_reset_success(md, type); | 1289 | mmc_blk_reset_success(md, type); |
1282 | out: | 1290 | out: |
1283 | blk_end_request(req, err, blk_rq_bytes(req)); | 1291 | blk_end_request(req, status, blk_rq_bytes(req)); |
1284 | } | 1292 | } |
1285 | 1293 | ||
1286 | static void mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req) | 1294 | static void mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req) |
@@ -1290,10 +1298,7 @@ static void mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req) | |||
1290 | int ret = 0; | 1298 | int ret = 0; |
1291 | 1299 | ||
1292 | ret = mmc_flush_cache(card); | 1300 | ret = mmc_flush_cache(card); |
1293 | if (ret) | 1301 | blk_end_request_all(req, ret ? BLK_STS_IOERR : BLK_STS_OK); |
1294 | ret = -EIO; | ||
1295 | |||
1296 | blk_end_request_all(req, ret); | ||
1297 | } | 1302 | } |
1298 | 1303 | ||
1299 | /* | 1304 | /* |
@@ -1641,7 +1646,7 @@ static void mmc_blk_rw_cmd_abort(struct mmc_queue *mq, struct mmc_card *card, | |||
1641 | { | 1646 | { |
1642 | if (mmc_card_removed(card)) | 1647 | if (mmc_card_removed(card)) |
1643 | req->rq_flags |= RQF_QUIET; | 1648 | req->rq_flags |= RQF_QUIET; |
1644 | while (blk_end_request(req, -EIO, blk_rq_cur_bytes(req))); | 1649 | while (blk_end_request(req, BLK_STS_IOERR, blk_rq_cur_bytes(req))); |
1645 | mmc_queue_req_free(mq, mqrq); | 1650 | mmc_queue_req_free(mq, mqrq); |
1646 | } | 1651 | } |
1647 | 1652 | ||
@@ -1661,7 +1666,7 @@ static void mmc_blk_rw_try_restart(struct mmc_queue *mq, struct request *req, | |||
1661 | */ | 1666 | */ |
1662 | if (mmc_card_removed(mq->card)) { | 1667 | if (mmc_card_removed(mq->card)) { |
1663 | req->rq_flags |= RQF_QUIET; | 1668 | req->rq_flags |= RQF_QUIET; |
1664 | blk_end_request_all(req, -EIO); | 1669 | blk_end_request_all(req, BLK_STS_IOERR); |
1665 | mmc_queue_req_free(mq, mqrq); | 1670 | mmc_queue_req_free(mq, mqrq); |
1666 | return; | 1671 | return; |
1667 | } | 1672 | } |
@@ -1743,7 +1748,7 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req) | |||
1743 | */ | 1748 | */ |
1744 | mmc_blk_reset_success(md, type); | 1749 | mmc_blk_reset_success(md, type); |
1745 | 1750 | ||
1746 | req_pending = blk_end_request(old_req, 0, | 1751 | req_pending = blk_end_request(old_req, BLK_STS_OK, |
1747 | brq->data.bytes_xfered); | 1752 | brq->data.bytes_xfered); |
1748 | /* | 1753 | /* |
1749 | * If the blk_end_request function returns non-zero even | 1754 | * If the blk_end_request function returns non-zero even |
@@ -1811,7 +1816,7 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req) | |||
1811 | * time, so we only reach here after trying to | 1816 | * time, so we only reach here after trying to |
1812 | * read a single sector. | 1817 | * read a single sector. |
1813 | */ | 1818 | */ |
1814 | req_pending = blk_end_request(old_req, -EIO, | 1819 | req_pending = blk_end_request(old_req, BLK_STS_IOERR, |
1815 | brq->data.blksz); | 1820 | brq->data.blksz); |
1816 | if (!req_pending) { | 1821 | if (!req_pending) { |
1817 | mmc_queue_req_free(mq, mq_rq); | 1822 | mmc_queue_req_free(mq, mq_rq); |
@@ -1860,7 +1865,7 @@ void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) | |||
1860 | ret = mmc_blk_part_switch(card, md); | 1865 | ret = mmc_blk_part_switch(card, md); |
1861 | if (ret) { | 1866 | if (ret) { |
1862 | if (req) { | 1867 | if (req) { |
1863 | blk_end_request_all(req, -EIO); | 1868 | blk_end_request_all(req, BLK_STS_IOERR); |
1864 | } | 1869 | } |
1865 | goto out; | 1870 | goto out; |
1866 | } | 1871 | } |
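The mmc hunks keep int err for the MMC-core return values but carry a separate blk_status_t for the block-layer completion, so raw errnos such as -EOPNOTSUPP no longer reach blk_end_request(). A sketch of the same two-variable split, assuming an illustrative my_complete_discard():

#include <linux/blkdev.h>

/*
 * Sketch of the split used by the mmc discard hunks: errno for the
 * card-level helpers, blk_status_t for the block-layer completion.
 */
static void my_complete_discard(struct request *req, bool supported, int err)
{
        blk_status_t status = BLK_STS_OK;

        if (!supported)
                status = BLK_STS_NOTSUPP;      /* was -EOPNOTSUPP */
        else if (err)
                status = BLK_STS_IOERR;        /* any other failure */

        blk_end_request(req, status, blk_rq_bytes(req));
}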
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c index 5c37b6be3e7b..7f20298d892b 100644 --- a/drivers/mmc/core/queue.c +++ b/drivers/mmc/core/queue.c | |||
@@ -133,7 +133,7 @@ static void mmc_request_fn(struct request_queue *q) | |||
133 | if (!mq) { | 133 | if (!mq) { |
134 | while ((req = blk_fetch_request(q)) != NULL) { | 134 | while ((req = blk_fetch_request(q)) != NULL) { |
135 | req->rq_flags |= RQF_QUIET; | 135 | req->rq_flags |= RQF_QUIET; |
136 | __blk_end_request_all(req, -EIO); | 136 | __blk_end_request_all(req, BLK_STS_IOERR); |
137 | } | 137 | } |
138 | return; | 138 | return; |
139 | } | 139 | } |
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c index 6b8d5cd7dbf6..91c17fba7659 100644 --- a/drivers/mtd/mtd_blkdevs.c +++ b/drivers/mtd/mtd_blkdevs.c | |||
@@ -73,7 +73,7 @@ static void blktrans_dev_put(struct mtd_blktrans_dev *dev) | |||
73 | } | 73 | } |
74 | 74 | ||
75 | 75 | ||
76 | static int do_blktrans_request(struct mtd_blktrans_ops *tr, | 76 | static blk_status_t do_blktrans_request(struct mtd_blktrans_ops *tr, |
77 | struct mtd_blktrans_dev *dev, | 77 | struct mtd_blktrans_dev *dev, |
78 | struct request *req) | 78 | struct request *req) |
79 | { | 79 | { |
@@ -84,33 +84,37 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr, | |||
84 | nsect = blk_rq_cur_bytes(req) >> tr->blkshift; | 84 | nsect = blk_rq_cur_bytes(req) >> tr->blkshift; |
85 | buf = bio_data(req->bio); | 85 | buf = bio_data(req->bio); |
86 | 86 | ||
87 | if (req_op(req) == REQ_OP_FLUSH) | 87 | if (req_op(req) == REQ_OP_FLUSH) { |
88 | return tr->flush(dev); | 88 | if (tr->flush(dev)) |
89 | return BLK_STS_IOERR; | ||
90 | return BLK_STS_OK; | ||
91 | } | ||
89 | 92 | ||
90 | if (blk_rq_pos(req) + blk_rq_cur_sectors(req) > | 93 | if (blk_rq_pos(req) + blk_rq_cur_sectors(req) > |
91 | get_capacity(req->rq_disk)) | 94 | get_capacity(req->rq_disk)) |
92 | return -EIO; | 95 | return BLK_STS_IOERR; |
93 | 96 | ||
94 | switch (req_op(req)) { | 97 | switch (req_op(req)) { |
95 | case REQ_OP_DISCARD: | 98 | case REQ_OP_DISCARD: |
96 | return tr->discard(dev, block, nsect); | 99 | if (tr->discard(dev, block, nsect)) |
100 | return BLK_STS_IOERR; | ||
101 | return BLK_STS_OK; | ||
97 | case REQ_OP_READ: | 102 | case REQ_OP_READ: |
98 | for (; nsect > 0; nsect--, block++, buf += tr->blksize) | 103 | for (; nsect > 0; nsect--, block++, buf += tr->blksize) |
99 | if (tr->readsect(dev, block, buf)) | 104 | if (tr->readsect(dev, block, buf)) |
100 | return -EIO; | 105 | return BLK_STS_IOERR; |
101 | rq_flush_dcache_pages(req); | 106 | rq_flush_dcache_pages(req); |
102 | return 0; | 107 | return BLK_STS_OK; |
103 | case REQ_OP_WRITE: | 108 | case REQ_OP_WRITE: |
104 | if (!tr->writesect) | 109 | if (!tr->writesect) |
105 | return -EIO; | 110 | return BLK_STS_IOERR; |
106 | 111 | ||
107 | rq_flush_dcache_pages(req); | 112 | rq_flush_dcache_pages(req); |
108 | for (; nsect > 0; nsect--, block++, buf += tr->blksize) | 113 | for (; nsect > 0; nsect--, block++, buf += tr->blksize) |
109 | if (tr->writesect(dev, block, buf)) | 114 | if (tr->writesect(dev, block, buf)) |
110 | return -EIO; | 115 | return BLK_STS_IOERR; |
111 | return 0; | ||
112 | default: | 116 | default: |
113 | return -EIO; | 117 | return BLK_STS_IOERR; |
114 | } | 118 | } |
115 | } | 119 | } |
116 | 120 | ||
@@ -132,7 +136,7 @@ static void mtd_blktrans_work(struct work_struct *work) | |||
132 | spin_lock_irq(rq->queue_lock); | 136 | spin_lock_irq(rq->queue_lock); |
133 | 137 | ||
134 | while (1) { | 138 | while (1) { |
135 | int res; | 139 | blk_status_t res; |
136 | 140 | ||
137 | dev->bg_stop = false; | 141 | dev->bg_stop = false; |
138 | if (!req && !(req = blk_fetch_request(rq))) { | 142 | if (!req && !(req = blk_fetch_request(rq))) { |
@@ -178,7 +182,7 @@ static void mtd_blktrans_request(struct request_queue *rq) | |||
178 | 182 | ||
179 | if (!dev) | 183 | if (!dev) |
180 | while ((req = blk_fetch_request(rq)) != NULL) | 184 | while ((req = blk_fetch_request(rq)) != NULL) |
181 | __blk_end_request_all(req, -ENODEV); | 185 | __blk_end_request_all(req, BLK_STS_IOERR); |
182 | else | 186 | else |
183 | queue_work(dev->wq, &dev->work); | 187 | queue_work(dev->wq, &dev->work); |
184 | } | 188 | } |
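do_blktrans_request() now reports its result as a blk_status_t and folds each readsect/writesect/discard failure into BLK_STS_IOERR on the spot. A sketch of a per-sector transfer loop in that style; my_do_read() and its read_sector callback are stand-ins for the transport hooks:

#include <linux/blkdev.h>

/*
 * Sketch: a per-sector copy loop reporting its result as blk_status_t,
 * in the style of the reworked do_blktrans_request().
 */
static blk_status_t my_do_read(struct request *req,
                               int (*read_sector)(unsigned long block, char *buf),
                               unsigned long block, unsigned int nsect,
                               char *buf, unsigned int blksize)
{
        for (; nsect > 0; nsect--, block++, buf += blksize)
                if (read_sector(block, buf))
                        return BLK_STS_IOERR;

        rq_flush_dcache_pages(req);
        return BLK_STS_OK;
}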
diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c index 5497e65439df..3ecdb39d1985 100644 --- a/drivers/mtd/ubi/block.c +++ b/drivers/mtd/ubi/block.c | |||
@@ -313,7 +313,7 @@ static void ubiblock_do_work(struct work_struct *work) | |||
313 | ret = ubiblock_read(pdu); | 313 | ret = ubiblock_read(pdu); |
314 | rq_flush_dcache_pages(req); | 314 | rq_flush_dcache_pages(req); |
315 | 315 | ||
316 | blk_mq_end_request(req, ret); | 316 | blk_mq_end_request(req, errno_to_blk_status(ret)); |
317 | } | 317 | } |
318 | 318 | ||
319 | static int ubiblock_queue_rq(struct blk_mq_hw_ctx *hctx, | 319 | static int ubiblock_queue_rq(struct blk_mq_hw_ctx *hctx, |
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index a60926410438..07e95c7d837a 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c | |||
@@ -70,29 +70,21 @@ static DEFINE_SPINLOCK(dev_list_lock); | |||
70 | 70 | ||
71 | static struct class *nvme_class; | 71 | static struct class *nvme_class; |
72 | 72 | ||
73 | static int nvme_error_status(struct request *req) | 73 | static blk_status_t nvme_error_status(struct request *req) |
74 | { | 74 | { |
75 | switch (nvme_req(req)->status & 0x7ff) { | 75 | switch (nvme_req(req)->status & 0x7ff) { |
76 | case NVME_SC_SUCCESS: | 76 | case NVME_SC_SUCCESS: |
77 | return 0; | 77 | return BLK_STS_OK; |
78 | case NVME_SC_CAP_EXCEEDED: | 78 | case NVME_SC_CAP_EXCEEDED: |
79 | return -ENOSPC; | 79 | return BLK_STS_NOSPC; |
80 | default: | ||
81 | return -EIO; | ||
82 | |||
83 | /* | ||
84 | * XXX: these errors are a nasty side-band protocol to | ||
85 | * drivers/md/dm-mpath.c:noretry_error() that aren't documented | ||
86 | * anywhere.. | ||
87 | */ | ||
88 | case NVME_SC_CMD_SEQ_ERROR: | ||
89 | return -EILSEQ; | ||
90 | case NVME_SC_ONCS_NOT_SUPPORTED: | 80 | case NVME_SC_ONCS_NOT_SUPPORTED: |
91 | return -EOPNOTSUPP; | 81 | return BLK_STS_NOTSUPP; |
92 | case NVME_SC_WRITE_FAULT: | 82 | case NVME_SC_WRITE_FAULT: |
93 | case NVME_SC_READ_ERROR: | 83 | case NVME_SC_READ_ERROR: |
94 | case NVME_SC_UNWRITTEN_BLOCK: | 84 | case NVME_SC_UNWRITTEN_BLOCK: |
95 | return -ENODATA; | 85 | return BLK_STS_MEDIUM; |
86 | default: | ||
87 | return BLK_STS_IOERR; | ||
96 | } | 88 | } |
97 | } | 89 | } |
98 | 90 | ||
@@ -555,15 +547,16 @@ int nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd, | |||
555 | result, timeout); | 547 | result, timeout); |
556 | } | 548 | } |
557 | 549 | ||
558 | static void nvme_keep_alive_end_io(struct request *rq, int error) | 550 | static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status) |
559 | { | 551 | { |
560 | struct nvme_ctrl *ctrl = rq->end_io_data; | 552 | struct nvme_ctrl *ctrl = rq->end_io_data; |
561 | 553 | ||
562 | blk_mq_free_request(rq); | 554 | blk_mq_free_request(rq); |
563 | 555 | ||
564 | if (error) { | 556 | if (status) { |
565 | dev_err(ctrl->device, | 557 | dev_err(ctrl->device, |
566 | "failed nvme_keep_alive_end_io error=%d\n", error); | 558 | "failed nvme_keep_alive_end_io error=%d\n", |
559 | status); | ||
567 | return; | 560 | return; |
568 | } | 561 | } |
569 | 562 | ||
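nvme_error_status() now maps NVMe completion status directly to blk_status_t, removing the undocumented errno side channel toward dm-mpath. A sketch of how such a mapping feeds blk_mq_end_request(); my_map_status() and my_complete() are stand-ins for the driver's own helpers and cover only the cases visible in the hunk above:

#include <linux/blk-mq.h>
#include <linux/nvme.h>

/* Sketch only: mapping direction as in the reworked nvme_error_status(). */
static blk_status_t my_map_status(u16 status)
{
        switch (status & 0x7ff) {
        case NVME_SC_SUCCESS:
                return BLK_STS_OK;
        case NVME_SC_CAP_EXCEEDED:
                return BLK_STS_NOSPC;
        default:
                return BLK_STS_IOERR;
        }
}

static void my_complete(struct request *req, u16 status)
{
        blk_mq_end_request(req, my_map_status(status));
}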
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c index f3885b5e56bd..2d7a2889866f 100644 --- a/drivers/nvme/host/lightnvm.c +++ b/drivers/nvme/host/lightnvm.c | |||
@@ -480,7 +480,7 @@ static inline void nvme_nvm_rqtocmd(struct nvm_rq *rqd, struct nvme_ns *ns, | |||
480 | rqd->bio->bi_iter.bi_sector)); | 480 | rqd->bio->bi_iter.bi_sector)); |
481 | } | 481 | } |
482 | 482 | ||
483 | static void nvme_nvm_end_io(struct request *rq, int error) | 483 | static void nvme_nvm_end_io(struct request *rq, blk_status_t status) |
484 | { | 484 | { |
485 | struct nvm_rq *rqd = rq->end_io_data; | 485 | struct nvm_rq *rqd = rq->end_io_data; |
486 | 486 | ||
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index d52701df7245..819898428763 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c | |||
@@ -706,7 +706,7 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx, | |||
706 | if (ns && ns->ms && !blk_integrity_rq(req)) { | 706 | if (ns && ns->ms && !blk_integrity_rq(req)) { |
707 | if (!(ns->pi_type && ns->ms == 8) && | 707 | if (!(ns->pi_type && ns->ms == 8) && |
708 | !blk_rq_is_passthrough(req)) { | 708 | !blk_rq_is_passthrough(req)) { |
709 | blk_mq_end_request(req, -EFAULT); | 709 | blk_mq_end_request(req, BLK_STS_NOTSUPP); |
710 | return BLK_MQ_RQ_QUEUE_OK; | 710 | return BLK_MQ_RQ_QUEUE_OK; |
711 | } | 711 | } |
712 | } | 712 | } |
@@ -939,7 +939,7 @@ static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid) | |||
939 | return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid); | 939 | return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid); |
940 | } | 940 | } |
941 | 941 | ||
942 | static void abort_endio(struct request *req, int error) | 942 | static void abort_endio(struct request *req, blk_status_t error) |
943 | { | 943 | { |
944 | struct nvme_iod *iod = blk_mq_rq_to_pdu(req); | 944 | struct nvme_iod *iod = blk_mq_rq_to_pdu(req); |
945 | struct nvme_queue *nvmeq = iod->nvmeq; | 945 | struct nvme_queue *nvmeq = iod->nvmeq; |
@@ -1586,7 +1586,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev) | |||
1586 | return nvme_create_io_queues(dev); | 1586 | return nvme_create_io_queues(dev); |
1587 | } | 1587 | } |
1588 | 1588 | ||
1589 | static void nvme_del_queue_end(struct request *req, int error) | 1589 | static void nvme_del_queue_end(struct request *req, blk_status_t error) |
1590 | { | 1590 | { |
1591 | struct nvme_queue *nvmeq = req->end_io_data; | 1591 | struct nvme_queue *nvmeq = req->end_io_data; |
1592 | 1592 | ||
@@ -1594,7 +1594,7 @@ static void nvme_del_queue_end(struct request *req, int error) | |||
1594 | complete(&nvmeq->dev->ioq_wait); | 1594 | complete(&nvmeq->dev->ioq_wait); |
1595 | } | 1595 | } |
1596 | 1596 | ||
1597 | static void nvme_del_cq_end(struct request *req, int error) | 1597 | static void nvme_del_cq_end(struct request *req, blk_status_t error) |
1598 | { | 1598 | { |
1599 | struct nvme_queue *nvmeq = req->end_io_data; | 1599 | struct nvme_queue *nvmeq = req->end_io_data; |
1600 | 1600 | ||
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 6fb3fd5efc11..b7cbd5d2cdea 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c | |||
@@ -2672,7 +2672,7 @@ static void __dasd_process_request_queue(struct dasd_block *block) | |||
2672 | */ | 2672 | */ |
2673 | if (basedev->state < DASD_STATE_READY) { | 2673 | if (basedev->state < DASD_STATE_READY) { |
2674 | while ((req = blk_fetch_request(block->request_queue))) | 2674 | while ((req = blk_fetch_request(block->request_queue))) |
2675 | __blk_end_request_all(req, -EIO); | 2675 | __blk_end_request_all(req, BLK_STS_IOERR); |
2676 | return; | 2676 | return; |
2677 | } | 2677 | } |
2678 | 2678 | ||
@@ -2692,7 +2692,7 @@ static void __dasd_process_request_queue(struct dasd_block *block) | |||
2692 | "Rejecting write request %p", | 2692 | "Rejecting write request %p", |
2693 | req); | 2693 | req); |
2694 | blk_start_request(req); | 2694 | blk_start_request(req); |
2695 | __blk_end_request_all(req, -EIO); | 2695 | __blk_end_request_all(req, BLK_STS_IOERR); |
2696 | continue; | 2696 | continue; |
2697 | } | 2697 | } |
2698 | if (test_bit(DASD_FLAG_ABORTALL, &basedev->flags) && | 2698 | if (test_bit(DASD_FLAG_ABORTALL, &basedev->flags) && |
@@ -2702,7 +2702,7 @@ static void __dasd_process_request_queue(struct dasd_block *block) | |||
2702 | "Rejecting failfast request %p", | 2702 | "Rejecting failfast request %p", |
2703 | req); | 2703 | req); |
2704 | blk_start_request(req); | 2704 | blk_start_request(req); |
2705 | __blk_end_request_all(req, -ETIMEDOUT); | 2705 | __blk_end_request_all(req, BLK_STS_TIMEOUT); |
2706 | continue; | 2706 | continue; |
2707 | } | 2707 | } |
2708 | cqr = basedev->discipline->build_cp(basedev, block, req); | 2708 | cqr = basedev->discipline->build_cp(basedev, block, req); |
@@ -2734,7 +2734,7 @@ static void __dasd_process_request_queue(struct dasd_block *block) | |||
2734 | "on request %p", | 2734 | "on request %p", |
2735 | PTR_ERR(cqr), req); | 2735 | PTR_ERR(cqr), req); |
2736 | blk_start_request(req); | 2736 | blk_start_request(req); |
2737 | __blk_end_request_all(req, -EIO); | 2737 | __blk_end_request_all(req, BLK_STS_IOERR); |
2738 | continue; | 2738 | continue; |
2739 | } | 2739 | } |
2740 | /* | 2740 | /* |
@@ -2755,21 +2755,29 @@ static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr) | |||
2755 | { | 2755 | { |
2756 | struct request *req; | 2756 | struct request *req; |
2757 | int status; | 2757 | int status; |
2758 | int error = 0; | 2758 | blk_status_t error = BLK_STS_OK; |
2759 | 2759 | ||
2760 | req = (struct request *) cqr->callback_data; | 2760 | req = (struct request *) cqr->callback_data; |
2761 | dasd_profile_end(cqr->block, cqr, req); | 2761 | dasd_profile_end(cqr->block, cqr, req); |
2762 | |||
2762 | status = cqr->block->base->discipline->free_cp(cqr, req); | 2763 | status = cqr->block->base->discipline->free_cp(cqr, req); |
2763 | if (status < 0) | 2764 | if (status < 0) |
2764 | error = status; | 2765 | error = errno_to_blk_status(status); |
2765 | else if (status == 0) { | 2766 | else if (status == 0) { |
2766 | if (cqr->intrc == -EPERM) | 2767 | switch (cqr->intrc) { |
2767 | error = -EBADE; | 2768 | case -EPERM: |
2768 | else if (cqr->intrc == -ENOLINK || | 2769 | error = BLK_STS_NEXUS; |
2769 | cqr->intrc == -ETIMEDOUT) | 2770 | break; |
2770 | error = cqr->intrc; | 2771 | case -ENOLINK: |
2771 | else | 2772 | error = BLK_STS_TRANSPORT; |
2772 | error = -EIO; | 2773 | break; |
2774 | case -ETIMEDOUT: | ||
2775 | error = BLK_STS_TIMEOUT; | ||
2776 | break; | ||
2777 | default: | ||
2778 | error = BLK_STS_IOERR; | ||
2779 | break; | ||
2780 | } | ||
2773 | } | 2781 | } |
2774 | __blk_end_request_all(req, error); | 2782 | __blk_end_request_all(req, error); |
2775 | } | 2783 | } |
@@ -3190,7 +3198,7 @@ static void dasd_flush_request_queue(struct dasd_block *block) | |||
3190 | 3198 | ||
3191 | spin_lock_irq(&block->request_queue_lock); | 3199 | spin_lock_irq(&block->request_queue_lock); |
3192 | while ((req = blk_fetch_request(block->request_queue))) | 3200 | while ((req = blk_fetch_request(block->request_queue))) |
3193 | __blk_end_request_all(req, -EIO); | 3201 | __blk_end_request_all(req, BLK_STS_IOERR); |
3194 | spin_unlock_irq(&block->request_queue_lock); | 3202 | spin_unlock_irq(&block->request_queue_lock); |
3195 | } | 3203 | } |
3196 | 3204 | ||
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c index 152de6817875..3c2c84b72877 100644 --- a/drivers/s390/block/scm_blk.c +++ b/drivers/s390/block/scm_blk.c | |||
@@ -231,7 +231,7 @@ static inline void scm_request_init(struct scm_blk_dev *bdev, | |||
231 | aob->request.data = (u64) aobrq; | 231 | aob->request.data = (u64) aobrq; |
232 | scmrq->bdev = bdev; | 232 | scmrq->bdev = bdev; |
233 | scmrq->retries = 4; | 233 | scmrq->retries = 4; |
234 | scmrq->error = 0; | 234 | scmrq->error = BLK_STS_OK; |
235 | /* We don't use all msbs - place aidaws at the end of the aob page. */ | 235 | /* We don't use all msbs - place aidaws at the end of the aob page. */ |
236 | scmrq->next_aidaw = (void *) &aob->msb[nr_requests_per_io]; | 236 | scmrq->next_aidaw = (void *) &aob->msb[nr_requests_per_io]; |
237 | scm_request_cluster_init(scmrq); | 237 | scm_request_cluster_init(scmrq); |
@@ -364,7 +364,7 @@ static void __scmrq_log_error(struct scm_request *scmrq) | |||
364 | { | 364 | { |
365 | struct aob *aob = scmrq->aob; | 365 | struct aob *aob = scmrq->aob; |
366 | 366 | ||
367 | if (scmrq->error == -ETIMEDOUT) | 367 | if (scmrq->error == BLK_STS_TIMEOUT) |
368 | SCM_LOG(1, "Request timeout"); | 368 | SCM_LOG(1, "Request timeout"); |
369 | else { | 369 | else { |
370 | SCM_LOG(1, "Request error"); | 370 | SCM_LOG(1, "Request error"); |
@@ -377,7 +377,7 @@ static void __scmrq_log_error(struct scm_request *scmrq) | |||
377 | scmrq->error); | 377 | scmrq->error); |
378 | } | 378 | } |
379 | 379 | ||
380 | void scm_blk_irq(struct scm_device *scmdev, void *data, int error) | 380 | void scm_blk_irq(struct scm_device *scmdev, void *data, blk_status_t error) |
381 | { | 381 | { |
382 | struct scm_request *scmrq = data; | 382 | struct scm_request *scmrq = data; |
383 | struct scm_blk_dev *bdev = scmrq->bdev; | 383 | struct scm_blk_dev *bdev = scmrq->bdev; |
@@ -397,7 +397,7 @@ static void scm_blk_handle_error(struct scm_request *scmrq) | |||
397 | struct scm_blk_dev *bdev = scmrq->bdev; | 397 | struct scm_blk_dev *bdev = scmrq->bdev; |
398 | unsigned long flags; | 398 | unsigned long flags; |
399 | 399 | ||
400 | if (scmrq->error != -EIO) | 400 | if (scmrq->error != BLK_STS_IOERR) |
401 | goto restart; | 401 | goto restart; |
402 | 402 | ||
403 | /* For -EIO the response block is valid. */ | 403 | /* For -EIO the response block is valid. */ |
diff --git a/drivers/s390/block/scm_blk.h b/drivers/s390/block/scm_blk.h index 09218cdc5129..cd598d1a4eae 100644 --- a/drivers/s390/block/scm_blk.h +++ b/drivers/s390/block/scm_blk.h | |||
@@ -35,7 +35,7 @@ struct scm_request { | |||
35 | struct aob *aob; | 35 | struct aob *aob; |
36 | struct list_head list; | 36 | struct list_head list; |
37 | u8 retries; | 37 | u8 retries; |
38 | int error; | 38 | blk_status_t error; |
39 | #ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE | 39 | #ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE |
40 | struct { | 40 | struct { |
41 | enum {CLUSTER_NONE, CLUSTER_READ, CLUSTER_WRITE} state; | 41 | enum {CLUSTER_NONE, CLUSTER_READ, CLUSTER_WRITE} state; |
@@ -50,7 +50,7 @@ struct scm_request { | |||
50 | int scm_blk_dev_setup(struct scm_blk_dev *, struct scm_device *); | 50 | int scm_blk_dev_setup(struct scm_blk_dev *, struct scm_device *); |
51 | void scm_blk_dev_cleanup(struct scm_blk_dev *); | 51 | void scm_blk_dev_cleanup(struct scm_blk_dev *); |
52 | void scm_blk_set_available(struct scm_blk_dev *); | 52 | void scm_blk_set_available(struct scm_blk_dev *); |
53 | void scm_blk_irq(struct scm_device *, void *, int); | 53 | void scm_blk_irq(struct scm_device *, void *, blk_status_t); |
54 | 54 | ||
55 | void scm_request_finish(struct scm_request *); | 55 | void scm_request_finish(struct scm_request *); |
56 | void scm_request_requeue(struct scm_request *); | 56 | void scm_request_requeue(struct scm_request *); |
diff --git a/drivers/s390/cio/eadm_sch.c b/drivers/s390/cio/eadm_sch.c index b3f44bc7f644..0f11f3bcac82 100644 --- a/drivers/s390/cio/eadm_sch.c +++ b/drivers/s390/cio/eadm_sch.c | |||
@@ -135,7 +135,7 @@ static void eadm_subchannel_irq(struct subchannel *sch) | |||
135 | struct eadm_private *private = get_eadm_private(sch); | 135 | struct eadm_private *private = get_eadm_private(sch); |
136 | struct eadm_scsw *scsw = &sch->schib.scsw.eadm; | 136 | struct eadm_scsw *scsw = &sch->schib.scsw.eadm; |
137 | struct irb *irb = this_cpu_ptr(&cio_irb); | 137 | struct irb *irb = this_cpu_ptr(&cio_irb); |
138 | int error = 0; | 138 | blk_status_t error = BLK_STS_OK; |
139 | 139 | ||
140 | EADM_LOG(6, "irq"); | 140 | EADM_LOG(6, "irq"); |
141 | EADM_LOG_HEX(6, irb, sizeof(*irb)); | 141 | EADM_LOG_HEX(6, irb, sizeof(*irb)); |
@@ -144,10 +144,10 @@ static void eadm_subchannel_irq(struct subchannel *sch) | |||
144 | 144 | ||
145 | if ((scsw->stctl & (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)) | 145 | if ((scsw->stctl & (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)) |
146 | && scsw->eswf == 1 && irb->esw.eadm.erw.r) | 146 | && scsw->eswf == 1 && irb->esw.eadm.erw.r) |
147 | error = -EIO; | 147 | error = BLK_STS_IOERR; |
148 | 148 | ||
149 | if (scsw->fctl & SCSW_FCTL_CLEAR_FUNC) | 149 | if (scsw->fctl & SCSW_FCTL_CLEAR_FUNC) |
150 | error = -ETIMEDOUT; | 150 | error = BLK_STS_TIMEOUT; |
151 | 151 | ||
152 | eadm_subchannel_set_timeout(sch, 0); | 152 | eadm_subchannel_set_timeout(sch, 0); |
153 | 153 | ||
diff --git a/drivers/s390/cio/scm.c b/drivers/s390/cio/scm.c index 15268edc54ae..1fa53ecdc2aa 100644 --- a/drivers/s390/cio/scm.c +++ b/drivers/s390/cio/scm.c | |||
@@ -71,7 +71,7 @@ void scm_driver_unregister(struct scm_driver *scmdrv) | |||
71 | } | 71 | } |
72 | EXPORT_SYMBOL_GPL(scm_driver_unregister); | 72 | EXPORT_SYMBOL_GPL(scm_driver_unregister); |
73 | 73 | ||
74 | void scm_irq_handler(struct aob *aob, int error) | 74 | void scm_irq_handler(struct aob *aob, blk_status_t error) |
75 | { | 75 | { |
76 | struct aob_rq_header *aobrq = (void *) aob->request.data; | 76 | struct aob_rq_header *aobrq = (void *) aob->request.data; |
77 | struct scm_device *scmdev = aobrq->scmdev; | 77 | struct scm_device *scmdev = aobrq->scmdev; |
diff --git a/drivers/sbus/char/jsflash.c b/drivers/sbus/char/jsflash.c index 62fed9dc893e..35a69949f92d 100644 --- a/drivers/sbus/char/jsflash.c +++ b/drivers/sbus/char/jsflash.c | |||
@@ -214,7 +214,7 @@ static void jsfd_request(void) | |||
214 | struct jsfd_part *jdp = req->rq_disk->private_data; | 214 | struct jsfd_part *jdp = req->rq_disk->private_data; |
215 | unsigned long offset = blk_rq_pos(req) << 9; | 215 | unsigned long offset = blk_rq_pos(req) << 9; |
216 | size_t len = blk_rq_cur_bytes(req); | 216 | size_t len = blk_rq_cur_bytes(req); |
217 | int err = -EIO; | 217 | blk_status_t err = BLK_STS_IOERR; |
218 | 218 | ||
219 | if ((offset + len) > jdp->dsize) | 219 | if ((offset + len) > jdp->dsize) |
220 | goto end; | 220 | goto end; |
@@ -230,7 +230,7 @@ static void jsfd_request(void) | |||
230 | } | 230 | } |
231 | 231 | ||
232 | jsfd_read(bio_data(req->bio), jdp->dbase + offset, len); | 232 | jsfd_read(bio_data(req->bio), jdp->dbase + offset, len); |
233 | err = 0; | 233 | err = BLK_STS_OK; |
234 | end: | 234 | end: |
235 | if (!__blk_end_request_cur(req, err)) | 235 | if (!__blk_end_request_cur(req, err)) |
236 | req = jsfd_next_request(); | 236 | req = jsfd_next_request(); |
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c index 14785177ce7b..1e69a43b279d 100644 --- a/drivers/scsi/osd/osd_initiator.c +++ b/drivers/scsi/osd/osd_initiator.c | |||
@@ -446,7 +446,7 @@ static void _put_request(struct request *rq) | |||
446 | * code paths. | 446 | * code paths. |
447 | */ | 447 | */ |
448 | if (unlikely(rq->bio)) | 448 | if (unlikely(rq->bio)) |
449 | blk_end_request(rq, -ENOMEM, blk_rq_bytes(rq)); | 449 | blk_end_request(rq, BLK_STS_IOERR, blk_rq_bytes(rq)); |
450 | else | 450 | else |
451 | blk_put_request(rq); | 451 | blk_put_request(rq); |
452 | } | 452 | } |
@@ -474,7 +474,7 @@ void osd_end_request(struct osd_request *or) | |||
474 | EXPORT_SYMBOL(osd_end_request); | 474 | EXPORT_SYMBOL(osd_end_request); |
475 | 475 | ||
476 | static void _set_error_resid(struct osd_request *or, struct request *req, | 476 | static void _set_error_resid(struct osd_request *or, struct request *req, |
477 | int error) | 477 | blk_status_t error) |
478 | { | 478 | { |
479 | or->async_error = error; | 479 | or->async_error = error; |
480 | or->req_errors = scsi_req(req)->result; | 480 | or->req_errors = scsi_req(req)->result; |
@@ -489,17 +489,19 @@ static void _set_error_resid(struct osd_request *or, struct request *req, | |||
489 | 489 | ||
490 | int osd_execute_request(struct osd_request *or) | 490 | int osd_execute_request(struct osd_request *or) |
491 | { | 491 | { |
492 | int error; | ||
493 | |||
494 | blk_execute_rq(or->request->q, NULL, or->request, 0); | 492 | blk_execute_rq(or->request->q, NULL, or->request, 0); |
495 | error = scsi_req(or->request)->result ? -EIO : 0; | ||
496 | 493 | ||
497 | _set_error_resid(or, or->request, error); | 494 | if (scsi_req(or->request)->result) { |
498 | return error; | 495 | _set_error_resid(or, or->request, BLK_STS_IOERR); |
496 | return -EIO; | ||
497 | } | ||
498 | |||
499 | _set_error_resid(or, or->request, BLK_STS_OK); | ||
500 | return 0; | ||
499 | } | 501 | } |
500 | EXPORT_SYMBOL(osd_execute_request); | 502 | EXPORT_SYMBOL(osd_execute_request); |
501 | 503 | ||
502 | static void osd_request_async_done(struct request *req, int error) | 504 | static void osd_request_async_done(struct request *req, blk_status_t error) |
503 | { | 505 | { |
504 | struct osd_request *or = req->end_io_data; | 506 | struct osd_request *or = req->end_io_data; |
505 | 507 | ||
@@ -1914,7 +1916,7 @@ analyze: | |||
1914 | /* scsi sense is Empty, the request was never issued to target | 1916 | /* scsi sense is Empty, the request was never issued to target |
1915 | * linux return code might tell us what happened. | 1917 | * linux return code might tell us what happened. |
1916 | */ | 1918 | */ |
1917 | if (or->async_error == -ENOMEM) | 1919 | if (or->async_error == BLK_STS_RESOURCE) |
1918 | osi->osd_err_pri = OSD_ERR_PRI_RESOURCE; | 1920 | osi->osd_err_pri = OSD_ERR_PRI_RESOURCE; |
1919 | else | 1921 | else |
1920 | osi->osd_err_pri = OSD_ERR_PRI_UNREACHABLE; | 1922 | osi->osd_err_pri = OSD_ERR_PRI_UNREACHABLE; |
diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c index 67cbed92f07d..d54689c9216e 100644 --- a/drivers/scsi/osst.c +++ b/drivers/scsi/osst.c | |||
@@ -320,7 +320,7 @@ static int osst_chk_result(struct osst_tape * STp, struct osst_request * SRpnt) | |||
320 | 320 | ||
321 | 321 | ||
322 | /* Wakeup from interrupt */ | 322 | /* Wakeup from interrupt */ |
323 | static void osst_end_async(struct request *req, int update) | 323 | static void osst_end_async(struct request *req, blk_status_t status) |
324 | { | 324 | { |
325 | struct scsi_request *rq = scsi_req(req); | 325 | struct scsi_request *rq = scsi_req(req); |
326 | struct osst_request *SRpnt = req->end_io_data; | 326 | struct osst_request *SRpnt = req->end_io_data; |
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index ecc07dab893d..44904f41924c 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c | |||
@@ -1874,7 +1874,7 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd) | |||
1874 | } | 1874 | } |
1875 | } | 1875 | } |
1876 | 1876 | ||
1877 | static void eh_lock_door_done(struct request *req, int uptodate) | 1877 | static void eh_lock_door_done(struct request *req, blk_status_t status) |
1878 | { | 1878 | { |
1879 | __blk_put_request(req->q, req); | 1879 | __blk_put_request(req->q, req); |
1880 | } | 1880 | } |
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 884aaa84c2dd..67a67191520f 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c | |||
@@ -635,7 +635,7 @@ static void scsi_release_bidi_buffers(struct scsi_cmnd *cmd) | |||
635 | cmd->request->next_rq->special = NULL; | 635 | cmd->request->next_rq->special = NULL; |
636 | } | 636 | } |
637 | 637 | ||
638 | static bool scsi_end_request(struct request *req, int error, | 638 | static bool scsi_end_request(struct request *req, blk_status_t error, |
639 | unsigned int bytes, unsigned int bidi_bytes) | 639 | unsigned int bytes, unsigned int bidi_bytes) |
640 | { | 640 | { |
641 | struct scsi_cmnd *cmd = req->special; | 641 | struct scsi_cmnd *cmd = req->special; |
@@ -694,45 +694,28 @@ static bool scsi_end_request(struct request *req, int error, | |||
694 | * @cmd: SCSI command (unused) | 694 | * @cmd: SCSI command (unused) |
695 | * @result: scsi error code | 695 | * @result: scsi error code |
696 | * | 696 | * |
697 | * Translate SCSI error code into standard UNIX errno. | 697 | * Translate SCSI error code into block errors. |
698 | * Return values: | ||
699 | * -ENOLINK temporary transport failure | ||
700 | * -EREMOTEIO permanent target failure, do not retry | ||
701 | * -EBADE permanent nexus failure, retry on other path | ||
702 | * -ENOSPC No write space available | ||
703 | * -ENODATA Medium error | ||
704 | * -EIO unspecified I/O error | ||
705 | */ | 698 | */ |
706 | static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result) | 699 | static blk_status_t __scsi_error_from_host_byte(struct scsi_cmnd *cmd, |
700 | int result) | ||
707 | { | 701 | { |
708 | int error = 0; | 702 | switch (host_byte(result)) { |
709 | |||
710 | switch(host_byte(result)) { | ||
711 | case DID_TRANSPORT_FAILFAST: | 703 | case DID_TRANSPORT_FAILFAST: |
712 | error = -ENOLINK; | 704 | return BLK_STS_TRANSPORT; |
713 | break; | ||
714 | case DID_TARGET_FAILURE: | 705 | case DID_TARGET_FAILURE: |
715 | set_host_byte(cmd, DID_OK); | 706 | set_host_byte(cmd, DID_OK); |
716 | error = -EREMOTEIO; | 707 | return BLK_STS_TARGET; |
717 | break; | ||
718 | case DID_NEXUS_FAILURE: | 708 | case DID_NEXUS_FAILURE: |
719 | set_host_byte(cmd, DID_OK); | 709 | return BLK_STS_NEXUS; |
720 | error = -EBADE; | ||
721 | break; | ||
722 | case DID_ALLOC_FAILURE: | 710 | case DID_ALLOC_FAILURE: |
723 | set_host_byte(cmd, DID_OK); | 711 | set_host_byte(cmd, DID_OK); |
724 | error = -ENOSPC; | 712 | return BLK_STS_NOSPC; |
725 | break; | ||
726 | case DID_MEDIUM_ERROR: | 713 | case DID_MEDIUM_ERROR: |
727 | set_host_byte(cmd, DID_OK); | 714 | set_host_byte(cmd, DID_OK); |
728 | error = -ENODATA; | 715 | return BLK_STS_MEDIUM; |
729 | break; | ||
730 | default: | 716 | default: |
731 | error = -EIO; | 717 | return BLK_STS_IOERR; |
732 | break; | ||
733 | } | 718 | } |
734 | |||
735 | return error; | ||
736 | } | 719 | } |
737 | 720 | ||
738 | /* | 721 | /* |
@@ -769,7 +752,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) | |||
769 | int result = cmd->result; | 752 | int result = cmd->result; |
770 | struct request_queue *q = cmd->device->request_queue; | 753 | struct request_queue *q = cmd->device->request_queue; |
771 | struct request *req = cmd->request; | 754 | struct request *req = cmd->request; |
772 | int error = 0; | 755 | blk_status_t error = BLK_STS_OK; |
773 | struct scsi_sense_hdr sshdr; | 756 | struct scsi_sense_hdr sshdr; |
774 | bool sense_valid = false; | 757 | bool sense_valid = false; |
775 | int sense_deferred = 0, level = 0; | 758 | int sense_deferred = 0, level = 0; |
@@ -808,7 +791,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) | |||
808 | * both sides at once. | 791 | * both sides at once. |
809 | */ | 792 | */ |
810 | scsi_req(req->next_rq)->resid_len = scsi_in(cmd)->resid; | 793 | scsi_req(req->next_rq)->resid_len = scsi_in(cmd)->resid; |
811 | if (scsi_end_request(req, 0, blk_rq_bytes(req), | 794 | if (scsi_end_request(req, BLK_STS_OK, blk_rq_bytes(req), |
812 | blk_rq_bytes(req->next_rq))) | 795 | blk_rq_bytes(req->next_rq))) |
813 | BUG(); | 796 | BUG(); |
814 | return; | 797 | return; |
@@ -850,7 +833,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) | |||
850 | scsi_print_sense(cmd); | 833 | scsi_print_sense(cmd); |
851 | result = 0; | 834 | result = 0; |
852 | /* for passthrough error may be set */ | 835 | /* for passthrough error may be set */ |
853 | error = 0; | 836 | error = BLK_STS_OK; |
854 | } | 837 | } |
855 | 838 | ||
856 | /* | 839 | /* |
@@ -922,18 +905,18 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) | |||
922 | action = ACTION_REPREP; | 905 | action = ACTION_REPREP; |
923 | } else if (sshdr.asc == 0x10) /* DIX */ { | 906 | } else if (sshdr.asc == 0x10) /* DIX */ { |
924 | action = ACTION_FAIL; | 907 | action = ACTION_FAIL; |
925 | error = -EILSEQ; | 908 | error = BLK_STS_PROTECTION; |
926 | /* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */ | 909 | /* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */ |
927 | } else if (sshdr.asc == 0x20 || sshdr.asc == 0x24) { | 910 | } else if (sshdr.asc == 0x20 || sshdr.asc == 0x24) { |
928 | action = ACTION_FAIL; | 911 | action = ACTION_FAIL; |
929 | error = -EREMOTEIO; | 912 | error = BLK_STS_TARGET; |
930 | } else | 913 | } else |
931 | action = ACTION_FAIL; | 914 | action = ACTION_FAIL; |
932 | break; | 915 | break; |
933 | case ABORTED_COMMAND: | 916 | case ABORTED_COMMAND: |
934 | action = ACTION_FAIL; | 917 | action = ACTION_FAIL; |
935 | if (sshdr.asc == 0x10) /* DIF */ | 918 | if (sshdr.asc == 0x10) /* DIF */ |
936 | error = -EILSEQ; | 919 | error = BLK_STS_PROTECTION; |
937 | break; | 920 | break; |
938 | case NOT_READY: | 921 | case NOT_READY: |
939 | /* If the device is in the process of becoming | 922 | /* If the device is in the process of becoming |
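__scsi_error_from_host_byte() now returns blk_status_t, and scsi_io_completion() sets BLK_STS_PROTECTION and BLK_STS_TARGET for the DIX and invalid-CDB sense cases instead of -EILSEQ and -EREMOTEIO. A reduced sketch of the host-byte mapping direction, covering only two of the cases from the hunk; my_host_byte_to_status() is an illustration, not the kernel helper:

#include <scsi/scsi.h>
#include <linux/blk_types.h>

/*
 * Reduced sketch: everything not listed collapses to BLK_STS_IOERR,
 * matching the default arm of the converted function.
 */
static blk_status_t my_host_byte_to_status(int result)
{
        switch (host_byte(result)) {
        case DID_TRANSPORT_FAILFAST:
                return BLK_STS_TRANSPORT;
        case DID_TARGET_FAILURE:
                return BLK_STS_TARGET;
        default:
                return BLK_STS_IOERR;
        }
}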
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c index d16414bfe2ef..cc970c811bcb 100644 --- a/drivers/scsi/scsi_transport_sas.c +++ b/drivers/scsi/scsi_transport_sas.c | |||
@@ -172,7 +172,7 @@ static void sas_smp_request(struct request_queue *q, struct Scsi_Host *shost, | |||
172 | struct sas_rphy *rphy) | 172 | struct sas_rphy *rphy) |
173 | { | 173 | { |
174 | struct request *req; | 174 | struct request *req; |
175 | int ret; | 175 | blk_status_t ret; |
176 | int (*handler)(struct Scsi_Host *, struct sas_rphy *, struct request *); | 176 | int (*handler)(struct Scsi_Host *, struct sas_rphy *, struct request *); |
177 | 177 | ||
178 | while ((req = blk_fetch_request(q)) != NULL) { | 178 | while ((req = blk_fetch_request(q)) != NULL) { |
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index 82c33a6edbea..f3387c6089c5 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c | |||
@@ -177,7 +177,7 @@ typedef struct sg_device { /* holds the state of each scsi generic device */ | |||
177 | } Sg_device; | 177 | } Sg_device; |
178 | 178 | ||
179 | /* tasklet or soft irq callback */ | 179 | /* tasklet or soft irq callback */ |
180 | static void sg_rq_end_io(struct request *rq, int uptodate); | 180 | static void sg_rq_end_io(struct request *rq, blk_status_t status); |
181 | static int sg_start_req(Sg_request *srp, unsigned char *cmd); | 181 | static int sg_start_req(Sg_request *srp, unsigned char *cmd); |
182 | static int sg_finish_rem_req(Sg_request * srp); | 182 | static int sg_finish_rem_req(Sg_request * srp); |
183 | static int sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size); | 183 | static int sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size); |
@@ -808,7 +808,7 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp, | |||
808 | if (atomic_read(&sdp->detaching)) { | 808 | if (atomic_read(&sdp->detaching)) { |
809 | if (srp->bio) { | 809 | if (srp->bio) { |
810 | scsi_req_free_cmd(scsi_req(srp->rq)); | 810 | scsi_req_free_cmd(scsi_req(srp->rq)); |
811 | blk_end_request_all(srp->rq, -EIO); | 811 | blk_end_request_all(srp->rq, BLK_STS_IOERR); |
812 | srp->rq = NULL; | 812 | srp->rq = NULL; |
813 | } | 813 | } |
814 | 814 | ||
@@ -1300,7 +1300,7 @@ sg_rq_end_io_usercontext(struct work_struct *work) | |||
1300 | * level when a command is completed (or has failed). | 1300 | * level when a command is completed (or has failed). |
1301 | */ | 1301 | */ |
1302 | static void | 1302 | static void |
1303 | sg_rq_end_io(struct request *rq, int uptodate) | 1303 | sg_rq_end_io(struct request *rq, blk_status_t status) |
1304 | { | 1304 | { |
1305 | struct sg_request *srp = rq->end_io_data; | 1305 | struct sg_request *srp = rq->end_io_data; |
1306 | struct scsi_request *req = scsi_req(rq); | 1306 | struct scsi_request *req = scsi_req(rq); |
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index 1ea34d6f5437..6b1c4ac54e66 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c | |||
@@ -511,7 +511,7 @@ static void st_do_stats(struct scsi_tape *STp, struct request *req) | |||
511 | atomic64_dec(&STp->stats->in_flight); | 511 | atomic64_dec(&STp->stats->in_flight); |
512 | } | 512 | } |
513 | 513 | ||
514 | static void st_scsi_execute_end(struct request *req, int uptodate) | 514 | static void st_scsi_execute_end(struct request *req, blk_status_t status) |
515 | { | 515 | { |
516 | struct st_request *SRpnt = req->end_io_data; | 516 | struct st_request *SRpnt = req->end_io_data; |
517 | struct scsi_request *rq = scsi_req(req); | 517 | struct scsi_request *rq = scsi_req(req); |
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index 3e4abb13f8ea..323ab47645d0 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c | |||
@@ -55,7 +55,7 @@ static inline struct pscsi_dev_virt *PSCSI_DEV(struct se_device *dev) | |||
55 | } | 55 | } |
56 | 56 | ||
57 | static sense_reason_t pscsi_execute_cmd(struct se_cmd *cmd); | 57 | static sense_reason_t pscsi_execute_cmd(struct se_cmd *cmd); |
58 | static void pscsi_req_done(struct request *, int); | 58 | static void pscsi_req_done(struct request *, blk_status_t); |
59 | 59 | ||
60 | /* pscsi_attach_hba(): | 60 | /* pscsi_attach_hba(): |
61 | * | 61 | * |
@@ -1045,7 +1045,7 @@ static sector_t pscsi_get_blocks(struct se_device *dev) | |||
1045 | return 0; | 1045 | return 0; |
1046 | } | 1046 | } |
1047 | 1047 | ||
1048 | static void pscsi_req_done(struct request *req, int uptodate) | 1048 | static void pscsi_req_done(struct request *req, blk_status_t status) |
1049 | { | 1049 | { |
1050 | struct se_cmd *cmd = req->end_io_data; | 1050 | struct se_cmd *cmd = req->end_io_data; |
1051 | struct pscsi_plugin_task *pt = cmd->priv; | 1051 | struct pscsi_plugin_task *pt = cmd->priv; |
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index fcd641032f8d..0cf6735046d3 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h | |||
@@ -230,8 +230,8 @@ static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag) | |||
230 | 230 | ||
231 | int blk_mq_request_started(struct request *rq); | 231 | int blk_mq_request_started(struct request *rq); |
232 | void blk_mq_start_request(struct request *rq); | 232 | void blk_mq_start_request(struct request *rq); |
233 | void blk_mq_end_request(struct request *rq, int error); | 233 | void blk_mq_end_request(struct request *rq, blk_status_t error); |
234 | void __blk_mq_end_request(struct request *rq, int error); | 234 | void __blk_mq_end_request(struct request *rq, blk_status_t error); |
235 | 235 | ||
236 | void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list); | 236 | void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list); |
237 | void blk_mq_add_to_requeue_list(struct request *rq, bool at_head, | 237 | void blk_mq_add_to_requeue_list(struct request *rq, bool at_head, |
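blk_mq_end_request() and __blk_mq_end_request() take a blk_status_t after this change, so blk-mq drivers complete requests with BLK_STS_* codes directly. A minimal sketch with an illustrative my_finish_request():

#include <linux/blk-mq.h>

/* Sketch: completing a blk-mq request after the conversion; the
 * hw_ok flag stands in for whatever the hardware reports. */
static void my_finish_request(struct request *rq, bool hw_ok)
{
        blk_mq_end_request(rq, hw_ok ? BLK_STS_OK : BLK_STS_IOERR);
}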
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 61339bc44400..59378939a8cd 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h | |||
@@ -17,6 +17,22 @@ struct io_context; | |||
17 | struct cgroup_subsys_state; | 17 | struct cgroup_subsys_state; |
18 | typedef void (bio_end_io_t) (struct bio *); | 18 | typedef void (bio_end_io_t) (struct bio *); |
19 | 19 | ||
20 | /* | ||
21 | * Block error status values. See block/blk-core:blk_errors for the details. | ||
22 | */ | ||
23 | typedef u8 __bitwise blk_status_t; | ||
24 | #define BLK_STS_OK 0 | ||
25 | #define BLK_STS_NOTSUPP ((__force blk_status_t)1) | ||
26 | #define BLK_STS_TIMEOUT ((__force blk_status_t)2) | ||
27 | #define BLK_STS_NOSPC ((__force blk_status_t)3) | ||
28 | #define BLK_STS_TRANSPORT ((__force blk_status_t)4) | ||
29 | #define BLK_STS_TARGET ((__force blk_status_t)5) | ||
30 | #define BLK_STS_NEXUS ((__force blk_status_t)6) | ||
31 | #define BLK_STS_MEDIUM ((__force blk_status_t)7) | ||
32 | #define BLK_STS_PROTECTION ((__force blk_status_t)8) | ||
33 | #define BLK_STS_RESOURCE ((__force blk_status_t)9) | ||
34 | #define BLK_STS_IOERR ((__force blk_status_t)10) | ||
35 | |||
20 | struct blk_issue_stat { | 36 | struct blk_issue_stat { |
21 | u64 stat; | 37 | u64 stat; |
22 | }; | 38 | }; |
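Because blk_status_t is declared __bitwise, sparse warns whenever it is mixed with plain integers, which is why each BLK_STS_* constant carries a __force cast. A hedged sketch of how a driver might translate its device-specific errnos into the new codes; mydev_to_blk_status() is a hypothetical helper, not something added by this patch:

#include <linux/blk_types.h>
#include <linux/errno.h>

static blk_status_t mydev_to_blk_status(int dev_err)	/* hypothetical helper */
{
	switch (dev_err) {
	case 0:
		return BLK_STS_OK;
	case -ENOMEM:
		return BLK_STS_RESOURCE;	/* transient resource shortage */
	case -ETIMEDOUT:
		return BLK_STS_TIMEOUT;
	case -ENOSPC:
		return BLK_STS_NOSPC;
	default:
		return BLK_STS_IOERR;		/* catch-all I/O error */
	}
}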
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 019f18c65098..2a8871638453 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h | |||
@@ -55,7 +55,7 @@ struct blk_stat_callback; | |||
55 | */ | 55 | */ |
56 | #define BLKCG_MAX_POLS 3 | 56 | #define BLKCG_MAX_POLS 3 |
57 | 57 | ||
58 | typedef void (rq_end_io_fn)(struct request *, int); | 58 | typedef void (rq_end_io_fn)(struct request *, blk_status_t); |
59 | 59 | ||
60 | #define BLK_RL_SYNCFULL (1U << 0) | 60 | #define BLK_RL_SYNCFULL (1U << 0) |
61 | #define BLK_RL_ASYNCFULL (1U << 1) | 61 | #define BLK_RL_ASYNCFULL (1U << 1) |
@@ -940,7 +940,7 @@ extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src, | |||
940 | int (*bio_ctr)(struct bio *, struct bio *, void *), | 940 | int (*bio_ctr)(struct bio *, struct bio *, void *), |
941 | void *data); | 941 | void *data); |
942 | extern void blk_rq_unprep_clone(struct request *rq); | 942 | extern void blk_rq_unprep_clone(struct request *rq); |
943 | extern int blk_insert_cloned_request(struct request_queue *q, | 943 | extern blk_status_t blk_insert_cloned_request(struct request_queue *q, |
944 | struct request *rq); | 944 | struct request *rq); |
945 | extern int blk_rq_append_bio(struct request *rq, struct bio *bio); | 945 | extern int blk_rq_append_bio(struct request *rq, struct bio *bio); |
946 | extern void blk_delay_queue(struct request_queue *, unsigned long); | 946 | extern void blk_delay_queue(struct request_queue *, unsigned long); |
@@ -980,6 +980,9 @@ extern void blk_execute_rq(struct request_queue *, struct gendisk *, | |||
980 | extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *, | 980 | extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *, |
981 | struct request *, int, rq_end_io_fn *); | 981 | struct request *, int, rq_end_io_fn *); |
982 | 982 | ||
983 | int blk_status_to_errno(blk_status_t status); | ||
984 | blk_status_t errno_to_blk_status(int errno); | ||
985 | |||
983 | bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie); | 986 | bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie); |
984 | 987 | ||
985 | static inline struct request_queue *bdev_get_queue(struct block_device *bdev) | 988 | static inline struct request_queue *bdev_get_queue(struct block_device *bdev) |
@@ -1112,16 +1115,16 @@ extern struct request *blk_fetch_request(struct request_queue *q); | |||
1112 | * blk_end_request() for parts of the original function. | 1115 | * blk_end_request() for parts of the original function. |
1113 | * This prevents code duplication in drivers. | 1116 | * This prevents code duplication in drivers. |
1114 | */ | 1117 | */ |
1115 | extern bool blk_update_request(struct request *rq, int error, | 1118 | extern bool blk_update_request(struct request *rq, blk_status_t error, |
1116 | unsigned int nr_bytes); | 1119 | unsigned int nr_bytes); |
1117 | extern void blk_finish_request(struct request *rq, int error); | 1120 | extern void blk_finish_request(struct request *rq, blk_status_t error); |
1118 | extern bool blk_end_request(struct request *rq, int error, | 1121 | extern bool blk_end_request(struct request *rq, blk_status_t error, |
1119 | unsigned int nr_bytes); | 1122 | unsigned int nr_bytes); |
1120 | extern void blk_end_request_all(struct request *rq, int error); | 1123 | extern void blk_end_request_all(struct request *rq, blk_status_t error); |
1121 | extern bool __blk_end_request(struct request *rq, int error, | 1124 | extern bool __blk_end_request(struct request *rq, blk_status_t error, |
1122 | unsigned int nr_bytes); | 1125 | unsigned int nr_bytes); |
1123 | extern void __blk_end_request_all(struct request *rq, int error); | 1126 | extern void __blk_end_request_all(struct request *rq, blk_status_t error); |
1124 | extern bool __blk_end_request_cur(struct request *rq, int error); | 1127 | extern bool __blk_end_request_cur(struct request *rq, blk_status_t error); |
1125 | 1128 | ||
1126 | extern void blk_complete_request(struct request *); | 1129 | extern void blk_complete_request(struct request *); |
1127 | extern void __blk_complete_request(struct request *); | 1130 | extern void __blk_complete_request(struct request *); |
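At boundaries where a plain errno is still expected, for example when reporting a request's outcome back to a waiter or to user space, the newly exported blk_status_to_errno()/errno_to_blk_status() helpers perform the translation. A minimal sketch of an rq_end_io_fn-style callback under the new signature; struct mydrv_wait and mydrv_end_io() are hypothetical illustrations, not part of this patch:

#include <linux/blkdev.h>
#include <linux/completion.h>

struct mydrv_wait {			/* hypothetical completion cookie */
	struct completion done;
	int result;
};

static void mydrv_end_io(struct request *rq, blk_status_t status)
{
	struct mydrv_wait *w = rq->end_io_data;

	/* Convert the block-layer status back to a negative errno for the waiter. */
	w->result = blk_status_to_errno(status);
	complete(&w->done);
}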
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index dec227acc13b..5de5c53251ec 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h | |||
@@ -74,7 +74,7 @@ typedef void (*dm_release_clone_request_fn) (struct request *clone); | |||
74 | typedef int (*dm_endio_fn) (struct dm_target *ti, | 74 | typedef int (*dm_endio_fn) (struct dm_target *ti, |
75 | struct bio *bio, int *error); | 75 | struct bio *bio, int *error); |
76 | typedef int (*dm_request_endio_fn) (struct dm_target *ti, | 76 | typedef int (*dm_request_endio_fn) (struct dm_target *ti, |
77 | struct request *clone, int error, | 77 | struct request *clone, blk_status_t error, |
78 | union map_info *map_context); | 78 | union map_info *map_context); |
79 | 79 | ||
80 | typedef void (*dm_presuspend_fn) (struct dm_target *ti); | 80 | typedef void (*dm_presuspend_fn) (struct dm_target *ti); |
diff --git a/include/linux/ide.h b/include/linux/ide.h index 6980ca322074..dc152e4b7f73 100644 --- a/include/linux/ide.h +++ b/include/linux/ide.h | |||
@@ -671,7 +671,7 @@ struct ide_port_ops { | |||
671 | void (*init_dev)(ide_drive_t *); | 671 | void (*init_dev)(ide_drive_t *); |
672 | void (*set_pio_mode)(struct hwif_s *, ide_drive_t *); | 672 | void (*set_pio_mode)(struct hwif_s *, ide_drive_t *); |
673 | void (*set_dma_mode)(struct hwif_s *, ide_drive_t *); | 673 | void (*set_dma_mode)(struct hwif_s *, ide_drive_t *); |
674 | int (*reset_poll)(ide_drive_t *); | 674 | blk_status_t (*reset_poll)(ide_drive_t *); |
675 | void (*pre_reset)(ide_drive_t *); | 675 | void (*pre_reset)(ide_drive_t *); |
676 | void (*resetproc)(ide_drive_t *); | 676 | void (*resetproc)(ide_drive_t *); |
677 | void (*maskproc)(ide_drive_t *, int); | 677 | void (*maskproc)(ide_drive_t *, int); |
@@ -1092,7 +1092,7 @@ int generic_ide_ioctl(ide_drive_t *, struct block_device *, unsigned, unsigned l | |||
1092 | extern int ide_vlb_clk; | 1092 | extern int ide_vlb_clk; |
1093 | extern int ide_pci_clk; | 1093 | extern int ide_pci_clk; |
1094 | 1094 | ||
1095 | int ide_end_rq(ide_drive_t *, struct request *, int, unsigned int); | 1095 | int ide_end_rq(ide_drive_t *, struct request *, blk_status_t, unsigned int); |
1096 | void ide_kill_rq(ide_drive_t *, struct request *); | 1096 | void ide_kill_rq(ide_drive_t *, struct request *); |
1097 | 1097 | ||
1098 | void __ide_set_handler(ide_drive_t *, ide_handler_t *, unsigned int); | 1098 | void __ide_set_handler(ide_drive_t *, ide_handler_t *, unsigned int); |
@@ -1123,7 +1123,7 @@ extern int ide_devset_execute(ide_drive_t *drive, | |||
1123 | const struct ide_devset *setting, int arg); | 1123 | const struct ide_devset *setting, int arg); |
1124 | 1124 | ||
1125 | void ide_complete_cmd(ide_drive_t *, struct ide_cmd *, u8, u8); | 1125 | void ide_complete_cmd(ide_drive_t *, struct ide_cmd *, u8, u8); |
1126 | int ide_complete_rq(ide_drive_t *, int, unsigned int); | 1126 | int ide_complete_rq(ide_drive_t *, blk_status_t, unsigned int); |
1127 | 1127 | ||
1128 | void ide_tf_readback(ide_drive_t *drive, struct ide_cmd *cmd); | 1128 | void ide_tf_readback(ide_drive_t *drive, struct ide_cmd *cmd); |
1129 | void ide_tf_dump(const char *, struct ide_cmd *); | 1129 | void ide_tf_dump(const char *, struct ide_cmd *); |
diff --git a/include/scsi/osd_initiator.h b/include/scsi/osd_initiator.h index a09cca829082..a29d3086eb56 100644 --- a/include/scsi/osd_initiator.h +++ b/include/scsi/osd_initiator.h | |||
@@ -157,7 +157,7 @@ struct osd_request { | |||
157 | 157 | ||
158 | osd_req_done_fn *async_done; | 158 | osd_req_done_fn *async_done; |
159 | void *async_private; | 159 | void *async_private; |
160 | int async_error; | 160 | blk_status_t async_error; |
161 | int req_errors; | 161 | int req_errors; |
162 | }; | 162 | }; |
163 | 163 | ||