diff options
author | Sebastian Ott <sebott@linux.vnet.ibm.com> | 2017-06-15 11:13:15 -0400 |
---|---|---|
committer | Martin Schwidefsky <schwidefsky@de.ibm.com> | 2017-08-09 09:09:42 -0400 |
commit | a3c1a2194a7c776c08ad704cd8a3b3ea694c60e6 (patch) | |
tree | d4dd56947221acb864d9ef6b213ab7421c4cb957 | |
parent | 5db23179998ded72cb8f58a296c0e99716d7df5b (diff) |
s390/scm: use common completion path
Since commit caf7df122721 ("block: remove the errors field from struct request")
rq->errors can't be (mis)used by block device drivers to store the error
condition for usage during async completion. Because of that I simply used
async completion only for the non-error paths.
This patch places the error within the private data of struct request and
uses async completion for all paths again.
Signed-off-by: Sebastian Ott <sebott@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
-rw-r--r-- | drivers/s390/block/scm_blk.c | 13 |
1 files changed, 8 insertions, 5 deletions
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index 0071febac9e6..2e7fd966c515 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -249,13 +249,13 @@ static void scm_request_requeue(struct scm_request *scmrq)
 static void scm_request_finish(struct scm_request *scmrq)
 {
 	struct scm_blk_dev *bdev = scmrq->bdev;
+	int *error;
 	int i;
 
 	for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) {
-		if (scmrq->error)
-			blk_mq_end_request(scmrq->request[i], scmrq->error);
-		else
-			blk_mq_complete_request(scmrq->request[i]);
+		error = blk_mq_rq_to_pdu(scmrq->request[i]);
+		*error = scmrq->error;
+		blk_mq_complete_request(scmrq->request[i]);
 	}
 
 	atomic_dec(&bdev->queued_reqs);
@@ -415,7 +415,9 @@ void scm_blk_irq(struct scm_device *scmdev, void *data, blk_status_t error)
 
 static void scm_blk_request_done(struct request *req)
 {
-	blk_mq_end_request(req, 0);
+	int *error = blk_mq_rq_to_pdu(req);
+
+	blk_mq_end_request(req, *error);
 }
 
 static const struct block_device_operations scm_blk_devops = {
@@ -448,6 +450,7 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
 	atomic_set(&bdev->queued_reqs, 0);
 
 	bdev->tag_set.ops = &scm_mq_ops;
+	bdev->tag_set.cmd_size = sizeof(int);
 	bdev->tag_set.nr_hw_queues = nr_requests;
 	bdev->tag_set.queue_depth = nr_requests_per_io * nr_requests;
 	bdev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;