author | Kiyoshi Ueda <k-ueda@ct.jp.nec.com> | 2008-01-28 04:29:42 -0500
committer | Jens Axboe <jens.axboe@oracle.com> | 2008-01-28 04:36:54 -0500
commit | 4c4e21486154c8db93c7e82a4b3ab5f621cd1d97 (patch)
tree | cf3b6e8e8e098e7f8a7b95aa3980f96b5c9f01f1 /drivers/s390/char/tape_block.c
parent | fd539832c7d3a242269374dbcae2cd54da150930 (diff)
blk_end_request: changing s390 (take 4)
This patch converts s390 to use blk_end_request interfaces.
Related 'uptodate' arguments are converted to 'error'.
As a result, the interfaces of internal functions below are changed:
o dasd_end_request
o tapeblock_end_request
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: linux390@de.ibm.com
Signed-off-by: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
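For context, here is a minimal sketch of the conversion pattern this patch applies, assuming the 2.6.24-era block layer API; `my_end_request()` is a hypothetical stand-in for a driver completion helper such as `tapeblock_end_request()`:

```c
#include <linux/blkdev.h>	/* __blk_end_request(), blk_rq_bytes() */
#include <linux/bug.h>		/* BUG() */

/*
 * Hypothetical driver helper (stand-in for something like
 * tapeblock_end_request()) after the conversion.  The old-style
 * equivalent was the two-step sequence:
 *
 *	if (end_that_request_first(req, uptodate, req->hard_nr_sectors))
 *		BUG();
 *	end_that_request_last(req, uptodate);
 *
 * where 'uptodate' was 1 for success and 0 for failure.
 */
static void my_end_request(struct request *req, int error)
{
	/*
	 * Completing blk_rq_bytes(req) bytes finishes the whole request,
	 * so a non-zero return ("not fully completed") indicates a bug.
	 * __blk_end_request() expects the caller to hold the queue lock;
	 * blk_end_request() takes the lock itself.
	 */
	if (__blk_end_request(req, error, blk_rq_bytes(req)))
		BUG();
}
```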
Diffstat (limited to 'drivers/s390/char/tape_block.c')
-rw-r--r-- | drivers/s390/char/tape_block.c | 13
1 file changed, 6 insertions, 7 deletions
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index eeb92e2ed0cc..ddc4a114e7f4 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -74,11 +74,10 @@ tapeblock_trigger_requeue(struct tape_device *device)
  * Post finished request.
  */
 static void
-tapeblock_end_request(struct request *req, int uptodate)
+tapeblock_end_request(struct request *req, int error)
 {
-	if (end_that_request_first(req, uptodate, req->hard_nr_sectors))
+	if (__blk_end_request(req, error, blk_rq_bytes(req)))
 		BUG();
-	end_that_request_last(req, uptodate);
 }
 
 static void
@@ -91,7 +90,7 @@ __tapeblock_end_request(struct tape_request *ccw_req, void *data)
 
 	device = ccw_req->device;
 	req = (struct request *) data;
-	tapeblock_end_request(req, ccw_req->rc == 0);
+	tapeblock_end_request(req, (ccw_req->rc == 0) ? 0 : -EIO);
 	if (ccw_req->rc == 0)
 		/* Update position. */
 		device->blk_data.block_position =
@@ -119,7 +118,7 @@ tapeblock_start_request(struct tape_device *device, struct request *req)
 	ccw_req = device->discipline->bread(device, req);
 	if (IS_ERR(ccw_req)) {
 		DBF_EVENT(1, "TBLOCK: bread failed\n");
-		tapeblock_end_request(req, 0);
+		tapeblock_end_request(req, -EIO);
 		return PTR_ERR(ccw_req);
 	}
 	ccw_req->callback = __tapeblock_end_request;
@@ -132,7 +131,7 @@ tapeblock_start_request(struct tape_device *device, struct request *req)
 	 * Start/enqueueing failed. No retries in
 	 * this case.
 	 */
-	tapeblock_end_request(req, 0);
+	tapeblock_end_request(req, -EIO);
 	device->discipline->free_bread(ccw_req);
 }
 
@@ -177,7 +176,7 @@ tapeblock_requeue(struct work_struct *work) {
 	if (rq_data_dir(req) == WRITE) {
 		DBF_EVENT(1, "TBLOCK: Rejecting write request\n");
 		blkdev_dequeue_request(req);
-		tapeblock_end_request(req, 0);
+		tapeblock_end_request(req, -EIO);
 		continue;
 	}
 	spin_unlock_irq(&device->blk_data.request_queue_lock);
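The hunks above also show the second half of the conversion: the boolean-style 'uptodate' argument (1 = success, 0 = failure) becomes an errno-style 'error' (0 = success, negative errno on failure), which is why the outright failure paths now pass -EIO rather than 0. A minimal sketch of that mapping, with `my_rc_to_errno()` as a hypothetical helper for what `__tapeblock_end_request()` open-codes:

```c
#include <linux/errno.h>	/* EIO */

/*
 * Hypothetical helper: map a driver-internal return code to the
 * errno-style value expected by __blk_end_request().  The patch
 * open-codes this as "(ccw_req->rc == 0) ? 0 : -EIO".
 */
static inline int my_rc_to_errno(int rc)
{
	return (rc == 0) ? 0 : -EIO;
}
```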