Diffstat (limited to 'drivers/ide/ide-io.c')
-rw-r--r--	drivers/ide/ide-io.c	57
1 files changed, 57 insertions, 0 deletions
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 4f2f138de2ca..622a55c72f03 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -223,6 +223,63 @@ static ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *
 }
 
 /**
+ * ide_end_dequeued_request - complete an IDE I/O
+ * @drive: IDE device for the I/O
+ * @uptodate:
+ * @nr_sectors: number of sectors completed
+ *
+ * Complete an I/O that is no longer on the request queue. This
+ * typically occurs when we pull the request and issue a REQUEST_SENSE.
+ * We must still finish the old request but we must not tamper with the
+ * queue in the meantime.
+ *
+ * NOTE: This path does not handle barrier, but barrier is not supported
+ * on ide-cd anyway.
+ */
+
+int ide_end_dequeued_request(ide_drive_t *drive, struct request *rq,
+			     int uptodate, int nr_sectors)
+{
+	unsigned long flags;
+	int ret = 1;
+
+	spin_lock_irqsave(&ide_lock, flags);
+
+	BUG_ON(!(rq->flags & REQ_STARTED));
+
+	/*
+	 * if failfast is set on a request, override number of sectors and
+	 * complete the whole request right now
+	 */
+	if (blk_noretry_request(rq) && end_io_error(uptodate))
+		nr_sectors = rq->hard_nr_sectors;
+
+	if (!blk_fs_request(rq) && end_io_error(uptodate) && !rq->errors)
+		rq->errors = -EIO;
+
+	/*
+	 * decide whether to reenable DMA -- 3 is a random magic for now,
+	 * if we DMA timeout more than 3 times, just stay in PIO
+	 */
+	if (drive->state == DMA_PIO_RETRY && drive->retry_pio <= 3) {
+		drive->state = 0;
+		HWGROUP(drive)->hwif->ide_dma_on(drive);
+	}
+
+	if (!end_that_request_first(rq, uptodate, nr_sectors)) {
+		add_disk_randomness(rq->rq_disk);
+		if (blk_rq_tagged(rq))
+			blk_queue_end_tag(drive->queue, rq);
+		end_that_request_last(rq, uptodate);
+		ret = 0;
+	}
+	spin_unlock_irqrestore(&ide_lock, flags);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(ide_end_dequeued_request);
+
+
+/**
  * ide_complete_pm_request - end the current Power Management request
  * @drive: target drive
  * @rq: request
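
A minimal caller sketch (not part of this patch): the kernel-doc above says the helper finishes a request that has already been pulled off the queue, typically so a REQUEST_SENSE can be issued in its place, and that the queue itself must not be touched. The function and helper names below (cdrom_finish_dequeued_rq, failed_rq) are illustrative assumptions, not code from this commit; only ide_end_dequeued_request() and the rq->hard_nr_sectors field come from the diff itself.

#include <linux/ide.h>

/*
 * Hypothetical error-path helper: finish a request that was already
 * dequeued so that a REQUEST_SENSE could be queued in front of it.
 */
static void cdrom_finish_dequeued_rq(ide_drive_t *drive,
				     struct request *failed_rq)
{
	/*
	 * uptodate == 0 marks the request as failed; passing its full
	 * size completes the whole request in one call, matching the
	 * failfast handling inside ide_end_dequeued_request().
	 */
	ide_end_dequeued_request(drive, failed_rq, 0,
				 failed_rq->hard_nr_sectors);
}

The point of the separate entry is that ide_end_request() assumes the request is still at the head of the queue, whereas this path must complete an off-queue request without disturbing whatever was queued in its place.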