Diffstat (limited to 'drivers/ide/ide-io.c')
 -rw-r--r--  drivers/ide/ide-io.c | 61
 1 file changed, 31 insertions(+), 30 deletions(-)
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 8445b484ae69..b137f27a34d5 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -68,8 +68,10 @@ int ide_end_rq(ide_drive_t *drive, struct request *rq, blk_status_t error,
 	}
 
 	if (!blk_update_request(rq, error, nr_bytes)) {
-		if (rq == drive->sense_rq)
+		if (rq == drive->sense_rq) {
 			drive->sense_rq = NULL;
+			drive->sense_rq_active = false;
+		}
 
 		__blk_mq_end_request(rq, error);
 		return 0;
@@ -451,16 +453,11 @@ void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq)
 		blk_mq_delay_run_hw_queue(q->queue_hw_ctx[0], 3);
 }
 
-/*
- * Issue a new request to a device.
- */
-blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *hctx,
-			  const struct blk_mq_queue_data *bd)
+blk_status_t ide_issue_rq(ide_drive_t *drive, struct request *rq,
+			  bool local_requeue)
 {
-	ide_drive_t *drive = hctx->queue->queuedata;
-	ide_hwif_t *hwif = drive->hwif;
+	ide_hwif_t *hwif = drive->hwif;
 	struct ide_host *host = hwif->host;
-	struct request *rq = bd->rq;
 	ide_startstop_t startstop;
 
 	if (!blk_rq_is_passthrough(rq) && !(rq->rq_flags & RQF_DONTPREP)) {
@@ -474,8 +471,6 @@ blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *hctx,
 	if (ide_lock_host(host, hwif))
 		return BLK_STS_DEV_RESOURCE;
 
-	blk_mq_start_request(rq);
-
 	spin_lock_irq(&hwif->lock);
 
 	if (!ide_lock_port(hwif)) {
@@ -511,18 +506,6 @@ repeat:
 		drive->dev_flags &= ~(IDE_DFLAG_SLEEPING | IDE_DFLAG_PARKED);
 
 		/*
-		 * we know that the queue isn't empty, but this can happen
-		 * if ->prep_rq() decides to kill a request
-		 */
-		if (!rq) {
-			rq = bd->rq;
-			if (!rq) {
-				ide_unlock_port(hwif);
-				goto out;
-			}
-		}
-
-		/*
 		 * Sanity: don't accept a request that isn't a PM request
 		 * if we are currently power managed. This is very important as
 		 * blk_stop_queue() doesn't prevent the blk_fetch_request()
@@ -560,9 +543,12 @@ repeat:
 		}
 	} else {
 plug_device:
+		if (local_requeue)
+			list_add(&rq->queuelist, &drive->rq_list);
 		spin_unlock_irq(&hwif->lock);
 		ide_unlock_host(host);
-		ide_requeue_and_plug(drive, rq);
+		if (!local_requeue)
+			ide_requeue_and_plug(drive, rq);
 		return BLK_STS_OK;
 	}
 
@@ -573,6 +559,26 @@ out:
 	return BLK_STS_OK;
 }
 
+/*
+ * Issue a new request to a device.
+ */
+blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *hctx,
+			  const struct blk_mq_queue_data *bd)
+{
+	ide_drive_t *drive = hctx->queue->queuedata;
+	ide_hwif_t *hwif = drive->hwif;
+
+	spin_lock_irq(&hwif->lock);
+	if (drive->sense_rq_active) {
+		spin_unlock_irq(&hwif->lock);
+		return BLK_STS_DEV_RESOURCE;
+	}
+	spin_unlock_irq(&hwif->lock);
+
+	blk_mq_start_request(bd->rq);
+	return ide_issue_rq(drive, bd->rq, false);
+}
+
 static int drive_is_ready(ide_drive_t *drive)
 {
 	ide_hwif_t *hwif = drive->hwif;
@@ -893,13 +899,8 @@ EXPORT_SYMBOL_GPL(ide_pad_transfer);
 
 void ide_insert_request_head(ide_drive_t *drive, struct request *rq)
 {
-	ide_hwif_t *hwif = drive->hwif;
-	unsigned long flags;
-
-	spin_lock_irqsave(&hwif->lock, flags);
+	drive->sense_rq_active = true;
 	list_add_tail(&rq->queuelist, &drive->rq_list);
-	spin_unlock_irqrestore(&hwif->lock, flags);
-
 	kblockd_schedule_work(&drive->rq_work);
 }
 EXPORT_SYMBOL_GPL(ide_insert_request_head);
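
Not part of the commit above: a minimal sketch of how a worker bound to drive->rq_work could drain drive->rq_list and feed requests back through the new ide_issue_rq(..., local_requeue == true) path. Only ide_issue_rq(), drive->rq_list, drive->rq_work and hwif->lock come from this diff; the worker name, the list_splice_init() batching, and the assumption that ide_issue_rq() is declared in <linux/ide.h> are illustrative guesses, not the commit's own code.

#include <linux/ide.h>
#include <linux/list.h>
#include <linux/workqueue.h>

/* Hypothetical worker; not taken from the diff above. */
static void example_rq_insert_work(struct work_struct *work)
{
	ide_drive_t *drive = container_of(work, ide_drive_t, rq_work);
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq;
	LIST_HEAD(list);

	/* Detach everything queued by ide_insert_request_head() in one go. */
	spin_lock_irq(&hwif->lock);
	list_splice_init(&drive->rq_list, &list);
	spin_unlock_irq(&hwif->lock);

	while (!list_empty(&list)) {
		rq = list_first_entry(&list, struct request, queuelist);
		list_del_init(&rq->queuelist);

		/*
		 * local_requeue == true: if the port is busy, ide_issue_rq()
		 * puts the request back on drive->rq_list instead of calling
		 * ide_requeue_and_plug(), so it stays on the driver-private
		 * list rather than re-entering blk-mq while sense_rq_active
		 * is set.
		 */
		ide_issue_rq(drive, rq, true);
	}
}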