Diffstat (limited to 'drivers/ide/ide-io.c')
-rw-r--r--	drivers/ide/ide-io.c	57
1 file changed, 35 insertions(+), 22 deletions(-)
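Editorial note, not part of the commit: this patch switches ide-io.c from raw struct request field access (rq->sector, rq->nr_sectors, rq->hard_nr_sectors, rq->hard_cur_sectors, rq->data_len, rq->data) to the block-layer accessors (blk_rq_pos(), blk_rq_sectors(), blk_rq_cur_sectors(), blk_rq_bytes(), rq->special) and replaces elv_next_request() with blk_fetch_request(). Because blk_fetch_request() dequeues the request, the retry and error paths must hand the in-flight request back via blk_requeue_request(), which is what the new rq_in_flight bookkeeping and ide_requeue_and_plug() below are for. A minimal sketch of the accessor pattern, mirroring ide_rq_bytes() in the first hunk (example_rq_bytes is a hypothetical name, for illustration only):

	#include <linux/blkdev.h>

	/* Illustrative only: size of the current chunk of a request using the
	 * block-layer accessors rather than raw struct request fields. */
	static unsigned int example_rq_bytes(struct request *rq)
	{
		if (blk_pc_request(rq))		/* BLOCK_PC: whole payload length */
			return blk_rq_bytes(rq);
		else				/* fs request: current segment only */
			return blk_rq_cur_sectors(rq) << 9;
	}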
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 6415a2e2ba87..bba4297f2f03 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -116,9 +116,9 @@ void ide_complete_cmd(ide_drive_t *drive, struct ide_cmd *cmd, u8 stat, u8 err)
 unsigned int ide_rq_bytes(struct request *rq)
 {
 	if (blk_pc_request(rq))
-		return rq->data_len;
+		return blk_rq_bytes(rq);
 	else
-		return rq->hard_cur_sectors << 9;
+		return blk_rq_cur_sectors(rq) << 9;
 }
 EXPORT_SYMBOL_GPL(ide_rq_bytes);
 
@@ -133,7 +133,7 @@ int ide_complete_rq(ide_drive_t *drive, int error, unsigned int nr_bytes)
 	 * and complete the whole request right now
 	 */
 	if (blk_noretry_request(rq) && error <= 0)
-		nr_bytes = rq->hard_nr_sectors << 9;
+		nr_bytes = blk_rq_sectors(rq) << 9;
 
 	rc = ide_end_rq(drive, rq, error, nr_bytes);
 	if (rc == 0)
@@ -248,14 +248,7 @@ void ide_map_sg(ide_drive_t *drive, struct ide_cmd *cmd)
 	struct scatterlist *sg = hwif->sg_table;
 	struct request *rq = cmd->rq;
 
-	if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
-		sg_init_one(sg, rq->buffer, rq->nr_sectors * SECTOR_SIZE);
-		cmd->sg_nents = 1;
-	} else if (!rq->bio) {
-		sg_init_one(sg, rq->data, rq->data_len);
-		cmd->sg_nents = 1;
-	} else
-		cmd->sg_nents = blk_rq_map_sg(drive->queue, rq, sg);
+	cmd->sg_nents = blk_rq_map_sg(drive->queue, rq, sg);
 }
 EXPORT_SYMBOL_GPL(ide_map_sg);
 
@@ -286,7 +279,7 @@ static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
 
 	if (cmd) {
 		if (cmd->protocol == ATA_PROT_PIO) {
-			ide_init_sg_cmd(cmd, rq->nr_sectors << 9);
+			ide_init_sg_cmd(cmd, blk_rq_sectors(rq) << 9);
 			ide_map_sg(drive, cmd);
 		}
 
@@ -371,7 +364,7 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
 	if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE)
 		return execute_drive_cmd(drive, rq);
 	else if (blk_pm_request(rq)) {
-		struct request_pm_state *pm = rq->data;
+		struct request_pm_state *pm = rq->special;
 #ifdef DEBUG_PM
 		printk("%s: start_power_step(step: %d)\n",
 			drive->name, pm->pm_step);
@@ -394,7 +387,7 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
 
 		drv = *(struct ide_driver **)rq->rq_disk->private_data;
 
-		return drv->do_request(drive, rq, rq->sector);
+		return drv->do_request(drive, rq, blk_rq_pos(rq));
 	}
 	return do_special(drive);
 kill_rq:
@@ -484,6 +477,9 @@ void do_ide_request(struct request_queue *q)
 
 	spin_unlock_irq(q->queue_lock);
 
+	/* HLD do_request() callback might sleep, make sure it's okay */
+	might_sleep();
+
 	if (ide_lock_host(host, hwif))
 		goto plug_device_2;
 
@@ -491,10 +487,10 @@ void do_ide_request(struct request_queue *q)
 
 	if (!ide_lock_port(hwif)) {
 		ide_hwif_t *prev_port;
+
+		WARN_ON_ONCE(hwif->rq);
 repeat:
 		prev_port = hwif->host->cur_port;
-		hwif->rq = NULL;
-
 		if (drive->dev_flags & IDE_DFLAG_SLEEPING &&
 		    time_after(drive->sleep, jiffies)) {
 			ide_unlock_port(hwif);
@@ -523,7 +519,9 @@ repeat:
 		 * we know that the queue isn't empty, but this can happen
 		 * if the q->prep_rq_fn() decides to kill a request
 		 */
-		rq = elv_next_request(drive->queue);
+		if (!rq)
+			rq = blk_fetch_request(drive->queue);
+
 		spin_unlock_irq(q->queue_lock);
 		spin_lock_irq(&hwif->lock);
 
@@ -535,7 +533,7 @@ repeat:
 		/*
 		 * Sanity: don't accept a request that isn't a PM request
 		 * if we are currently power managed. This is very important as
-		 * blk_stop_queue() doesn't prevent the elv_next_request()
+		 * blk_stop_queue() doesn't prevent the blk_fetch_request()
 		 * above to return us whatever is in the queue. Since we call
 		 * ide_do_request() ourselves, we end up taking requests while
 		 * the queue is blocked...
@@ -559,8 +557,11 @@ repeat:
 		startstop = start_request(drive, rq);
 		spin_lock_irq(&hwif->lock);
 
-		if (startstop == ide_stopped)
+		if (startstop == ide_stopped) {
+			rq = hwif->rq;
+			hwif->rq = NULL;
 			goto repeat;
+		}
 	} else
 		goto plug_device;
 out:
@@ -576,18 +577,24 @@ plug_device:
 plug_device_2:
 	spin_lock_irq(q->queue_lock);
 
+	if (rq)
+		blk_requeue_request(q, rq);
 	if (!elv_queue_empty(q))
 		blk_plug_device(q);
 }
 
-static void ide_plug_device(ide_drive_t *drive)
+static void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq)
 {
 	struct request_queue *q = drive->queue;
 	unsigned long flags;
 
 	spin_lock_irqsave(q->queue_lock, flags);
+
+	if (rq)
+		blk_requeue_request(q, rq);
 	if (!elv_queue_empty(q))
 		blk_plug_device(q);
+
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 
@@ -636,6 +643,7 @@ void ide_timer_expiry (unsigned long data)
 	unsigned long flags;
 	int wait = -1;
 	int plug_device = 0;
+	struct request *uninitialized_var(rq_in_flight);
 
 	spin_lock_irqsave(&hwif->lock, flags);
 
@@ -697,6 +705,8 @@ void ide_timer_expiry (unsigned long data)
 		spin_lock_irq(&hwif->lock);
 		enable_irq(hwif->irq);
 		if (startstop == ide_stopped && hwif->polling == 0) {
+			rq_in_flight = hwif->rq;
+			hwif->rq = NULL;
 			ide_unlock_port(hwif);
 			plug_device = 1;
 		}
@@ -705,7 +715,7 @@ void ide_timer_expiry (unsigned long data)
 
 	if (plug_device) {
 		ide_unlock_host(hwif->host);
-		ide_plug_device(drive);
+		ide_requeue_and_plug(drive, rq_in_flight);
 	}
 }
 
@@ -791,6 +801,7 @@ irqreturn_t ide_intr (int irq, void *dev_id)
 	ide_startstop_t startstop;
 	irqreturn_t irq_ret = IRQ_NONE;
 	int plug_device = 0;
+	struct request *uninitialized_var(rq_in_flight);
 
 	if (host->host_flags & IDE_HFLAG_SERIALIZE) {
 		if (hwif != host->cur_port)
@@ -870,6 +881,8 @@ irqreturn_t ide_intr (int irq, void *dev_id)
 	 */
 	if (startstop == ide_stopped && hwif->polling == 0) {
 		BUG_ON(hwif->handler);
+		rq_in_flight = hwif->rq;
+		hwif->rq = NULL;
 		ide_unlock_port(hwif);
 		plug_device = 1;
 	}
@@ -879,7 +892,7 @@ out:
 out_early:
 	if (plug_device) {
 		ide_unlock_host(hwif->host);
-		ide_plug_device(drive);
+		ide_requeue_and_plug(drive, rq_in_flight);
 	}
 
 	return irq_ret;