author		Tejun Heo <htejun@gmail.com>	2006-05-15 07:59:15 -0400
committer	Tejun Heo <htejun@gmail.com>	2006-05-15 07:59:15 -0400
commit		12436c30f4808e00fa008c6787c609bc6ae216ba (patch)
tree		22ddaad8def4f4a77637a3da62a8d8f422a95c29 /drivers/scsi/libata-core.c
parent		88ce7550c38f46c8697f53727a571bf838bee398 (diff)
parent		7894eaf291238a62a565e9e9777483beeb00eeae (diff)

Merge branch 'irq-pio'

Conflicts:

	drivers/scsi/libata-core.c
	include/linux/libata.h

Diffstat (limited to 'drivers/scsi/libata-core.c')

-rw-r--r--	drivers/scsi/libata-core.c	803
1 file changed, 495 insertions(+), 308 deletions(-)
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index 2969599ec0b9..c859b96b891a 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -1345,11 +1345,19 @@ static int ata_dev_configure(struct ata_device *dev, int print_info)
 					dev->cylinders, dev->heads, dev->sectors);
 		}
 
+		if (dev->id[59] & 0x100) {
+			dev->multi_count = dev->id[59] & 0xff;
+			DPRINTK("ata%u: dev %u multi count %u\n",
+				ap->id, dev->devno, dev->multi_count);
+		}
+
 		dev->cdb_len = 16;
 	}
 
 	/* ATAPI-specific feature tests */
 	else if (dev->class == ATA_DEV_ATAPI) {
+		char *cdb_intr_string = "";
+
 		rc = atapi_cdb_len(id);
 		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
 			ata_dev_printk(dev, KERN_WARNING,
@@ -1359,10 +1367,16 @@ static int ata_dev_configure(struct ata_device *dev, int print_info)
 		}
 		dev->cdb_len = (unsigned int) rc;
 
+		if (ata_id_cdb_intr(dev->id)) {
+			dev->flags |= ATA_DFLAG_CDB_INTR;
+			cdb_intr_string = ", CDB intr";
+		}
+
 		/* print device info to dmesg */
 		if (print_info)
-			ata_dev_printk(dev, KERN_INFO, "ATAPI, max %s\n",
-				       ata_mode_string(xfer_mask));
+			ata_dev_printk(dev, KERN_INFO, "ATAPI, max %s%s\n",
+				       ata_mode_string(xfer_mask),
+				       cdb_intr_string);
 	}
 
 	ap->host->max_cmd_len = 0;
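
Review note: the two additions above teach ata_dev_configure() about capabilities the rest of this patch relies on: IDENTIFY DEVICE word 59 advertises the current READ/WRITE MULTIPLE setting (bit 8 validates the low byte), and ata_id_cdb_intr() marks ATAPI devices that raise an interrupt when ready to receive the CDB. A minimal userspace sketch of the word-59 decode; the helper name and test value are made up for illustration:

#include <stdio.h>

/* Hypothetical helper mirroring the hunk above: bit 8 of IDENTIFY DEVICE
 * word 59 validates the low byte, which holds the active multi count. */
static unsigned int multi_count_from_id(const unsigned short *id)
{
	if (id[59] & 0x100)
		return id[59] & 0xff;
	return 0;	/* READ/WRITE MULTIPLE not configured */
}

int main(void)
{
	unsigned short id[256] = { 0 };

	id[59] = 0x0110;	/* valid bit set, 16 sectors per DRQ block */
	printf("multi count: %u\n", multi_count_from_id(id));
	return 0;
}

The stored multi_count is consumed by the new ata_pio_sectors() further down in this patch.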
@@ -3211,6 +3225,15 @@ int ata_check_atapi_dma(struct ata_queued_cmd *qc)
 	if (ap->ops->check_atapi_dma)
 		rc = ap->ops->check_atapi_dma(qc);
 
+	/* We don't support polling DMA.
+	 * Use PIO if the LLDD handles only interrupts in
+	 * the HSM_ST_LAST state and the ATAPI device
+	 * generates CDB interrupts.
+	 */
+	if ((ap->flags & ATA_FLAG_PIO_POLLING) &&
+	    (qc->dev->flags & ATA_DFLAG_CDB_INTR))
+		rc = 1;
+
 	return rc;
 }
 /**
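
Review note: per the comment above, a controller flagged ATA_FLAG_PIO_POLLING services interrupts only in HSM_ST_LAST, so an ATAPI device that interrupts when ready for its CDB would raise an interrupt (in HSM_ST_FIRST) that nobody handles; forcing rc = 1 makes ata_check_atapi_dma() fall back to PIO. A toy rendering of the decision, with illustrative flag values (the real ones live in include/linux/libata.h):

#include <stdio.h>

#define ATA_FLAG_PIO_POLLING	0x0800	/* illustrative value only */
#define ATA_DFLAG_CDB_INTR	0x0004	/* illustrative value only */

/* Mirrors the added check: refuse ATAPI DMA when the CDB-ready interrupt
 * would fire in a state the LLDD never services. */
static int atapi_dma_refused(unsigned int port_flags, unsigned int dev_flags)
{
	return (port_flags & ATA_FLAG_PIO_POLLING) &&
	       (dev_flags & ATA_DFLAG_CDB_INTR);
}

int main(void)
{
	printf("polling port + CDB-intr device -> refused: %d\n",
	       atapi_dma_refused(ATA_FLAG_PIO_POLLING, ATA_DFLAG_CDB_INTR));
	printf("irq-driven port + CDB-intr device -> refused: %d\n",
	       atapi_dma_refused(0, ATA_DFLAG_CDB_INTR));
	return 0;
}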
@@ -3458,7 +3481,6 @@ void ata_poll_qc_complete(struct ata_queued_cmd *qc)
 		qc = ata_qc_from_tag(ap, qc->tag);
 		if (qc) {
 			if (!(qc->err_mask & AC_ERR_HSM)) {
-				ap->flags &= ~ATA_FLAG_NOINTR;
 				ata_irq_on(ap);
 				ata_qc_complete(qc);
 			} else
@@ -3466,7 +3488,6 @@ void ata_poll_qc_complete(struct ata_queued_cmd *qc)
 		}
 	} else {
 		/* old EH */
-		ap->flags &= ~ATA_FLAG_NOINTR;
 		ata_irq_on(ap);
 		ata_qc_complete(qc);
 	}
@@ -3475,105 +3496,6 @@ void ata_poll_qc_complete(struct ata_queued_cmd *qc)
 }
 
 /**
- *	ata_pio_poll - poll using PIO, depending on current state
- *	@qc: qc in progress
- *
- *	LOCKING:
- *	None.  (executing in kernel thread context)
- *
- *	RETURNS:
- *	timeout value to use
- */
-static unsigned long ata_pio_poll(struct ata_queued_cmd *qc)
-{
-	struct ata_port *ap = qc->ap;
-	u8 status;
-	unsigned int poll_state = HSM_ST_UNKNOWN;
-	unsigned int reg_state = HSM_ST_UNKNOWN;
-
-	switch (ap->hsm_task_state) {
-	case HSM_ST:
-	case HSM_ST_POLL:
-		poll_state = HSM_ST_POLL;
-		reg_state = HSM_ST;
-		break;
-	case HSM_ST_LAST:
-	case HSM_ST_LAST_POLL:
-		poll_state = HSM_ST_LAST_POLL;
-		reg_state = HSM_ST_LAST;
-		break;
-	default:
-		BUG();
-		break;
-	}
-
-	status = ata_chk_status(ap);
-	if (status & ATA_BUSY) {
-		if (time_after(jiffies, ap->pio_task_timeout)) {
-			qc->err_mask |= AC_ERR_TIMEOUT;
-			ap->hsm_task_state = HSM_ST_TMOUT;
-			return 0;
-		}
-		ap->hsm_task_state = poll_state;
-		return ATA_SHORT_PAUSE;
-	}
-
-	ap->hsm_task_state = reg_state;
-	return 0;
-}
-
-/**
- *	ata_pio_complete - check if drive is busy or idle
- *	@qc: qc to complete
- *
- *	LOCKING:
- *	None.  (executing in kernel thread context)
- *
- *	RETURNS:
- *	Non-zero if qc completed, zero otherwise.
- */
-static int ata_pio_complete(struct ata_queued_cmd *qc)
-{
-	struct ata_port *ap = qc->ap;
-	u8 drv_stat;
-
-	/*
-	 * This is purely heuristic.  This is a fast path.  Sometimes when
-	 * we enter, BSY will be cleared in a chk-status or two.  If not,
-	 * the drive is probably seeking or something.  Snooze for a couple
-	 * msecs, then chk-status again.  If still busy, fall back to
-	 * HSM_ST_POLL state.
-	 */
-	drv_stat = ata_busy_wait(ap, ATA_BUSY, 10);
-	if (drv_stat & ATA_BUSY) {
-		msleep(2);
-		drv_stat = ata_busy_wait(ap, ATA_BUSY, 10);
-		if (drv_stat & ATA_BUSY) {
-			ap->hsm_task_state = HSM_ST_LAST_POLL;
-			ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
-			return 0;
-		}
-	}
-
-	drv_stat = ata_wait_idle(ap);
-	if (!ata_ok(drv_stat)) {
-		qc->err_mask |= __ac_err_mask(drv_stat);
-		ap->hsm_task_state = HSM_ST_ERR;
-		return 0;
-	}
-
-	ap->hsm_task_state = HSM_ST_IDLE;
-
-	WARN_ON(qc->err_mask);
-	ata_poll_qc_complete(qc);
-
-	/* another command may start at this point */
-
-	return 1;
-}
-
-
-/**
  *	swap_buf_le16 - swap halves of 16-bit words in place
  *	@buf: Buffer to swap
  *	@buf_words: Number of 16-bit words in buffer.
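
Review note: ata_pio_poll() and ata_pio_complete() are deleted here with no replacement in this hunk; their check-then-snooze-then-poll logic is folded into ata_hsm_move() and the rewritten ata_pio_task() later in the patch. The shared timing heuristic both helpers used, reduced to a runnable toy where fake_status() stands in for ata_chk_status() (nothing below is a kernel API):

#include <stdio.h>

#define ATA_BUSY 0x80

/* Stand-in for ata_chk_status(): reports BSY a few times, then DRDY|DSC. */
static unsigned char fake_status(void)
{
	static int calls;

	return (calls++ < 4) ? ATA_BUSY : 0x50;
}

int main(void)
{
	int i;

	/* fast path: a couple of quick checks */
	for (i = 0; i < 2; i++)
		if (!(fake_status() & ATA_BUSY))
			goto idle;

	/* snooze briefly (msleep(2) in the kernel), then check once more */
	if (!(fake_status() & ATA_BUSY))
		goto idle;

	/* still busy: switch to timer-driven polling instead of spinning */
	printf("still BSY -> poll again after ATA_SHORT_PAUSE\n");
	return 0;

idle:
	printf("BSY cleared quickly, continue the protocol\n");
	return 0;
}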
@@ -3741,7 +3663,23 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
 	page = nth_page(page, (offset >> PAGE_SHIFT));
 	offset %= PAGE_SIZE;
 
-	buf = kmap(page) + offset;
+	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
+
+	if (PageHighMem(page)) {
+		unsigned long flags;
+
+		local_irq_save(flags);
+		buf = kmap_atomic(page, KM_IRQ0);
+
+		/* do the actual data transfer */
+		ata_data_xfer(ap, buf + offset, ATA_SECT_SIZE, do_write);
+
+		kunmap_atomic(buf, KM_IRQ0);
+		local_irq_restore(flags);
+	} else {
+		buf = page_address(page);
+		ata_data_xfer(ap, buf + offset, ATA_SECT_SIZE, do_write);
+	}
 
 	qc->cursect++;
 	qc->cursg_ofs++;
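
Review note: ata_pio_sector() previously used kmap(), which may sleep; now that the transfer can run from the interrupt handler, highmem pages are mapped with kmap_atomic(KM_IRQ0) under local_irq_save() so the per-CPU KM_IRQ0 slot cannot be reentered by a nested interrupt. A shape-only sketch of the new branch structure, with the kernel mapping calls left as comments (kmap_atomic() and page_address() are kernel APIs, not reproduced here):

#include <stdio.h>
#include <string.h>

#define SECT_SIZE 512

/* Stand-in for ata_data_xfer(): fake a device-to-memory PIO read. */
static void data_xfer(unsigned char *buf, size_t len, int do_write)
{
	if (!do_write)
		memset(buf, 0x5a, len);
}

/* Mirrors the new branch structure in ata_pio_sector(). */
static void pio_sector(unsigned char *page, int page_is_highmem, int do_write)
{
	if (page_is_highmem) {
		/* kernel: local_irq_save(flags);
		 *         buf = kmap_atomic(page, KM_IRQ0); */
		data_xfer(page, SECT_SIZE, do_write);
		/* kernel: kunmap_atomic(buf, KM_IRQ0);
		 *         local_irq_restore(flags); */
	} else {
		/* kernel: buf = page_address(page); */
		data_xfer(page, SECT_SIZE, do_write);
	}
}

int main(void)
{
	unsigned char page[SECT_SIZE];

	pio_sector(page, 1, 0);
	printf("first byte after fake read: 0x%02x\n", page[0]);
	return 0;
}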
@@ -3750,14 +3688,68 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
 		qc->cursg++;
 		qc->cursg_ofs = 0;
 	}
+}
 
-	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
+/**
+ *	ata_pio_sectors - Transfer one or many 512-byte sectors.
+ *	@qc: Command on going
+ *
+ *	Transfer one or many ATA_SECT_SIZE of data from/to the
+ *	ATA device for the DRQ request.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+
+static void ata_pio_sectors(struct ata_queued_cmd *qc)
+{
+	if (is_multi_taskfile(&qc->tf)) {
+		/* READ/WRITE MULTIPLE */
+		unsigned int nsect;
+
+		WARN_ON(qc->dev->multi_count == 0);
+
+		nsect = min(qc->nsect - qc->cursect, qc->dev->multi_count);
+		while (nsect--)
+			ata_pio_sector(qc);
+	} else
+		ata_pio_sector(qc);
+}
+
+/**
+ *	atapi_send_cdb - Write CDB bytes to hardware
+ *	@ap: Port to which ATAPI device is attached.
+ *	@qc: Taskfile currently active
+ *
+ *	When device has indicated its readiness to accept
+ *	a CDB, this function is called.  Send the CDB.
+ *
+ *	LOCKING:
+ *	caller.
+ */
+
+static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
+{
+	/* send SCSI cdb */
+	DPRINTK("send cdb\n");
+	WARN_ON(qc->dev->cdb_len < 12);
 
-	/* do the actual data transfer */
-	do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
-	ata_data_xfer(ap, buf, ATA_SECT_SIZE, do_write);
+	ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);
+	ata_altstatus(ap); /* flush */
 
-	kunmap(page);
+	switch (qc->tf.protocol) {
+	case ATA_PROT_ATAPI:
+		ap->hsm_task_state = HSM_ST;
+		break;
+	case ATA_PROT_ATAPI_NODATA:
+		ap->hsm_task_state = HSM_ST_LAST;
+		break;
+	case ATA_PROT_ATAPI_DMA:
+		ap->hsm_task_state = HSM_ST_LAST;
+		/* initiate bmdma */
+		ap->ops->bmdma_start(qc);
+		break;
+	}
 }
 
 /**
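
Review note: atapi_send_cdb() extracts the CDB transmission that atapi_packet_task() used to do inline, and its switch records where the HSM goes next: ATAPI PIO stays in HSM_ST for the data phase, while NODATA and DMA go straight to HSM_ST_LAST (DMA also kicks bmdma). A compact model of that mapping; the enum names and values below are illustrative, not the kernel's:

#include <stdio.h>

enum proto { PROT_ATAPI_PIO, PROT_ATAPI_NODATA, PROT_ATAPI_DMA };
enum next_state { NEXT_HSM_ST, NEXT_HSM_ST_LAST };

/* Same mapping as the switch in atapi_send_cdb() above. */
static enum next_state state_after_cdb(enum proto p)
{
	switch (p) {
	case PROT_ATAPI_PIO:
		return NEXT_HSM_ST;		/* PIO data phase follows */
	case PROT_ATAPI_NODATA:
	case PROT_ATAPI_DMA:
	default:
		return NEXT_HSM_ST_LAST;	/* next stop is completion */
	}
}

int main(void)
{
	printf("pio=%d nodata=%d dma=%d\n",
	       state_after_cdb(PROT_ATAPI_PIO),
	       state_after_cdb(PROT_ATAPI_NODATA),
	       state_after_cdb(PROT_ATAPI_DMA));
	return 0;
}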
@@ -3823,7 +3815,23 @@ next_sg:
 		/* don't cross page boundaries */
 		count = min(count, (unsigned int)PAGE_SIZE - offset);
 
-	buf = kmap(page) + offset;
+	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
+
+	if (PageHighMem(page)) {
+		unsigned long flags;
+
+		local_irq_save(flags);
+		buf = kmap_atomic(page, KM_IRQ0);
+
+		/* do the actual data transfer */
+		ata_data_xfer(ap, buf + offset, count, do_write);
+
+		kunmap_atomic(buf, KM_IRQ0);
+		local_irq_restore(flags);
+	} else {
+		buf = page_address(page);
+		ata_data_xfer(ap, buf + offset, count, do_write);
+	}
 
 	bytes -= count;
 	qc->curbytes += count;
@@ -3834,13 +3842,6 @@ next_sg:
 		qc->cursg_ofs = 0;
 	}
 
-	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
-
-	/* do the actual data transfer */
-	ata_data_xfer(ap, buf, count, do_write);
-
-	kunmap(page);
-
 	if (bytes)
 		goto next_sg;
 }
@@ -3877,6 +3878,8 @@ static void atapi_pio_bytes(struct ata_queued_cmd *qc)
 	if (do_write != i_write)
 		goto err_out;
 
+	VPRINTK("ata%u: xfering %d bytes\n", ap->id, bytes);
+
 	__atapi_pio_bytes(qc, bytes);
 
 	return;
@@ -3888,186 +3891,294 @@ err_out:
 }
 
 /**
- *	ata_pio_block - start PIO on a block
- *	@qc: qc to transfer block for
+ *	ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
+ *	@ap: the target ata_port
+ *	@qc: qc on going
  *
- *	LOCKING:
- *	None.  (executing in kernel thread context)
+ *	RETURNS:
+ *	1 if ok in workqueue, 0 otherwise.
  */
-static void ata_pio_block(struct ata_queued_cmd *qc)
+
+static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
 {
-	struct ata_port *ap = qc->ap;
-	u8 status;
+	if (qc->tf.flags & ATA_TFLAG_POLLING)
+		return 1;
 
-	/*
-	 * This is purely heuristic.  This is a fast path.
-	 * Sometimes when we enter, BSY will be cleared in
-	 * a chk-status or two.  If not, the drive is probably seeking
-	 * or something.  Snooze for a couple msecs, then
-	 * chk-status again.  If still busy, fall back to
-	 * HSM_ST_POLL state.
-	 */
-	status = ata_busy_wait(ap, ATA_BUSY, 5);
-	if (status & ATA_BUSY) {
-		msleep(2);
-		status = ata_busy_wait(ap, ATA_BUSY, 10);
-		if (status & ATA_BUSY) {
-			ap->hsm_task_state = HSM_ST_POLL;
-			ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
-			return;
-		}
-	}
+	if (ap->hsm_task_state == HSM_ST_FIRST) {
+		if (qc->tf.protocol == ATA_PROT_PIO &&
+		    (qc->tf.flags & ATA_TFLAG_WRITE))
+			return 1;
 
-	/* check error */
-	if (status & (ATA_ERR | ATA_DF)) {
-		qc->err_mask |= AC_ERR_DEV;
-		ap->hsm_task_state = HSM_ST_ERR;
-		return;
+		if (is_atapi_taskfile(&qc->tf) &&
+		    !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
+			return 1;
 	}
 
-	/* transfer data if any */
-	if (is_atapi_taskfile(&qc->tf)) {
-		/* DRQ=0 means no more data to transfer */
-		if ((status & ATA_DRQ) == 0) {
-			ap->hsm_task_state = HSM_ST_LAST;
-			return;
-		}
+	return 0;
+}
 
-		atapi_pio_bytes(qc);
-	} else {
-		/* handle BSY=0, DRQ=0 as error */
-		if ((status & ATA_DRQ) == 0) {
+/**
+ *	ata_hsm_move - move the HSM to the next state.
+ *	@ap: the target ata_port
+ *	@qc: qc on going
+ *	@status: current device status
+ *	@in_wq: 1 if called from workqueue, 0 otherwise
+ *
+ *	RETURNS:
+ *	1 when poll next status needed, 0 otherwise.
+ */
+
+static int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
+			u8 status, int in_wq)
+{
+	unsigned long flags = 0;
+	int poll_next;
+
+	WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
+
+	/* Make sure ata_qc_issue_prot() does not throw things
+	 * like DMA polling into the workqueue. Notice that
+	 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
+	 */
+	WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
+
+fsm_start:
+	DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
+		ap->id, qc->tf.protocol, ap->hsm_task_state, status);
+
+	switch (ap->hsm_task_state) {
+	case HSM_ST_FIRST:
+		/* Send first data block or PACKET CDB */
+
+		/* If polling, we will stay in the work queue after
+		 * sending the data. Otherwise, interrupt handler
+		 * takes over after sending the data.
+		 */
+		poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
+
+		/* check device status */
+		if (unlikely((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ)) {
+			/* Wrong status. Let EH handle this */
 			qc->err_mask |= AC_ERR_HSM;
 			ap->hsm_task_state = HSM_ST_ERR;
-			return;
+			goto fsm_start;
 		}
 
-		ata_pio_sector(qc);
-	}
-}
+		/* Device should not ask for data transfer (DRQ=1)
+		 * when it finds something wrong.
+		 * We ignore DRQ here and stop the HSM by
+		 * changing hsm_task_state to HSM_ST_ERR and
+		 * let the EH abort the command or reset the device.
+		 */
+		if (unlikely(status & (ATA_ERR | ATA_DF))) {
+			printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n",
+			       ap->id, status);
+			qc->err_mask |= AC_ERR_DEV;
+			ap->hsm_task_state = HSM_ST_ERR;
+			goto fsm_start;
+		}
 
-static void ata_pio_error(struct ata_queued_cmd *qc)
-{
-	struct ata_port *ap = qc->ap;
+		/* Send the CDB (atapi) or the first data block (ata pio out).
+		 * During the state transition, interrupt handler shouldn't
+		 * be invoked before the data transfer is complete and
+		 * hsm_task_state is changed. Hence, the following locking.
+		 */
+		if (in_wq)
+			spin_lock_irqsave(&ap->host_set->lock, flags);
 
-	if (qc->tf.command != ATA_CMD_PACKET)
-		ata_dev_printk(qc->dev, KERN_WARNING, "PIO error\n");
+		if (qc->tf.protocol == ATA_PROT_PIO) {
+			/* PIO data out protocol.
+			 * send first data block.
+			 */
 
-	/* make sure qc->err_mask is available to
-	 * know what's wrong and recover
-	 */
-	WARN_ON(qc->err_mask == 0);
+			/* ata_pio_sectors() might change the state
+			 * to HSM_ST_LAST. so, the state is changed here
+			 * before ata_pio_sectors().
+			 */
+			ap->hsm_task_state = HSM_ST;
+			ata_pio_sectors(qc);
+			ata_altstatus(ap); /* flush */
+		} else
+			/* send CDB */
+			atapi_send_cdb(ap, qc);
+
+		if (in_wq)
+			spin_unlock_irqrestore(&ap->host_set->lock, flags);
+
+		/* if polling, ata_pio_task() handles the rest.
+		 * otherwise, interrupt handler takes over from here.
+		 */
+		break;
 
-	ap->hsm_task_state = HSM_ST_IDLE;
+	case HSM_ST:
+		/* complete command or read/write the data register */
+		if (qc->tf.protocol == ATA_PROT_ATAPI) {
+			/* ATAPI PIO protocol */
+			if ((status & ATA_DRQ) == 0) {
+				/* no more data to transfer */
+				ap->hsm_task_state = HSM_ST_LAST;
+				goto fsm_start;
+			}
 
-	ata_poll_qc_complete(qc);
-}
+			/* Device should not ask for data transfer (DRQ=1)
+			 * when it finds something wrong.
+			 * We ignore DRQ here and stop the HSM by
+			 * changing hsm_task_state to HSM_ST_ERR and
+			 * let the EH abort the command or reset the device.
+			 */
+			if (unlikely(status & (ATA_ERR | ATA_DF))) {
+				printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n",
+				       ap->id, status);
+				qc->err_mask |= AC_ERR_DEV;
+				ap->hsm_task_state = HSM_ST_ERR;
+				goto fsm_start;
+			}
 
-static void ata_pio_task(void *_data)
-{
-	struct ata_queued_cmd *qc = _data;
-	struct ata_port *ap = qc->ap;
-	unsigned long timeout;
-	int qc_completed;
+			atapi_pio_bytes(qc);
 
-fsm_start:
-	timeout = 0;
-	qc_completed = 0;
+			if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
+				/* bad ireason reported by device */
+				goto fsm_start;
 
-	switch (ap->hsm_task_state) {
-	case HSM_ST_IDLE:
-		return;
+		} else {
+			/* ATA PIO protocol */
+			if (unlikely((status & ATA_DRQ) == 0)) {
+				/* handle BSY=0, DRQ=0 as error */
+				qc->err_mask |= AC_ERR_HSM;
+				ap->hsm_task_state = HSM_ST_ERR;
+				goto fsm_start;
+			}
 
-	case HSM_ST:
-		ata_pio_block(qc);
+			/* For PIO reads, some devices may ask for
+			 * data transfer (DRQ=1) along with ERR=1.
+			 * We respect DRQ here and transfer one
+			 * block of junk data before changing the
+			 * hsm_task_state to HSM_ST_ERR.
+			 *
+			 * For PIO writes, ERR=1 DRQ=1 doesn't make
+			 * sense since the data block has been
+			 * transferred to the device.
+			 */
+			if (unlikely(status & (ATA_ERR | ATA_DF))) {
+				/* data might be corrupted */
+				qc->err_mask |= AC_ERR_DEV;
+
+				if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
+					ata_pio_sectors(qc);
+					ata_altstatus(ap);
+					status = ata_wait_idle(ap);
+				}
+
+				/* ata_pio_sectors() might change the
+				 * state to HSM_ST_LAST. so, the state
+				 * is changed after ata_pio_sectors().
+				 */
+				ap->hsm_task_state = HSM_ST_ERR;
+				goto fsm_start;
+			}
+
+			ata_pio_sectors(qc);
+
+			if (ap->hsm_task_state == HSM_ST_LAST &&
+			    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
+				/* all data read */
+				ata_altstatus(ap);
+				status = ata_wait_idle(ap);
+				goto fsm_start;
+			}
+		}
+
+		ata_altstatus(ap); /* flush */
+		poll_next = 1;
 		break;
 
 	case HSM_ST_LAST:
-		qc_completed = ata_pio_complete(qc);
-		break;
+		if (unlikely(!ata_ok(status))) {
+			qc->err_mask |= __ac_err_mask(status);
+			ap->hsm_task_state = HSM_ST_ERR;
+			goto fsm_start;
+		}
+
+		/* no more data to transfer */
+		DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
+			ap->id, qc->dev->devno, status);
+
+		WARN_ON(qc->err_mask);
 
-	case HSM_ST_POLL:
-	case HSM_ST_LAST_POLL:
-		timeout = ata_pio_poll(qc);
+		ap->hsm_task_state = HSM_ST_IDLE;
+
+		/* complete taskfile transaction */
+		if (in_wq)
+			ata_poll_qc_complete(qc);
+		else
+			ata_qc_complete(qc);
+
+		poll_next = 0;
 		break;
 
-	case HSM_ST_TMOUT:
 	case HSM_ST_ERR:
-		ata_pio_error(qc);
-		return;
+		if (qc->tf.command != ATA_CMD_PACKET)
+			printk(KERN_ERR "ata%u: dev %u command error, drv_stat 0x%x\n",
+			       ap->id, qc->dev->devno, status);
+
+		/* make sure qc->err_mask is available to
+		 * know what's wrong and recover
+		 */
+		WARN_ON(qc->err_mask == 0);
+
+		ap->hsm_task_state = HSM_ST_IDLE;
+
+		/* complete taskfile transaction */
+		if (in_wq)
+			ata_poll_qc_complete(qc);
+		else
+			ata_qc_complete(qc);
+
+		poll_next = 0;
+		break;
+	default:
+		poll_next = 0;
+		BUG();
 	}
 
-	if (timeout)
-		ata_port_queue_task(ap, ata_pio_task, qc, timeout);
-	else if (!qc_completed)
-		goto fsm_start;
+	return poll_next;
 }
 
-/**
- *	atapi_packet_task - Write CDB bytes to hardware
- *	@_data: qc in progress
- *
- *	When device has indicated its readiness to accept
- *	a CDB, this function is called.  Send the CDB.
- *	If DMA is to be performed, exit immediately.
- *	Otherwise, we are in polling mode, so poll
- *	status under operation succeeds or fails.
- *
- *	LOCKING:
- *	Kernel thread context (may sleep)
- */
-static void atapi_packet_task(void *_data)
+static void ata_pio_task(void *_data)
 {
 	struct ata_queued_cmd *qc = _data;
 	struct ata_port *ap = qc->ap;
 	u8 status;
+	int poll_next;
 
-	/* sleep-wait for BSY to clear */
-	DPRINTK("busy wait\n");
-	if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB)) {
-		qc->err_mask |= AC_ERR_TIMEOUT;
-		goto err_out;
-	}
-
-	/* make sure DRQ is set */
-	status = ata_chk_status(ap);
-	if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ) {
-		qc->err_mask |= AC_ERR_HSM;
-		goto err_out;
-	}
-
-	/* send SCSI cdb */
-	DPRINTK("send cdb\n");
-	WARN_ON(qc->dev->cdb_len < 12);
-
-	if (qc->tf.protocol == ATA_PROT_ATAPI_DMA ||
-	    qc->tf.protocol == ATA_PROT_ATAPI_NODATA) {
-		unsigned long flags;
-
-		/* Once we're done issuing command and kicking bmdma,
-		 * irq handler takes over.  To not lose irq, we need
-		 * to clear NOINTR flag before sending cdb, but
-		 * interrupt handler shouldn't be invoked before we're
-		 * finished.  Hence, the following locking.
-		 */
-		spin_lock_irqsave(&ap->host_set->lock, flags);
-		ap->flags &= ~ATA_FLAG_NOINTR;
-		ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);
-		if (qc->tf.protocol == ATA_PROT_ATAPI_DMA)
-			ap->ops->bmdma_start(qc);	/* initiate bmdma */
-		spin_unlock_irqrestore(&ap->host_set->lock, flags);
-	} else {
-		ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);
+fsm_start:
+	WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
 
-		/* PIO commands are handled by polling */
-		ap->hsm_task_state = HSM_ST;
-		ata_port_queue_task(ap, ata_pio_task, qc, 0);
+	/*
+	 * This is purely heuristic.  This is a fast path.
+	 * Sometimes when we enter, BSY will be cleared in
+	 * a chk-status or two.  If not, the drive is probably seeking
+	 * or something.  Snooze for a couple msecs, then
+	 * chk-status again.  If still busy, queue delayed work.
+	 */
+	status = ata_busy_wait(ap, ATA_BUSY, 5);
+	if (status & ATA_BUSY) {
+		msleep(2);
+		status = ata_busy_wait(ap, ATA_BUSY, 10);
+		if (status & ATA_BUSY) {
+			ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
+			return;
+		}
 	}
 
-	return;
+	/* move the HSM */
+	poll_next = ata_hsm_move(ap, qc, status, 1);
 
-err_out:
-	ata_poll_qc_complete(qc);
+	/* another command or interrupt handler
+	 * may be running at this point.
+	 */
+	if (poll_next)
+		goto fsm_start;
 }
 
 /**
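
Review note: this is the heart of the rewrite. ata_hsm_move() returns poll_next so one state machine serves both execution contexts: ata_pio_task() loops on it from the workqueue, while ata_host_intr() below calls the same function once per interrupt. A toy driver loop showing just that contract (states and status values are faked; only the control flow mirrors the patch):

#include <stdio.h>

enum hsm { HSM_ST_FIRST, HSM_ST, HSM_ST_LAST, HSM_ST_IDLE };

static enum hsm state = HSM_ST_FIRST;

/* Toy ata_hsm_move(): returns 1 while the caller should poll again. */
static int hsm_move(unsigned char status)
{
	(void)status;	/* a real FSM would branch on BSY/DRQ/ERR here */

	switch (state) {
	case HSM_ST_FIRST:
		state = HSM_ST;		/* first data block / CDB sent */
		return 1;
	case HSM_ST:
		state = HSM_ST_LAST;	/* data transferred */
		return 1;
	case HSM_ST_LAST:
		state = HSM_ST_IDLE;	/* command completed */
		return 0;
	default:
		return 0;
	}
}

int main(void)
{
	int poll_next;

	do {	/* the shape of ata_pio_task()'s fsm_start loop */
		unsigned char status = 0x58;	/* DRDY | DRQ, BSY clear */

		poll_next = hsm_move(status);
		printf("state=%d poll_next=%d\n", state, poll_next);
	} while (poll_next);
	return 0;
}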
@@ -4322,43 +4433,105 @@ unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
 
+	/* Use polling pio if the LLD doesn't handle
+	 * interrupt driven pio and atapi CDB interrupt.
+	 */
+	if (ap->flags & ATA_FLAG_PIO_POLLING) {
+		switch (qc->tf.protocol) {
+		case ATA_PROT_PIO:
+		case ATA_PROT_ATAPI:
+		case ATA_PROT_ATAPI_NODATA:
+			qc->tf.flags |= ATA_TFLAG_POLLING;
+			break;
+		case ATA_PROT_ATAPI_DMA:
+			if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
+				/* see ata_check_atapi_dma() */
+				BUG();
+			break;
+		default:
+			break;
+		}
+	}
+
+	/* select the device */
 	ata_dev_select(ap, qc->dev->devno, 1, 0);
 
+	/* start the command */
 	switch (qc->tf.protocol) {
 	case ATA_PROT_NODATA:
+		if (qc->tf.flags & ATA_TFLAG_POLLING)
+			ata_qc_set_polling(qc);
+
 		ata_tf_to_host(ap, &qc->tf);
+		ap->hsm_task_state = HSM_ST_LAST;
+
+		if (qc->tf.flags & ATA_TFLAG_POLLING)
+			ata_port_queue_task(ap, ata_pio_task, qc, 0);
+
 		break;
 
 	case ATA_PROT_DMA:
+		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
+
 		ap->ops->tf_load(ap, &qc->tf);	/* load tf registers */
 		ap->ops->bmdma_setup(qc);	/* set up bmdma */
 		ap->ops->bmdma_start(qc);	/* initiate bmdma */
+		ap->hsm_task_state = HSM_ST_LAST;
 		break;
 
-	case ATA_PROT_PIO: /* load tf registers, initiate polling pio */
-		ata_qc_set_polling(qc);
-		ata_tf_to_host(ap, &qc->tf);
-		ap->hsm_task_state = HSM_ST;
-		ata_port_queue_task(ap, ata_pio_task, qc, 0);
-		break;
+	case ATA_PROT_PIO:
+		if (qc->tf.flags & ATA_TFLAG_POLLING)
+			ata_qc_set_polling(qc);
 
-	case ATA_PROT_ATAPI:
-		ata_qc_set_polling(qc);
 		ata_tf_to_host(ap, &qc->tf);
-		ata_port_queue_task(ap, atapi_packet_task, qc, 0);
+
+		if (qc->tf.flags & ATA_TFLAG_WRITE) {
+			/* PIO data out protocol */
+			ap->hsm_task_state = HSM_ST_FIRST;
+			ata_port_queue_task(ap, ata_pio_task, qc, 0);
+
+			/* always send first data block using
+			 * the ata_pio_task() codepath.
+			 */
+		} else {
+			/* PIO data in protocol */
+			ap->hsm_task_state = HSM_ST;
+
+			if (qc->tf.flags & ATA_TFLAG_POLLING)
+				ata_port_queue_task(ap, ata_pio_task, qc, 0);
+
+			/* if polling, ata_pio_task() handles the rest.
+			 * otherwise, interrupt handler takes over from here.
+			 */
+		}
+
 		break;
 
+	case ATA_PROT_ATAPI:
 	case ATA_PROT_ATAPI_NODATA:
-		ap->flags |= ATA_FLAG_NOINTR;
+		if (qc->tf.flags & ATA_TFLAG_POLLING)
+			ata_qc_set_polling(qc);
+
 		ata_tf_to_host(ap, &qc->tf);
-		ata_port_queue_task(ap, atapi_packet_task, qc, 0);
+
+		ap->hsm_task_state = HSM_ST_FIRST;
+
+		/* send cdb by polling if no cdb interrupt */
+		if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
+		    (qc->tf.flags & ATA_TFLAG_POLLING))
+			ata_port_queue_task(ap, ata_pio_task, qc, 0);
 		break;
 
 	case ATA_PROT_ATAPI_DMA:
-		ap->flags |= ATA_FLAG_NOINTR;
+		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
+
 		ap->ops->tf_load(ap, &qc->tf);	/* load tf registers */
 		ap->ops->bmdma_setup(qc);	/* set up bmdma */
-		ata_port_queue_task(ap, atapi_packet_task, qc, 0);
+		ap->hsm_task_state = HSM_ST_FIRST;
+
+		/* send cdb by polling if no cdb interrupt */
+		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
+			ata_port_queue_task(ap, ata_pio_task, qc, 0);
 		break;
 
 	default:
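
Review note: ata_qc_issue_prot() now sets hsm_task_state at issue time and decides per protocol whether the workqueue or the interrupt handler advances the machine. The dispatch condensed into a printable table (strings only; summarized from the switch above):

#include <stdio.h>

struct dispatch { const char *proto, *first_state, *driven_by; };

int main(void)
{
	/* condensed from the switch in ata_qc_issue_prot() above */
	static const struct dispatch t[] = {
		{ "NODATA",       "HSM_ST_LAST",  "irq, or ata_pio_task if polling" },
		{ "DMA",          "HSM_ST_LAST",  "irq only (polling would WARN)" },
		{ "PIO write",    "HSM_ST_FIRST", "ata_pio_task sends first block" },
		{ "PIO read",     "HSM_ST",       "irq, or ata_pio_task if polling" },
		{ "ATAPI/NODATA", "HSM_ST_FIRST", "irq if CDB intr, else ata_pio_task" },
		{ "ATAPI DMA",    "HSM_ST_FIRST", "irq if CDB intr, else ata_pio_task" },
	};
	size_t i;

	for (i = 0; i < sizeof(t) / sizeof(t[0]); i++)
		printf("%-13s %-13s %s\n", t[i].proto, t[i].first_state,
		       t[i].driven_by);
	return 0;
}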
@@ -4388,52 +4561,66 @@ unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
 inline unsigned int ata_host_intr (struct ata_port *ap,
 				   struct ata_queued_cmd *qc)
 {
-	u8 status, host_stat;
-
-	switch (qc->tf.protocol) {
-
-	case ATA_PROT_DMA:
-	case ATA_PROT_ATAPI_DMA:
-	case ATA_PROT_ATAPI:
-		/* check status of DMA engine */
-		host_stat = ap->ops->bmdma_status(ap);
-		VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
+	u8 status, host_stat = 0;
 
-		/* if it's not our irq... */
-		if (!(host_stat & ATA_DMA_INTR))
-			goto idle_irq;
-
-		/* before we do anything else, clear DMA-Start bit */
-		ap->ops->bmdma_stop(qc);
+	VPRINTK("ata%u: protocol %d task_state %d\n",
+		ap->id, qc->tf.protocol, ap->hsm_task_state);
 
-		/* fall through */
-
-	case ATA_PROT_ATAPI_NODATA:
-	case ATA_PROT_NODATA:
-		/* check altstatus */
-		status = ata_altstatus(ap);
-		if (status & ATA_BUSY)
-			goto idle_irq;
+	/* Check whether we are expecting interrupt in this state */
+	switch (ap->hsm_task_state) {
+	case HSM_ST_FIRST:
+		/* Some pre-ATAPI-4 devices assert INTRQ
+		 * at this state when ready to receive CDB.
+		 */
 
-		/* check main status, clearing INTRQ */
-		status = ata_chk_status(ap);
-		if (unlikely(status & ATA_BUSY))
+		/* Check the ATA_DFLAG_CDB_INTR flag is enough here.
+		 * The flag was turned on only for atapi devices.
+		 * No need to check is_atapi_taskfile(&qc->tf) again.
+		 */
+		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
 			goto idle_irq;
-		DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
-			ap->id, qc->tf.protocol, status);
-
-		/* ack bmdma irq events */
-		ap->ops->irq_clear(ap);
-
-		/* complete taskfile transaction */
-		qc->err_mask |= ac_err_mask(status);
-		ata_qc_complete(qc);
 		break;
-
+	case HSM_ST_LAST:
+		if (qc->tf.protocol == ATA_PROT_DMA ||
+		    qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
+			/* check status of DMA engine */
+			host_stat = ap->ops->bmdma_status(ap);
+			VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
+
+			/* if it's not our irq... */
+			if (!(host_stat & ATA_DMA_INTR))
+				goto idle_irq;
+
+			/* before we do anything else, clear DMA-Start bit */
+			ap->ops->bmdma_stop(qc);
+
+			if (unlikely(host_stat & ATA_DMA_ERR)) {
+				/* error when transferring data to/from memory */
+				qc->err_mask |= AC_ERR_HOST_BUS;
+				ap->hsm_task_state = HSM_ST_ERR;
+			}
+		}
+		break;
+	case HSM_ST:
+		break;
 	default:
 		goto idle_irq;
 	}
 
+	/* check altstatus */
+	status = ata_altstatus(ap);
+	if (status & ATA_BUSY)
+		goto idle_irq;
+
+	/* check main status, clearing INTRQ */
+	status = ata_chk_status(ap);
+	if (unlikely(status & ATA_BUSY))
+		goto idle_irq;
+
+	/* ack bmdma irq events */
+	ap->ops->irq_clear(ap);
+
+	ata_hsm_move(ap, qc, status, 0);
 	return 1;	/* irq handled */
 
 idle_irq:
@@ -4480,11 +4667,11 @@ irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
 
 		ap = host_set->ports[i];
 		if (ap &&
-		    !(ap->flags & (ATA_FLAG_DISABLED | ATA_FLAG_NOINTR))) {
+		    !(ap->flags & ATA_FLAG_DISABLED)) {
 			struct ata_queued_cmd *qc;
 
 			qc = ata_qc_from_tag(ap, ap->active_tag);
-			if (qc && (!(qc->tf.ctl & ATA_NIEN)) &&
+			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
 			    (qc->flags & ATA_QCFLAG_ACTIVE))
 				handled |= ata_host_intr(ap, qc);
 		}
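
Review note: with atapi_packet_task() gone, the port-wide ATA_FLAG_NOINTR hack loses its purpose; ata_interrupt() now filters on the per-command ATA_TFLAG_POLLING flag fixed at issue time, and ata_host_intr() asks the HSM state whether an interrupt is even expected. A before/after sketch of the gate (flag values are illustrative, not the kernel's):

#include <stdio.h>

#define ATA_FLAG_NOINTR		0x0001	/* illustrative values only */
#define ATA_TFLAG_POLLING	0x0002

/* Old gate: a port-wide flag toggled around CDB submission. */
static int old_irq_expected(unsigned int port_flags)
{
	return !(port_flags & ATA_FLAG_NOINTR);
}

/* New gate: a per-command property decided once at issue time. */
static int new_irq_expected(unsigned int tf_flags)
{
	return !(tf_flags & ATA_TFLAG_POLLING);
}

int main(void)
{
	printf("old, NOINTR set: %d (whole port deaf)\n",
	       old_irq_expected(ATA_FLAG_NOINTR));
	printf("new, non-polling qc: %d (interrupt serviced)\n",
	       new_irq_expected(0));
	return 0;
}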