| -rw-r--r-- | drivers/scsi/libata-core.c | 803 | ||||
| -rw-r--r-- | drivers/scsi/libata-eh.c | 2 | ||||
| -rw-r--r-- | drivers/scsi/pdc_adma.c | 8 | ||||
| -rw-r--r-- | drivers/scsi/sata_mv.c | 6 | ||||
| -rw-r--r-- | drivers/scsi/sata_nv.c | 4 | ||||
| -rw-r--r-- | drivers/scsi/sata_promise.c | 7 | ||||
| -rw-r--r-- | drivers/scsi/sata_qstor.c | 11 | ||||
| -rw-r--r-- | drivers/scsi/sata_sx4.c | 6 | ||||
| -rw-r--r-- | drivers/scsi/sata_vsc.c | 15 | ||||
| -rw-r--r-- | include/linux/ata.h | 12 | ||||
| -rw-r--r-- | include/linux/libata.h | 24 |
11 files changed, 550 insertions, 348 deletions
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c index 2969599ec0b9..c859b96b891a 100644 --- a/drivers/scsi/libata-core.c +++ b/drivers/scsi/libata-core.c | |||
| @@ -1345,11 +1345,19 @@ static int ata_dev_configure(struct ata_device *dev, int print_info) | |||
| 1345 | dev->cylinders, dev->heads, dev->sectors); | 1345 | dev->cylinders, dev->heads, dev->sectors); |
| 1346 | } | 1346 | } |
| 1347 | 1347 | ||
| 1348 | if (dev->id[59] & 0x100) { | ||
| 1349 | dev->multi_count = dev->id[59] & 0xff; | ||
| 1350 | DPRINTK("ata%u: dev %u multi count %u\n", | ||
| 1351 | ap->id, dev->devno, dev->multi_count); | ||
| 1352 | } | ||
| 1353 | |||
| 1348 | dev->cdb_len = 16; | 1354 | dev->cdb_len = 16; |
| 1349 | } | 1355 | } |
| 1350 | 1356 | ||
| 1351 | /* ATAPI-specific feature tests */ | 1357 | /* ATAPI-specific feature tests */ |
| 1352 | else if (dev->class == ATA_DEV_ATAPI) { | 1358 | else if (dev->class == ATA_DEV_ATAPI) { |
| 1359 | char *cdb_intr_string = ""; | ||
| 1360 | |||
| 1353 | rc = atapi_cdb_len(id); | 1361 | rc = atapi_cdb_len(id); |
| 1354 | if ((rc < 12) || (rc > ATAPI_CDB_LEN)) { | 1362 | if ((rc < 12) || (rc > ATAPI_CDB_LEN)) { |
| 1355 | ata_dev_printk(dev, KERN_WARNING, | 1363 | ata_dev_printk(dev, KERN_WARNING, |
| @@ -1359,10 +1367,16 @@ static int ata_dev_configure(struct ata_device *dev, int print_info) | |||
| 1359 | } | 1367 | } |
| 1360 | dev->cdb_len = (unsigned int) rc; | 1368 | dev->cdb_len = (unsigned int) rc; |
| 1361 | 1369 | ||
| 1370 | if (ata_id_cdb_intr(dev->id)) { | ||
| 1371 | dev->flags |= ATA_DFLAG_CDB_INTR; | ||
| 1372 | cdb_intr_string = ", CDB intr"; | ||
| 1373 | } | ||
| 1374 | |||
| 1362 | /* print device info to dmesg */ | 1375 | /* print device info to dmesg */ |
| 1363 | if (print_info) | 1376 | if (print_info) |
| 1364 | ata_dev_printk(dev, KERN_INFO, "ATAPI, max %s\n", | 1377 | ata_dev_printk(dev, KERN_INFO, "ATAPI, max %s%s\n", |
| 1365 | ata_mode_string(xfer_mask)); | 1378 | ata_mode_string(xfer_mask), |
| 1379 | cdb_intr_string); | ||
| 1366 | } | 1380 | } |
| 1367 | 1381 | ||
| 1368 | ap->host->max_cmd_len = 0; | 1382 | ap->host->max_cmd_len = 0; |
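The hunk above derives multi_count from IDENTIFY word 59: bit 8 marks the low byte as valid, and bits 7:0 hold the sector count currently programmed for READ/WRITE MULTIPLE. A standalone decode of that word, as a plain user-space sketch rather than kernel code:

#include <stdint.h>
#include <stdio.h>

/* Decode the current READ/WRITE MULTIPLE sector count from IDENTIFY
 * word 59.  Returns 0 when the field is marked invalid. */
static unsigned int id_multi_count(const uint16_t *id)
{
	if (id[59] & 0x100)		/* bit 8: low byte is valid */
		return id[59] & 0xff;	/* sectors per DRQ block */
	return 0;
}

int main(void)
{
	uint16_t id[256] = { 0 };

	id[59] = 0x0110;		/* valid, 16 sectors per interrupt */
	printf("multi count: %u\n", id_multi_count(id));
	return 0;
}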
| @@ -3211,6 +3225,15 @@ int ata_check_atapi_dma(struct ata_queued_cmd *qc) | |||
| 3211 | if (ap->ops->check_atapi_dma) | 3225 | if (ap->ops->check_atapi_dma) |
| 3212 | rc = ap->ops->check_atapi_dma(qc); | 3226 | rc = ap->ops->check_atapi_dma(qc); |
| 3213 | 3227 | ||
| 3228 | /* We don't support polling DMA. | ||
| 3229 | * Use PIO if the LLDD handles only interrupts in | ||
| 3230 | * the HSM_ST_LAST state and the ATAPI device | ||
| 3231 | * generates CDB interrupts. | ||
| 3232 | */ | ||
| 3233 | if ((ap->flags & ATA_FLAG_PIO_POLLING) && | ||
| 3234 | (qc->dev->flags & ATA_DFLAG_CDB_INTR)) | ||
| 3235 | rc = 1; | ||
| 3236 | |||
| 3214 | return rc; | 3237 | return rc; |
| 3215 | } | 3238 | } |
| 3216 | /** | 3239 | /** |
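The new test in ata_check_atapi_dma() encodes a single rule: a controller that can only take an interrupt at command completion (ATA_FLAG_PIO_POLLING) cannot service the mid-command interrupt an ATAPI device raises when it wants its CDB (ATA_DFLAG_CDB_INTR), so such commands fall back to PIO. A minimal sketch of that decision with stand-in flag bits (illustrative, not the kernel definitions):

#include <stdbool.h>
#include <stdio.h>

#define PORT_PIO_POLLING	(1u << 0)	/* stand-in for ATA_FLAG_PIO_POLLING */
#define DEV_CDB_INTR		(1u << 0)	/* stand-in for ATA_DFLAG_CDB_INTR */

/* True when ATAPI DMA must be refused: the port cannot service the
 * mid-command CDB interrupt that DMA-mode ATAPI would generate. */
static bool atapi_dma_forbidden(unsigned int port_flags, unsigned int dev_flags)
{
	return (port_flags & PORT_PIO_POLLING) && (dev_flags & DEV_CDB_INTR);
}

int main(void)
{
	printf("%d\n", atapi_dma_forbidden(PORT_PIO_POLLING, DEV_CDB_INTR));	/* 1 */
	printf("%d\n", atapi_dma_forbidden(0, DEV_CDB_INTR));			/* 0 */
	return 0;
}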
| @@ -3458,7 +3481,6 @@ void ata_poll_qc_complete(struct ata_queued_cmd *qc) | |||
| 3458 | qc = ata_qc_from_tag(ap, qc->tag); | 3481 | qc = ata_qc_from_tag(ap, qc->tag); |
| 3459 | if (qc) { | 3482 | if (qc) { |
| 3460 | if (!(qc->err_mask & AC_ERR_HSM)) { | 3483 | if (!(qc->err_mask & AC_ERR_HSM)) { |
| 3461 | ap->flags &= ~ATA_FLAG_NOINTR; | ||
| 3462 | ata_irq_on(ap); | 3484 | ata_irq_on(ap); |
| 3463 | ata_qc_complete(qc); | 3485 | ata_qc_complete(qc); |
| 3464 | } else | 3486 | } else |
| @@ -3466,7 +3488,6 @@ void ata_poll_qc_complete(struct ata_queued_cmd *qc) | |||
| 3466 | } | 3488 | } |
| 3467 | } else { | 3489 | } else { |
| 3468 | /* old EH */ | 3490 | /* old EH */ |
| 3469 | ap->flags &= ~ATA_FLAG_NOINTR; | ||
| 3470 | ata_irq_on(ap); | 3491 | ata_irq_on(ap); |
| 3471 | ata_qc_complete(qc); | 3492 | ata_qc_complete(qc); |
| 3472 | } | 3493 | } |
| @@ -3475,105 +3496,6 @@ void ata_poll_qc_complete(struct ata_queued_cmd *qc) | |||
| 3475 | } | 3496 | } |
| 3476 | 3497 | ||
| 3477 | /** | 3498 | /** |
| 3478 | * ata_pio_poll - poll using PIO, depending on current state | ||
| 3479 | * @qc: qc in progress | ||
| 3480 | * | ||
| 3481 | * LOCKING: | ||
| 3482 | * None. (executing in kernel thread context) | ||
| 3483 | * | ||
| 3484 | * RETURNS: | ||
| 3485 | * timeout value to use | ||
| 3486 | */ | ||
| 3487 | static unsigned long ata_pio_poll(struct ata_queued_cmd *qc) | ||
| 3488 | { | ||
| 3489 | struct ata_port *ap = qc->ap; | ||
| 3490 | u8 status; | ||
| 3491 | unsigned int poll_state = HSM_ST_UNKNOWN; | ||
| 3492 | unsigned int reg_state = HSM_ST_UNKNOWN; | ||
| 3493 | |||
| 3494 | switch (ap->hsm_task_state) { | ||
| 3495 | case HSM_ST: | ||
| 3496 | case HSM_ST_POLL: | ||
| 3497 | poll_state = HSM_ST_POLL; | ||
| 3498 | reg_state = HSM_ST; | ||
| 3499 | break; | ||
| 3500 | case HSM_ST_LAST: | ||
| 3501 | case HSM_ST_LAST_POLL: | ||
| 3502 | poll_state = HSM_ST_LAST_POLL; | ||
| 3503 | reg_state = HSM_ST_LAST; | ||
| 3504 | break; | ||
| 3505 | default: | ||
| 3506 | BUG(); | ||
| 3507 | break; | ||
| 3508 | } | ||
| 3509 | |||
| 3510 | status = ata_chk_status(ap); | ||
| 3511 | if (status & ATA_BUSY) { | ||
| 3512 | if (time_after(jiffies, ap->pio_task_timeout)) { | ||
| 3513 | qc->err_mask |= AC_ERR_TIMEOUT; | ||
| 3514 | ap->hsm_task_state = HSM_ST_TMOUT; | ||
| 3515 | return 0; | ||
| 3516 | } | ||
| 3517 | ap->hsm_task_state = poll_state; | ||
| 3518 | return ATA_SHORT_PAUSE; | ||
| 3519 | } | ||
| 3520 | |||
| 3521 | ap->hsm_task_state = reg_state; | ||
| 3522 | return 0; | ||
| 3523 | } | ||
| 3524 | |||
| 3525 | /** | ||
| 3526 | * ata_pio_complete - check if drive is busy or idle | ||
| 3527 | * @qc: qc to complete | ||
| 3528 | * | ||
| 3529 | * LOCKING: | ||
| 3530 | * None. (executing in kernel thread context) | ||
| 3531 | * | ||
| 3532 | * RETURNS: | ||
| 3533 | * Non-zero if qc completed, zero otherwise. | ||
| 3534 | */ | ||
| 3535 | static int ata_pio_complete(struct ata_queued_cmd *qc) | ||
| 3536 | { | ||
| 3537 | struct ata_port *ap = qc->ap; | ||
| 3538 | u8 drv_stat; | ||
| 3539 | |||
| 3540 | /* | ||
| 3541 | * This is purely heuristic. This is a fast path. Sometimes when | ||
| 3542 | * we enter, BSY will be cleared in a chk-status or two. If not, | ||
| 3543 | * the drive is probably seeking or something. Snooze for a couple | ||
| 3544 | * msecs, then chk-status again. If still busy, fall back to | ||
| 3545 | * HSM_ST_POLL state. | ||
| 3546 | */ | ||
| 3547 | drv_stat = ata_busy_wait(ap, ATA_BUSY, 10); | ||
| 3548 | if (drv_stat & ATA_BUSY) { | ||
| 3549 | msleep(2); | ||
| 3550 | drv_stat = ata_busy_wait(ap, ATA_BUSY, 10); | ||
| 3551 | if (drv_stat & ATA_BUSY) { | ||
| 3552 | ap->hsm_task_state = HSM_ST_LAST_POLL; | ||
| 3553 | ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO; | ||
| 3554 | return 0; | ||
| 3555 | } | ||
| 3556 | } | ||
| 3557 | |||
| 3558 | drv_stat = ata_wait_idle(ap); | ||
| 3559 | if (!ata_ok(drv_stat)) { | ||
| 3560 | qc->err_mask |= __ac_err_mask(drv_stat); | ||
| 3561 | ap->hsm_task_state = HSM_ST_ERR; | ||
| 3562 | return 0; | ||
| 3563 | } | ||
| 3564 | |||
| 3565 | ap->hsm_task_state = HSM_ST_IDLE; | ||
| 3566 | |||
| 3567 | WARN_ON(qc->err_mask); | ||
| 3568 | ata_poll_qc_complete(qc); | ||
| 3569 | |||
| 3570 | /* another command may start at this point */ | ||
| 3571 | |||
| 3572 | return 1; | ||
| 3573 | } | ||
| 3574 | |||
| 3575 | |||
| 3576 | /** | ||
| 3577 | * swap_buf_le16 - swap halves of 16-bit words in place | 3499 | * swap_buf_le16 - swap halves of 16-bit words in place |
| 3578 | * @buf: Buffer to swap | 3500 | * @buf: Buffer to swap |
| 3579 | * @buf_words: Number of 16-bit words in buffer. | 3501 | * @buf_words: Number of 16-bit words in buffer. |
| @@ -3741,7 +3663,23 @@ static void ata_pio_sector(struct ata_queued_cmd *qc) | |||
| 3741 | page = nth_page(page, (offset >> PAGE_SHIFT)); | 3663 | page = nth_page(page, (offset >> PAGE_SHIFT)); |
| 3742 | offset %= PAGE_SIZE; | 3664 | offset %= PAGE_SIZE; |
| 3743 | 3665 | ||
| 3744 | buf = kmap(page) + offset; | 3666 | DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read"); |
| 3667 | |||
| 3668 | if (PageHighMem(page)) { | ||
| 3669 | unsigned long flags; | ||
| 3670 | |||
| 3671 | local_irq_save(flags); | ||
| 3672 | buf = kmap_atomic(page, KM_IRQ0); | ||
| 3673 | |||
| 3674 | /* do the actual data transfer */ | ||
| 3675 | ata_data_xfer(ap, buf + offset, ATA_SECT_SIZE, do_write); | ||
| 3676 | |||
| 3677 | kunmap_atomic(buf, KM_IRQ0); | ||
| 3678 | local_irq_restore(flags); | ||
| 3679 | } else { | ||
| 3680 | buf = page_address(page); | ||
| 3681 | ata_data_xfer(ap, buf + offset, ATA_SECT_SIZE, do_write); | ||
| 3682 | } | ||
| 3745 | 3683 | ||
| 3746 | qc->cursect++; | 3684 | qc->cursect++; |
| 3747 | qc->cursg_ofs++; | 3685 | qc->cursg_ofs++; |
| @@ -3750,14 +3688,68 @@ static void ata_pio_sector(struct ata_queued_cmd *qc) | |||
| 3750 | qc->cursg++; | 3688 | qc->cursg++; |
| 3751 | qc->cursg_ofs = 0; | 3689 | qc->cursg_ofs = 0; |
| 3752 | } | 3690 | } |
| 3691 | } | ||
| 3753 | 3692 | ||
| 3754 | DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read"); | 3693 | /** |
| 3694 | * ata_pio_sectors - Transfer one or many 512-byte sectors. | ||
| 3695 | * @qc: Command in progress | ||
| 3696 | * | ||
| 3697 | * Transfer one or many ATA_SECT_SIZE chunks of data from/to the | ||
| 3698 | * ATA device for the DRQ request. | ||
| 3699 | * | ||
| 3700 | * LOCKING: | ||
| 3701 | * Inherited from caller. | ||
| 3702 | */ | ||
| 3703 | |||
| 3704 | static void ata_pio_sectors(struct ata_queued_cmd *qc) | ||
| 3705 | { | ||
| 3706 | if (is_multi_taskfile(&qc->tf)) { | ||
| 3707 | /* READ/WRITE MULTIPLE */ | ||
| 3708 | unsigned int nsect; | ||
| 3709 | |||
| 3710 | WARN_ON(qc->dev->multi_count == 0); | ||
| 3711 | |||
| 3712 | nsect = min(qc->nsect - qc->cursect, qc->dev->multi_count); | ||
| 3713 | while (nsect--) | ||
| 3714 | ata_pio_sector(qc); | ||
| 3715 | } else | ||
| 3716 | ata_pio_sector(qc); | ||
| 3717 | } | ||
| 3718 | |||
| 3719 | /** | ||
| 3720 | * atapi_send_cdb - Write CDB bytes to hardware | ||
| 3721 | * @ap: Port to which ATAPI device is attached. | ||
| 3722 | * @qc: Taskfile currently active | ||
| 3723 | * | ||
| 3724 | * When device has indicated its readiness to accept | ||
| 3725 | * a CDB, this function is called. Send the CDB. | ||
| 3726 | * | ||
| 3727 | * LOCKING: | ||
| 3728 | * caller. | ||
| 3729 | */ | ||
| 3730 | |||
| 3731 | static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc) | ||
| 3732 | { | ||
| 3733 | /* send SCSI cdb */ | ||
| 3734 | DPRINTK("send cdb\n"); | ||
| 3735 | WARN_ON(qc->dev->cdb_len < 12); | ||
| 3755 | 3736 | ||
| 3756 | /* do the actual data transfer */ | 3737 | ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1); |
| 3757 | do_write = (qc->tf.flags & ATA_TFLAG_WRITE); | 3738 | ata_altstatus(ap); /* flush */ |
| 3758 | ata_data_xfer(ap, buf, ATA_SECT_SIZE, do_write); | ||
| 3759 | 3739 | ||
| 3760 | kunmap(page); | 3740 | switch (qc->tf.protocol) { |
| 3741 | case ATA_PROT_ATAPI: | ||
| 3742 | ap->hsm_task_state = HSM_ST; | ||
| 3743 | break; | ||
| 3744 | case ATA_PROT_ATAPI_NODATA: | ||
| 3745 | ap->hsm_task_state = HSM_ST_LAST; | ||
| 3746 | break; | ||
| 3747 | case ATA_PROT_ATAPI_DMA: | ||
| 3748 | ap->hsm_task_state = HSM_ST_LAST; | ||
| 3749 | /* initiate bmdma */ | ||
| 3750 | ap->ops->bmdma_start(qc); | ||
| 3751 | break; | ||
| 3752 | } | ||
| 3761 | } | 3753 | } |
| 3762 | 3754 | ||
| 3763 | /** | 3755 | /** |
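ata_pio_sectors() above caps each DRQ block at multi_count sectors for READ/WRITE MULTIPLE commands and loops ata_pio_sector() for each one. A small worked example of how a transfer gets split (illustrative arithmetic only):

#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b) { return a < b ? a : b; }

/* Worked example of the loop in the new ata_pio_sectors(): a multi-sector
 * command moves up to multi_count sectors per DRQ block, so a 13-sector
 * READ MULTIPLE with multi_count 8 is served as one 8-sector block and
 * one 5-sector block. */
int main(void)
{
	unsigned int nsect = 13, cursect = 0, multi_count = 8;

	while (cursect < nsect) {
		unsigned int chunk = min_u(nsect - cursect, multi_count);

		printf("DRQ block: %u sector(s)\n", chunk);
		cursect += chunk;	/* each sector handled by ata_pio_sector() */
	}
	return 0;
}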
| @@ -3823,7 +3815,23 @@ next_sg: | |||
| 3823 | /* don't cross page boundaries */ | 3815 | /* don't cross page boundaries */ |
| 3824 | count = min(count, (unsigned int)PAGE_SIZE - offset); | 3816 | count = min(count, (unsigned int)PAGE_SIZE - offset); |
| 3825 | 3817 | ||
| 3826 | buf = kmap(page) + offset; | 3818 | DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read"); |
| 3819 | |||
| 3820 | if (PageHighMem(page)) { | ||
| 3821 | unsigned long flags; | ||
| 3822 | |||
| 3823 | local_irq_save(flags); | ||
| 3824 | buf = kmap_atomic(page, KM_IRQ0); | ||
| 3825 | |||
| 3826 | /* do the actual data transfer */ | ||
| 3827 | ata_data_xfer(ap, buf + offset, count, do_write); | ||
| 3828 | |||
| 3829 | kunmap_atomic(buf, KM_IRQ0); | ||
| 3830 | local_irq_restore(flags); | ||
| 3831 | } else { | ||
| 3832 | buf = page_address(page); | ||
| 3833 | ata_data_xfer(ap, buf + offset, count, do_write); | ||
| 3834 | } | ||
| 3827 | 3835 | ||
| 3828 | bytes -= count; | 3836 | bytes -= count; |
| 3829 | qc->curbytes += count; | 3837 | qc->curbytes += count; |
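As in ata_pio_sector() earlier, __atapi_pio_bytes() now maps one page at a time (kmap_atomic() under local_irq_save() for highmem, page_address() otherwise), so each chunk is clamped to the end of the current page; the real code also clamps to the scatterlist entry length, which this arithmetic-only sketch omits:

#include <stdio.h>

#define PAGE_SIZE 4096u

static unsigned int min_u(unsigned int a, unsigned int b) { return a < b ? a : b; }

/* Each pass maps a single page, so the chunk handed to the data-transfer
 * routine is limited to the bytes left in the current page. */
int main(void)
{
	unsigned int bytes = 6000;	/* ATAPI DRQ transfer length */
	unsigned int offset = 3000;	/* starting offset inside the buffer */

	while (bytes) {
		unsigned int in_page = offset % PAGE_SIZE;
		unsigned int count = min_u(bytes, PAGE_SIZE - in_page);

		printf("xfer %u bytes at page offset %u\n", count, in_page);
		offset += count;
		bytes -= count;
	}
	return 0;
}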
| @@ -3834,13 +3842,6 @@ next_sg: | |||
| 3834 | qc->cursg_ofs = 0; | 3842 | qc->cursg_ofs = 0; |
| 3835 | } | 3843 | } |
| 3836 | 3844 | ||
| 3837 | DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read"); | ||
| 3838 | |||
| 3839 | /* do the actual data transfer */ | ||
| 3840 | ata_data_xfer(ap, buf, count, do_write); | ||
| 3841 | |||
| 3842 | kunmap(page); | ||
| 3843 | |||
| 3844 | if (bytes) | 3845 | if (bytes) |
| 3845 | goto next_sg; | 3846 | goto next_sg; |
| 3846 | } | 3847 | } |
| @@ -3877,6 +3878,8 @@ static void atapi_pio_bytes(struct ata_queued_cmd *qc) | |||
| 3877 | if (do_write != i_write) | 3878 | if (do_write != i_write) |
| 3878 | goto err_out; | 3879 | goto err_out; |
| 3879 | 3880 | ||
| 3881 | VPRINTK("ata%u: xfering %d bytes\n", ap->id, bytes); | ||
| 3882 | |||
| 3880 | __atapi_pio_bytes(qc, bytes); | 3883 | __atapi_pio_bytes(qc, bytes); |
| 3881 | 3884 | ||
| 3882 | return; | 3885 | return; |
| @@ -3888,186 +3891,294 @@ err_out: | |||
| 3888 | } | 3891 | } |
| 3889 | 3892 | ||
| 3890 | /** | 3893 | /** |
| 3891 | * ata_pio_block - start PIO on a block | 3894 | * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue. |
| 3892 | * @qc: qc to transfer block for | 3895 | * @ap: the target ata_port |
| 3896 | * @qc: qc on going | ||
| 3893 | * | 3897 | * |
| 3894 | * LOCKING: | 3898 | * RETURNS: |
| 3895 | * None. (executing in kernel thread context) | 3899 | * 1 if ok in workqueue, 0 otherwise. |
| 3896 | */ | 3900 | */ |
| 3897 | static void ata_pio_block(struct ata_queued_cmd *qc) | 3901 | |
| 3902 | static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc) | ||
| 3898 | { | 3903 | { |
| 3899 | struct ata_port *ap = qc->ap; | 3904 | if (qc->tf.flags & ATA_TFLAG_POLLING) |
| 3900 | u8 status; | 3905 | return 1; |
| 3901 | 3906 | ||
| 3902 | /* | 3907 | if (ap->hsm_task_state == HSM_ST_FIRST) { |
| 3903 | * This is purely heuristic. This is a fast path. | 3908 | if (qc->tf.protocol == ATA_PROT_PIO && |
| 3904 | * Sometimes when we enter, BSY will be cleared in | 3909 | (qc->tf.flags & ATA_TFLAG_WRITE)) |
| 3905 | * a chk-status or two. If not, the drive is probably seeking | 3910 | return 1; |
| 3906 | * or something. Snooze for a couple msecs, then | ||
| 3907 | * chk-status again. If still busy, fall back to | ||
| 3908 | * HSM_ST_POLL state. | ||
| 3909 | */ | ||
| 3910 | status = ata_busy_wait(ap, ATA_BUSY, 5); | ||
| 3911 | if (status & ATA_BUSY) { | ||
| 3912 | msleep(2); | ||
| 3913 | status = ata_busy_wait(ap, ATA_BUSY, 10); | ||
| 3914 | if (status & ATA_BUSY) { | ||
| 3915 | ap->hsm_task_state = HSM_ST_POLL; | ||
| 3916 | ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO; | ||
| 3917 | return; | ||
| 3918 | } | ||
| 3919 | } | ||
| 3920 | 3911 | ||
| 3921 | /* check error */ | 3912 | if (is_atapi_taskfile(&qc->tf) && |
| 3922 | if (status & (ATA_ERR | ATA_DF)) { | 3913 | !(qc->dev->flags & ATA_DFLAG_CDB_INTR)) |
| 3923 | qc->err_mask |= AC_ERR_DEV; | 3914 | return 1; |
| 3924 | ap->hsm_task_state = HSM_ST_ERR; | ||
| 3925 | return; | ||
| 3926 | } | 3915 | } |
| 3927 | 3916 | ||
| 3928 | /* transfer data if any */ | 3917 | return 0; |
| 3929 | if (is_atapi_taskfile(&qc->tf)) { | 3918 | } |
| 3930 | /* DRQ=0 means no more data to transfer */ | ||
| 3931 | if ((status & ATA_DRQ) == 0) { | ||
| 3932 | ap->hsm_task_state = HSM_ST_LAST; | ||
| 3933 | return; | ||
| 3934 | } | ||
| 3935 | 3919 | ||
| 3936 | atapi_pio_bytes(qc); | 3920 | /** |
| 3937 | } else { | 3921 | * ata_hsm_move - move the HSM to the next state. |
| 3938 | /* handle BSY=0, DRQ=0 as error */ | 3922 | * @ap: the target ata_port |
| 3939 | if ((status & ATA_DRQ) == 0) { | 3923 | * @qc: qc in progress |
| 3924 | * @status: current device status | ||
| 3925 | * @in_wq: 1 if called from workqueue, 0 otherwise | ||
| 3926 | * | ||
| 3927 | * RETURNS: | ||
| 3928 | * 1 when poll next status needed, 0 otherwise. | ||
| 3929 | */ | ||
| 3930 | |||
| 3931 | static int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc, | ||
| 3932 | u8 status, int in_wq) | ||
| 3933 | { | ||
| 3934 | unsigned long flags = 0; | ||
| 3935 | int poll_next; | ||
| 3936 | |||
| 3937 | WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0); | ||
| 3938 | |||
| 3939 | /* Make sure ata_qc_issue_prot() does not throw things | ||
| 3940 | * like DMA polling into the workqueue. Notice that | ||
| 3941 | * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING). | ||
| 3942 | */ | ||
| 3943 | WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc)); | ||
| 3944 | |||
| 3945 | fsm_start: | ||
| 3946 | DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n", | ||
| 3947 | ap->id, qc->tf.protocol, ap->hsm_task_state, status); | ||
| 3948 | |||
| 3949 | switch (ap->hsm_task_state) { | ||
| 3950 | case HSM_ST_FIRST: | ||
| 3951 | /* Send first data block or PACKET CDB */ | ||
| 3952 | |||
| 3953 | /* If polling, we will stay in the work queue after | ||
| 3954 | * sending the data. Otherwise, interrupt handler | ||
| 3955 | * takes over after sending the data. | ||
| 3956 | */ | ||
| 3957 | poll_next = (qc->tf.flags & ATA_TFLAG_POLLING); | ||
| 3958 | |||
| 3959 | /* check device status */ | ||
| 3960 | if (unlikely((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ)) { | ||
| 3961 | /* Wrong status. Let EH handle this */ | ||
| 3940 | qc->err_mask |= AC_ERR_HSM; | 3962 | qc->err_mask |= AC_ERR_HSM; |
| 3941 | ap->hsm_task_state = HSM_ST_ERR; | 3963 | ap->hsm_task_state = HSM_ST_ERR; |
| 3942 | return; | 3964 | goto fsm_start; |
| 3943 | } | 3965 | } |
| 3944 | 3966 | ||
| 3945 | ata_pio_sector(qc); | 3967 | /* Device should not ask for data transfer (DRQ=1) |
| 3946 | } | 3968 | * when it finds something wrong. |
| 3947 | } | 3969 | * We ignore DRQ here and stop the HSM by |
| 3970 | * changing hsm_task_state to HSM_ST_ERR and | ||
| 3971 | * let the EH abort the command or reset the device. | ||
| 3972 | */ | ||
| 3973 | if (unlikely(status & (ATA_ERR | ATA_DF))) { | ||
| 3974 | printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n", | ||
| 3975 | ap->id, status); | ||
| 3976 | qc->err_mask |= AC_ERR_DEV; | ||
| 3977 | ap->hsm_task_state = HSM_ST_ERR; | ||
| 3978 | goto fsm_start; | ||
| 3979 | } | ||
| 3948 | 3980 | ||
| 3949 | static void ata_pio_error(struct ata_queued_cmd *qc) | 3981 | /* Send the CDB (atapi) or the first data block (ata pio out). |
| 3950 | { | 3982 | * During the state transition, interrupt handler shouldn't |
| 3951 | struct ata_port *ap = qc->ap; | 3983 | * be invoked before the data transfer is complete and |
| 3984 | * hsm_task_state is changed. Hence, the following locking. | ||
| 3985 | */ | ||
| 3986 | if (in_wq) | ||
| 3987 | spin_lock_irqsave(&ap->host_set->lock, flags); | ||
| 3952 | 3988 | ||
| 3953 | if (qc->tf.command != ATA_CMD_PACKET) | 3989 | if (qc->tf.protocol == ATA_PROT_PIO) { |
| 3954 | ata_dev_printk(qc->dev, KERN_WARNING, "PIO error\n"); | 3990 | /* PIO data out protocol. |
| 3991 | * send first data block. | ||
| 3992 | */ | ||
| 3955 | 3993 | ||
| 3956 | /* make sure qc->err_mask is available to | 3994 | /* ata_pio_sectors() might change the state |
| 3957 | * know what's wrong and recover | 3995 | * to HSM_ST_LAST. so, the state is changed here |
| 3958 | */ | 3996 | * before ata_pio_sectors(). |
| 3959 | WARN_ON(qc->err_mask == 0); | 3997 | */ |
| 3998 | ap->hsm_task_state = HSM_ST; | ||
| 3999 | ata_pio_sectors(qc); | ||
| 4000 | ata_altstatus(ap); /* flush */ | ||
| 4001 | } else | ||
| 4002 | /* send CDB */ | ||
| 4003 | atapi_send_cdb(ap, qc); | ||
| 4004 | |||
| 4005 | if (in_wq) | ||
| 4006 | spin_unlock_irqrestore(&ap->host_set->lock, flags); | ||
| 4007 | |||
| 4008 | /* if polling, ata_pio_task() handles the rest. | ||
| 4009 | * otherwise, interrupt handler takes over from here. | ||
| 4010 | */ | ||
| 4011 | break; | ||
| 3960 | 4012 | ||
| 3961 | ap->hsm_task_state = HSM_ST_IDLE; | 4013 | case HSM_ST: |
| 4014 | /* complete command or read/write the data register */ | ||
| 4015 | if (qc->tf.protocol == ATA_PROT_ATAPI) { | ||
| 4016 | /* ATAPI PIO protocol */ | ||
| 4017 | if ((status & ATA_DRQ) == 0) { | ||
| 4018 | /* no more data to transfer */ | ||
| 4019 | ap->hsm_task_state = HSM_ST_LAST; | ||
| 4020 | goto fsm_start; | ||
| 4021 | } | ||
| 3962 | 4022 | ||
| 3963 | ata_poll_qc_complete(qc); | 4023 | /* Device should not ask for data transfer (DRQ=1) |
| 3964 | } | 4024 | * when it finds something wrong. |
| 4025 | * We ignore DRQ here and stop the HSM by | ||
| 4026 | * changing hsm_task_state to HSM_ST_ERR and | ||
| 4027 | * let the EH abort the command or reset the device. | ||
| 4028 | */ | ||
| 4029 | if (unlikely(status & (ATA_ERR | ATA_DF))) { | ||
| 4030 | printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n", | ||
| 4031 | ap->id, status); | ||
| 4032 | qc->err_mask |= AC_ERR_DEV; | ||
| 4033 | ap->hsm_task_state = HSM_ST_ERR; | ||
| 4034 | goto fsm_start; | ||
| 4035 | } | ||
| 3965 | 4036 | ||
| 3966 | static void ata_pio_task(void *_data) | 4037 | atapi_pio_bytes(qc); |
| 3967 | { | ||
| 3968 | struct ata_queued_cmd *qc = _data; | ||
| 3969 | struct ata_port *ap = qc->ap; | ||
| 3970 | unsigned long timeout; | ||
| 3971 | int qc_completed; | ||
| 3972 | 4038 | ||
| 3973 | fsm_start: | 4039 | if (unlikely(ap->hsm_task_state == HSM_ST_ERR)) |
| 3974 | timeout = 0; | 4040 | /* bad ireason reported by device */ |
| 3975 | qc_completed = 0; | 4041 | goto fsm_start; |
| 3976 | 4042 | ||
| 3977 | switch (ap->hsm_task_state) { | 4043 | } else { |
| 3978 | case HSM_ST_IDLE: | 4044 | /* ATA PIO protocol */ |
| 3979 | return; | 4045 | if (unlikely((status & ATA_DRQ) == 0)) { |
| 4046 | /* handle BSY=0, DRQ=0 as error */ | ||
| 4047 | qc->err_mask |= AC_ERR_HSM; | ||
| 4048 | ap->hsm_task_state = HSM_ST_ERR; | ||
| 4049 | goto fsm_start; | ||
| 4050 | } | ||
| 3980 | 4051 | ||
| 3981 | case HSM_ST: | 4052 | /* For PIO reads, some devices may ask for |
| 3982 | ata_pio_block(qc); | 4053 | * data transfer (DRQ=1) along with ERR=1. |
| 4054 | * We respect DRQ here and transfer one | ||
| 4055 | * block of junk data before changing the | ||
| 4056 | * hsm_task_state to HSM_ST_ERR. | ||
| 4057 | * | ||
| 4058 | * For PIO writes, ERR=1 DRQ=1 doesn't make | ||
| 4059 | * sense since the data block has been | ||
| 4060 | * transferred to the device. | ||
| 4061 | */ | ||
| 4062 | if (unlikely(status & (ATA_ERR | ATA_DF))) { | ||
| 4063 | /* data might be corrupted */ | ||
| 4064 | qc->err_mask |= AC_ERR_DEV; | ||
| 4065 | |||
| 4066 | if (!(qc->tf.flags & ATA_TFLAG_WRITE)) { | ||
| 4067 | ata_pio_sectors(qc); | ||
| 4068 | ata_altstatus(ap); | ||
| 4069 | status = ata_wait_idle(ap); | ||
| 4070 | } | ||
| 4071 | |||
| 4072 | /* ata_pio_sectors() might change the | ||
| 4073 | * state to HSM_ST_LAST. so, the state | ||
| 4074 | * is changed after ata_pio_sectors(). | ||
| 4075 | */ | ||
| 4076 | ap->hsm_task_state = HSM_ST_ERR; | ||
| 4077 | goto fsm_start; | ||
| 4078 | } | ||
| 4079 | |||
| 4080 | ata_pio_sectors(qc); | ||
| 4081 | |||
| 4082 | if (ap->hsm_task_state == HSM_ST_LAST && | ||
| 4083 | (!(qc->tf.flags & ATA_TFLAG_WRITE))) { | ||
| 4084 | /* all data read */ | ||
| 4085 | ata_altstatus(ap); | ||
| 4086 | status = ata_wait_idle(ap); | ||
| 4087 | goto fsm_start; | ||
| 4088 | } | ||
| 4089 | } | ||
| 4090 | |||
| 4091 | ata_altstatus(ap); /* flush */ | ||
| 4092 | poll_next = 1; | ||
| 3983 | break; | 4093 | break; |
| 3984 | 4094 | ||
| 3985 | case HSM_ST_LAST: | 4095 | case HSM_ST_LAST: |
| 3986 | qc_completed = ata_pio_complete(qc); | 4096 | if (unlikely(!ata_ok(status))) { |
| 3987 | break; | 4097 | qc->err_mask |= __ac_err_mask(status); |
| 4098 | ap->hsm_task_state = HSM_ST_ERR; | ||
| 4099 | goto fsm_start; | ||
| 4100 | } | ||
| 4101 | |||
| 4102 | /* no more data to transfer */ | ||
| 4103 | DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n", | ||
| 4104 | ap->id, qc->dev->devno, status); | ||
| 4105 | |||
| 4106 | WARN_ON(qc->err_mask); | ||
| 3988 | 4107 | ||
| 3989 | case HSM_ST_POLL: | 4108 | ap->hsm_task_state = HSM_ST_IDLE; |
| 3990 | case HSM_ST_LAST_POLL: | 4109 | |
| 3991 | timeout = ata_pio_poll(qc); | 4110 | /* complete taskfile transaction */ |
| 4111 | if (in_wq) | ||
| 4112 | ata_poll_qc_complete(qc); | ||
| 4113 | else | ||
| 4114 | ata_qc_complete(qc); | ||
| 4115 | |||
| 4116 | poll_next = 0; | ||
| 3992 | break; | 4117 | break; |
| 3993 | 4118 | ||
| 3994 | case HSM_ST_TMOUT: | ||
| 3995 | case HSM_ST_ERR: | 4119 | case HSM_ST_ERR: |
| 3996 | ata_pio_error(qc); | 4120 | if (qc->tf.command != ATA_CMD_PACKET) |
| 3997 | return; | 4121 | printk(KERN_ERR "ata%u: dev %u command error, drv_stat 0x%x\n", |
| 4122 | ap->id, qc->dev->devno, status); | ||
| 4123 | |||
| 4124 | /* make sure qc->err_mask is available to | ||
| 4125 | * know what's wrong and recover | ||
| 4126 | */ | ||
| 4127 | WARN_ON(qc->err_mask == 0); | ||
| 4128 | |||
| 4129 | ap->hsm_task_state = HSM_ST_IDLE; | ||
| 4130 | |||
| 4131 | /* complete taskfile transaction */ | ||
| 4132 | if (in_wq) | ||
| 4133 | ata_poll_qc_complete(qc); | ||
| 4134 | else | ||
| 4135 | ata_qc_complete(qc); | ||
| 4136 | |||
| 4137 | poll_next = 0; | ||
| 4138 | break; | ||
| 4139 | default: | ||
| 4140 | poll_next = 0; | ||
| 4141 | BUG(); | ||
| 3998 | } | 4142 | } |
| 3999 | 4143 | ||
| 4000 | if (timeout) | 4144 | return poll_next; |
| 4001 | ata_port_queue_task(ap, ata_pio_task, qc, timeout); | ||
| 4002 | else if (!qc_completed) | ||
| 4003 | goto fsm_start; | ||
| 4004 | } | 4145 | } |
| 4005 | 4146 | ||
| 4006 | /** | 4147 | static void ata_pio_task(void *_data) |
| 4007 | * atapi_packet_task - Write CDB bytes to hardware | ||
| 4008 | * @_data: qc in progress | ||
| 4009 | * | ||
| 4010 | * When device has indicated its readiness to accept | ||
| 4011 | * a CDB, this function is called. Send the CDB. | ||
| 4012 | * If DMA is to be performed, exit immediately. | ||
| 4013 | * Otherwise, we are in polling mode, so poll | ||
| 4014 | * status under operation succeeds or fails. | ||
| 4015 | * | ||
| 4016 | * LOCKING: | ||
| 4017 | * Kernel thread context (may sleep) | ||
| 4018 | */ | ||
| 4019 | static void atapi_packet_task(void *_data) | ||
| 4020 | { | 4148 | { |
| 4021 | struct ata_queued_cmd *qc = _data; | 4149 | struct ata_queued_cmd *qc = _data; |
| 4022 | struct ata_port *ap = qc->ap; | 4150 | struct ata_port *ap = qc->ap; |
| 4023 | u8 status; | 4151 | u8 status; |
| 4152 | int poll_next; | ||
| 4024 | 4153 | ||
| 4025 | /* sleep-wait for BSY to clear */ | 4154 | fsm_start: |
| 4026 | DPRINTK("busy wait\n"); | 4155 | WARN_ON(ap->hsm_task_state == HSM_ST_IDLE); |
| 4027 | if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB)) { | ||
| 4028 | qc->err_mask |= AC_ERR_TIMEOUT; | ||
| 4029 | goto err_out; | ||
| 4030 | } | ||
| 4031 | |||
| 4032 | /* make sure DRQ is set */ | ||
| 4033 | status = ata_chk_status(ap); | ||
| 4034 | if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ) { | ||
| 4035 | qc->err_mask |= AC_ERR_HSM; | ||
| 4036 | goto err_out; | ||
| 4037 | } | ||
| 4038 | |||
| 4039 | /* send SCSI cdb */ | ||
| 4040 | DPRINTK("send cdb\n"); | ||
| 4041 | WARN_ON(qc->dev->cdb_len < 12); | ||
| 4042 | |||
| 4043 | if (qc->tf.protocol == ATA_PROT_ATAPI_DMA || | ||
| 4044 | qc->tf.protocol == ATA_PROT_ATAPI_NODATA) { | ||
| 4045 | unsigned long flags; | ||
| 4046 | |||
| 4047 | /* Once we're done issuing command and kicking bmdma, | ||
| 4048 | * irq handler takes over. To not lose irq, we need | ||
| 4049 | * to clear NOINTR flag before sending cdb, but | ||
| 4050 | * interrupt handler shouldn't be invoked before we're | ||
| 4051 | * finished. Hence, the following locking. | ||
| 4052 | */ | ||
| 4053 | spin_lock_irqsave(&ap->host_set->lock, flags); | ||
| 4054 | ap->flags &= ~ATA_FLAG_NOINTR; | ||
| 4055 | ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1); | ||
| 4056 | if (qc->tf.protocol == ATA_PROT_ATAPI_DMA) | ||
| 4057 | ap->ops->bmdma_start(qc); /* initiate bmdma */ | ||
| 4058 | spin_unlock_irqrestore(&ap->host_set->lock, flags); | ||
| 4059 | } else { | ||
| 4060 | ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1); | ||
| 4061 | 4156 | ||
| 4062 | /* PIO commands are handled by polling */ | 4157 | /* |
| 4063 | ap->hsm_task_state = HSM_ST; | 4158 | * This is purely heuristic. This is a fast path. |
| 4064 | ata_port_queue_task(ap, ata_pio_task, qc, 0); | 4159 | * Sometimes when we enter, BSY will be cleared in |
| 4160 | * a chk-status or two. If not, the drive is probably seeking | ||
| 4161 | * or something. Snooze for a couple msecs, then | ||
| 4162 | * chk-status again. If still busy, queue delayed work. | ||
| 4163 | */ | ||
| 4164 | status = ata_busy_wait(ap, ATA_BUSY, 5); | ||
| 4165 | if (status & ATA_BUSY) { | ||
| 4166 | msleep(2); | ||
| 4167 | status = ata_busy_wait(ap, ATA_BUSY, 10); | ||
| 4168 | if (status & ATA_BUSY) { | ||
| 4169 | ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE); | ||
| 4170 | return; | ||
| 4171 | } | ||
| 4065 | } | 4172 | } |
| 4066 | 4173 | ||
| 4067 | return; | 4174 | /* move the HSM */ |
| 4175 | poll_next = ata_hsm_move(ap, qc, status, 1); | ||
| 4068 | 4176 | ||
| 4069 | err_out: | 4177 | /* another command or interrupt handler |
| 4070 | ata_poll_qc_complete(qc); | 4178 | * may be running at this point. |
| 4179 | */ | ||
| 4180 | if (poll_next) | ||
| 4181 | goto fsm_start; | ||
| 4071 | } | 4182 | } |
| 4072 | 4183 | ||
| 4073 | /** | 4184 | /** |
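Taken together, the new ata_hsm_move()/ata_pio_task() pair drives every command through HSM_ST_FIRST (CDB or first PIO-out block), HSM_ST (data blocks), HSM_ST_LAST (final status check and completion) and back to HSM_ST_IDLE, whether the steps are triggered by interrupts or by the polling workqueue. A toy walk-through of the happy path for a polled PIO data-in command (error and ATAPI branches omitted; plain C, not kernel code):

#include <stdio.h>

enum hsm { ST_IDLE, ST_FIRST, ST, ST_LAST, ST_ERR };

/* Each loop iteration plays the role of one ata_hsm_move() call with a
 * good device status. */
int main(void)
{
	enum hsm state = ST;		/* PIO data-in starts in HSM_ST */
	unsigned int remaining = 3;	/* DRQ data blocks still to read */

	while (state != ST_IDLE) {
		switch (state) {
		case ST:
			printf("HSM_ST: read one data block\n");
			if (--remaining == 0)
				state = ST_LAST;	/* all data moved */
			break;
		case ST_LAST:
			printf("HSM_ST_LAST: status good, command done\n");
			state = ST_IDLE;		/* complete the qc */
			break;
		default:
			state = ST_IDLE;
		}
	}
	printf("HSM_ST_IDLE: port ready for the next command\n");
	return 0;
}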
| @@ -4322,43 +4433,105 @@ unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc) | |||
| 4322 | { | 4433 | { |
| 4323 | struct ata_port *ap = qc->ap; | 4434 | struct ata_port *ap = qc->ap; |
| 4324 | 4435 | ||
| 4436 | /* Use polling pio if the LLD doesn't handle | ||
| 4437 | * interrupt driven pio and atapi CDB interrupt. | ||
| 4438 | */ | ||
| 4439 | if (ap->flags & ATA_FLAG_PIO_POLLING) { | ||
| 4440 | switch (qc->tf.protocol) { | ||
| 4441 | case ATA_PROT_PIO: | ||
| 4442 | case ATA_PROT_ATAPI: | ||
| 4443 | case ATA_PROT_ATAPI_NODATA: | ||
| 4444 | qc->tf.flags |= ATA_TFLAG_POLLING; | ||
| 4445 | break; | ||
| 4446 | case ATA_PROT_ATAPI_DMA: | ||
| 4447 | if (qc->dev->flags & ATA_DFLAG_CDB_INTR) | ||
| 4448 | /* see ata_check_atapi_dma() */ | ||
| 4449 | BUG(); | ||
| 4450 | break; | ||
| 4451 | default: | ||
| 4452 | break; | ||
| 4453 | } | ||
| 4454 | } | ||
| 4455 | |||
| 4456 | /* select the device */ | ||
| 4325 | ata_dev_select(ap, qc->dev->devno, 1, 0); | 4457 | ata_dev_select(ap, qc->dev->devno, 1, 0); |
| 4326 | 4458 | ||
| 4459 | /* start the command */ | ||
| 4327 | switch (qc->tf.protocol) { | 4460 | switch (qc->tf.protocol) { |
| 4328 | case ATA_PROT_NODATA: | 4461 | case ATA_PROT_NODATA: |
| 4462 | if (qc->tf.flags & ATA_TFLAG_POLLING) | ||
| 4463 | ata_qc_set_polling(qc); | ||
| 4464 | |||
| 4329 | ata_tf_to_host(ap, &qc->tf); | 4465 | ata_tf_to_host(ap, &qc->tf); |
| 4466 | ap->hsm_task_state = HSM_ST_LAST; | ||
| 4467 | |||
| 4468 | if (qc->tf.flags & ATA_TFLAG_POLLING) | ||
| 4469 | ata_port_queue_task(ap, ata_pio_task, qc, 0); | ||
| 4470 | |||
| 4330 | break; | 4471 | break; |
| 4331 | 4472 | ||
| 4332 | case ATA_PROT_DMA: | 4473 | case ATA_PROT_DMA: |
| 4474 | WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING); | ||
| 4475 | |||
| 4333 | ap->ops->tf_load(ap, &qc->tf); /* load tf registers */ | 4476 | ap->ops->tf_load(ap, &qc->tf); /* load tf registers */ |
| 4334 | ap->ops->bmdma_setup(qc); /* set up bmdma */ | 4477 | ap->ops->bmdma_setup(qc); /* set up bmdma */ |
| 4335 | ap->ops->bmdma_start(qc); /* initiate bmdma */ | 4478 | ap->ops->bmdma_start(qc); /* initiate bmdma */ |
| 4479 | ap->hsm_task_state = HSM_ST_LAST; | ||
| 4336 | break; | 4480 | break; |
| 4337 | 4481 | ||
| 4338 | case ATA_PROT_PIO: /* load tf registers, initiate polling pio */ | 4482 | case ATA_PROT_PIO: |
| 4339 | ata_qc_set_polling(qc); | 4483 | if (qc->tf.flags & ATA_TFLAG_POLLING) |
| 4340 | ata_tf_to_host(ap, &qc->tf); | 4484 | ata_qc_set_polling(qc); |
| 4341 | ap->hsm_task_state = HSM_ST; | ||
| 4342 | ata_port_queue_task(ap, ata_pio_task, qc, 0); | ||
| 4343 | break; | ||
| 4344 | 4485 | ||
| 4345 | case ATA_PROT_ATAPI: | ||
| 4346 | ata_qc_set_polling(qc); | ||
| 4347 | ata_tf_to_host(ap, &qc->tf); | 4486 | ata_tf_to_host(ap, &qc->tf); |
| 4348 | ata_port_queue_task(ap, atapi_packet_task, qc, 0); | 4487 | |
| 4488 | if (qc->tf.flags & ATA_TFLAG_WRITE) { | ||
| 4489 | /* PIO data out protocol */ | ||
| 4490 | ap->hsm_task_state = HSM_ST_FIRST; | ||
| 4491 | ata_port_queue_task(ap, ata_pio_task, qc, 0); | ||
| 4492 | |||
| 4493 | /* always send first data block using | ||
| 4494 | * the ata_pio_task() codepath. | ||
| 4495 | */ | ||
| 4496 | } else { | ||
| 4497 | /* PIO data in protocol */ | ||
| 4498 | ap->hsm_task_state = HSM_ST; | ||
| 4499 | |||
| 4500 | if (qc->tf.flags & ATA_TFLAG_POLLING) | ||
| 4501 | ata_port_queue_task(ap, ata_pio_task, qc, 0); | ||
| 4502 | |||
| 4503 | /* if polling, ata_pio_task() handles the rest. | ||
| 4504 | * otherwise, interrupt handler takes over from here. | ||
| 4505 | */ | ||
| 4506 | } | ||
| 4507 | |||
| 4349 | break; | 4508 | break; |
| 4350 | 4509 | ||
| 4510 | case ATA_PROT_ATAPI: | ||
| 4351 | case ATA_PROT_ATAPI_NODATA: | 4511 | case ATA_PROT_ATAPI_NODATA: |
| 4352 | ap->flags |= ATA_FLAG_NOINTR; | 4512 | if (qc->tf.flags & ATA_TFLAG_POLLING) |
| 4513 | ata_qc_set_polling(qc); | ||
| 4514 | |||
| 4353 | ata_tf_to_host(ap, &qc->tf); | 4515 | ata_tf_to_host(ap, &qc->tf); |
| 4354 | ata_port_queue_task(ap, atapi_packet_task, qc, 0); | 4516 | |
| 4517 | ap->hsm_task_state = HSM_ST_FIRST; | ||
| 4518 | |||
| 4519 | /* send cdb by polling if no cdb interrupt */ | ||
| 4520 | if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) || | ||
| 4521 | (qc->tf.flags & ATA_TFLAG_POLLING)) | ||
| 4522 | ata_port_queue_task(ap, ata_pio_task, qc, 0); | ||
| 4355 | break; | 4523 | break; |
| 4356 | 4524 | ||
| 4357 | case ATA_PROT_ATAPI_DMA: | 4525 | case ATA_PROT_ATAPI_DMA: |
| 4358 | ap->flags |= ATA_FLAG_NOINTR; | 4526 | WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING); |
| 4527 | |||
| 4359 | ap->ops->tf_load(ap, &qc->tf); /* load tf registers */ | 4528 | ap->ops->tf_load(ap, &qc->tf); /* load tf registers */ |
| 4360 | ap->ops->bmdma_setup(qc); /* set up bmdma */ | 4529 | ap->ops->bmdma_setup(qc); /* set up bmdma */ |
| 4361 | ata_port_queue_task(ap, atapi_packet_task, qc, 0); | 4530 | ap->hsm_task_state = HSM_ST_FIRST; |
| 4531 | |||
| 4532 | /* send cdb by polling if no cdb interrupt */ | ||
| 4533 | if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) | ||
| 4534 | ata_port_queue_task(ap, ata_pio_task, qc, 0); | ||
| 4362 | break; | 4535 | break; |
| 4363 | 4536 | ||
| 4364 | default: | 4537 | default: |
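The reworked ata_qc_issue_prot() above now sets the starting HSM state itself and decides which protocols get their first step pushed into the ata_pio_task() workqueue. A condensed, illustrative restatement of that dispatch (stand-in enums, not libata types):

#include <stdbool.h>
#include <stdio.h>

enum proto { PROT_NODATA, PROT_DMA, PROT_PIO,
	     PROT_ATAPI, PROT_ATAPI_NODATA, PROT_ATAPI_DMA };
enum hsm { ST_FIRST, ST, ST_LAST };

struct start { enum hsm state; bool via_pio_task; };

/* Which state the HSM starts in, and whether the first step runs from the
 * ata_pio_task() workqueue (polling, PIO-out first block, or a CDB sent by
 * polling) instead of waiting for an interrupt. */
static struct start issue(enum proto p, bool write, bool polling, bool cdb_intr)
{
	switch (p) {
	case PROT_NODATA:	return (struct start){ ST_LAST, polling };
	case PROT_DMA:		return (struct start){ ST_LAST, false };
	case PROT_PIO:
		if (write)
			return (struct start){ ST_FIRST, true };
		return (struct start){ ST, polling };
	case PROT_ATAPI:
	case PROT_ATAPI_NODATA:
		return (struct start){ ST_FIRST, !cdb_intr || polling };
	case PROT_ATAPI_DMA:
		return (struct start){ ST_FIRST, !cdb_intr };
	}
	return (struct start){ ST_LAST, false };
}

int main(void)
{
	struct start s = issue(PROT_PIO, true, false, false);

	printf("PIO write: start state %d, via workqueue: %d\n",
	       s.state, s.via_pio_task);
	return 0;
}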
| @@ -4388,52 +4561,66 @@ unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc) | |||
| 4388 | inline unsigned int ata_host_intr (struct ata_port *ap, | 4561 | inline unsigned int ata_host_intr (struct ata_port *ap, |
| 4389 | struct ata_queued_cmd *qc) | 4562 | struct ata_queued_cmd *qc) |
| 4390 | { | 4563 | { |
| 4391 | u8 status, host_stat; | 4564 | u8 status, host_stat = 0; |
| 4392 | |||
| 4393 | switch (qc->tf.protocol) { | ||
| 4394 | |||
| 4395 | case ATA_PROT_DMA: | ||
| 4396 | case ATA_PROT_ATAPI_DMA: | ||
| 4397 | case ATA_PROT_ATAPI: | ||
| 4398 | /* check status of DMA engine */ | ||
| 4399 | host_stat = ap->ops->bmdma_status(ap); | ||
| 4400 | VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat); | ||
| 4401 | 4565 | ||
| 4402 | /* if it's not our irq... */ | 4566 | VPRINTK("ata%u: protocol %d task_state %d\n", |
| 4403 | if (!(host_stat & ATA_DMA_INTR)) | 4567 | ap->id, qc->tf.protocol, ap->hsm_task_state); |
| 4404 | goto idle_irq; | ||
| 4405 | |||
| 4406 | /* before we do anything else, clear DMA-Start bit */ | ||
| 4407 | ap->ops->bmdma_stop(qc); | ||
| 4408 | 4568 | ||
| 4409 | /* fall through */ | 4569 | /* Check whether we are expecting interrupt in this state */ |
| 4410 | 4570 | switch (ap->hsm_task_state) { | |
| 4411 | case ATA_PROT_ATAPI_NODATA: | 4571 | case HSM_ST_FIRST: |
| 4412 | case ATA_PROT_NODATA: | 4572 | /* Some pre-ATAPI-4 devices assert INTRQ |
| 4413 | /* check altstatus */ | 4573 | * at this state when ready to receive CDB. |
| 4414 | status = ata_altstatus(ap); | 4574 | */ |
| 4415 | if (status & ATA_BUSY) | ||
| 4416 | goto idle_irq; | ||
| 4417 | 4575 | ||
| 4418 | /* check main status, clearing INTRQ */ | 4576 | /* Checking the ATA_DFLAG_CDB_INTR flag is enough here. |
| 4419 | status = ata_chk_status(ap); | 4577 | * The flag was turned on only for atapi devices. |
| 4420 | if (unlikely(status & ATA_BUSY)) | 4578 | * No need to check is_atapi_taskfile(&qc->tf) again. |
| 4579 | */ | ||
| 4580 | if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) | ||
| 4421 | goto idle_irq; | 4581 | goto idle_irq; |
| 4422 | DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n", | ||
| 4423 | ap->id, qc->tf.protocol, status); | ||
| 4424 | |||
| 4425 | /* ack bmdma irq events */ | ||
| 4426 | ap->ops->irq_clear(ap); | ||
| 4427 | |||
| 4428 | /* complete taskfile transaction */ | ||
| 4429 | qc->err_mask |= ac_err_mask(status); | ||
| 4430 | ata_qc_complete(qc); | ||
| 4431 | break; | 4582 | break; |
| 4432 | 4583 | case HSM_ST_LAST: | |
| 4584 | if (qc->tf.protocol == ATA_PROT_DMA || | ||
| 4585 | qc->tf.protocol == ATA_PROT_ATAPI_DMA) { | ||
| 4586 | /* check status of DMA engine */ | ||
| 4587 | host_stat = ap->ops->bmdma_status(ap); | ||
| 4588 | VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat); | ||
| 4589 | |||
| 4590 | /* if it's not our irq... */ | ||
| 4591 | if (!(host_stat & ATA_DMA_INTR)) | ||
| 4592 | goto idle_irq; | ||
| 4593 | |||
| 4594 | /* before we do anything else, clear DMA-Start bit */ | ||
| 4595 | ap->ops->bmdma_stop(qc); | ||
| 4596 | |||
| 4597 | if (unlikely(host_stat & ATA_DMA_ERR)) { | ||
| 4598 | /* error when transfering data to/from memory */ | ||
| 4599 | qc->err_mask |= AC_ERR_HOST_BUS; | ||
| 4600 | ap->hsm_task_state = HSM_ST_ERR; | ||
| 4601 | } | ||
| 4602 | } | ||
| 4603 | break; | ||
| 4604 | case HSM_ST: | ||
| 4605 | break; | ||
| 4433 | default: | 4606 | default: |
| 4434 | goto idle_irq; | 4607 | goto idle_irq; |
| 4435 | } | 4608 | } |
| 4436 | 4609 | ||
| 4610 | /* check altstatus */ | ||
| 4611 | status = ata_altstatus(ap); | ||
| 4612 | if (status & ATA_BUSY) | ||
| 4613 | goto idle_irq; | ||
| 4614 | |||
| 4615 | /* check main status, clearing INTRQ */ | ||
| 4616 | status = ata_chk_status(ap); | ||
| 4617 | if (unlikely(status & ATA_BUSY)) | ||
| 4618 | goto idle_irq; | ||
| 4619 | |||
| 4620 | /* ack bmdma irq events */ | ||
| 4621 | ap->ops->irq_clear(ap); | ||
| 4622 | |||
| 4623 | ata_hsm_move(ap, qc, status, 0); | ||
| 4437 | return 1; /* irq handled */ | 4624 | return 1; /* irq handled */ |
| 4438 | 4625 | ||
| 4439 | idle_irq: | 4626 | idle_irq: |
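ata_host_intr() now filters interrupts by HSM state before touching the status register: only HSM_ST, HSM_ST_LAST and, for CDB-interrupt ATAPI devices, HSM_ST_FIRST are expected to interrupt; anything else is routed to idle_irq. A small sketch of that expectation test (illustrative):

#include <stdbool.h>
#include <stdio.h>

enum hsm { ST_IDLE, ST_FIRST, ST, ST_LAST, ST_ERR };

/* Condensed form of the "do we expect an interrupt right now?" test at
 * the top of the reworked ata_host_intr().  HSM_ST_FIRST only counts
 * when the ATAPI device interrupts for its CDB. */
static bool irq_expected(enum hsm state, bool dev_cdb_intr)
{
	switch (state) {
	case ST_FIRST:
		return dev_cdb_intr;
	case ST:
	case ST_LAST:
		return true;
	default:
		return false;	/* unexpected -> idle_irq path */
	}
}

int main(void)
{
	printf("%d %d\n", irq_expected(ST_FIRST, false),
	       irq_expected(ST_LAST, false));	/* prints: 0 1 */
	return 0;
}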
| @@ -4480,11 +4667,11 @@ irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs) | |||
| 4480 | 4667 | ||
| 4481 | ap = host_set->ports[i]; | 4668 | ap = host_set->ports[i]; |
| 4482 | if (ap && | 4669 | if (ap && |
| 4483 | !(ap->flags & (ATA_FLAG_DISABLED | ATA_FLAG_NOINTR))) { | 4670 | !(ap->flags & ATA_FLAG_DISABLED)) { |
| 4484 | struct ata_queued_cmd *qc; | 4671 | struct ata_queued_cmd *qc; |
| 4485 | 4672 | ||
| 4486 | qc = ata_qc_from_tag(ap, ap->active_tag); | 4673 | qc = ata_qc_from_tag(ap, ap->active_tag); |
| 4487 | if (qc && (!(qc->tf.ctl & ATA_NIEN)) && | 4674 | if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) && |
| 4488 | (qc->flags & ATA_QCFLAG_ACTIVE)) | 4675 | (qc->flags & ATA_QCFLAG_ACTIVE)) |
| 4489 | handled |= ata_host_intr(ap, qc); | 4676 | handled |= ata_host_intr(ap, qc); |
| 4490 | } | 4677 | } |
diff --git a/drivers/scsi/libata-eh.c b/drivers/scsi/libata-eh.c index cd133f83e595..e401f353f848 100644 --- a/drivers/scsi/libata-eh.c +++ b/drivers/scsi/libata-eh.c | |||
| @@ -350,7 +350,7 @@ static void ata_qc_timeout(struct ata_queued_cmd *qc) | |||
| 350 | qc->tf.command, drv_stat, host_stat); | 350 | qc->tf.command, drv_stat, host_stat); |
| 351 | 351 | ||
| 352 | /* complete taskfile transaction */ | 352 | /* complete taskfile transaction */ |
| 353 | qc->err_mask |= ac_err_mask(drv_stat); | 353 | qc->err_mask |= AC_ERR_TIMEOUT; |
| 354 | break; | 354 | break; |
| 355 | } | 355 | } |
| 356 | 356 | ||
diff --git a/drivers/scsi/pdc_adma.c b/drivers/scsi/pdc_adma.c index b9a3c566f833..a341fa8d3291 100644 --- a/drivers/scsi/pdc_adma.c +++ b/drivers/scsi/pdc_adma.c | |||
| @@ -455,13 +455,13 @@ static inline unsigned int adma_intr_pkt(struct ata_host_set *host_set) | |||
| 455 | continue; | 455 | continue; |
| 456 | handled = 1; | 456 | handled = 1; |
| 457 | adma_enter_reg_mode(ap); | 457 | adma_enter_reg_mode(ap); |
| 458 | if (ap->flags & (ATA_FLAG_DISABLED | ATA_FLAG_NOINTR)) | 458 | if (ap->flags & ATA_FLAG_DISABLED) |
| 459 | continue; | 459 | continue; |
| 460 | pp = ap->private_data; | 460 | pp = ap->private_data; |
| 461 | if (!pp || pp->state != adma_state_pkt) | 461 | if (!pp || pp->state != adma_state_pkt) |
| 462 | continue; | 462 | continue; |
| 463 | qc = ata_qc_from_tag(ap, ap->active_tag); | 463 | qc = ata_qc_from_tag(ap, ap->active_tag); |
| 464 | if (qc && (!(qc->tf.ctl & ATA_NIEN))) { | 464 | if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) { |
| 465 | if ((status & (aPERR | aPSD | aUIRQ))) | 465 | if ((status & (aPERR | aPSD | aUIRQ))) |
| 466 | qc->err_mask |= AC_ERR_OTHER; | 466 | qc->err_mask |= AC_ERR_OTHER; |
| 467 | else if (pp->pkt[0] != cDONE) | 467 | else if (pp->pkt[0] != cDONE) |
| @@ -480,13 +480,13 @@ static inline unsigned int adma_intr_mmio(struct ata_host_set *host_set) | |||
| 480 | for (port_no = 0; port_no < host_set->n_ports; ++port_no) { | 480 | for (port_no = 0; port_no < host_set->n_ports; ++port_no) { |
| 481 | struct ata_port *ap; | 481 | struct ata_port *ap; |
| 482 | ap = host_set->ports[port_no]; | 482 | ap = host_set->ports[port_no]; |
| 483 | if (ap && (!(ap->flags & (ATA_FLAG_DISABLED | ATA_FLAG_NOINTR)))) { | 483 | if (ap && (!(ap->flags & ATA_FLAG_DISABLED))) { |
| 484 | struct ata_queued_cmd *qc; | 484 | struct ata_queued_cmd *qc; |
| 485 | struct adma_port_priv *pp = ap->private_data; | 485 | struct adma_port_priv *pp = ap->private_data; |
| 486 | if (!pp || pp->state != adma_state_mmio) | 486 | if (!pp || pp->state != adma_state_mmio) |
| 487 | continue; | 487 | continue; |
| 488 | qc = ata_qc_from_tag(ap, ap->active_tag); | 488 | qc = ata_qc_from_tag(ap, ap->active_tag); |
| 489 | if (qc && (!(qc->tf.ctl & ATA_NIEN))) { | 489 | if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) { |
| 490 | 490 | ||
| 491 | /* check main status, clearing INTRQ */ | 491 | /* check main status, clearing INTRQ */ |
| 492 | u8 status = ata_check_status(ap); | 492 | u8 status = ata_check_status(ap); |
diff --git a/drivers/scsi/sata_mv.c b/drivers/scsi/sata_mv.c index e8d00f0efdf2..e6d141dd0385 100644 --- a/drivers/scsi/sata_mv.c +++ b/drivers/scsi/sata_mv.c | |||
| @@ -87,7 +87,7 @@ enum { | |||
| 87 | MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */ | 87 | MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */ |
| 88 | MV_COMMON_FLAGS = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | | 88 | MV_COMMON_FLAGS = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | |
| 89 | ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO | | 89 | ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO | |
| 90 | ATA_FLAG_NO_ATAPI), | 90 | ATA_FLAG_PIO_POLLING), |
| 91 | MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE, | 91 | MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE, |
| 92 | 92 | ||
| 93 | CRQB_FLAG_READ = (1 << 0), | 93 | CRQB_FLAG_READ = (1 << 0), |
| @@ -1396,7 +1396,7 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant, | |||
| 1396 | } | 1396 | } |
| 1397 | } | 1397 | } |
| 1398 | 1398 | ||
| 1399 | if (ap->flags & (ATA_FLAG_DISABLED | ATA_FLAG_NOINTR)) | 1399 | if (ap && (ap->flags & ATA_FLAG_DISABLED)) |
| 1400 | continue; | 1400 | continue; |
| 1401 | 1401 | ||
| 1402 | err_mask = ac_err_mask(ata_status); | 1402 | err_mask = ac_err_mask(ata_status); |
| @@ -1417,7 +1417,7 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant, | |||
| 1417 | VPRINTK("port %u IRQ found for qc, " | 1417 | VPRINTK("port %u IRQ found for qc, " |
| 1418 | "ata_status 0x%x\n", port,ata_status); | 1418 | "ata_status 0x%x\n", port,ata_status); |
| 1419 | /* mark qc status appropriately */ | 1419 | /* mark qc status appropriately */ |
| 1420 | if (!(qc->tf.ctl & ATA_NIEN)) { | 1420 | if (!(qc->tf.flags & ATA_TFLAG_POLLING)) { |
| 1421 | qc->err_mask |= err_mask; | 1421 | qc->err_mask |= err_mask; |
| 1422 | ata_qc_complete(qc); | 1422 | ata_qc_complete(qc); |
| 1423 | } | 1423 | } |
diff --git a/drivers/scsi/sata_nv.c b/drivers/scsi/sata_nv.c index 3a70875be8ba..70c51088d371 100644 --- a/drivers/scsi/sata_nv.c +++ b/drivers/scsi/sata_nv.c | |||
| @@ -279,11 +279,11 @@ static irqreturn_t nv_interrupt (int irq, void *dev_instance, | |||
| 279 | 279 | ||
| 280 | ap = host_set->ports[i]; | 280 | ap = host_set->ports[i]; |
| 281 | if (ap && | 281 | if (ap && |
| 282 | !(ap->flags & (ATA_FLAG_DISABLED | ATA_FLAG_NOINTR))) { | 282 | !(ap->flags & ATA_FLAG_DISABLED)) { |
| 283 | struct ata_queued_cmd *qc; | 283 | struct ata_queued_cmd *qc; |
| 284 | 284 | ||
| 285 | qc = ata_qc_from_tag(ap, ap->active_tag); | 285 | qc = ata_qc_from_tag(ap, ap->active_tag); |
| 286 | if (qc && (!(qc->tf.ctl & ATA_NIEN))) | 286 | if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) |
| 287 | handled += ata_host_intr(ap, qc); | 287 | handled += ata_host_intr(ap, qc); |
| 288 | else | 288 | else |
| 289 | // No request pending? Clear interrupt status | 289 | // No request pending? Clear interrupt status |
diff --git a/drivers/scsi/sata_promise.c b/drivers/scsi/sata_promise.c index e9d61bc2b247..bb000438cb6c 100644 --- a/drivers/scsi/sata_promise.c +++ b/drivers/scsi/sata_promise.c | |||
| @@ -76,7 +76,8 @@ enum { | |||
| 76 | PDC_RESET = (1 << 11), /* HDMA reset */ | 76 | PDC_RESET = (1 << 11), /* HDMA reset */ |
| 77 | 77 | ||
| 78 | PDC_COMMON_FLAGS = ATA_FLAG_NO_LEGACY | ATA_FLAG_SRST | | 78 | PDC_COMMON_FLAGS = ATA_FLAG_NO_LEGACY | ATA_FLAG_SRST | |
| 79 | ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI, | 79 | ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI | |
| 80 | ATA_FLAG_PIO_POLLING, | ||
| 80 | }; | 81 | }; |
| 81 | 82 | ||
| 82 | 83 | ||
| @@ -534,11 +535,11 @@ static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *r | |||
| 534 | ap = host_set->ports[i]; | 535 | ap = host_set->ports[i]; |
| 535 | tmp = mask & (1 << (i + 1)); | 536 | tmp = mask & (1 << (i + 1)); |
| 536 | if (tmp && ap && | 537 | if (tmp && ap && |
| 537 | !(ap->flags & (ATA_FLAG_DISABLED | ATA_FLAG_NOINTR))) { | 538 | !(ap->flags & ATA_FLAG_DISABLED)) { |
| 538 | struct ata_queued_cmd *qc; | 539 | struct ata_queued_cmd *qc; |
| 539 | 540 | ||
| 540 | qc = ata_qc_from_tag(ap, ap->active_tag); | 541 | qc = ata_qc_from_tag(ap, ap->active_tag); |
| 541 | if (qc && (!(qc->tf.ctl & ATA_NIEN))) | 542 | if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) |
| 542 | handled += pdc_host_intr(ap, qc); | 543 | handled += pdc_host_intr(ap, qc); |
| 543 | } | 544 | } |
| 544 | } | 545 | } |
diff --git a/drivers/scsi/sata_qstor.c b/drivers/scsi/sata_qstor.c index 259c2dec4e21..54283e06070e 100644 --- a/drivers/scsi/sata_qstor.c +++ b/drivers/scsi/sata_qstor.c | |||
| @@ -175,7 +175,7 @@ static const struct ata_port_info qs_port_info[] = { | |||
| 175 | .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | | 175 | .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | |
| 176 | ATA_FLAG_SATA_RESET | | 176 | ATA_FLAG_SATA_RESET | |
| 177 | //FIXME ATA_FLAG_SRST | | 177 | //FIXME ATA_FLAG_SRST | |
| 178 | ATA_FLAG_MMIO, | 178 | ATA_FLAG_MMIO | ATA_FLAG_PIO_POLLING, |
| 179 | .pio_mask = 0x10, /* pio4 */ | 179 | .pio_mask = 0x10, /* pio4 */ |
| 180 | .udma_mask = 0x7f, /* udma0-6 */ | 180 | .udma_mask = 0x7f, /* udma0-6 */ |
| 181 | .port_ops = &qs_ata_ops, | 181 | .port_ops = &qs_ata_ops, |
| @@ -394,14 +394,13 @@ static inline unsigned int qs_intr_pkt(struct ata_host_set *host_set) | |||
| 394 | DPRINTK("SFF=%08x%08x: sCHAN=%u sHST=%d sDST=%02x\n", | 394 | DPRINTK("SFF=%08x%08x: sCHAN=%u sHST=%d sDST=%02x\n", |
| 395 | sff1, sff0, port_no, sHST, sDST); | 395 | sff1, sff0, port_no, sHST, sDST); |
| 396 | handled = 1; | 396 | handled = 1; |
| 397 | if (ap && !(ap->flags & | 397 | if (ap && !(ap->flags & ATA_FLAG_DISABLED)) { |
| 398 | (ATA_FLAG_DISABLED|ATA_FLAG_NOINTR))) { | ||
| 399 | struct ata_queued_cmd *qc; | 398 | struct ata_queued_cmd *qc; |
| 400 | struct qs_port_priv *pp = ap->private_data; | 399 | struct qs_port_priv *pp = ap->private_data; |
| 401 | if (!pp || pp->state != qs_state_pkt) | 400 | if (!pp || pp->state != qs_state_pkt) |
| 402 | continue; | 401 | continue; |
| 403 | qc = ata_qc_from_tag(ap, ap->active_tag); | 402 | qc = ata_qc_from_tag(ap, ap->active_tag); |
| 404 | if (qc && (!(qc->tf.ctl & ATA_NIEN))) { | 403 | if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) { |
| 405 | switch (sHST) { | 404 | switch (sHST) { |
| 406 | case 0: /* successful CPB */ | 405 | case 0: /* successful CPB */ |
| 407 | case 3: /* device error */ | 406 | case 3: /* device error */ |
| @@ -428,13 +427,13 @@ static inline unsigned int qs_intr_mmio(struct ata_host_set *host_set) | |||
| 428 | struct ata_port *ap; | 427 | struct ata_port *ap; |
| 429 | ap = host_set->ports[port_no]; | 428 | ap = host_set->ports[port_no]; |
| 430 | if (ap && | 429 | if (ap && |
| 431 | !(ap->flags & (ATA_FLAG_DISABLED | ATA_FLAG_NOINTR))) { | 430 | !(ap->flags & ATA_FLAG_DISABLED)) { |
| 432 | struct ata_queued_cmd *qc; | 431 | struct ata_queued_cmd *qc; |
| 433 | struct qs_port_priv *pp = ap->private_data; | 432 | struct qs_port_priv *pp = ap->private_data; |
| 434 | if (!pp || pp->state != qs_state_mmio) | 433 | if (!pp || pp->state != qs_state_mmio) |
| 435 | continue; | 434 | continue; |
| 436 | qc = ata_qc_from_tag(ap, ap->active_tag); | 435 | qc = ata_qc_from_tag(ap, ap->active_tag); |
| 437 | if (qc && (!(qc->tf.ctl & ATA_NIEN))) { | 436 | if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) { |
| 438 | 437 | ||
| 439 | /* check main status, clearing INTRQ */ | 438 | /* check main status, clearing INTRQ */ |
| 440 | u8 status = ata_check_status(ap); | 439 | u8 status = ata_check_status(ap); |
diff --git a/drivers/scsi/sata_sx4.c b/drivers/scsi/sata_sx4.c index 96d7b73f5fdc..70a695488291 100644 --- a/drivers/scsi/sata_sx4.c +++ b/drivers/scsi/sata_sx4.c | |||
| @@ -218,7 +218,7 @@ static const struct ata_port_info pdc_port_info[] = { | |||
| 218 | .sht = &pdc_sata_sht, | 218 | .sht = &pdc_sata_sht, |
| 219 | .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | | 219 | .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | |
| 220 | ATA_FLAG_SRST | ATA_FLAG_MMIO | | 220 | ATA_FLAG_SRST | ATA_FLAG_MMIO | |
| 221 | ATA_FLAG_NO_ATAPI, | 221 | ATA_FLAG_PIO_POLLING, |
| 222 | .pio_mask = 0x1f, /* pio0-4 */ | 222 | .pio_mask = 0x1f, /* pio0-4 */ |
| 223 | .mwdma_mask = 0x07, /* mwdma0-2 */ | 223 | .mwdma_mask = 0x07, /* mwdma0-2 */ |
| 224 | .udma_mask = 0x7f, /* udma0-6 ; FIXME */ | 224 | .udma_mask = 0x7f, /* udma0-6 ; FIXME */ |
| @@ -833,11 +833,11 @@ static irqreturn_t pdc20621_interrupt (int irq, void *dev_instance, struct pt_re | |||
| 833 | tmp = mask & (1 << i); | 833 | tmp = mask & (1 << i); |
| 834 | VPRINTK("seq %u, port_no %u, ap %p, tmp %x\n", i, port_no, ap, tmp); | 834 | VPRINTK("seq %u, port_no %u, ap %p, tmp %x\n", i, port_no, ap, tmp); |
| 835 | if (tmp && ap && | 835 | if (tmp && ap && |
| 836 | !(ap->flags & (ATA_FLAG_DISABLED | ATA_FLAG_NOINTR))) { | 836 | !(ap->flags & ATA_FLAG_DISABLED)) { |
| 837 | struct ata_queued_cmd *qc; | 837 | struct ata_queued_cmd *qc; |
| 838 | 838 | ||
| 839 | qc = ata_qc_from_tag(ap, ap->active_tag); | 839 | qc = ata_qc_from_tag(ap, ap->active_tag); |
| 840 | if (qc && (!(qc->tf.ctl & ATA_NIEN))) | 840 | if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) |
| 841 | handled += pdc20621_host_intr(ap, qc, (i > 4), | 841 | handled += pdc20621_host_intr(ap, qc, (i > 4), |
| 842 | mmio_base); | 842 | mmio_base); |
| 843 | } | 843 | } |
diff --git a/drivers/scsi/sata_vsc.c b/drivers/scsi/sata_vsc.c index 9646c3932129..0372be7ff1c9 100644 --- a/drivers/scsi/sata_vsc.c +++ b/drivers/scsi/sata_vsc.c | |||
| @@ -221,14 +221,21 @@ static irqreturn_t vsc_sata_interrupt (int irq, void *dev_instance, | |||
| 221 | 221 | ||
| 222 | ap = host_set->ports[i]; | 222 | ap = host_set->ports[i]; |
| 223 | 223 | ||
| 224 | if (ap && !(ap->flags & | 224 | if (is_vsc_sata_int_err(i, int_status)) { |
| 225 | (ATA_FLAG_DISABLED|ATA_FLAG_NOINTR))) { | 225 | u32 err_status; |
| 226 | printk(KERN_DEBUG "%s: ignoring interrupt(s)\n", __FUNCTION__); | ||
| 227 | err_status = ap ? vsc_sata_scr_read(ap, SCR_ERROR) : 0; | ||
| 228 | vsc_sata_scr_write(ap, SCR_ERROR, err_status); | ||
| 229 | handled++; | ||
| 230 | } | ||
| 231 | |||
| 232 | if (ap && !(ap->flags & ATA_FLAG_DISABLED)) { | ||
| 226 | struct ata_queued_cmd *qc; | 233 | struct ata_queued_cmd *qc; |
| 227 | 234 | ||
| 228 | qc = ata_qc_from_tag(ap, ap->active_tag); | 235 | qc = ata_qc_from_tag(ap, ap->active_tag); |
| 229 | if (qc && (!(qc->tf.ctl & ATA_NIEN))) { | 236 | if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) |
| 230 | handled += ata_host_intr(ap, qc); | 237 | handled += ata_host_intr(ap, qc); |
| 231 | } else if (is_vsc_sata_int_err(i, int_status)) { | 238 | else if (is_vsc_sata_int_err(i, int_status)) { |
| 232 | /* | 239 | /* |
| 233 | * On some chips (i.e. Intel 31244), an error | 240 | * On some chips (i.e. Intel 31244), an error |
| 234 | * interrupt will sneak in at initialization | 241 | * interrupt will sneak in at initialization |
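Every converted LLDD interrupt handler above applies the same guard: skip disabled ports and skip commands issued with ATA_TFLAG_POLLING, which ata_pio_task() finishes without interrupt help; the old ATA_FLAG_NOINTR / ATA_NIEN checks disappear. A standalone sketch of that guard (stand-in flag bits, not the libata definitions):

#include <stdbool.h>
#include <stdio.h>

#define PORT_DISABLED	(1u << 0)	/* stand-in for ATA_FLAG_DISABLED */
#define TF_POLLING	(1u << 1)	/* stand-in for ATA_TFLAG_POLLING */

/* Decide whether the shared interrupt handler should process a port:
 * the port must be live, have an active command, and that command must
 * not have been issued in polling mode. */
static bool should_handle_irq(unsigned int port_flags, bool qc_active,
			      unsigned int tf_flags)
{
	if (port_flags & PORT_DISABLED)
		return false;
	return qc_active && !(tf_flags & TF_POLLING);
}

int main(void)
{
	printf("%d\n", should_handle_irq(0, true, 0));			/* 1: handle it  */
	printf("%d\n", should_handle_irq(0, true, TF_POLLING));		/* 0: polled cmd */
	return 0;
}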
diff --git a/include/linux/ata.h b/include/linux/ata.h index a7c41f3df8f4..1cbeb434af9a 100644 --- a/include/linux/ata.h +++ b/include/linux/ata.h | |||
| @@ -212,6 +212,7 @@ enum { | |||
| 212 | ATA_TFLAG_WRITE = (1 << 3), /* data dir: host->dev==1 (write) */ | 212 | ATA_TFLAG_WRITE = (1 << 3), /* data dir: host->dev==1 (write) */ |
| 213 | ATA_TFLAG_LBA = (1 << 4), /* enable LBA */ | 213 | ATA_TFLAG_LBA = (1 << 4), /* enable LBA */ |
| 214 | ATA_TFLAG_FUA = (1 << 5), /* enable FUA */ | 214 | ATA_TFLAG_FUA = (1 << 5), /* enable FUA */ |
| 215 | ATA_TFLAG_POLLING = (1 << 6), /* set nIEN to 1 and use polling */ | ||
| 215 | }; | 216 | }; |
| 216 | 217 | ||
| 217 | enum ata_tf_protocols { | 218 | enum ata_tf_protocols { |
| @@ -285,6 +286,8 @@ struct ata_taskfile { | |||
| 285 | ((u64) (id)[(n) + 1] << 16) | \ | 286 | ((u64) (id)[(n) + 1] << 16) | \ |
| 286 | ((u64) (id)[(n) + 0]) ) | 287 | ((u64) (id)[(n) + 0]) ) |
| 287 | 288 | ||
| 289 | #define ata_id_cdb_intr(id) (((id)[0] & 0x60) == 0x20) | ||
| 290 | |||
| 288 | static inline unsigned int ata_id_major_version(const u16 *id) | 291 | static inline unsigned int ata_id_major_version(const u16 *id) |
| 289 | { | 292 | { |
| 290 | unsigned int mver; | 293 | unsigned int mver; |
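ata_id_cdb_intr() keys off bits 6:5 of IDENTIFY PACKET DEVICE word 0, where the value 01b means the device asserts INTRQ when it is ready to accept the CDB. A standalone check of that encoding (the sample word 0 value is made up for illustration):

#include <stdint.h>
#include <stdio.h>

/* Same test as the new ata_id_cdb_intr() macro: bits 6:5 of IDENTIFY
 * PACKET DEVICE word 0 describe how the device asks for its CDB, and
 * the value 01b (word & 0x60 == 0x20) means "assert INTRQ". */
static int id_cdb_intr(const uint16_t *id)
{
	return (id[0] & 0x60) == 0x20;
}

int main(void)
{
	uint16_t id[256] = { 0 };

	id[0] = 0x8020;		/* made-up ATAPI word 0 with bits 6:5 = 01b */
	printf("CDB interrupt device: %d\n", id_cdb_intr(id));
	return 0;
}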
| @@ -324,6 +327,15 @@ static inline int is_atapi_taskfile(const struct ata_taskfile *tf) | |||
| 324 | (tf->protocol == ATA_PROT_ATAPI_DMA); | 327 | (tf->protocol == ATA_PROT_ATAPI_DMA); |
| 325 | } | 328 | } |
| 326 | 329 | ||
| 330 | static inline int is_multi_taskfile(struct ata_taskfile *tf) | ||
| 331 | { | ||
| 332 | return (tf->command == ATA_CMD_READ_MULTI) || | ||
| 333 | (tf->command == ATA_CMD_WRITE_MULTI) || | ||
| 334 | (tf->command == ATA_CMD_READ_MULTI_EXT) || | ||
| 335 | (tf->command == ATA_CMD_WRITE_MULTI_EXT) || | ||
| 336 | (tf->command == ATA_CMD_WRITE_MULTI_FUA_EXT); | ||
| 337 | } | ||
| 338 | |||
| 327 | static inline int ata_ok(u8 status) | 339 | static inline int ata_ok(u8 status) |
| 328 | { | 340 | { |
| 329 | return ((status & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | ATA_ERR)) | 341 | return ((status & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | ATA_ERR)) |
diff --git a/include/linux/libata.h b/include/linux/libata.h index 6ccacbf889e3..db17723e23fb 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h | |||
| @@ -124,6 +124,7 @@ enum { | |||
| 124 | /* struct ata_device stuff */ | 124 | /* struct ata_device stuff */ |
| 125 | ATA_DFLAG_LBA = (1 << 0), /* device supports LBA */ | 125 | ATA_DFLAG_LBA = (1 << 0), /* device supports LBA */ |
| 126 | ATA_DFLAG_LBA48 = (1 << 1), /* device supports LBA48 */ | 126 | ATA_DFLAG_LBA48 = (1 << 1), /* device supports LBA48 */ |
| 127 | ATA_DFLAG_CDB_INTR = (1 << 2), /* device asserts INTRQ when ready for CDB */ | ||
| 127 | ATA_DFLAG_CFG_MASK = (1 << 8) - 1, | 128 | ATA_DFLAG_CFG_MASK = (1 << 8) - 1, |
| 128 | 129 | ||
| 129 | ATA_DFLAG_PIO = (1 << 8), /* device currently in PIO mode */ | 130 | ATA_DFLAG_PIO = (1 << 8), /* device currently in PIO mode */ |
| @@ -147,9 +148,9 @@ enum { | |||
| 147 | ATA_FLAG_PIO_DMA = (1 << 7), /* PIO cmds via DMA */ | 148 | ATA_FLAG_PIO_DMA = (1 << 7), /* PIO cmds via DMA */ |
| 148 | ATA_FLAG_PIO_LBA48 = (1 << 8), /* Host DMA engine is LBA28 only */ | 149 | ATA_FLAG_PIO_LBA48 = (1 << 8), /* Host DMA engine is LBA28 only */ |
| 149 | ATA_FLAG_IRQ_MASK = (1 << 9), /* Mask IRQ in PIO xfers */ | 150 | ATA_FLAG_IRQ_MASK = (1 << 9), /* Mask IRQ in PIO xfers */ |
| 151 | ATA_FLAG_PIO_POLLING = (1 << 10), /* use polling PIO if LLD | ||
| 152 | * doesn't handle PIO interrupts */ | ||
| 150 | 153 | ||
| 151 | ATA_FLAG_NOINTR = (1 << 13), /* FIXME: Remove this once | ||
| 152 | * proper HSM is in place. */ | ||
| 153 | ATA_FLAG_DEBUGMSG = (1 << 14), | 154 | ATA_FLAG_DEBUGMSG = (1 << 14), |
| 154 | ATA_FLAG_FLUSH_PORT_TASK = (1 << 15), /* flush port task */ | 155 | ATA_FLAG_FLUSH_PORT_TASK = (1 << 15), /* flush port task */ |
| 155 | 156 | ||
| @@ -178,11 +179,8 @@ enum { | |||
| 178 | ATA_HOST_SIMPLEX = (1 << 0), /* Host is simplex, one DMA channel per host_set only */ | 179 | ATA_HOST_SIMPLEX = (1 << 0), /* Host is simplex, one DMA channel per host_set only */ |
| 179 | 180 | ||
| 180 | /* various lengths of time */ | 181 | /* various lengths of time */ |
| 181 | ATA_TMOUT_PIO = 30 * HZ, | ||
| 182 | ATA_TMOUT_BOOT = 30 * HZ, /* heuristic */ | 182 | ATA_TMOUT_BOOT = 30 * HZ, /* heuristic */ |
| 183 | ATA_TMOUT_BOOT_QUICK = 7 * HZ, /* heuristic */ | 183 | ATA_TMOUT_BOOT_QUICK = 7 * HZ, /* heuristic */ |
| 184 | ATA_TMOUT_CDB = 30 * HZ, | ||
| 185 | ATA_TMOUT_CDB_QUICK = 5 * HZ, | ||
| 186 | ATA_TMOUT_INTERNAL = 30 * HZ, | 184 | ATA_TMOUT_INTERNAL = 30 * HZ, |
| 187 | ATA_TMOUT_INTERNAL_QUICK = 5 * HZ, | 185 | ATA_TMOUT_INTERNAL_QUICK = 5 * HZ, |
| 188 | 186 | ||
| @@ -252,14 +250,13 @@ enum { | |||
| 252 | }; | 250 | }; |
| 253 | 251 | ||
| 254 | enum hsm_task_states { | 252 | enum hsm_task_states { |
| 255 | HSM_ST_UNKNOWN, | 253 | HSM_ST_UNKNOWN, /* state unknown */ |
| 256 | HSM_ST_IDLE, | 254 | HSM_ST_IDLE, /* no command in progress */ |
| 257 | HSM_ST_POLL, | 255 | HSM_ST, /* (waiting for the device to) transfer data */ |
| 258 | HSM_ST_TMOUT, | 256 | HSM_ST_LAST, /* (waiting for the device to) complete command */ |
| 259 | HSM_ST, | 257 | HSM_ST_ERR, /* error */ |
| 260 | HSM_ST_LAST, | 258 | HSM_ST_FIRST, /* (waiting for the device to) |
| 261 | HSM_ST_LAST_POLL, | 259 | write CDB or first data block */ |
| 262 | HSM_ST_ERR, | ||
| 263 | }; | 260 | }; |
| 264 | 261 | ||
| 265 | enum ata_completion_errors { | 262 | enum ata_completion_errors { |
| @@ -485,7 +482,6 @@ struct ata_port { | |||
| 485 | struct work_struct port_task; | 482 | struct work_struct port_task; |
| 486 | 483 | ||
| 487 | unsigned int hsm_task_state; | 484 | unsigned int hsm_task_state; |
| 488 | unsigned long pio_task_timeout; | ||
| 489 | 485 | ||
| 490 | u32 msg_enable; | 486 | u32 msg_enable; |
| 491 | struct list_head eh_done_q; | 487 | struct list_head eh_done_q; |
