-rw-r--r--   drivers/scsi/libata-core.c   | 803
-rw-r--r--   drivers/scsi/libata-eh.c     |   2
-rw-r--r--   drivers/scsi/pdc_adma.c      |   8
-rw-r--r--   drivers/scsi/sata_mv.c       |   6
-rw-r--r--   drivers/scsi/sata_nv.c       |   4
-rw-r--r--   drivers/scsi/sata_promise.c  |   7
-rw-r--r--   drivers/scsi/sata_qstor.c    |  11
-rw-r--r--   drivers/scsi/sata_sx4.c      |   6
-rw-r--r--   drivers/scsi/sata_vsc.c      |  15
-rw-r--r--   include/linux/ata.h          |  12
-rw-r--r--   include/linux/libata.h       |  24
11 files changed, 550 insertions, 348 deletions
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index 3387fe35c54f..2b2feb6462a9 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -1309,11 +1309,19 @@ static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev, | |||
1309 | dev->cylinders, dev->heads, dev->sectors); | 1309 | dev->cylinders, dev->heads, dev->sectors); |
1310 | } | 1310 | } |
1311 | 1311 | ||
1312 | if (dev->id[59] & 0x100) { | ||
1313 | dev->multi_count = dev->id[59] & 0xff; | ||
1314 | DPRINTK("ata%u: dev %u multi count %u\n", | ||
1315 | ap->id, dev->devno, dev->multi_count); | ||
1316 | } | ||
1317 | |||
1312 | dev->cdb_len = 16; | 1318 | dev->cdb_len = 16; |
1313 | } | 1319 | } |
1314 | 1320 | ||
1315 | /* ATAPI-specific feature tests */ | 1321 | /* ATAPI-specific feature tests */ |
1316 | else if (dev->class == ATA_DEV_ATAPI) { | 1322 | else if (dev->class == ATA_DEV_ATAPI) { |
1323 | char *cdb_intr_string = ""; | ||
1324 | |||
1317 | rc = atapi_cdb_len(id); | 1325 | rc = atapi_cdb_len(id); |
1318 | if ((rc < 12) || (rc > ATAPI_CDB_LEN)) { | 1326 | if ((rc < 12) || (rc > ATAPI_CDB_LEN)) { |
1319 | printk(KERN_WARNING "ata%u: unsupported CDB len\n", ap->id); | 1327 | printk(KERN_WARNING "ata%u: unsupported CDB len\n", ap->id); |
@@ -1322,10 +1330,16 @@ static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev, | |||
1322 | } | 1330 | } |
1323 | dev->cdb_len = (unsigned int) rc; | 1331 | dev->cdb_len = (unsigned int) rc; |
1324 | 1332 | ||
1333 | if (ata_id_cdb_intr(dev->id)) { | ||
1334 | dev->flags |= ATA_DFLAG_CDB_INTR; | ||
1335 | cdb_intr_string = ", CDB intr"; | ||
1336 | } | ||
1337 | |||
1325 | /* print device info to dmesg */ | 1338 | /* print device info to dmesg */ |
1326 | if (print_info) | 1339 | if (print_info) |
1327 | printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n", | 1340 | printk(KERN_INFO "ata%u: dev %u ATAPI, max %s%s\n", |
1328 | ap->id, dev->devno, ata_mode_string(xfer_mask)); | 1341 | ap->id, dev->devno, ata_mode_string(xfer_mask), |
1342 | cdb_intr_string); | ||
1329 | } | 1343 | } |
1330 | 1344 | ||
1331 | ap->host->max_cmd_len = 0; | 1345 | ap->host->max_cmd_len = 0; |
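The hunk above picks up the drive's current READ/WRITE MULTIPLE setting from IDENTIFY word 59: bit 8 flags the low byte as valid, and bits 0-7 give the number of sectors the device transfers per DRQ block. A stand-alone sketch of that decoding (illustrative helper only, not part of the patch):

    /* Decode the READ/WRITE MULTIPLE block size from IDENTIFY word 59.
     * Returns 0 when the "field valid" bit (bit 8) is clear; otherwise the
     * number of sectors per DRQ block, which the patch stores in
     * dev->multi_count.
     */
    static unsigned int id_multi_count(const u16 *id)
    {
            if (id[59] & 0x100)             /* bits 0-7 valid? */
                    return id[59] & 0xff;   /* sectors per DRQ block */
            return 0;
    }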
@@ -3171,6 +3185,15 @@ int ata_check_atapi_dma(struct ata_queued_cmd *qc) | |||
3171 | if (ap->ops->check_atapi_dma) | 3185 | if (ap->ops->check_atapi_dma) |
3172 | rc = ap->ops->check_atapi_dma(qc); | 3186 | rc = ap->ops->check_atapi_dma(qc); |
3173 | 3187 | ||
3188 | /* We don't support polling DMA. | ||
3189 | * Use PIO if the LLDD handles only interrupts in | ||
3190 | * the HSM_ST_LAST state and the ATAPI device | ||
3191 | * generates CDB interrupts. | ||
3192 | */ | ||
3193 | if ((ap->flags & ATA_FLAG_PIO_POLLING) && | ||
3194 | (qc->dev->flags & ATA_DFLAG_CDB_INTR)) | ||
3195 | rc = 1; | ||
3196 | |||
3174 | return rc; | 3197 | return rc; |
3175 | } | 3198 | } |
3176 | /** | 3199 | /** |
@@ -3413,112 +3436,12 @@ void ata_poll_qc_complete(struct ata_queued_cmd *qc) | |||
3413 | unsigned long flags; | 3436 | unsigned long flags; |
3414 | 3437 | ||
3415 | spin_lock_irqsave(&ap->host_set->lock, flags); | 3438 | spin_lock_irqsave(&ap->host_set->lock, flags); |
3416 | ap->flags &= ~ATA_FLAG_NOINTR; | ||
3417 | ata_irq_on(ap); | 3439 | ata_irq_on(ap); |
3418 | ata_qc_complete(qc); | 3440 | ata_qc_complete(qc); |
3419 | spin_unlock_irqrestore(&ap->host_set->lock, flags); | 3441 | spin_unlock_irqrestore(&ap->host_set->lock, flags); |
3420 | } | 3442 | } |
3421 | 3443 | ||
3422 | /** | 3444 | /** |
3423 | * ata_pio_poll - poll using PIO, depending on current state | ||
3424 | * @qc: qc in progress | ||
3425 | * | ||
3426 | * LOCKING: | ||
3427 | * None. (executing in kernel thread context) | ||
3428 | * | ||
3429 | * RETURNS: | ||
3430 | * timeout value to use | ||
3431 | */ | ||
3432 | static unsigned long ata_pio_poll(struct ata_queued_cmd *qc) | ||
3433 | { | ||
3434 | struct ata_port *ap = qc->ap; | ||
3435 | u8 status; | ||
3436 | unsigned int poll_state = HSM_ST_UNKNOWN; | ||
3437 | unsigned int reg_state = HSM_ST_UNKNOWN; | ||
3438 | |||
3439 | switch (ap->hsm_task_state) { | ||
3440 | case HSM_ST: | ||
3441 | case HSM_ST_POLL: | ||
3442 | poll_state = HSM_ST_POLL; | ||
3443 | reg_state = HSM_ST; | ||
3444 | break; | ||
3445 | case HSM_ST_LAST: | ||
3446 | case HSM_ST_LAST_POLL: | ||
3447 | poll_state = HSM_ST_LAST_POLL; | ||
3448 | reg_state = HSM_ST_LAST; | ||
3449 | break; | ||
3450 | default: | ||
3451 | BUG(); | ||
3452 | break; | ||
3453 | } | ||
3454 | |||
3455 | status = ata_chk_status(ap); | ||
3456 | if (status & ATA_BUSY) { | ||
3457 | if (time_after(jiffies, ap->pio_task_timeout)) { | ||
3458 | qc->err_mask |= AC_ERR_TIMEOUT; | ||
3459 | ap->hsm_task_state = HSM_ST_TMOUT; | ||
3460 | return 0; | ||
3461 | } | ||
3462 | ap->hsm_task_state = poll_state; | ||
3463 | return ATA_SHORT_PAUSE; | ||
3464 | } | ||
3465 | |||
3466 | ap->hsm_task_state = reg_state; | ||
3467 | return 0; | ||
3468 | } | ||
3469 | |||
3470 | /** | ||
3471 | * ata_pio_complete - check if drive is busy or idle | ||
3472 | * @qc: qc to complete | ||
3473 | * | ||
3474 | * LOCKING: | ||
3475 | * None. (executing in kernel thread context) | ||
3476 | * | ||
3477 | * RETURNS: | ||
3478 | * Non-zero if qc completed, zero otherwise. | ||
3479 | */ | ||
3480 | static int ata_pio_complete(struct ata_queued_cmd *qc) | ||
3481 | { | ||
3482 | struct ata_port *ap = qc->ap; | ||
3483 | u8 drv_stat; | ||
3484 | |||
3485 | /* | ||
3486 | * This is purely heuristic. This is a fast path. Sometimes when | ||
3487 | * we enter, BSY will be cleared in a chk-status or two. If not, | ||
3488 | * the drive is probably seeking or something. Snooze for a couple | ||
3489 | * msecs, then chk-status again. If still busy, fall back to | ||
3490 | * HSM_ST_POLL state. | ||
3491 | */ | ||
3492 | drv_stat = ata_busy_wait(ap, ATA_BUSY, 10); | ||
3493 | if (drv_stat & ATA_BUSY) { | ||
3494 | msleep(2); | ||
3495 | drv_stat = ata_busy_wait(ap, ATA_BUSY, 10); | ||
3496 | if (drv_stat & ATA_BUSY) { | ||
3497 | ap->hsm_task_state = HSM_ST_LAST_POLL; | ||
3498 | ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO; | ||
3499 | return 0; | ||
3500 | } | ||
3501 | } | ||
3502 | |||
3503 | drv_stat = ata_wait_idle(ap); | ||
3504 | if (!ata_ok(drv_stat)) { | ||
3505 | qc->err_mask |= __ac_err_mask(drv_stat); | ||
3506 | ap->hsm_task_state = HSM_ST_ERR; | ||
3507 | return 0; | ||
3508 | } | ||
3509 | |||
3510 | ap->hsm_task_state = HSM_ST_IDLE; | ||
3511 | |||
3512 | WARN_ON(qc->err_mask); | ||
3513 | ata_poll_qc_complete(qc); | ||
3514 | |||
3515 | /* another command may start at this point */ | ||
3516 | |||
3517 | return 1; | ||
3518 | } | ||
3519 | |||
3520 | |||
3521 | /** | ||
3522 | * swap_buf_le16 - swap halves of 16-bit words in place | 3445 | * swap_buf_le16 - swap halves of 16-bit words in place |
3523 | * @buf: Buffer to swap | 3446 | * @buf: Buffer to swap |
3524 | * @buf_words: Number of 16-bit words in buffer. | 3447 | * @buf_words: Number of 16-bit words in buffer. |
@@ -3686,7 +3609,23 @@ static void ata_pio_sector(struct ata_queued_cmd *qc) | |||
3686 | page = nth_page(page, (offset >> PAGE_SHIFT)); | 3609 | page = nth_page(page, (offset >> PAGE_SHIFT)); |
3687 | offset %= PAGE_SIZE; | 3610 | offset %= PAGE_SIZE; |
3688 | 3611 | ||
3689 | buf = kmap(page) + offset; | 3612 | DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read"); |
3613 | |||
3614 | if (PageHighMem(page)) { | ||
3615 | unsigned long flags; | ||
3616 | |||
3617 | local_irq_save(flags); | ||
3618 | buf = kmap_atomic(page, KM_IRQ0); | ||
3619 | |||
3620 | /* do the actual data transfer */ | ||
3621 | ata_data_xfer(ap, buf + offset, ATA_SECT_SIZE, do_write); | ||
3622 | |||
3623 | kunmap_atomic(buf, KM_IRQ0); | ||
3624 | local_irq_restore(flags); | ||
3625 | } else { | ||
3626 | buf = page_address(page); | ||
3627 | ata_data_xfer(ap, buf + offset, ATA_SECT_SIZE, do_write); | ||
3628 | } | ||
3690 | 3629 | ||
3691 | qc->cursect++; | 3630 | qc->cursect++; |
3692 | qc->cursg_ofs++; | 3631 | qc->cursg_ofs++; |
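ata_pio_sector() can now be called from interrupt context, so the sleeping kmap() is replaced by kmap_atomic(): highmem pages are mapped through the per-CPU KM_IRQ0 slot with local interrupts disabled, so a nested interrupt on the same CPU cannot reuse the slot, while lowmem pages are reached directly via page_address(). A condensed sketch of the pattern (assumes linux/highmem.h; the function name is a placeholder, this is not the kernel function itself):

    static void pio_xfer_page(struct ata_port *ap, struct page *page,
                              unsigned int offset, unsigned int count,
                              int do_write)
    {
            unsigned char *buf;

            if (PageHighMem(page)) {
                    unsigned long flags;

                    local_irq_save(flags);  /* protect the KM_IRQ0 slot */
                    buf = kmap_atomic(page, KM_IRQ0);
                    ata_data_xfer(ap, buf + offset, count, do_write);
                    kunmap_atomic(buf, KM_IRQ0);
                    local_irq_restore(flags);
            } else {
                    buf = page_address(page);       /* lowmem: always mapped */
                    ata_data_xfer(ap, buf + offset, count, do_write);
            }
    }

The same pattern appears again in __atapi_pio_bytes() further down.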
@@ -3695,14 +3634,68 @@ static void ata_pio_sector(struct ata_queued_cmd *qc) | |||
3695 | qc->cursg++; | 3634 | qc->cursg++; |
3696 | qc->cursg_ofs = 0; | 3635 | qc->cursg_ofs = 0; |
3697 | } | 3636 | } |
3637 | } | ||
3698 | 3638 | ||
3699 | DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read"); | 3639 | /** |
3640 | * ata_pio_sectors - Transfer one or many 512-byte sectors. | ||
3641 | * @qc: Command on going | ||
3642 | * | ||
3643 | * Transfer one or many ATA_SECT_SIZE of data from/to the | ||
3644 | * ATA device for the DRQ request. | ||
3645 | * | ||
3646 | * LOCKING: | ||
3647 | * Inherited from caller. | ||
3648 | */ | ||
3649 | |||
3650 | static void ata_pio_sectors(struct ata_queued_cmd *qc) | ||
3651 | { | ||
3652 | if (is_multi_taskfile(&qc->tf)) { | ||
3653 | /* READ/WRITE MULTIPLE */ | ||
3654 | unsigned int nsect; | ||
3655 | |||
3656 | WARN_ON(qc->dev->multi_count == 0); | ||
3657 | |||
3658 | nsect = min(qc->nsect - qc->cursect, qc->dev->multi_count); | ||
3659 | while (nsect--) | ||
3660 | ata_pio_sector(qc); | ||
3661 | } else | ||
3662 | ata_pio_sector(qc); | ||
3663 | } | ||
3664 | |||
3665 | /** | ||
3666 | * atapi_send_cdb - Write CDB bytes to hardware | ||
3667 | * @ap: Port to which ATAPI device is attached. | ||
3668 | * @qc: Taskfile currently active | ||
3669 | * | ||
3670 | * When device has indicated its readiness to accept | ||
3671 | * a CDB, this function is called. Send the CDB. | ||
3672 | * | ||
3673 | * LOCKING: | ||
3674 | * caller. | ||
3675 | */ | ||
3700 | 3676 | ||
3701 | /* do the actual data transfer */ | 3677 | static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc) |
3702 | do_write = (qc->tf.flags & ATA_TFLAG_WRITE); | 3678 | { |
3703 | ata_data_xfer(ap, buf, ATA_SECT_SIZE, do_write); | 3679 | /* send SCSI cdb */ |
3680 | DPRINTK("send cdb\n"); | ||
3681 | WARN_ON(qc->dev->cdb_len < 12); | ||
3682 | |||
3683 | ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1); | ||
3684 | ata_altstatus(ap); /* flush */ | ||
3704 | 3685 | ||
3705 | kunmap(page); | 3686 | switch (qc->tf.protocol) { |
3687 | case ATA_PROT_ATAPI: | ||
3688 | ap->hsm_task_state = HSM_ST; | ||
3689 | break; | ||
3690 | case ATA_PROT_ATAPI_NODATA: | ||
3691 | ap->hsm_task_state = HSM_ST_LAST; | ||
3692 | break; | ||
3693 | case ATA_PROT_ATAPI_DMA: | ||
3694 | ap->hsm_task_state = HSM_ST_LAST; | ||
3695 | /* initiate bmdma */ | ||
3696 | ap->ops->bmdma_start(qc); | ||
3697 | break; | ||
3698 | } | ||
3706 | } | 3699 | } |
3707 | 3700 | ||
3708 | /** | 3701 | /** |
@@ -3768,7 +3761,23 @@ next_sg: | |||
3768 | /* don't cross page boundaries */ | 3761 | /* don't cross page boundaries */ |
3769 | count = min(count, (unsigned int)PAGE_SIZE - offset); | 3762 | count = min(count, (unsigned int)PAGE_SIZE - offset); |
3770 | 3763 | ||
3771 | buf = kmap(page) + offset; | 3764 | DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read"); |
3765 | |||
3766 | if (PageHighMem(page)) { | ||
3767 | unsigned long flags; | ||
3768 | |||
3769 | local_irq_save(flags); | ||
3770 | buf = kmap_atomic(page, KM_IRQ0); | ||
3771 | |||
3772 | /* do the actual data transfer */ | ||
3773 | ata_data_xfer(ap, buf + offset, count, do_write); | ||
3774 | |||
3775 | kunmap_atomic(buf, KM_IRQ0); | ||
3776 | local_irq_restore(flags); | ||
3777 | } else { | ||
3778 | buf = page_address(page); | ||
3779 | ata_data_xfer(ap, buf + offset, count, do_write); | ||
3780 | } | ||
3772 | 3781 | ||
3773 | bytes -= count; | 3782 | bytes -= count; |
3774 | qc->curbytes += count; | 3783 | qc->curbytes += count; |
@@ -3779,13 +3788,6 @@ next_sg: | |||
3779 | qc->cursg_ofs = 0; | 3788 | qc->cursg_ofs = 0; |
3780 | } | 3789 | } |
3781 | 3790 | ||
3782 | DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read"); | ||
3783 | |||
3784 | /* do the actual data transfer */ | ||
3785 | ata_data_xfer(ap, buf, count, do_write); | ||
3786 | |||
3787 | kunmap(page); | ||
3788 | |||
3789 | if (bytes) | 3791 | if (bytes) |
3790 | goto next_sg; | 3792 | goto next_sg; |
3791 | } | 3793 | } |
@@ -3822,6 +3824,8 @@ static void atapi_pio_bytes(struct ata_queued_cmd *qc) | |||
3822 | if (do_write != i_write) | 3824 | if (do_write != i_write) |
3823 | goto err_out; | 3825 | goto err_out; |
3824 | 3826 | ||
3827 | VPRINTK("ata%u: xfering %d bytes\n", ap->id, bytes); | ||
3828 | |||
3825 | __atapi_pio_bytes(qc, bytes); | 3829 | __atapi_pio_bytes(qc, bytes); |
3826 | 3830 | ||
3827 | return; | 3831 | return; |
@@ -3834,187 +3838,294 @@ err_out: | |||
3834 | } | 3838 | } |
3835 | 3839 | ||
3836 | /** | 3840 | /** |
3837 | * ata_pio_block - start PIO on a block | 3841 | * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue. |
3838 | * @qc: qc to transfer block for | 3842 | * @ap: the target ata_port |
3843 | * @qc: qc on going | ||
3839 | * | 3844 | * |
3840 | * LOCKING: | 3845 | * RETURNS: |
3841 | * None. (executing in kernel thread context) | 3846 | * 1 if ok in workqueue, 0 otherwise. |
3842 | */ | 3847 | */ |
3843 | static void ata_pio_block(struct ata_queued_cmd *qc) | 3848 | |
3849 | static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc) | ||
3844 | { | 3850 | { |
3845 | struct ata_port *ap = qc->ap; | 3851 | if (qc->tf.flags & ATA_TFLAG_POLLING) |
3846 | u8 status; | 3852 | return 1; |
3847 | 3853 | ||
3848 | /* | 3854 | if (ap->hsm_task_state == HSM_ST_FIRST) { |
3849 | * This is purely heuristic. This is a fast path. | 3855 | if (qc->tf.protocol == ATA_PROT_PIO && |
3850 | * Sometimes when we enter, BSY will be cleared in | 3856 | (qc->tf.flags & ATA_TFLAG_WRITE)) |
3851 | * a chk-status or two. If not, the drive is probably seeking | 3857 | return 1; |
3852 | * or something. Snooze for a couple msecs, then | ||
3853 | * chk-status again. If still busy, fall back to | ||
3854 | * HSM_ST_POLL state. | ||
3855 | */ | ||
3856 | status = ata_busy_wait(ap, ATA_BUSY, 5); | ||
3857 | if (status & ATA_BUSY) { | ||
3858 | msleep(2); | ||
3859 | status = ata_busy_wait(ap, ATA_BUSY, 10); | ||
3860 | if (status & ATA_BUSY) { | ||
3861 | ap->hsm_task_state = HSM_ST_POLL; | ||
3862 | ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO; | ||
3863 | return; | ||
3864 | } | ||
3865 | } | ||
3866 | 3858 | ||
3867 | /* check error */ | 3859 | if (is_atapi_taskfile(&qc->tf) && |
3868 | if (status & (ATA_ERR | ATA_DF)) { | 3860 | !(qc->dev->flags & ATA_DFLAG_CDB_INTR)) |
3869 | qc->err_mask |= AC_ERR_DEV; | 3861 | return 1; |
3870 | ap->hsm_task_state = HSM_ST_ERR; | ||
3871 | return; | ||
3872 | } | 3862 | } |
3873 | 3863 | ||
3874 | /* transfer data if any */ | 3864 | return 0; |
3875 | if (is_atapi_taskfile(&qc->tf)) { | 3865 | } |
3876 | /* DRQ=0 means no more data to transfer */ | ||
3877 | if ((status & ATA_DRQ) == 0) { | ||
3878 | ap->hsm_task_state = HSM_ST_LAST; | ||
3879 | return; | ||
3880 | } | ||
3881 | 3866 | ||
3882 | atapi_pio_bytes(qc); | 3867 | /** |
3883 | } else { | 3868 | * ata_hsm_move - move the HSM to the next state. |
3884 | /* handle BSY=0, DRQ=0 as error */ | 3869 | * @ap: the target ata_port |
3885 | if ((status & ATA_DRQ) == 0) { | 3870 | * @qc: qc on going |
3871 | * @status: current device status | ||
3872 | * @in_wq: 1 if called from workqueue, 0 otherwise | ||
3873 | * | ||
3874 | * RETURNS: | ||
3875 | * 1 when poll next status needed, 0 otherwise. | ||
3876 | */ | ||
3877 | |||
3878 | static int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc, | ||
3879 | u8 status, int in_wq) | ||
3880 | { | ||
3881 | unsigned long flags = 0; | ||
3882 | int poll_next; | ||
3883 | |||
3884 | WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0); | ||
3885 | |||
3886 | /* Make sure ata_qc_issue_prot() does not throw things | ||
3887 | * like DMA polling into the workqueue. Notice that | ||
3888 | * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING). | ||
3889 | */ | ||
3890 | WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc)); | ||
3891 | |||
3892 | fsm_start: | ||
3893 | DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n", | ||
3894 | ap->id, qc->tf.protocol, ap->hsm_task_state, status); | ||
3895 | |||
3896 | switch (ap->hsm_task_state) { | ||
3897 | case HSM_ST_FIRST: | ||
3898 | /* Send first data block or PACKET CDB */ | ||
3899 | |||
3900 | /* If polling, we will stay in the work queue after | ||
3901 | * sending the data. Otherwise, interrupt handler | ||
3902 | * takes over after sending the data. | ||
3903 | */ | ||
3904 | poll_next = (qc->tf.flags & ATA_TFLAG_POLLING); | ||
3905 | |||
3906 | /* check device status */ | ||
3907 | if (unlikely((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ)) { | ||
3908 | /* Wrong status. Let EH handle this */ | ||
3886 | qc->err_mask |= AC_ERR_HSM; | 3909 | qc->err_mask |= AC_ERR_HSM; |
3887 | ap->hsm_task_state = HSM_ST_ERR; | 3910 | ap->hsm_task_state = HSM_ST_ERR; |
3888 | return; | 3911 | goto fsm_start; |
3889 | } | 3912 | } |
3890 | 3913 | ||
3891 | ata_pio_sector(qc); | 3914 | /* Device should not ask for data transfer (DRQ=1) |
3892 | } | 3915 | * when it finds something wrong. |
3893 | } | 3916 | * We ignore DRQ here and stop the HSM by |
3917 | * changing hsm_task_state to HSM_ST_ERR and | ||
3918 | * let the EH abort the command or reset the device. | ||
3919 | */ | ||
3920 | if (unlikely(status & (ATA_ERR | ATA_DF))) { | ||
3921 | printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n", | ||
3922 | ap->id, status); | ||
3923 | qc->err_mask |= AC_ERR_DEV; | ||
3924 | ap->hsm_task_state = HSM_ST_ERR; | ||
3925 | goto fsm_start; | ||
3926 | } | ||
3894 | 3927 | ||
3895 | static void ata_pio_error(struct ata_queued_cmd *qc) | 3928 | /* Send the CDB (atapi) or the first data block (ata pio out). |
3896 | { | 3929 | * During the state transition, interrupt handler shouldn't |
3897 | struct ata_port *ap = qc->ap; | 3930 | * be invoked before the data transfer is complete and |
3931 | * hsm_task_state is changed. Hence, the following locking. | ||
3932 | */ | ||
3933 | if (in_wq) | ||
3934 | spin_lock_irqsave(&ap->host_set->lock, flags); | ||
3898 | 3935 | ||
3899 | if (qc->tf.command != ATA_CMD_PACKET) | 3936 | if (qc->tf.protocol == ATA_PROT_PIO) { |
3900 | printk(KERN_WARNING "ata%u: dev %u PIO error\n", | 3937 | /* PIO data out protocol. |
3901 | ap->id, qc->dev->devno); | 3938 | * send first data block. |
3939 | */ | ||
3902 | 3940 | ||
3903 | /* make sure qc->err_mask is available to | 3941 | /* ata_pio_sectors() might change the state |
3904 | * know what's wrong and recover | 3942 | * to HSM_ST_LAST. so, the state is changed here |
3905 | */ | 3943 | * before ata_pio_sectors(). |
3906 | WARN_ON(qc->err_mask == 0); | 3944 | */ |
3945 | ap->hsm_task_state = HSM_ST; | ||
3946 | ata_pio_sectors(qc); | ||
3947 | ata_altstatus(ap); /* flush */ | ||
3948 | } else | ||
3949 | /* send CDB */ | ||
3950 | atapi_send_cdb(ap, qc); | ||
3951 | |||
3952 | if (in_wq) | ||
3953 | spin_unlock_irqrestore(&ap->host_set->lock, flags); | ||
3954 | |||
3955 | /* if polling, ata_pio_task() handles the rest. | ||
3956 | * otherwise, interrupt handler takes over from here. | ||
3957 | */ | ||
3958 | break; | ||
3907 | 3959 | ||
3908 | ap->hsm_task_state = HSM_ST_IDLE; | 3960 | case HSM_ST: |
3961 | /* complete command or read/write the data register */ | ||
3962 | if (qc->tf.protocol == ATA_PROT_ATAPI) { | ||
3963 | /* ATAPI PIO protocol */ | ||
3964 | if ((status & ATA_DRQ) == 0) { | ||
3965 | /* no more data to transfer */ | ||
3966 | ap->hsm_task_state = HSM_ST_LAST; | ||
3967 | goto fsm_start; | ||
3968 | } | ||
3909 | 3969 | ||
3910 | ata_poll_qc_complete(qc); | 3970 | /* Device should not ask for data transfer (DRQ=1) |
3911 | } | 3971 | * when it finds something wrong. |
3972 | * We ignore DRQ here and stop the HSM by | ||
3973 | * changing hsm_task_state to HSM_ST_ERR and | ||
3974 | * let the EH abort the command or reset the device. | ||
3975 | */ | ||
3976 | if (unlikely(status & (ATA_ERR | ATA_DF))) { | ||
3977 | printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n", | ||
3978 | ap->id, status); | ||
3979 | qc->err_mask |= AC_ERR_DEV; | ||
3980 | ap->hsm_task_state = HSM_ST_ERR; | ||
3981 | goto fsm_start; | ||
3982 | } | ||
3912 | 3983 | ||
3913 | static void ata_pio_task(void *_data) | 3984 | atapi_pio_bytes(qc); |
3914 | { | ||
3915 | struct ata_queued_cmd *qc = _data; | ||
3916 | struct ata_port *ap = qc->ap; | ||
3917 | unsigned long timeout; | ||
3918 | int qc_completed; | ||
3919 | 3985 | ||
3920 | fsm_start: | 3986 | if (unlikely(ap->hsm_task_state == HSM_ST_ERR)) |
3921 | timeout = 0; | 3987 | /* bad ireason reported by device */ |
3922 | qc_completed = 0; | 3988 | goto fsm_start; |
3923 | 3989 | ||
3924 | switch (ap->hsm_task_state) { | 3990 | } else { |
3925 | case HSM_ST_IDLE: | 3991 | /* ATA PIO protocol */ |
3926 | return; | 3992 | if (unlikely((status & ATA_DRQ) == 0)) { |
3993 | /* handle BSY=0, DRQ=0 as error */ | ||
3994 | qc->err_mask |= AC_ERR_HSM; | ||
3995 | ap->hsm_task_state = HSM_ST_ERR; | ||
3996 | goto fsm_start; | ||
3997 | } | ||
3927 | 3998 | ||
3928 | case HSM_ST: | 3999 | /* For PIO reads, some devices may ask for |
3929 | ata_pio_block(qc); | 4000 | * data transfer (DRQ=1) alone with ERR=1. |
4001 | * We respect DRQ here and transfer one | ||
4002 | * block of junk data before changing the | ||
4003 | * hsm_task_state to HSM_ST_ERR. | ||
4004 | * | ||
4005 | * For PIO writes, ERR=1 DRQ=1 doesn't make | ||
4006 | * sense since the data block has been | ||
4007 | * transferred to the device. | ||
4008 | */ | ||
4009 | if (unlikely(status & (ATA_ERR | ATA_DF))) { | ||
4010 | /* data might be corrputed */ | ||
4011 | qc->err_mask |= AC_ERR_DEV; | ||
4012 | |||
4013 | if (!(qc->tf.flags & ATA_TFLAG_WRITE)) { | ||
4014 | ata_pio_sectors(qc); | ||
4015 | ata_altstatus(ap); | ||
4016 | status = ata_wait_idle(ap); | ||
4017 | } | ||
4018 | |||
4019 | /* ata_pio_sectors() might change the | ||
4020 | * state to HSM_ST_LAST. so, the state | ||
4021 | * is changed after ata_pio_sectors(). | ||
4022 | */ | ||
4023 | ap->hsm_task_state = HSM_ST_ERR; | ||
4024 | goto fsm_start; | ||
4025 | } | ||
4026 | |||
4027 | ata_pio_sectors(qc); | ||
4028 | |||
4029 | if (ap->hsm_task_state == HSM_ST_LAST && | ||
4030 | (!(qc->tf.flags & ATA_TFLAG_WRITE))) { | ||
4031 | /* all data read */ | ||
4032 | ata_altstatus(ap); | ||
4033 | status = ata_wait_idle(ap); | ||
4034 | goto fsm_start; | ||
4035 | } | ||
4036 | } | ||
4037 | |||
4038 | ata_altstatus(ap); /* flush */ | ||
4039 | poll_next = 1; | ||
3930 | break; | 4040 | break; |
3931 | 4041 | ||
3932 | case HSM_ST_LAST: | 4042 | case HSM_ST_LAST: |
3933 | qc_completed = ata_pio_complete(qc); | 4043 | if (unlikely(!ata_ok(status))) { |
3934 | break; | 4044 | qc->err_mask |= __ac_err_mask(status); |
4045 | ap->hsm_task_state = HSM_ST_ERR; | ||
4046 | goto fsm_start; | ||
4047 | } | ||
4048 | |||
4049 | /* no more data to transfer */ | ||
4050 | DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n", | ||
4051 | ap->id, qc->dev->devno, status); | ||
4052 | |||
4053 | WARN_ON(qc->err_mask); | ||
4054 | |||
4055 | ap->hsm_task_state = HSM_ST_IDLE; | ||
3935 | 4056 | ||
3936 | case HSM_ST_POLL: | 4057 | /* complete taskfile transaction */ |
3937 | case HSM_ST_LAST_POLL: | 4058 | if (in_wq) |
3938 | timeout = ata_pio_poll(qc); | 4059 | ata_poll_qc_complete(qc); |
4060 | else | ||
4061 | ata_qc_complete(qc); | ||
4062 | |||
4063 | poll_next = 0; | ||
3939 | break; | 4064 | break; |
3940 | 4065 | ||
3941 | case HSM_ST_TMOUT: | ||
3942 | case HSM_ST_ERR: | 4066 | case HSM_ST_ERR: |
3943 | ata_pio_error(qc); | 4067 | if (qc->tf.command != ATA_CMD_PACKET) |
3944 | return; | 4068 | printk(KERN_ERR "ata%u: dev %u command error, drv_stat 0x%x\n", |
4069 | ap->id, qc->dev->devno, status); | ||
4070 | |||
4071 | /* make sure qc->err_mask is available to | ||
4072 | * know what's wrong and recover | ||
4073 | */ | ||
4074 | WARN_ON(qc->err_mask == 0); | ||
4075 | |||
4076 | ap->hsm_task_state = HSM_ST_IDLE; | ||
4077 | |||
4078 | /* complete taskfile transaction */ | ||
4079 | if (in_wq) | ||
4080 | ata_poll_qc_complete(qc); | ||
4081 | else | ||
4082 | ata_qc_complete(qc); | ||
4083 | |||
4084 | poll_next = 0; | ||
4085 | break; | ||
4086 | default: | ||
4087 | poll_next = 0; | ||
4088 | BUG(); | ||
3945 | } | 4089 | } |
3946 | 4090 | ||
3947 | if (timeout) | 4091 | return poll_next; |
3948 | ata_port_queue_task(ap, ata_pio_task, qc, timeout); | ||
3949 | else if (!qc_completed) | ||
3950 | goto fsm_start; | ||
3951 | } | 4092 | } |
3952 | 4093 | ||
3953 | /** | 4094 | static void ata_pio_task(void *_data) |
3954 | * atapi_packet_task - Write CDB bytes to hardware | ||
3955 | * @_data: qc in progress | ||
3956 | * | ||
3957 | * When device has indicated its readiness to accept | ||
3958 | * a CDB, this function is called. Send the CDB. | ||
3959 | * If DMA is to be performed, exit immediately. | ||
3960 | * Otherwise, we are in polling mode, so poll | ||
3961 | * status under operation succeeds or fails. | ||
3962 | * | ||
3963 | * LOCKING: | ||
3964 | * Kernel thread context (may sleep) | ||
3965 | */ | ||
3966 | static void atapi_packet_task(void *_data) | ||
3967 | { | 4095 | { |
3968 | struct ata_queued_cmd *qc = _data; | 4096 | struct ata_queued_cmd *qc = _data; |
3969 | struct ata_port *ap = qc->ap; | 4097 | struct ata_port *ap = qc->ap; |
3970 | u8 status; | 4098 | u8 status; |
4099 | int poll_next; | ||
3971 | 4100 | ||
3972 | /* sleep-wait for BSY to clear */ | 4101 | fsm_start: |
3973 | DPRINTK("busy wait\n"); | 4102 | WARN_ON(ap->hsm_task_state == HSM_ST_IDLE); |
3974 | if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB)) { | ||
3975 | qc->err_mask |= AC_ERR_TIMEOUT; | ||
3976 | goto err_out; | ||
3977 | } | ||
3978 | |||
3979 | /* make sure DRQ is set */ | ||
3980 | status = ata_chk_status(ap); | ||
3981 | if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ) { | ||
3982 | qc->err_mask |= AC_ERR_HSM; | ||
3983 | goto err_out; | ||
3984 | } | ||
3985 | |||
3986 | /* send SCSI cdb */ | ||
3987 | DPRINTK("send cdb\n"); | ||
3988 | WARN_ON(qc->dev->cdb_len < 12); | ||
3989 | |||
3990 | if (qc->tf.protocol == ATA_PROT_ATAPI_DMA || | ||
3991 | qc->tf.protocol == ATA_PROT_ATAPI_NODATA) { | ||
3992 | unsigned long flags; | ||
3993 | |||
3994 | /* Once we're done issuing command and kicking bmdma, | ||
3995 | * irq handler takes over. To not lose irq, we need | ||
3996 | * to clear NOINTR flag before sending cdb, but | ||
3997 | * interrupt handler shouldn't be invoked before we're | ||
3998 | * finished. Hence, the following locking. | ||
3999 | */ | ||
4000 | spin_lock_irqsave(&ap->host_set->lock, flags); | ||
4001 | ap->flags &= ~ATA_FLAG_NOINTR; | ||
4002 | ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1); | ||
4003 | if (qc->tf.protocol == ATA_PROT_ATAPI_DMA) | ||
4004 | ap->ops->bmdma_start(qc); /* initiate bmdma */ | ||
4005 | spin_unlock_irqrestore(&ap->host_set->lock, flags); | ||
4006 | } else { | ||
4007 | ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1); | ||
4008 | 4103 | ||
4009 | /* PIO commands are handled by polling */ | 4104 | /* |
4010 | ap->hsm_task_state = HSM_ST; | 4105 | * This is purely heuristic. This is a fast path. |
4011 | ata_port_queue_task(ap, ata_pio_task, qc, 0); | 4106 | * Sometimes when we enter, BSY will be cleared in |
4107 | * a chk-status or two. If not, the drive is probably seeking | ||
4108 | * or something. Snooze for a couple msecs, then | ||
4109 | * chk-status again. If still busy, queue delayed work. | ||
4110 | */ | ||
4111 | status = ata_busy_wait(ap, ATA_BUSY, 5); | ||
4112 | if (status & ATA_BUSY) { | ||
4113 | msleep(2); | ||
4114 | status = ata_busy_wait(ap, ATA_BUSY, 10); | ||
4115 | if (status & ATA_BUSY) { | ||
4116 | ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE); | ||
4117 | return; | ||
4118 | } | ||
4012 | } | 4119 | } |
4013 | 4120 | ||
4014 | return; | 4121 | /* move the HSM */ |
4122 | poll_next = ata_hsm_move(ap, qc, status, 1); | ||
4015 | 4123 | ||
4016 | err_out: | 4124 | /* another command or interrupt handler |
4017 | ata_poll_qc_complete(qc); | 4125 | * may be running at this point. |
4126 | */ | ||
4127 | if (poll_next) | ||
4128 | goto fsm_start; | ||
4018 | } | 4129 | } |
4019 | 4130 | ||
4020 | /** | 4131 | /** |
@@ -4204,43 +4315,105 @@ unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc) | |||
4204 | { | 4315 | { |
4205 | struct ata_port *ap = qc->ap; | 4316 | struct ata_port *ap = qc->ap; |
4206 | 4317 | ||
4318 | /* Use polling pio if the LLD doesn't handle | ||
4319 | * interrupt driven pio and atapi CDB interrupt. | ||
4320 | */ | ||
4321 | if (ap->flags & ATA_FLAG_PIO_POLLING) { | ||
4322 | switch (qc->tf.protocol) { | ||
4323 | case ATA_PROT_PIO: | ||
4324 | case ATA_PROT_ATAPI: | ||
4325 | case ATA_PROT_ATAPI_NODATA: | ||
4326 | qc->tf.flags |= ATA_TFLAG_POLLING; | ||
4327 | break; | ||
4328 | case ATA_PROT_ATAPI_DMA: | ||
4329 | if (qc->dev->flags & ATA_DFLAG_CDB_INTR) | ||
4330 | /* see ata_check_atapi_dma() */ | ||
4331 | BUG(); | ||
4332 | break; | ||
4333 | default: | ||
4334 | break; | ||
4335 | } | ||
4336 | } | ||
4337 | |||
4338 | /* select the device */ | ||
4207 | ata_dev_select(ap, qc->dev->devno, 1, 0); | 4339 | ata_dev_select(ap, qc->dev->devno, 1, 0); |
4208 | 4340 | ||
4341 | /* start the command */ | ||
4209 | switch (qc->tf.protocol) { | 4342 | switch (qc->tf.protocol) { |
4210 | case ATA_PROT_NODATA: | 4343 | case ATA_PROT_NODATA: |
4344 | if (qc->tf.flags & ATA_TFLAG_POLLING) | ||
4345 | ata_qc_set_polling(qc); | ||
4346 | |||
4211 | ata_tf_to_host(ap, &qc->tf); | 4347 | ata_tf_to_host(ap, &qc->tf); |
4348 | ap->hsm_task_state = HSM_ST_LAST; | ||
4349 | |||
4350 | if (qc->tf.flags & ATA_TFLAG_POLLING) | ||
4351 | ata_port_queue_task(ap, ata_pio_task, qc, 0); | ||
4352 | |||
4212 | break; | 4353 | break; |
4213 | 4354 | ||
4214 | case ATA_PROT_DMA: | 4355 | case ATA_PROT_DMA: |
4356 | WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING); | ||
4357 | |||
4215 | ap->ops->tf_load(ap, &qc->tf); /* load tf registers */ | 4358 | ap->ops->tf_load(ap, &qc->tf); /* load tf registers */ |
4216 | ap->ops->bmdma_setup(qc); /* set up bmdma */ | 4359 | ap->ops->bmdma_setup(qc); /* set up bmdma */ |
4217 | ap->ops->bmdma_start(qc); /* initiate bmdma */ | 4360 | ap->ops->bmdma_start(qc); /* initiate bmdma */ |
4361 | ap->hsm_task_state = HSM_ST_LAST; | ||
4218 | break; | 4362 | break; |
4219 | 4363 | ||
4220 | case ATA_PROT_PIO: /* load tf registers, initiate polling pio */ | 4364 | case ATA_PROT_PIO: |
4221 | ata_qc_set_polling(qc); | 4365 | if (qc->tf.flags & ATA_TFLAG_POLLING) |
4222 | ata_tf_to_host(ap, &qc->tf); | 4366 | ata_qc_set_polling(qc); |
4223 | ap->hsm_task_state = HSM_ST; | ||
4224 | ata_port_queue_task(ap, ata_pio_task, qc, 0); | ||
4225 | break; | ||
4226 | 4367 | ||
4227 | case ATA_PROT_ATAPI: | ||
4228 | ata_qc_set_polling(qc); | ||
4229 | ata_tf_to_host(ap, &qc->tf); | 4368 | ata_tf_to_host(ap, &qc->tf); |
4230 | ata_port_queue_task(ap, atapi_packet_task, qc, 0); | 4369 | |
4370 | if (qc->tf.flags & ATA_TFLAG_WRITE) { | ||
4371 | /* PIO data out protocol */ | ||
4372 | ap->hsm_task_state = HSM_ST_FIRST; | ||
4373 | ata_port_queue_task(ap, ata_pio_task, qc, 0); | ||
4374 | |||
4375 | /* always send first data block using | ||
4376 | * the ata_pio_task() codepath. | ||
4377 | */ | ||
4378 | } else { | ||
4379 | /* PIO data in protocol */ | ||
4380 | ap->hsm_task_state = HSM_ST; | ||
4381 | |||
4382 | if (qc->tf.flags & ATA_TFLAG_POLLING) | ||
4383 | ata_port_queue_task(ap, ata_pio_task, qc, 0); | ||
4384 | |||
4385 | /* if polling, ata_pio_task() handles the rest. | ||
4386 | * otherwise, interrupt handler takes over from here. | ||
4387 | */ | ||
4388 | } | ||
4389 | |||
4231 | break; | 4390 | break; |
4232 | 4391 | ||
4392 | case ATA_PROT_ATAPI: | ||
4233 | case ATA_PROT_ATAPI_NODATA: | 4393 | case ATA_PROT_ATAPI_NODATA: |
4234 | ap->flags |= ATA_FLAG_NOINTR; | 4394 | if (qc->tf.flags & ATA_TFLAG_POLLING) |
4395 | ata_qc_set_polling(qc); | ||
4396 | |||
4235 | ata_tf_to_host(ap, &qc->tf); | 4397 | ata_tf_to_host(ap, &qc->tf); |
4236 | ata_port_queue_task(ap, atapi_packet_task, qc, 0); | 4398 | |
4399 | ap->hsm_task_state = HSM_ST_FIRST; | ||
4400 | |||
4401 | /* send cdb by polling if no cdb interrupt */ | ||
4402 | if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) || | ||
4403 | (qc->tf.flags & ATA_TFLAG_POLLING)) | ||
4404 | ata_port_queue_task(ap, ata_pio_task, qc, 0); | ||
4237 | break; | 4405 | break; |
4238 | 4406 | ||
4239 | case ATA_PROT_ATAPI_DMA: | 4407 | case ATA_PROT_ATAPI_DMA: |
4240 | ap->flags |= ATA_FLAG_NOINTR; | 4408 | WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING); |
4409 | |||
4241 | ap->ops->tf_load(ap, &qc->tf); /* load tf registers */ | 4410 | ap->ops->tf_load(ap, &qc->tf); /* load tf registers */ |
4242 | ap->ops->bmdma_setup(qc); /* set up bmdma */ | 4411 | ap->ops->bmdma_setup(qc); /* set up bmdma */ |
4243 | ata_port_queue_task(ap, atapi_packet_task, qc, 0); | 4412 | ap->hsm_task_state = HSM_ST_FIRST; |
4413 | |||
4414 | /* send cdb by polling if no cdb interrupt */ | ||
4415 | if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) | ||
4416 | ata_port_queue_task(ap, ata_pio_task, qc, 0); | ||
4244 | break; | 4417 | break; |
4245 | 4418 | ||
4246 | default: | 4419 | default: |
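At issue time the rewritten ata_qc_issue_prot() decides who drives the state machine next: the port workqueue (ata_pio_task) whenever the command is polled or the hardware cannot interrupt for the upcoming phase, the interrupt handler otherwise. The same decision compressed into a hypothetical helper (needs_pio_task() is not a real libata function; it only mirrors the switch above):

    static int needs_pio_task(const struct ata_queued_cmd *qc)
    {
            int polling  = qc->tf.flags & ATA_TFLAG_POLLING;
            int cdb_intr = qc->dev->flags & ATA_DFLAG_CDB_INTR;

            switch (qc->tf.protocol) {
            case ATA_PROT_NODATA:
                    return polling;
            case ATA_PROT_DMA:
                    return 0;               /* always interrupt driven */
            case ATA_PROT_PIO:
                    if (qc->tf.flags & ATA_TFLAG_WRITE)
                            return 1;       /* first block always sent from the task */
                    return polling;
            case ATA_PROT_ATAPI:
            case ATA_PROT_ATAPI_NODATA:
                    return polling || !cdb_intr;    /* CDB written by polling */
            case ATA_PROT_ATAPI_DMA:
                    return !cdb_intr;       /* polled ATAPI DMA was rejected earlier */
            default:
                    return 0;
            }
    }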
@@ -4270,52 +4443,66 @@ unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc) | |||
4270 | inline unsigned int ata_host_intr (struct ata_port *ap, | 4443 | inline unsigned int ata_host_intr (struct ata_port *ap, |
4271 | struct ata_queued_cmd *qc) | 4444 | struct ata_queued_cmd *qc) |
4272 | { | 4445 | { |
4273 | u8 status, host_stat; | 4446 | u8 status, host_stat = 0; |
4274 | |||
4275 | switch (qc->tf.protocol) { | ||
4276 | 4447 | ||
4277 | case ATA_PROT_DMA: | 4448 | VPRINTK("ata%u: protocol %d task_state %d\n", |
4278 | case ATA_PROT_ATAPI_DMA: | 4449 | ap->id, qc->tf.protocol, ap->hsm_task_state); |
4279 | case ATA_PROT_ATAPI: | ||
4280 | /* check status of DMA engine */ | ||
4281 | host_stat = ap->ops->bmdma_status(ap); | ||
4282 | VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat); | ||
4283 | |||
4284 | /* if it's not our irq... */ | ||
4285 | if (!(host_stat & ATA_DMA_INTR)) | ||
4286 | goto idle_irq; | ||
4287 | |||
4288 | /* before we do anything else, clear DMA-Start bit */ | ||
4289 | ap->ops->bmdma_stop(qc); | ||
4290 | 4450 | ||
4291 | /* fall through */ | 4451 | /* Check whether we are expecting interrupt in this state */ |
4292 | 4452 | switch (ap->hsm_task_state) { | |
4293 | case ATA_PROT_ATAPI_NODATA: | 4453 | case HSM_ST_FIRST: |
4294 | case ATA_PROT_NODATA: | 4454 | /* Some pre-ATAPI-4 devices assert INTRQ |
4295 | /* check altstatus */ | 4455 | * at this state when ready to receive CDB. |
4296 | status = ata_altstatus(ap); | 4456 | */ |
4297 | if (status & ATA_BUSY) | ||
4298 | goto idle_irq; | ||
4299 | 4457 | ||
4300 | /* check main status, clearing INTRQ */ | 4458 | /* Check the ATA_DFLAG_CDB_INTR flag is enough here. |
4301 | status = ata_chk_status(ap); | 4459 | * The flag was turned on only for atapi devices. |
4302 | if (unlikely(status & ATA_BUSY)) | 4460 | * No need to check is_atapi_taskfile(&qc->tf) again. |
4461 | */ | ||
4462 | if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) | ||
4303 | goto idle_irq; | 4463 | goto idle_irq; |
4304 | DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n", | ||
4305 | ap->id, qc->tf.protocol, status); | ||
4306 | |||
4307 | /* ack bmdma irq events */ | ||
4308 | ap->ops->irq_clear(ap); | ||
4309 | |||
4310 | /* complete taskfile transaction */ | ||
4311 | qc->err_mask |= ac_err_mask(status); | ||
4312 | ata_qc_complete(qc); | ||
4313 | break; | 4464 | break; |
4314 | 4465 | case HSM_ST_LAST: | |
4466 | if (qc->tf.protocol == ATA_PROT_DMA || | ||
4467 | qc->tf.protocol == ATA_PROT_ATAPI_DMA) { | ||
4468 | /* check status of DMA engine */ | ||
4469 | host_stat = ap->ops->bmdma_status(ap); | ||
4470 | VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat); | ||
4471 | |||
4472 | /* if it's not our irq... */ | ||
4473 | if (!(host_stat & ATA_DMA_INTR)) | ||
4474 | goto idle_irq; | ||
4475 | |||
4476 | /* before we do anything else, clear DMA-Start bit */ | ||
4477 | ap->ops->bmdma_stop(qc); | ||
4478 | |||
4479 | if (unlikely(host_stat & ATA_DMA_ERR)) { | ||
4480 | /* error when transfering data to/from memory */ | ||
4481 | qc->err_mask |= AC_ERR_HOST_BUS; | ||
4482 | ap->hsm_task_state = HSM_ST_ERR; | ||
4483 | } | ||
4484 | } | ||
4485 | break; | ||
4486 | case HSM_ST: | ||
4487 | break; | ||
4315 | default: | 4488 | default: |
4316 | goto idle_irq; | 4489 | goto idle_irq; |
4317 | } | 4490 | } |
4318 | 4491 | ||
4492 | /* check altstatus */ | ||
4493 | status = ata_altstatus(ap); | ||
4494 | if (status & ATA_BUSY) | ||
4495 | goto idle_irq; | ||
4496 | |||
4497 | /* check main status, clearing INTRQ */ | ||
4498 | status = ata_chk_status(ap); | ||
4499 | if (unlikely(status & ATA_BUSY)) | ||
4500 | goto idle_irq; | ||
4501 | |||
4502 | /* ack bmdma irq events */ | ||
4503 | ap->ops->irq_clear(ap); | ||
4504 | |||
4505 | ata_hsm_move(ap, qc, status, 0); | ||
4319 | return 1; /* irq handled */ | 4506 | return 1; /* irq handled */ |
4320 | 4507 | ||
4321 | idle_irq: | 4508 | idle_irq: |
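ata_host_intr() now consults hsm_task_state before touching the status register (which would clear INTRQ), so stray interrupts can be rejected per state. Simplified to the acceptance test alone (a sketch; the real code also latches DMA errors and stops the bmdma engine):

    static int irq_plausibly_ours(struct ata_port *ap, struct ata_queued_cmd *qc)
    {
            switch (ap->hsm_task_state) {
            case HSM_ST_FIRST:
                    /* only ATAPI devices with CDB_INTR interrupt here */
                    return qc->dev->flags & ATA_DFLAG_CDB_INTR;
            case HSM_ST_LAST:
                    if (qc->tf.protocol == ATA_PROT_DMA ||
                        qc->tf.protocol == ATA_PROT_ATAPI_DMA)
                            /* the bmdma engine must have signalled completion */
                            return ap->ops->bmdma_status(ap) & ATA_DMA_INTR;
                    return 1;
            case HSM_ST:
                    return 1;               /* interrupt-driven data phase */
            default:
                    return 0;               /* idle, polling or error state */
            }
    }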
@@ -4362,11 +4549,11 @@ irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs) | |||
4362 | 4549 | ||
4363 | ap = host_set->ports[i]; | 4550 | ap = host_set->ports[i]; |
4364 | if (ap && | 4551 | if (ap && |
4365 | !(ap->flags & (ATA_FLAG_DISABLED | ATA_FLAG_NOINTR))) { | 4552 | !(ap->flags & ATA_FLAG_DISABLED)) { |
4366 | struct ata_queued_cmd *qc; | 4553 | struct ata_queued_cmd *qc; |
4367 | 4554 | ||
4368 | qc = ata_qc_from_tag(ap, ap->active_tag); | 4555 | qc = ata_qc_from_tag(ap, ap->active_tag); |
4369 | if (qc && (!(qc->tf.ctl & ATA_NIEN)) && | 4556 | if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) && |
4370 | (qc->flags & ATA_QCFLAG_ACTIVE)) | 4557 | (qc->flags & ATA_QCFLAG_ACTIVE)) |
4371 | handled |= ata_host_intr(ap, qc); | 4558 | handled |= ata_host_intr(ap, qc); |
4372 | } | 4559 | } |
diff --git a/drivers/scsi/libata-eh.c b/drivers/scsi/libata-eh.c
index e73f5612aea8..9a8eea11b0be 100644
--- a/drivers/scsi/libata-eh.c
+++ b/drivers/scsi/libata-eh.c
@@ -172,7 +172,7 @@ static void ata_qc_timeout(struct ata_queued_cmd *qc) | |||
172 | ap->id, qc->tf.command, drv_stat, host_stat); | 172 | ap->id, qc->tf.command, drv_stat, host_stat); |
173 | 173 | ||
174 | /* complete taskfile transaction */ | 174 | /* complete taskfile transaction */ |
175 | qc->err_mask |= ac_err_mask(drv_stat); | 175 | qc->err_mask |= AC_ERR_TIMEOUT; |
176 | break; | 176 | break; |
177 | } | 177 | } |
178 | 178 | ||
diff --git a/drivers/scsi/pdc_adma.c b/drivers/scsi/pdc_adma.c
index 1364b1da9e2a..1fca6f1afa99 100644
--- a/drivers/scsi/pdc_adma.c
+++ b/drivers/scsi/pdc_adma.c
@@ -456,13 +456,13 @@ static inline unsigned int adma_intr_pkt(struct ata_host_set *host_set) | |||
456 | continue; | 456 | continue; |
457 | handled = 1; | 457 | handled = 1; |
458 | adma_enter_reg_mode(ap); | 458 | adma_enter_reg_mode(ap); |
459 | if (ap->flags & (ATA_FLAG_DISABLED | ATA_FLAG_NOINTR)) | 459 | if (ap->flags & ATA_FLAG_DISABLED) |
460 | continue; | 460 | continue; |
461 | pp = ap->private_data; | 461 | pp = ap->private_data; |
462 | if (!pp || pp->state != adma_state_pkt) | 462 | if (!pp || pp->state != adma_state_pkt) |
463 | continue; | 463 | continue; |
464 | qc = ata_qc_from_tag(ap, ap->active_tag); | 464 | qc = ata_qc_from_tag(ap, ap->active_tag); |
465 | if (qc && (!(qc->tf.ctl & ATA_NIEN))) { | 465 | if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) { |
466 | if ((status & (aPERR | aPSD | aUIRQ))) | 466 | if ((status & (aPERR | aPSD | aUIRQ))) |
467 | qc->err_mask |= AC_ERR_OTHER; | 467 | qc->err_mask |= AC_ERR_OTHER; |
468 | else if (pp->pkt[0] != cDONE) | 468 | else if (pp->pkt[0] != cDONE) |
@@ -481,13 +481,13 @@ static inline unsigned int adma_intr_mmio(struct ata_host_set *host_set) | |||
481 | for (port_no = 0; port_no < host_set->n_ports; ++port_no) { | 481 | for (port_no = 0; port_no < host_set->n_ports; ++port_no) { |
482 | struct ata_port *ap; | 482 | struct ata_port *ap; |
483 | ap = host_set->ports[port_no]; | 483 | ap = host_set->ports[port_no]; |
484 | if (ap && (!(ap->flags & (ATA_FLAG_DISABLED | ATA_FLAG_NOINTR)))) { | 484 | if (ap && (!(ap->flags & ATA_FLAG_DISABLED))) { |
485 | struct ata_queued_cmd *qc; | 485 | struct ata_queued_cmd *qc; |
486 | struct adma_port_priv *pp = ap->private_data; | 486 | struct adma_port_priv *pp = ap->private_data; |
487 | if (!pp || pp->state != adma_state_mmio) | 487 | if (!pp || pp->state != adma_state_mmio) |
488 | continue; | 488 | continue; |
489 | qc = ata_qc_from_tag(ap, ap->active_tag); | 489 | qc = ata_qc_from_tag(ap, ap->active_tag); |
490 | if (qc && (!(qc->tf.ctl & ATA_NIEN))) { | 490 | if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) { |
491 | 491 | ||
492 | /* check main status, clearing INTRQ */ | 492 | /* check main status, clearing INTRQ */ |
493 | u8 status = ata_check_status(ap); | 493 | u8 status = ata_check_status(ap); |
diff --git a/drivers/scsi/sata_mv.c b/drivers/scsi/sata_mv.c
index e9152f850003..fd9f2173f062 100644
--- a/drivers/scsi/sata_mv.c
+++ b/drivers/scsi/sata_mv.c
@@ -87,7 +87,7 @@ enum { | |||
87 | MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */ | 87 | MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */ |
88 | MV_COMMON_FLAGS = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | | 88 | MV_COMMON_FLAGS = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | |
89 | ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO | | 89 | ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO | |
90 | ATA_FLAG_NO_ATAPI), | 90 | ATA_FLAG_PIO_POLLING), |
91 | MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE, | 91 | MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE, |
92 | 92 | ||
93 | CRQB_FLAG_READ = (1 << 0), | 93 | CRQB_FLAG_READ = (1 << 0), |
@@ -1397,7 +1397,7 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant, | |||
1397 | } | 1397 | } |
1398 | } | 1398 | } |
1399 | 1399 | ||
1400 | if (ap->flags & (ATA_FLAG_DISABLED | ATA_FLAG_NOINTR)) | 1400 | if (ap && (ap->flags & ATA_FLAG_DISABLED)) |
1401 | continue; | 1401 | continue; |
1402 | 1402 | ||
1403 | err_mask = ac_err_mask(ata_status); | 1403 | err_mask = ac_err_mask(ata_status); |
@@ -1418,7 +1418,7 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant, | |||
1418 | VPRINTK("port %u IRQ found for qc, " | 1418 | VPRINTK("port %u IRQ found for qc, " |
1419 | "ata_status 0x%x\n", port,ata_status); | 1419 | "ata_status 0x%x\n", port,ata_status); |
1420 | /* mark qc status appropriately */ | 1420 | /* mark qc status appropriately */ |
1421 | if (!(qc->tf.ctl & ATA_NIEN)) { | 1421 | if (!(qc->tf.flags & ATA_TFLAG_POLLING)) { |
1422 | qc->err_mask |= err_mask; | 1422 | qc->err_mask |= err_mask; |
1423 | ata_qc_complete(qc); | 1423 | ata_qc_complete(qc); |
1424 | } | 1424 | } |
diff --git a/drivers/scsi/sata_nv.c b/drivers/scsi/sata_nv.c
index c92fbd0989bd..3a0004c3c8b5 100644
--- a/drivers/scsi/sata_nv.c
+++ b/drivers/scsi/sata_nv.c
@@ -280,11 +280,11 @@ static irqreturn_t nv_interrupt (int irq, void *dev_instance, | |||
280 | 280 | ||
281 | ap = host_set->ports[i]; | 281 | ap = host_set->ports[i]; |
282 | if (ap && | 282 | if (ap && |
283 | !(ap->flags & (ATA_FLAG_DISABLED | ATA_FLAG_NOINTR))) { | 283 | !(ap->flags & ATA_FLAG_DISABLED)) { |
284 | struct ata_queued_cmd *qc; | 284 | struct ata_queued_cmd *qc; |
285 | 285 | ||
286 | qc = ata_qc_from_tag(ap, ap->active_tag); | 286 | qc = ata_qc_from_tag(ap, ap->active_tag); |
287 | if (qc && (!(qc->tf.ctl & ATA_NIEN))) | 287 | if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) |
288 | handled += ata_host_intr(ap, qc); | 288 | handled += ata_host_intr(ap, qc); |
289 | else | 289 | else |
290 | // No request pending? Clear interrupt status | 290 | // No request pending? Clear interrupt status |
diff --git a/drivers/scsi/sata_promise.c b/drivers/scsi/sata_promise.c
index 000844a4129f..03c647cbfb59 100644
--- a/drivers/scsi/sata_promise.c
+++ b/drivers/scsi/sata_promise.c
@@ -76,7 +76,8 @@ enum { | |||
76 | PDC_RESET = (1 << 11), /* HDMA reset */ | 76 | PDC_RESET = (1 << 11), /* HDMA reset */ |
77 | 77 | ||
78 | PDC_COMMON_FLAGS = ATA_FLAG_NO_LEGACY | ATA_FLAG_SRST | | 78 | PDC_COMMON_FLAGS = ATA_FLAG_NO_LEGACY | ATA_FLAG_SRST | |
79 | ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI, | 79 | ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI | |
80 | ATA_FLAG_PIO_POLLING, | ||
80 | }; | 81 | }; |
81 | 82 | ||
82 | 83 | ||
@@ -534,11 +535,11 @@ static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *r | |||
534 | ap = host_set->ports[i]; | 535 | ap = host_set->ports[i]; |
535 | tmp = mask & (1 << (i + 1)); | 536 | tmp = mask & (1 << (i + 1)); |
536 | if (tmp && ap && | 537 | if (tmp && ap && |
537 | !(ap->flags & (ATA_FLAG_DISABLED | ATA_FLAG_NOINTR))) { | 538 | !(ap->flags & ATA_FLAG_DISABLED)) { |
538 | struct ata_queued_cmd *qc; | 539 | struct ata_queued_cmd *qc; |
539 | 540 | ||
540 | qc = ata_qc_from_tag(ap, ap->active_tag); | 541 | qc = ata_qc_from_tag(ap, ap->active_tag); |
541 | if (qc && (!(qc->tf.ctl & ATA_NIEN))) | 542 | if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) |
542 | handled += pdc_host_intr(ap, qc); | 543 | handled += pdc_host_intr(ap, qc); |
543 | } | 544 | } |
544 | } | 545 | } |
diff --git a/drivers/scsi/sata_qstor.c b/drivers/scsi/sata_qstor.c
index 0b5446ada5ca..1b7a9316810c 100644
--- a/drivers/scsi/sata_qstor.c
+++ b/drivers/scsi/sata_qstor.c
@@ -176,7 +176,7 @@ static const struct ata_port_info qs_port_info[] = { | |||
176 | .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | | 176 | .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | |
177 | ATA_FLAG_SATA_RESET | | 177 | ATA_FLAG_SATA_RESET | |
178 | //FIXME ATA_FLAG_SRST | | 178 | //FIXME ATA_FLAG_SRST | |
179 | ATA_FLAG_MMIO, | 179 | ATA_FLAG_MMIO | ATA_FLAG_PIO_POLLING, |
180 | .pio_mask = 0x10, /* pio4 */ | 180 | .pio_mask = 0x10, /* pio4 */ |
181 | .udma_mask = 0x7f, /* udma0-6 */ | 181 | .udma_mask = 0x7f, /* udma0-6 */ |
182 | .port_ops = &qs_ata_ops, | 182 | .port_ops = &qs_ata_ops, |
@@ -395,14 +395,13 @@ static inline unsigned int qs_intr_pkt(struct ata_host_set *host_set) | |||
395 | DPRINTK("SFF=%08x%08x: sCHAN=%u sHST=%d sDST=%02x\n", | 395 | DPRINTK("SFF=%08x%08x: sCHAN=%u sHST=%d sDST=%02x\n", |
396 | sff1, sff0, port_no, sHST, sDST); | 396 | sff1, sff0, port_no, sHST, sDST); |
397 | handled = 1; | 397 | handled = 1; |
398 | if (ap && !(ap->flags & | 398 | if (ap && !(ap->flags & ATA_FLAG_DISABLED)) { |
399 | (ATA_FLAG_DISABLED|ATA_FLAG_NOINTR))) { | ||
400 | struct ata_queued_cmd *qc; | 399 | struct ata_queued_cmd *qc; |
401 | struct qs_port_priv *pp = ap->private_data; | 400 | struct qs_port_priv *pp = ap->private_data; |
402 | if (!pp || pp->state != qs_state_pkt) | 401 | if (!pp || pp->state != qs_state_pkt) |
403 | continue; | 402 | continue; |
404 | qc = ata_qc_from_tag(ap, ap->active_tag); | 403 | qc = ata_qc_from_tag(ap, ap->active_tag); |
405 | if (qc && (!(qc->tf.ctl & ATA_NIEN))) { | 404 | if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) { |
406 | switch (sHST) { | 405 | switch (sHST) { |
407 | case 0: /* successful CPB */ | 406 | case 0: /* successful CPB */ |
408 | case 3: /* device error */ | 407 | case 3: /* device error */ |
@@ -429,13 +428,13 @@ static inline unsigned int qs_intr_mmio(struct ata_host_set *host_set) | |||
429 | struct ata_port *ap; | 428 | struct ata_port *ap; |
430 | ap = host_set->ports[port_no]; | 429 | ap = host_set->ports[port_no]; |
431 | if (ap && | 430 | if (ap && |
432 | !(ap->flags & (ATA_FLAG_DISABLED | ATA_FLAG_NOINTR))) { | 431 | !(ap->flags & ATA_FLAG_DISABLED)) { |
433 | struct ata_queued_cmd *qc; | 432 | struct ata_queued_cmd *qc; |
434 | struct qs_port_priv *pp = ap->private_data; | 433 | struct qs_port_priv *pp = ap->private_data; |
435 | if (!pp || pp->state != qs_state_mmio) | 434 | if (!pp || pp->state != qs_state_mmio) |
436 | continue; | 435 | continue; |
437 | qc = ata_qc_from_tag(ap, ap->active_tag); | 436 | qc = ata_qc_from_tag(ap, ap->active_tag); |
438 | if (qc && (!(qc->tf.ctl & ATA_NIEN))) { | 437 | if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) { |
439 | 438 | ||
440 | /* check main status, clearing INTRQ */ | 439 | /* check main status, clearing INTRQ */ |
441 | u8 status = ata_check_status(ap); | 440 | u8 status = ata_check_status(ap); |
diff --git a/drivers/scsi/sata_sx4.c b/drivers/scsi/sata_sx4.c
index 801ac3787d5d..13dba63f6167 100644
--- a/drivers/scsi/sata_sx4.c
+++ b/drivers/scsi/sata_sx4.c
@@ -219,7 +219,7 @@ static const struct ata_port_info pdc_port_info[] = { | |||
219 | .sht = &pdc_sata_sht, | 219 | .sht = &pdc_sata_sht, |
220 | .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | | 220 | .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | |
221 | ATA_FLAG_SRST | ATA_FLAG_MMIO | | 221 | ATA_FLAG_SRST | ATA_FLAG_MMIO | |
222 | ATA_FLAG_NO_ATAPI, | 222 | ATA_FLAG_PIO_POLLING, |
223 | .pio_mask = 0x1f, /* pio0-4 */ | 223 | .pio_mask = 0x1f, /* pio0-4 */ |
224 | .mwdma_mask = 0x07, /* mwdma0-2 */ | 224 | .mwdma_mask = 0x07, /* mwdma0-2 */ |
225 | .udma_mask = 0x7f, /* udma0-6 ; FIXME */ | 225 | .udma_mask = 0x7f, /* udma0-6 ; FIXME */ |
@@ -834,11 +834,11 @@ static irqreturn_t pdc20621_interrupt (int irq, void *dev_instance, struct pt_re | |||
834 | tmp = mask & (1 << i); | 834 | tmp = mask & (1 << i); |
835 | VPRINTK("seq %u, port_no %u, ap %p, tmp %x\n", i, port_no, ap, tmp); | 835 | VPRINTK("seq %u, port_no %u, ap %p, tmp %x\n", i, port_no, ap, tmp); |
836 | if (tmp && ap && | 836 | if (tmp && ap && |
837 | !(ap->flags & (ATA_FLAG_DISABLED | ATA_FLAG_NOINTR))) { | 837 | !(ap->flags & ATA_FLAG_DISABLED)) { |
838 | struct ata_queued_cmd *qc; | 838 | struct ata_queued_cmd *qc; |
839 | 839 | ||
840 | qc = ata_qc_from_tag(ap, ap->active_tag); | 840 | qc = ata_qc_from_tag(ap, ap->active_tag); |
841 | if (qc && (!(qc->tf.ctl & ATA_NIEN))) | 841 | if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) |
842 | handled += pdc20621_host_intr(ap, qc, (i > 4), | 842 | handled += pdc20621_host_intr(ap, qc, (i > 4), |
843 | mmio_base); | 843 | mmio_base); |
844 | } | 844 | } |
diff --git a/drivers/scsi/sata_vsc.c b/drivers/scsi/sata_vsc.c
index cecc1f76256b..b7d6a31628c2 100644
--- a/drivers/scsi/sata_vsc.c
+++ b/drivers/scsi/sata_vsc.c
@@ -221,14 +221,21 @@ static irqreturn_t vsc_sata_interrupt (int irq, void *dev_instance, | |||
221 | 221 | ||
222 | ap = host_set->ports[i]; | 222 | ap = host_set->ports[i]; |
223 | 223 | ||
224 | if (ap && !(ap->flags & | 224 | if (is_vsc_sata_int_err(i, int_status)) { |
225 | (ATA_FLAG_DISABLED|ATA_FLAG_NOINTR))) { | 225 | u32 err_status; |
226 | printk(KERN_DEBUG "%s: ignoring interrupt(s)\n", __FUNCTION__); | ||
227 | err_status = ap ? vsc_sata_scr_read(ap, SCR_ERROR) : 0; | ||
228 | vsc_sata_scr_write(ap, SCR_ERROR, err_status); | ||
229 | handled++; | ||
230 | } | ||
231 | |||
232 | if (ap && !(ap->flags & ATA_FLAG_DISABLED)) { | ||
226 | struct ata_queued_cmd *qc; | 233 | struct ata_queued_cmd *qc; |
227 | 234 | ||
228 | qc = ata_qc_from_tag(ap, ap->active_tag); | 235 | qc = ata_qc_from_tag(ap, ap->active_tag); |
229 | if (qc && (!(qc->tf.ctl & ATA_NIEN))) { | 236 | if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) |
230 | handled += ata_host_intr(ap, qc); | 237 | handled += ata_host_intr(ap, qc); |
231 | } else if (is_vsc_sata_int_err(i, int_status)) { | 238 | else if (is_vsc_sata_int_err(i, int_status)) { |
232 | /* | 239 | /* |
233 | * On some chips (i.e. Intel 31244), an error | 240 | * On some chips (i.e. Intel 31244), an error |
234 | * interrupt will sneak in at initialization | 241 | * interrupt will sneak in at initialization |
diff --git a/include/linux/ata.h b/include/linux/ata.h
index 312a2c0c64e6..206d859083ea 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -199,6 +199,7 @@ enum { | |||
199 | ATA_TFLAG_WRITE = (1 << 3), /* data dir: host->dev==1 (write) */ | 199 | ATA_TFLAG_WRITE = (1 << 3), /* data dir: host->dev==1 (write) */ |
200 | ATA_TFLAG_LBA = (1 << 4), /* enable LBA */ | 200 | ATA_TFLAG_LBA = (1 << 4), /* enable LBA */ |
201 | ATA_TFLAG_FUA = (1 << 5), /* enable FUA */ | 201 | ATA_TFLAG_FUA = (1 << 5), /* enable FUA */ |
202 | ATA_TFLAG_POLLING = (1 << 6), /* set nIEN to 1 and use polling */ | ||
202 | }; | 203 | }; |
203 | 204 | ||
204 | enum ata_tf_protocols { | 205 | enum ata_tf_protocols { |
@@ -272,6 +273,8 @@ struct ata_taskfile { | |||
272 | ((u64) (id)[(n) + 1] << 16) | \ | 273 | ((u64) (id)[(n) + 1] << 16) | \ |
273 | ((u64) (id)[(n) + 0]) ) | 274 | ((u64) (id)[(n) + 0]) ) |
274 | 275 | ||
276 | #define ata_id_cdb_intr(id) (((id)[0] & 0x60) == 0x20) | ||
277 | |||
275 | static inline unsigned int ata_id_major_version(const u16 *id) | 278 | static inline unsigned int ata_id_major_version(const u16 *id) |
276 | { | 279 | { |
277 | unsigned int mver; | 280 | unsigned int mver; |
@@ -311,6 +314,15 @@ static inline int is_atapi_taskfile(const struct ata_taskfile *tf) | |||
311 | (tf->protocol == ATA_PROT_ATAPI_DMA); | 314 | (tf->protocol == ATA_PROT_ATAPI_DMA); |
312 | } | 315 | } |
313 | 316 | ||
317 | static inline int is_multi_taskfile(struct ata_taskfile *tf) | ||
318 | { | ||
319 | return (tf->command == ATA_CMD_READ_MULTI) || | ||
320 | (tf->command == ATA_CMD_WRITE_MULTI) || | ||
321 | (tf->command == ATA_CMD_READ_MULTI_EXT) || | ||
322 | (tf->command == ATA_CMD_WRITE_MULTI_EXT) || | ||
323 | (tf->command == ATA_CMD_WRITE_MULTI_FUA_EXT); | ||
324 | } | ||
325 | |||
314 | static inline int ata_ok(u8 status) | 326 | static inline int ata_ok(u8 status) |
315 | { | 327 | { |
316 | return ((status & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | ATA_ERR)) | 328 | return ((status & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | ATA_ERR)) |
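For reference, ata_id_cdb_intr() tests bits 6:5 of IDENTIFY PACKET DEVICE word 0, where the value 01b means the device asserts INTRQ when it is ready to accept the CDB, and is_multi_taskfile() matches the READ/WRITE MULTIPLE opcodes so the PIO path knows to move dev->multi_count sectors per DRQ block. A tiny exercise of the two helpers (the identify word is an invented example value):

    static void example_checks(void)
    {
            u16 id[ATA_ID_WORDS] = { 0x85a0 };      /* word 0, bits 6:5 = 01b */
            struct ata_taskfile tf = { .command = ATA_CMD_READ_MULTI };

            if (ata_id_cdb_intr(id))
                    printk(KERN_DEBUG "INTRQ raised before CDB transfer\n");
            if (is_multi_taskfile(&tf))
                    printk(KERN_DEBUG "multi_count sectors per DRQ block\n");
    }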
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 03231cb6b406..44d074ff7789 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -121,6 +121,7 @@ enum { | |||
121 | /* struct ata_device stuff */ | 121 | /* struct ata_device stuff */ |
122 | ATA_DFLAG_LBA = (1 << 0), /* device supports LBA */ | 122 | ATA_DFLAG_LBA = (1 << 0), /* device supports LBA */ |
123 | ATA_DFLAG_LBA48 = (1 << 1), /* device supports LBA48 */ | 123 | ATA_DFLAG_LBA48 = (1 << 1), /* device supports LBA48 */ |
124 | ATA_DFLAG_CDB_INTR = (1 << 2), /* device asserts INTRQ when ready for CDB */ | ||
124 | ATA_DFLAG_CFG_MASK = (1 << 8) - 1, | 125 | ATA_DFLAG_CFG_MASK = (1 << 8) - 1, |
125 | 126 | ||
126 | ATA_DFLAG_PIO = (1 << 8), /* device currently in PIO mode */ | 127 | ATA_DFLAG_PIO = (1 << 8), /* device currently in PIO mode */ |
@@ -144,9 +145,9 @@ enum { | |||
144 | ATA_FLAG_PIO_DMA = (1 << 7), /* PIO cmds via DMA */ | 145 | ATA_FLAG_PIO_DMA = (1 << 7), /* PIO cmds via DMA */ |
145 | ATA_FLAG_PIO_LBA48 = (1 << 8), /* Host DMA engine is LBA28 only */ | 146 | ATA_FLAG_PIO_LBA48 = (1 << 8), /* Host DMA engine is LBA28 only */ |
146 | ATA_FLAG_IRQ_MASK = (1 << 9), /* Mask IRQ in PIO xfers */ | 147 | ATA_FLAG_IRQ_MASK = (1 << 9), /* Mask IRQ in PIO xfers */ |
148 | ATA_FLAG_PIO_POLLING = (1 << 10), /* use polling PIO if LLD | ||
149 | * doesn't handle PIO interrupts */ | ||
147 | 150 | ||
148 | ATA_FLAG_NOINTR = (1 << 16), /* FIXME: Remove this once | ||
149 | * proper HSM is in place. */ | ||
150 | ATA_FLAG_DEBUGMSG = (1 << 17), | 151 | ATA_FLAG_DEBUGMSG = (1 << 17), |
151 | ATA_FLAG_FLUSH_PORT_TASK = (1 << 18), /* flush port task */ | 152 | ATA_FLAG_FLUSH_PORT_TASK = (1 << 18), /* flush port task */ |
152 | 153 | ||
@@ -167,11 +168,8 @@ enum { | |||
167 | ATA_HOST_SIMPLEX = (1 << 0), /* Host is simplex, one DMA channel per host_set only */ | 168 | ATA_HOST_SIMPLEX = (1 << 0), /* Host is simplex, one DMA channel per host_set only */ |
168 | 169 | ||
169 | /* various lengths of time */ | 170 | /* various lengths of time */ |
170 | ATA_TMOUT_PIO = 30 * HZ, | ||
171 | ATA_TMOUT_BOOT = 30 * HZ, /* heuristic */ | 171 | ATA_TMOUT_BOOT = 30 * HZ, /* heuristic */ |
172 | ATA_TMOUT_BOOT_QUICK = 7 * HZ, /* heuristic */ | 172 | ATA_TMOUT_BOOT_QUICK = 7 * HZ, /* heuristic */ |
173 | ATA_TMOUT_CDB = 30 * HZ, | ||
174 | ATA_TMOUT_CDB_QUICK = 5 * HZ, | ||
175 | ATA_TMOUT_INTERNAL = 30 * HZ, | 173 | ATA_TMOUT_INTERNAL = 30 * HZ, |
176 | ATA_TMOUT_INTERNAL_QUICK = 5 * HZ, | 174 | ATA_TMOUT_INTERNAL_QUICK = 5 * HZ, |
177 | 175 | ||
@@ -220,14 +218,13 @@ enum { | |||
220 | }; | 218 | }; |
221 | 219 | ||
222 | enum hsm_task_states { | 220 | enum hsm_task_states { |
223 | HSM_ST_UNKNOWN, | 221 | HSM_ST_UNKNOWN, /* state unknown */ |
224 | HSM_ST_IDLE, | 222 | HSM_ST_IDLE, /* no command on going */ |
225 | HSM_ST_POLL, | 223 | HSM_ST, /* (waiting the device to) transfer data */ |
226 | HSM_ST_TMOUT, | 224 | HSM_ST_LAST, /* (waiting the device to) complete command */ |
227 | HSM_ST, | 225 | HSM_ST_ERR, /* error */ |
228 | HSM_ST_LAST, | 226 | HSM_ST_FIRST, /* (waiting the device to) |
229 | HSM_ST_LAST_POLL, | 227 | write CDB or first data block */ |
230 | HSM_ST_ERR, | ||
231 | }; | 228 | }; |
232 | 229 | ||
233 | enum ata_completion_errors { | 230 | enum ata_completion_errors { |
@@ -418,7 +415,6 @@ struct ata_port { | |||
418 | struct work_struct port_task; | 415 | struct work_struct port_task; |
419 | 416 | ||
420 | unsigned int hsm_task_state; | 417 | unsigned int hsm_task_state; |
421 | unsigned long pio_task_timeout; | ||
422 | 418 | ||
423 | u32 msg_enable; | 419 | u32 msg_enable; |
424 | struct list_head eh_done_q; | 420 | struct list_head eh_done_q; |