Diffstat (limited to 'drivers')
-rw-r--r--  drivers/scsi/libata-core.c   803
-rw-r--r--  drivers/scsi/libata-eh.c       2
-rw-r--r--  drivers/scsi/pdc_adma.c        8
-rw-r--r--  drivers/scsi/sata_mv.c         6
-rw-r--r--  drivers/scsi/sata_nv.c         4
-rw-r--r--  drivers/scsi/sata_promise.c    7
-rw-r--r--  drivers/scsi/sata_qstor.c     11
-rw-r--r--  drivers/scsi/sata_sx4.c        6
-rw-r--r--  drivers/scsi/sata_vsc.c       15
9 files changed, 528 insertions, 334 deletions
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index 509178c3700c..5d38a6cc5736 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -1309,11 +1309,19 @@ static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
 				       dev->cylinders, dev->heads, dev->sectors);
 		}
 
+		if (dev->id[59] & 0x100) {
+			dev->multi_count = dev->id[59] & 0xff;
+			DPRINTK("ata%u: dev %u multi count %u\n",
+				ap->id, dev->devno, dev->multi_count);
+		}
+
 		dev->cdb_len = 16;
 	}
 
 	/* ATAPI-specific feature tests */
 	else if (dev->class == ATA_DEV_ATAPI) {
+		char *cdb_intr_string = "";
+
 		rc = atapi_cdb_len(id);
 		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
 			printk(KERN_WARNING "ata%u: unsupported CDB len\n", ap->id);
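The hunk above reads the current multiple-sector setting out of IDENTIFY DEVICE word 59: bit 8 flags the low byte as valid, and bits 0-7 give the number of sectors the device moves per DRQ data block. As a standalone illustration (a sketch, not part of the patch; `id` is the usual 256-word IDENTIFY buffer):

	#include <linux/types.h>

	/* Decode IDENTIFY word 59, the current multiple sector setting. */
	static unsigned int id_multi_count(const u16 *id)
	{
		if (id[59] & 0x100)		/* bit 8: low byte is valid */
			return id[59] & 0xff;	/* sectors per DRQ block */
		return 0;			/* MULTIPLE not configured */
	}

ata_pio_sectors(), added later in this patch, relies on this value being non-zero before it loops over a READ/WRITE MULTIPLE DRQ block.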
@@ -1322,10 +1330,16 @@ static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
 		}
 		dev->cdb_len = (unsigned int) rc;
 
+		if (ata_id_cdb_intr(dev->id)) {
+			dev->flags |= ATA_DFLAG_CDB_INTR;
+			cdb_intr_string = ", CDB intr";
+		}
+
 		/* print device info to dmesg */
 		if (print_info)
-			printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n",
-			       ap->id, dev->devno, ata_mode_string(xfer_mask));
+			printk(KERN_INFO "ata%u: dev %u ATAPI, max %s%s\n",
+			       ap->id, dev->devno, ata_mode_string(xfer_mask),
+			       cdb_intr_string);
 	}
 
 	ap->host->max_cmd_len = 0;
@@ -3163,6 +3177,15 @@ int ata_check_atapi_dma(struct ata_queued_cmd *qc)
 	if (ap->ops->check_atapi_dma)
 		rc = ap->ops->check_atapi_dma(qc);
 
+	/* We don't support polling DMA.
+	 * Use PIO if the LLDD handles only interrupts in
+	 * the HSM_ST_LAST state and the ATAPI device
+	 * generates CDB interrupts.
+	 */
+	if ((ap->flags & ATA_FLAG_PIO_POLLING) &&
+	    (qc->dev->flags & ATA_DFLAG_CDB_INTR))
+		rc = 1;
+
 	return rc;
 }
 
 /**
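ATA_FLAG_PIO_POLLING is the opt-in used here: a controller that can only take an interrupt for command completion (the HSM_ST_LAST state) sets it, and the core then falls back to PIO for CDB-interrupt ATAPI devices and routes PIO through the polling task. The driver hunks at the end of this patch add the flag in exactly this way; schematically (an illustrative port_info, not a real driver):

	/* Illustrative only: a LLDD that cannot service per-DRQ-block
	 * interrupts advertises ATA_FLAG_PIO_POLLING; compare the
	 * sata_promise and sata_qstor hunks below.
	 */
	static const struct ata_port_info example_port_info = {
		.host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
			      ATA_FLAG_MMIO | ATA_FLAG_PIO_POLLING,
		.pio_mask   = 0x1f,	/* pio0-4 */
		.udma_mask  = 0x7f,	/* udma0-6 */
	};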
@@ -3405,112 +3428,12 @@ void ata_poll_qc_complete(struct ata_queued_cmd *qc)
 	unsigned long flags;
 
 	spin_lock_irqsave(&ap->host_set->lock, flags);
-	ap->flags &= ~ATA_FLAG_NOINTR;
 	ata_irq_on(ap);
 	ata_qc_complete(qc);
 	spin_unlock_irqrestore(&ap->host_set->lock, flags);
 }
 
 /**
- * ata_pio_poll - poll using PIO, depending on current state
- * @qc: qc in progress
- *
- * LOCKING:
- * None.  (executing in kernel thread context)
- *
- * RETURNS:
- * timeout value to use
- */
-static unsigned long ata_pio_poll(struct ata_queued_cmd *qc)
-{
-	struct ata_port *ap = qc->ap;
-	u8 status;
-	unsigned int poll_state = HSM_ST_UNKNOWN;
-	unsigned int reg_state = HSM_ST_UNKNOWN;
-
-	switch (ap->hsm_task_state) {
-	case HSM_ST:
-	case HSM_ST_POLL:
-		poll_state = HSM_ST_POLL;
-		reg_state = HSM_ST;
-		break;
-	case HSM_ST_LAST:
-	case HSM_ST_LAST_POLL:
-		poll_state = HSM_ST_LAST_POLL;
-		reg_state = HSM_ST_LAST;
-		break;
-	default:
-		BUG();
-		break;
-	}
-
-	status = ata_chk_status(ap);
-	if (status & ATA_BUSY) {
-		if (time_after(jiffies, ap->pio_task_timeout)) {
-			qc->err_mask |= AC_ERR_TIMEOUT;
-			ap->hsm_task_state = HSM_ST_TMOUT;
-			return 0;
-		}
-		ap->hsm_task_state = poll_state;
-		return ATA_SHORT_PAUSE;
-	}
-
-	ap->hsm_task_state = reg_state;
-	return 0;
-}
-
-/**
- * ata_pio_complete - check if drive is busy or idle
- * @qc: qc to complete
- *
- * LOCKING:
- * None.  (executing in kernel thread context)
- *
- * RETURNS:
- * Non-zero if qc completed, zero otherwise.
- */
-static int ata_pio_complete(struct ata_queued_cmd *qc)
-{
-	struct ata_port *ap = qc->ap;
-	u8 drv_stat;
-
-	/*
-	 * This is purely heuristic.  This is a fast path.  Sometimes when
-	 * we enter, BSY will be cleared in a chk-status or two.  If not,
-	 * the drive is probably seeking or something.  Snooze for a couple
-	 * msecs, then chk-status again.  If still busy, fall back to
-	 * HSM_ST_POLL state.
-	 */
-	drv_stat = ata_busy_wait(ap, ATA_BUSY, 10);
-	if (drv_stat & ATA_BUSY) {
-		msleep(2);
-		drv_stat = ata_busy_wait(ap, ATA_BUSY, 10);
-		if (drv_stat & ATA_BUSY) {
-			ap->hsm_task_state = HSM_ST_LAST_POLL;
-			ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
-			return 0;
-		}
-	}
-
-	drv_stat = ata_wait_idle(ap);
-	if (!ata_ok(drv_stat)) {
-		qc->err_mask |= __ac_err_mask(drv_stat);
-		ap->hsm_task_state = HSM_ST_ERR;
-		return 0;
-	}
-
-	ap->hsm_task_state = HSM_ST_IDLE;
-
-	WARN_ON(qc->err_mask);
-	ata_poll_qc_complete(qc);
-
-	/* another command may start at this point */
-
-	return 1;
-}
-
-
-/**
  * swap_buf_le16 - swap halves of 16-bit words in place
  * @buf: Buffer to swap
  * @buf_words: Number of 16-bit words in buffer.
@@ -3678,7 +3601,23 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
 	page = nth_page(page, (offset >> PAGE_SHIFT));
 	offset %= PAGE_SIZE;
 
-	buf = kmap(page) + offset;
+	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
+
+	if (PageHighMem(page)) {
+		unsigned long flags;
+
+		local_irq_save(flags);
+		buf = kmap_atomic(page, KM_IRQ0);
+
+		/* do the actual data transfer */
+		ata_data_xfer(ap, buf + offset, ATA_SECT_SIZE, do_write);
+
+		kunmap_atomic(buf, KM_IRQ0);
+		local_irq_restore(flags);
+	} else {
+		buf = page_address(page);
+		ata_data_xfer(ap, buf + offset, ATA_SECT_SIZE, do_write);
+	}
 
 	qc->cursect++;
 	qc->cursg_ofs++;
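The rewritten transfer path replaces the sleeping kmap() with the atomic kmap slot KM_IRQ0, taken with local interrupts disabled because the interrupt handler can now perform the same transfer on the same CPU. The pattern in isolation (a sketch; pio_xfer_page() and dev_transfer() are invented names, dev_transfer() standing in for ata_data_xfer()):

	#include <linux/highmem.h>	/* kmap_atomic(), PageHighMem() */

	extern void dev_transfer(void *buf, unsigned int count, int do_write);

	static void pio_xfer_page(struct page *page, unsigned int offset,
				  unsigned int count, int do_write)
	{
		unsigned char *buf;

		if (PageHighMem(page)) {
			unsigned long flags;

			/* KM_IRQ0 is a per-CPU atomic kmap slot shared with
			 * the IRQ path, so IRQs stay off while we hold it.
			 */
			local_irq_save(flags);
			buf = kmap_atomic(page, KM_IRQ0);
			dev_transfer(buf + offset, count, do_write);
			kunmap_atomic(buf, KM_IRQ0);
			local_irq_restore(flags);
		} else {
			/* lowmem pages are permanently mapped */
			buf = page_address(page);
			dev_transfer(buf + offset, count, do_write);
		}
	}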
@@ -3687,14 +3626,68 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
 		qc->cursg++;
 		qc->cursg_ofs = 0;
 	}
-
-	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
-
-	/* do the actual data transfer */
-	do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
-	ata_data_xfer(ap, buf, ATA_SECT_SIZE, do_write);
-
-	kunmap(page);
+}
+
+/**
+ *	ata_pio_sectors - Transfer one or many 512-byte sectors.
+ *	@qc: Command on going
+ *
+ *	Transfer one or many ATA_SECT_SIZE of data from/to the
+ *	ATA device for the DRQ request.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+
+static void ata_pio_sectors(struct ata_queued_cmd *qc)
+{
+	if (is_multi_taskfile(&qc->tf)) {
+		/* READ/WRITE MULTIPLE */
+		unsigned int nsect;
+
+		WARN_ON(qc->dev->multi_count == 0);
+
+		nsect = min(qc->nsect - qc->cursect, qc->dev->multi_count);
+		while (nsect--)
+			ata_pio_sector(qc);
+	} else
+		ata_pio_sector(qc);
+}
+
+/**
+ *	atapi_send_cdb - Write CDB bytes to hardware
+ *	@ap: Port to which ATAPI device is attached.
+ *	@qc: Taskfile currently active
+ *
+ *	When device has indicated its readiness to accept
+ *	a CDB, this function is called.  Send the CDB.
+ *
+ *	LOCKING:
+ *	caller.
+ */
+
+static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
+{
+	/* send SCSI cdb */
+	DPRINTK("send cdb\n");
+	WARN_ON(qc->dev->cdb_len < 12);
+
+	ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);
+	ata_altstatus(ap); /* flush */
+
+	switch (qc->tf.protocol) {
+	case ATA_PROT_ATAPI:
+		ap->hsm_task_state = HSM_ST;
+		break;
+	case ATA_PROT_ATAPI_NODATA:
+		ap->hsm_task_state = HSM_ST_LAST;
+		break;
+	case ATA_PROT_ATAPI_DMA:
+		ap->hsm_task_state = HSM_ST_LAST;
+		/* initiate bmdma */
+		ap->ops->bmdma_start(qc);
+		break;
+	}
 }
 
 /**
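With READ/WRITE MULTIPLE the device raises one DRQ event per group of up to multi_count sectors, so the min() above clamps each burst to what is left of the request. A worked example of that arithmetic (plain userspace C for brevity, not kernel code):

	#include <stdio.h>

	int main(void)
	{
		unsigned int nsect = 20, cursect = 0, multi_count = 8;

		while (cursect < nsect) {
			unsigned int burst = nsect - cursect;

			if (burst > multi_count)
				burst = multi_count;	/* the min() clamp */
			printf("DRQ block: %u sectors\n", burst);
			cursect += burst;
		}
		return 0;	/* prints bursts of 8, 8 and 4 */
	}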
@@ -3760,7 +3753,23 @@ next_sg:
 	/* don't cross page boundaries */
 	count = min(count, (unsigned int)PAGE_SIZE - offset);
 
-	buf = kmap(page) + offset;
+	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
+
+	if (PageHighMem(page)) {
+		unsigned long flags;
+
+		local_irq_save(flags);
+		buf = kmap_atomic(page, KM_IRQ0);
+
+		/* do the actual data transfer */
+		ata_data_xfer(ap, buf + offset, count, do_write);
+
+		kunmap_atomic(buf, KM_IRQ0);
+		local_irq_restore(flags);
+	} else {
+		buf = page_address(page);
+		ata_data_xfer(ap, buf + offset, count, do_write);
+	}
 
 	bytes -= count;
 	qc->curbytes += count;
@@ -3771,13 +3780,6 @@ next_sg:
 		qc->cursg_ofs = 0;
 	}
 
-	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
-
-	/* do the actual data transfer */
-	ata_data_xfer(ap, buf, count, do_write);
-
-	kunmap(page);
-
 	if (bytes)
 		goto next_sg;
 }
@@ -3814,6 +3816,8 @@ static void atapi_pio_bytes(struct ata_queued_cmd *qc)
 	if (do_write != i_write)
 		goto err_out;
 
+	VPRINTK("ata%u: xfering %d bytes\n", ap->id, bytes);
+
 	__atapi_pio_bytes(qc, bytes);
 
 	return;
@@ -3826,187 +3830,294 @@ err_out:
 }
 
 /**
- * ata_pio_block - start PIO on a block
- * @qc: qc to transfer block for
- *
- * LOCKING:
- * None.  (executing in kernel thread context)
- */
-static void ata_pio_block(struct ata_queued_cmd *qc)
-{
-	struct ata_port *ap = qc->ap;
-	u8 status;
-
-	/*
-	 * This is purely heuristic.  This is a fast path.
-	 * Sometimes when we enter, BSY will be cleared in
-	 * a chk-status or two.  If not, the drive is probably seeking
-	 * or something.  Snooze for a couple msecs, then
-	 * chk-status again.  If still busy, fall back to
-	 * HSM_ST_POLL state.
-	 */
-	status = ata_busy_wait(ap, ATA_BUSY, 5);
-	if (status & ATA_BUSY) {
-		msleep(2);
-		status = ata_busy_wait(ap, ATA_BUSY, 10);
-		if (status & ATA_BUSY) {
-			ap->hsm_task_state = HSM_ST_POLL;
-			ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
-			return;
-		}
-	}
-
-	/* check error */
-	if (status & (ATA_ERR | ATA_DF)) {
-		qc->err_mask |= AC_ERR_DEV;
-		ap->hsm_task_state = HSM_ST_ERR;
-		return;
-	}
-
-	/* transfer data if any */
-	if (is_atapi_taskfile(&qc->tf)) {
-		/* DRQ=0 means no more data to transfer */
-		if ((status & ATA_DRQ) == 0) {
-			ap->hsm_task_state = HSM_ST_LAST;
-			return;
-		}
-
-		atapi_pio_bytes(qc);
-	} else {
-		/* handle BSY=0, DRQ=0 as error */
-		if ((status & ATA_DRQ) == 0) {
-			qc->err_mask |= AC_ERR_HSM;
-			ap->hsm_task_state = HSM_ST_ERR;
-			return;
-		}
-
-		ata_pio_sector(qc);
-	}
-}
-
-static void ata_pio_error(struct ata_queued_cmd *qc)
-{
-	struct ata_port *ap = qc->ap;
-
-	if (qc->tf.command != ATA_CMD_PACKET)
-		printk(KERN_WARNING "ata%u: dev %u PIO error\n",
-		       ap->id, qc->dev->devno);
-
-	/* make sure qc->err_mask is available to
-	 * know what's wrong and recover
-	 */
-	WARN_ON(qc->err_mask == 0);
-
-	ap->hsm_task_state = HSM_ST_IDLE;
-
-	ata_poll_qc_complete(qc);
-}
-
-static void ata_pio_task(void *_data)
-{
-	struct ata_queued_cmd *qc = _data;
-	struct ata_port *ap = qc->ap;
-	unsigned long timeout;
-	int qc_completed;
-
-fsm_start:
-	timeout = 0;
-	qc_completed = 0;
-
-	switch (ap->hsm_task_state) {
-	case HSM_ST_IDLE:
-		return;
-
-	case HSM_ST:
-		ata_pio_block(qc);
-		break;
-
-	case HSM_ST_LAST:
-		qc_completed = ata_pio_complete(qc);
-		break;
-
-	case HSM_ST_POLL:
-	case HSM_ST_LAST_POLL:
-		timeout = ata_pio_poll(qc);
-		break;
-
-	case HSM_ST_TMOUT:
-	case HSM_ST_ERR:
-		ata_pio_error(qc);
-		return;
-	}
-
-	if (timeout)
-		ata_port_queue_task(ap, ata_pio_task, qc, timeout);
-	else if (!qc_completed)
-		goto fsm_start;
-}
-
-/**
- * atapi_packet_task - Write CDB bytes to hardware
- * @_data: qc in progress
- *
- * When device has indicated its readiness to accept
- * a CDB, this function is called.  Send the CDB.
- * If DMA is to be performed, exit immediately.
- * Otherwise, we are in polling mode, so poll
- * status under operation succeeds or fails.
- *
- * LOCKING:
- * Kernel thread context (may sleep)
- */
-static void atapi_packet_task(void *_data)
-{
-	struct ata_queued_cmd *qc = _data;
-	struct ata_port *ap = qc->ap;
-	u8 status;
-
-	/* sleep-wait for BSY to clear */
-	DPRINTK("busy wait\n");
-	if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB)) {
-		qc->err_mask |= AC_ERR_TIMEOUT;
-		goto err_out;
-	}
-
-	/* make sure DRQ is set */
-	status = ata_chk_status(ap);
-	if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ) {
-		qc->err_mask |= AC_ERR_HSM;
-		goto err_out;
-	}
-
-	/* send SCSI cdb */
-	DPRINTK("send cdb\n");
-	WARN_ON(qc->dev->cdb_len < 12);
-
-	if (qc->tf.protocol == ATA_PROT_ATAPI_DMA ||
-	    qc->tf.protocol == ATA_PROT_ATAPI_NODATA) {
-		unsigned long flags;
-
-		/* Once we're done issuing command and kicking bmdma,
-		 * irq handler takes over.  To not lose irq, we need
-		 * to clear NOINTR flag before sending cdb, but
-		 * interrupt handler shouldn't be invoked before we're
-		 * finished.  Hence, the following locking.
-		 */
-		spin_lock_irqsave(&ap->host_set->lock, flags);
-		ap->flags &= ~ATA_FLAG_NOINTR;
-		ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);
-		if (qc->tf.protocol == ATA_PROT_ATAPI_DMA)
-			ap->ops->bmdma_start(qc);	/* initiate bmdma */
-		spin_unlock_irqrestore(&ap->host_set->lock, flags);
-	} else {
-		ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);
-
-		/* PIO commands are handled by polling */
-		ap->hsm_task_state = HSM_ST;
-		ata_port_queue_task(ap, ata_pio_task, qc, 0);
-	}
-
-	return;
-
-err_out:
-	ata_poll_qc_complete(qc);
+ *	ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
+ *	@ap: the target ata_port
+ *	@qc: qc on going
+ *
+ *	RETURNS:
+ *	1 if ok in workqueue, 0 otherwise.
+ */
+
+static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
+{
+	if (qc->tf.flags & ATA_TFLAG_POLLING)
+		return 1;
+
+	if (ap->hsm_task_state == HSM_ST_FIRST) {
+		if (qc->tf.protocol == ATA_PROT_PIO &&
+		    (qc->tf.flags & ATA_TFLAG_WRITE))
+			return 1;
+
+		if (is_atapi_taskfile(&qc->tf) &&
+		    !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
+			return 1;
+	}
+
+	return 0;
+}
+
+/**
+ *	ata_hsm_move - move the HSM to the next state.
+ *	@ap: the target ata_port
+ *	@qc: qc on going
+ *	@status: current device status
+ *	@in_wq: 1 if called from workqueue, 0 otherwise
+ *
+ *	RETURNS:
+ *	1 when poll next status needed, 0 otherwise.
+ */
+
+static int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
+			u8 status, int in_wq)
+{
+	unsigned long flags = 0;
+	int poll_next;
+
+	WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
+
+	/* Make sure ata_qc_issue_prot() does not throw things
+	 * like DMA polling into the workqueue. Notice that
+	 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
+	 */
+	WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
+
+fsm_start:
+	DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
+		ap->id, qc->tf.protocol, ap->hsm_task_state, status);
+
+	switch (ap->hsm_task_state) {
+	case HSM_ST_FIRST:
+		/* Send first data block or PACKET CDB */
+
+		/* If polling, we will stay in the work queue after
+		 * sending the data. Otherwise, interrupt handler
+		 * takes over after sending the data.
+		 */
+		poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
+
+		/* check device status */
+		if (unlikely((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ)) {
+			/* Wrong status. Let EH handle this */
+			qc->err_mask |= AC_ERR_HSM;
+			ap->hsm_task_state = HSM_ST_ERR;
+			goto fsm_start;
+		}
+
+		/* Device should not ask for data transfer (DRQ=1)
+		 * when it finds something wrong.
+		 * We ignore DRQ here and stop the HSM by
+		 * changing hsm_task_state to HSM_ST_ERR and
+		 * let the EH abort the command or reset the device.
+		 */
+		if (unlikely(status & (ATA_ERR | ATA_DF))) {
+			printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n",
+			       ap->id, status);
+			qc->err_mask |= AC_ERR_DEV;
+			ap->hsm_task_state = HSM_ST_ERR;
+			goto fsm_start;
+		}
+
+		/* Send the CDB (atapi) or the first data block (ata pio out).
+		 * During the state transition, interrupt handler shouldn't
+		 * be invoked before the data transfer is complete and
+		 * hsm_task_state is changed. Hence, the following locking.
+		 */
+		if (in_wq)
+			spin_lock_irqsave(&ap->host_set->lock, flags);
+
+		if (qc->tf.protocol == ATA_PROT_PIO) {
+			/* PIO data out protocol.
+			 * send first data block.
+			 */
+
+			/* ata_pio_sectors() might change the state
+			 * to HSM_ST_LAST. so, the state is changed here
+			 * before ata_pio_sectors().
+			 */
+			ap->hsm_task_state = HSM_ST;
+			ata_pio_sectors(qc);
+			ata_altstatus(ap); /* flush */
+		} else
+			/* send CDB */
+			atapi_send_cdb(ap, qc);
+
+		if (in_wq)
+			spin_unlock_irqrestore(&ap->host_set->lock, flags);
+
+		/* if polling, ata_pio_task() handles the rest.
+		 * otherwise, interrupt handler takes over from here.
+		 */
+		break;
+
+	case HSM_ST:
+		/* complete command or read/write the data register */
+		if (qc->tf.protocol == ATA_PROT_ATAPI) {
+			/* ATAPI PIO protocol */
+			if ((status & ATA_DRQ) == 0) {
+				/* no more data to transfer */
+				ap->hsm_task_state = HSM_ST_LAST;
+				goto fsm_start;
+			}
+
+			/* Device should not ask for data transfer (DRQ=1)
+			 * when it finds something wrong.
+			 * We ignore DRQ here and stop the HSM by
+			 * changing hsm_task_state to HSM_ST_ERR and
+			 * let the EH abort the command or reset the device.
+			 */
+			if (unlikely(status & (ATA_ERR | ATA_DF))) {
+				printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n",
+				       ap->id, status);
+				qc->err_mask |= AC_ERR_DEV;
+				ap->hsm_task_state = HSM_ST_ERR;
+				goto fsm_start;
+			}
+
+			atapi_pio_bytes(qc);
+
+			if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
+				/* bad ireason reported by device */
+				goto fsm_start;
+
+		} else {
+			/* ATA PIO protocol */
+			if (unlikely((status & ATA_DRQ) == 0)) {
+				/* handle BSY=0, DRQ=0 as error */
+				qc->err_mask |= AC_ERR_HSM;
+				ap->hsm_task_state = HSM_ST_ERR;
+				goto fsm_start;
+			}
+
+			/* For PIO reads, some devices may ask for
+			 * data transfer (DRQ=1) alone with ERR=1.
+			 * We respect DRQ here and transfer one
+			 * block of junk data before changing the
+			 * hsm_task_state to HSM_ST_ERR.
+			 *
+			 * For PIO writes, ERR=1 DRQ=1 doesn't make
+			 * sense since the data block has been
+			 * transferred to the device.
+			 */
+			if (unlikely(status & (ATA_ERR | ATA_DF))) {
+				/* data might be corrupted */
+				qc->err_mask |= AC_ERR_DEV;
+
+				if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
+					ata_pio_sectors(qc);
+					ata_altstatus(ap);
+					status = ata_wait_idle(ap);
+				}
+
+				/* ata_pio_sectors() might change the
+				 * state to HSM_ST_LAST. so, the state
+				 * is changed after ata_pio_sectors().
+				 */
+				ap->hsm_task_state = HSM_ST_ERR;
+				goto fsm_start;
+			}
+
+			ata_pio_sectors(qc);
+
+			if (ap->hsm_task_state == HSM_ST_LAST &&
+			    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
+				/* all data read */
+				ata_altstatus(ap);
+				status = ata_wait_idle(ap);
+				goto fsm_start;
+			}
+		}
+
+		ata_altstatus(ap); /* flush */
+		poll_next = 1;
+		break;
+
+	case HSM_ST_LAST:
+		if (unlikely(!ata_ok(status))) {
+			qc->err_mask |= __ac_err_mask(status);
+			ap->hsm_task_state = HSM_ST_ERR;
+			goto fsm_start;
+		}
+
+		/* no more data to transfer */
+		DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
+			ap->id, qc->dev->devno, status);
+
+		WARN_ON(qc->err_mask);
+
+		ap->hsm_task_state = HSM_ST_IDLE;
+
+		/* complete taskfile transaction */
+		if (in_wq)
+			ata_poll_qc_complete(qc);
+		else
+			ata_qc_complete(qc);
+
+		poll_next = 0;
+		break;
+
+	case HSM_ST_ERR:
+		if (qc->tf.command != ATA_CMD_PACKET)
+			printk(KERN_ERR "ata%u: dev %u command error, drv_stat 0x%x\n",
+			       ap->id, qc->dev->devno, status);
+
+		/* make sure qc->err_mask is available to
+		 * know what's wrong and recover
+		 */
+		WARN_ON(qc->err_mask == 0);
+
+		ap->hsm_task_state = HSM_ST_IDLE;
+
+		/* complete taskfile transaction */
+		if (in_wq)
+			ata_poll_qc_complete(qc);
+		else
+			ata_qc_complete(qc);
+
+		poll_next = 0;
+		break;
+	default:
+		poll_next = 0;
+		BUG();
+	}
+
+	return poll_next;
+}
+
+static void ata_pio_task(void *_data)
+{
+	struct ata_queued_cmd *qc = _data;
+	struct ata_port *ap = qc->ap;
+	u8 status;
+	int poll_next;
+
+fsm_start:
+	WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
+
+	/*
+	 * This is purely heuristic.  This is a fast path.
+	 * Sometimes when we enter, BSY will be cleared in
+	 * a chk-status or two.  If not, the drive is probably seeking
+	 * or something.  Snooze for a couple msecs, then
+	 * chk-status again.  If still busy, queue delayed work.
+	 */
+	status = ata_busy_wait(ap, ATA_BUSY, 5);
+	if (status & ATA_BUSY) {
+		msleep(2);
+		status = ata_busy_wait(ap, ATA_BUSY, 10);
+		if (status & ATA_BUSY) {
+			ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
+			return;
+		}
+	}
+
+	/* move the HSM */
+	poll_next = ata_hsm_move(ap, qc, status, 1);
+
+	/* another command or interrupt handler
+	 * may be running at this point.
+	 */
+	if (poll_next)
+		goto fsm_start;
 }
 
 /**
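Both entry points now funnel into ata_hsm_move(): the interrupt handler calls it once per device interrupt with in_wq=0, while ata_pio_task() keeps feeding it status with in_wq=1 for as long as it returns poll_next == 1. Stripped of the snooze-and-requeue heuristics, the polling side reduces to roughly this loop (a simplification; the real task re-queues delayed work instead of spinning while the device stays BSY):

	static void polling_side(struct ata_port *ap, struct ata_queued_cmd *qc)
	{
		int poll_next;

		do {
			u8 status = ata_busy_wait(ap, ATA_BUSY, 5);

			poll_next = ata_hsm_move(ap, qc, status, 1 /* in_wq */);
		} while (poll_next);
	}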
@@ -4196,43 +4307,105 @@ unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
 
+	/* Use polling pio if the LLD doesn't handle
+	 * interrupt driven pio and atapi CDB interrupt.
+	 */
+	if (ap->flags & ATA_FLAG_PIO_POLLING) {
+		switch (qc->tf.protocol) {
+		case ATA_PROT_PIO:
+		case ATA_PROT_ATAPI:
+		case ATA_PROT_ATAPI_NODATA:
+			qc->tf.flags |= ATA_TFLAG_POLLING;
+			break;
+		case ATA_PROT_ATAPI_DMA:
+			if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
+				/* see ata_check_atapi_dma() */
+				BUG();
+			break;
+		default:
+			break;
+		}
+	}
+
+	/* select the device */
 	ata_dev_select(ap, qc->dev->devno, 1, 0);
 
+	/* start the command */
 	switch (qc->tf.protocol) {
 	case ATA_PROT_NODATA:
+		if (qc->tf.flags & ATA_TFLAG_POLLING)
+			ata_qc_set_polling(qc);
+
 		ata_tf_to_host(ap, &qc->tf);
+		ap->hsm_task_state = HSM_ST_LAST;
+
+		if (qc->tf.flags & ATA_TFLAG_POLLING)
+			ata_port_queue_task(ap, ata_pio_task, qc, 0);
+
 		break;
 
 	case ATA_PROT_DMA:
+		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
+
 		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
 		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
 		ap->ops->bmdma_start(qc);	    /* initiate bmdma */
+		ap->hsm_task_state = HSM_ST_LAST;
 		break;
 
-	case ATA_PROT_PIO: /* load tf registers, initiate polling pio */
-		ata_qc_set_polling(qc);
-		ata_tf_to_host(ap, &qc->tf);
-		ap->hsm_task_state = HSM_ST;
-		ata_port_queue_task(ap, ata_pio_task, qc, 0);
-		break;
+	case ATA_PROT_PIO:
+		if (qc->tf.flags & ATA_TFLAG_POLLING)
+			ata_qc_set_polling(qc);
 
-	case ATA_PROT_ATAPI:
-		ata_qc_set_polling(qc);
 		ata_tf_to_host(ap, &qc->tf);
-		ata_port_queue_task(ap, atapi_packet_task, qc, 0);
+
+		if (qc->tf.flags & ATA_TFLAG_WRITE) {
+			/* PIO data out protocol */
+			ap->hsm_task_state = HSM_ST_FIRST;
+			ata_port_queue_task(ap, ata_pio_task, qc, 0);
+
+			/* always send first data block using
+			 * the ata_pio_task() codepath.
+			 */
+		} else {
+			/* PIO data in protocol */
+			ap->hsm_task_state = HSM_ST;
+
+			if (qc->tf.flags & ATA_TFLAG_POLLING)
+				ata_port_queue_task(ap, ata_pio_task, qc, 0);
+
+			/* if polling, ata_pio_task() handles the rest.
+			 * otherwise, interrupt handler takes over from here.
+			 */
+		}
+
 		break;
 
+	case ATA_PROT_ATAPI:
 	case ATA_PROT_ATAPI_NODATA:
-		ap->flags |= ATA_FLAG_NOINTR;
+		if (qc->tf.flags & ATA_TFLAG_POLLING)
+			ata_qc_set_polling(qc);
+
 		ata_tf_to_host(ap, &qc->tf);
-		ata_port_queue_task(ap, atapi_packet_task, qc, 0);
+
+		ap->hsm_task_state = HSM_ST_FIRST;
+
+		/* send cdb by polling if no cdb interrupt */
+		if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
+		    (qc->tf.flags & ATA_TFLAG_POLLING))
+			ata_port_queue_task(ap, ata_pio_task, qc, 0);
 		break;
 
 	case ATA_PROT_ATAPI_DMA:
-		ap->flags |= ATA_FLAG_NOINTR;
+		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
+
 		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
 		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
-		ata_port_queue_task(ap, atapi_packet_task, qc, 0);
+		ap->hsm_task_state = HSM_ST_FIRST;
+
+		/* send cdb by polling if no cdb interrupt */
+		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
+			ata_port_queue_task(ap, ata_pio_task, qc, 0);
 		break;
 
 	default:
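Tracing one concrete case makes the dispatch above easier to follow. For an ATAPI PIO read on a device without CDB interrupts (ATA_DFLAG_CDB_INTR clear), the sequence runs, sketched as an annotated trace rather than the literal code:

	/* ata_qc_issue_prot():  taskfile written, state = HSM_ST_FIRST,
	 *                       ata_pio_task() queued (no CDB interrupt).
	 * ata_pio_task():       waits for DRQ, ata_hsm_move(HSM_ST_FIRST)
	 *                       sends the CDB via atapi_send_cdb(),
	 *                       state = HSM_ST; irq handler takes over.
	 * each data interrupt:  ata_host_intr() -> ata_hsm_move(HSM_ST)
	 *                       -> atapi_pio_bytes() for one DRQ block.
	 * final interrupt:      DRQ clear -> state = HSM_ST_LAST,
	 *                       ata_ok(status) -> ata_qc_complete().
	 */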
@@ -4262,52 +4435,66 @@ unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
 inline unsigned int ata_host_intr (struct ata_port *ap,
 				   struct ata_queued_cmd *qc)
 {
-	u8 status, host_stat;
-
-	switch (qc->tf.protocol) {
-
-	case ATA_PROT_DMA:
-	case ATA_PROT_ATAPI_DMA:
-	case ATA_PROT_ATAPI:
-		/* check status of DMA engine */
-		host_stat = ap->ops->bmdma_status(ap);
-		VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
-
-		/* if it's not our irq... */
-		if (!(host_stat & ATA_DMA_INTR))
-			goto idle_irq;
-
-		/* before we do anything else, clear DMA-Start bit */
-		ap->ops->bmdma_stop(qc);
-
-		/* fall through */
-
-	case ATA_PROT_ATAPI_NODATA:
-	case ATA_PROT_NODATA:
-		/* check altstatus */
-		status = ata_altstatus(ap);
-		if (status & ATA_BUSY)
-			goto idle_irq;
-
-		/* check main status, clearing INTRQ */
-		status = ata_chk_status(ap);
-		if (unlikely(status & ATA_BUSY))
-			goto idle_irq;
-		DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
-			ap->id, qc->tf.protocol, status);
-
-		/* ack bmdma irq events */
-		ap->ops->irq_clear(ap);
-
-		/* complete taskfile transaction */
-		qc->err_mask |= ac_err_mask(status);
-		ata_qc_complete(qc);
-		break;
-
+	u8 status, host_stat = 0;
+
+	VPRINTK("ata%u: protocol %d task_state %d\n",
+		ap->id, qc->tf.protocol, ap->hsm_task_state);
+
+	/* Check whether we are expecting interrupt in this state */
+	switch (ap->hsm_task_state) {
+	case HSM_ST_FIRST:
+		/* Some pre-ATAPI-4 devices assert INTRQ
+		 * at this state when ready to receive CDB.
+		 */
+
+		/* Check the ATA_DFLAG_CDB_INTR flag is enough here.
+		 * The flag was turned on only for atapi devices.
+		 * No need to check is_atapi_taskfile(&qc->tf) again.
+		 */
+		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
+			goto idle_irq;
+		break;
+	case HSM_ST_LAST:
+		if (qc->tf.protocol == ATA_PROT_DMA ||
+		    qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
+			/* check status of DMA engine */
+			host_stat = ap->ops->bmdma_status(ap);
+			VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
+
+			/* if it's not our irq... */
+			if (!(host_stat & ATA_DMA_INTR))
+				goto idle_irq;
+
+			/* before we do anything else, clear DMA-Start bit */
+			ap->ops->bmdma_stop(qc);
+
+			if (unlikely(host_stat & ATA_DMA_ERR)) {
+				/* error when transferring data to/from memory */
+				qc->err_mask |= AC_ERR_HOST_BUS;
+				ap->hsm_task_state = HSM_ST_ERR;
+			}
+		}
+		break;
+	case HSM_ST:
+		break;
 	default:
 		goto idle_irq;
 	}
 
+	/* check altstatus */
+	status = ata_altstatus(ap);
+	if (status & ATA_BUSY)
+		goto idle_irq;
+
+	/* check main status, clearing INTRQ */
+	status = ata_chk_status(ap);
+	if (unlikely(status & ATA_BUSY))
+		goto idle_irq;
+
+	/* ack bmdma irq events */
+	ap->ops->irq_clear(ap);
+
+	ata_hsm_move(ap, qc, status, 0);
 	return 1;	/* irq handled */
 
 idle_irq:
@@ -4354,11 +4541,11 @@ irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
 
 		ap = host_set->ports[i];
 		if (ap &&
-		    !(ap->flags & (ATA_FLAG_DISABLED | ATA_FLAG_NOINTR))) {
+		    !(ap->flags & ATA_FLAG_DISABLED)) {
 			struct ata_queued_cmd *qc;
 
 			qc = ata_qc_from_tag(ap, ap->active_tag);
-			if (qc && (!(qc->tf.ctl & ATA_NIEN)) &&
+			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
 			    (qc->flags & ATA_QCFLAG_ACTIVE))
 				handled |= ata_host_intr(ap, qc);
 		}
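The remaining hunks apply one mechanical conversion to every LLDD: the ATA_FLAG_NOINTR port mask disappears, and "is this command interrupt driven?" is asked of the per-command taskfile flags instead of the device-control NIEN bit. Schematically (fragment for side-by-side comparison only):

	/* before: NIEN in the device control register gated the irq path */
	if (qc && !(qc->tf.ctl & ATA_NIEN))
		handled += ata_host_intr(ap, qc);

	/* after: polling is a per-command taskfile flag */
	if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING))
		handled += ata_host_intr(ap, qc);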
diff --git a/drivers/scsi/libata-eh.c b/drivers/scsi/libata-eh.c
index c31b13fd5307..16db62211716 100644
--- a/drivers/scsi/libata-eh.c
+++ b/drivers/scsi/libata-eh.c
@@ -171,7 +171,7 @@ static void ata_qc_timeout(struct ata_queued_cmd *qc)
 		       ap->id, qc->tf.command, drv_stat, host_stat);
 
 		/* complete taskfile transaction */
-		qc->err_mask |= ac_err_mask(drv_stat);
+		qc->err_mask |= AC_ERR_TIMEOUT;
 		break;
 	}
 
diff --git a/drivers/scsi/pdc_adma.c b/drivers/scsi/pdc_adma.c
index b9a3c566f833..a341fa8d3291 100644
--- a/drivers/scsi/pdc_adma.c
+++ b/drivers/scsi/pdc_adma.c
@@ -455,13 +455,13 @@ static inline unsigned int adma_intr_pkt(struct ata_host_set *host_set)
 			continue;
 		handled = 1;
 		adma_enter_reg_mode(ap);
-		if (ap->flags & (ATA_FLAG_DISABLED | ATA_FLAG_NOINTR))
+		if (ap->flags & ATA_FLAG_DISABLED)
 			continue;
 		pp = ap->private_data;
 		if (!pp || pp->state != adma_state_pkt)
 			continue;
 		qc = ata_qc_from_tag(ap, ap->active_tag);
-		if (qc && (!(qc->tf.ctl & ATA_NIEN))) {
+		if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
 			if ((status & (aPERR | aPSD | aUIRQ)))
 				qc->err_mask |= AC_ERR_OTHER;
 			else if (pp->pkt[0] != cDONE)
@@ -480,13 +480,13 @@ static inline unsigned int adma_intr_mmio(struct ata_host_set *host_set)
 	for (port_no = 0; port_no < host_set->n_ports; ++port_no) {
 		struct ata_port *ap;
 		ap = host_set->ports[port_no];
-		if (ap && (!(ap->flags & (ATA_FLAG_DISABLED | ATA_FLAG_NOINTR)))) {
+		if (ap && (!(ap->flags & ATA_FLAG_DISABLED))) {
 			struct ata_queued_cmd *qc;
 			struct adma_port_priv *pp = ap->private_data;
 			if (!pp || pp->state != adma_state_mmio)
 				continue;
 			qc = ata_qc_from_tag(ap, ap->active_tag);
-			if (qc && (!(qc->tf.ctl & ATA_NIEN))) {
+			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
 
 				/* check main status, clearing INTRQ */
 				u8 status = ata_check_status(ap);
diff --git a/drivers/scsi/sata_mv.c b/drivers/scsi/sata_mv.c
index 4aabc759fcec..08665ea1e94e 100644
--- a/drivers/scsi/sata_mv.c
+++ b/drivers/scsi/sata_mv.c
@@ -87,7 +87,7 @@ enum {
 	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */
 	MV_COMMON_FLAGS		= (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
 				   ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
-				   ATA_FLAG_NO_ATAPI),
+				   ATA_FLAG_PIO_POLLING),
 	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,
 
 	CRQB_FLAG_READ		= (1 << 0),
@@ -1396,7 +1396,7 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
 			}
 		}
 
-		if (ap->flags & (ATA_FLAG_DISABLED | ATA_FLAG_NOINTR))
+		if (ap && (ap->flags & ATA_FLAG_DISABLED))
 			continue;
 
 		err_mask = ac_err_mask(ata_status);
@@ -1417,7 +1417,7 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
 			VPRINTK("port %u IRQ found for qc, "
 				"ata_status 0x%x\n", port,ata_status);
 			/* mark qc status appropriately */
-			if (!(qc->tf.ctl & ATA_NIEN)) {
+			if (!(qc->tf.flags & ATA_TFLAG_POLLING)) {
 				qc->err_mask |= err_mask;
 				ata_qc_complete(qc);
 			}
diff --git a/drivers/scsi/sata_nv.c b/drivers/scsi/sata_nv.c
index 3a70875be8ba..70c51088d371 100644
--- a/drivers/scsi/sata_nv.c
+++ b/drivers/scsi/sata_nv.c
@@ -279,11 +279,11 @@ static irqreturn_t nv_interrupt (int irq, void *dev_instance,
 
 		ap = host_set->ports[i];
 		if (ap &&
-		    !(ap->flags & (ATA_FLAG_DISABLED | ATA_FLAG_NOINTR))) {
+		    !(ap->flags & ATA_FLAG_DISABLED)) {
 			struct ata_queued_cmd *qc;
 
 			qc = ata_qc_from_tag(ap, ap->active_tag);
-			if (qc && (!(qc->tf.ctl & ATA_NIEN)))
+			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
 				handled += ata_host_intr(ap, qc);
 			else
 				// No request pending?  Clear interrupt status
diff --git a/drivers/scsi/sata_promise.c b/drivers/scsi/sata_promise.c
index ddbc0c6dd9fe..aaf896a0c63a 100644
--- a/drivers/scsi/sata_promise.c
+++ b/drivers/scsi/sata_promise.c
@@ -76,7 +76,8 @@ enum {
 	PDC_RESET		= (1 << 11), /* HDMA reset */
 
 	PDC_COMMON_FLAGS	= ATA_FLAG_NO_LEGACY | ATA_FLAG_SRST |
-				  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI,
+				  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
+				  ATA_FLAG_PIO_POLLING,
 };
 
 
81 82
82 83
@@ -533,11 +534,11 @@ static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *r
 		ap = host_set->ports[i];
 		tmp = mask & (1 << (i + 1));
 		if (tmp && ap &&
-		    !(ap->flags & (ATA_FLAG_DISABLED | ATA_FLAG_NOINTR))) {
+		    !(ap->flags & ATA_FLAG_DISABLED)) {
 			struct ata_queued_cmd *qc;
 
 			qc = ata_qc_from_tag(ap, ap->active_tag);
-			if (qc && (!(qc->tf.ctl & ATA_NIEN)))
+			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
 				handled += pdc_host_intr(ap, qc);
 		}
 	}
diff --git a/drivers/scsi/sata_qstor.c b/drivers/scsi/sata_qstor.c
index 259c2dec4e21..54283e06070e 100644
--- a/drivers/scsi/sata_qstor.c
+++ b/drivers/scsi/sata_qstor.c
@@ -175,7 +175,7 @@ static const struct ata_port_info qs_port_info[] = {
 		.host_flags	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
 				  ATA_FLAG_SATA_RESET |
 				  //FIXME ATA_FLAG_SRST |
-				  ATA_FLAG_MMIO,
+				  ATA_FLAG_MMIO | ATA_FLAG_PIO_POLLING,
 		.pio_mask	= 0x10, /* pio4 */
 		.udma_mask	= 0x7f, /* udma0-6 */
 		.port_ops	= &qs_ata_ops,
@@ -394,14 +394,13 @@ static inline unsigned int qs_intr_pkt(struct ata_host_set *host_set)
 			DPRINTK("SFF=%08x%08x: sCHAN=%u sHST=%d sDST=%02x\n",
 					sff1, sff0, port_no, sHST, sDST);
 			handled = 1;
-			if (ap && !(ap->flags &
-				    (ATA_FLAG_DISABLED|ATA_FLAG_NOINTR))) {
+			if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
 				struct ata_queued_cmd *qc;
 				struct qs_port_priv *pp = ap->private_data;
 				if (!pp || pp->state != qs_state_pkt)
 					continue;
 				qc = ata_qc_from_tag(ap, ap->active_tag);
-				if (qc && (!(qc->tf.ctl & ATA_NIEN))) {
+				if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
 					switch (sHST) {
 					case 0: /* successful CPB */
 					case 3: /* device error */
@@ -428,13 +427,13 @@ static inline unsigned int qs_intr_mmio(struct ata_host_set *host_set)
 		struct ata_port *ap;
 		ap = host_set->ports[port_no];
 		if (ap &&
-		    !(ap->flags & (ATA_FLAG_DISABLED | ATA_FLAG_NOINTR))) {
+		    !(ap->flags & ATA_FLAG_DISABLED)) {
 			struct ata_queued_cmd *qc;
 			struct qs_port_priv *pp = ap->private_data;
 			if (!pp || pp->state != qs_state_mmio)
 				continue;
 			qc = ata_qc_from_tag(ap, ap->active_tag);
-			if (qc && (!(qc->tf.ctl & ATA_NIEN))) {
+			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
 
 				/* check main status, clearing INTRQ */
 				u8 status = ata_check_status(ap);
diff --git a/drivers/scsi/sata_sx4.c b/drivers/scsi/sata_sx4.c
index a669d0589889..4c07ba1f6e62 100644
--- a/drivers/scsi/sata_sx4.c
+++ b/drivers/scsi/sata_sx4.c
@@ -218,7 +218,7 @@ static const struct ata_port_info pdc_port_info[] = {
 		.sht		= &pdc_sata_sht,
 		.host_flags	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
 				  ATA_FLAG_SRST | ATA_FLAG_MMIO |
-				  ATA_FLAG_NO_ATAPI,
+				  ATA_FLAG_PIO_POLLING,
 		.pio_mask	= 0x1f, /* pio0-4 */
 		.mwdma_mask	= 0x07, /* mwdma0-2 */
 		.udma_mask	= 0x7f, /* udma0-6 ; FIXME */
@@ -833,11 +833,11 @@ static irqreturn_t pdc20621_interrupt (int irq, void *dev_instance, struct pt_re
 			tmp = mask & (1 << i);
 			VPRINTK("seq %u, port_no %u, ap %p, tmp %x\n", i, port_no, ap, tmp);
 			if (tmp && ap &&
-			    !(ap->flags & (ATA_FLAG_DISABLED | ATA_FLAG_NOINTR))) {
+			    !(ap->flags & ATA_FLAG_DISABLED)) {
 				struct ata_queued_cmd *qc;
 
 				qc = ata_qc_from_tag(ap, ap->active_tag);
-				if (qc && (!(qc->tf.ctl & ATA_NIEN)))
+				if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
 					handled += pdc20621_host_intr(ap, qc, (i > 4),
 								      mmio_base);
 			}
diff --git a/drivers/scsi/sata_vsc.c b/drivers/scsi/sata_vsc.c
index 9646c3932129..0372be7ff1c9 100644
--- a/drivers/scsi/sata_vsc.c
+++ b/drivers/scsi/sata_vsc.c
@@ -221,14 +221,21 @@ static irqreturn_t vsc_sata_interrupt (int irq, void *dev_instance,
 
 		ap = host_set->ports[i];
 
-		if (ap && !(ap->flags &
-			    (ATA_FLAG_DISABLED|ATA_FLAG_NOINTR))) {
+		if (is_vsc_sata_int_err(i, int_status)) {
+			u32 err_status;
+			printk(KERN_DEBUG "%s: ignoring interrupt(s)\n", __FUNCTION__);
+			err_status = ap ? vsc_sata_scr_read(ap, SCR_ERROR) : 0;
+			vsc_sata_scr_write(ap, SCR_ERROR, err_status);
+			handled++;
+		}
+
+		if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
 			struct ata_queued_cmd *qc;
 
 			qc = ata_qc_from_tag(ap, ap->active_tag);
-			if (qc && (!(qc->tf.ctl & ATA_NIEN))) {
+			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
 				handled += ata_host_intr(ap, qc);
-			} else if (is_vsc_sata_int_err(i, int_status)) {
+			else if (is_vsc_sata_int_err(i, int_status)) {
 				/*
 				 * On some chips (i.e. Intel 31244), an error
 				 * interrupt will sneak in at initialization