Diffstat (limited to 'drivers/scsi')
 drivers/scsi/libata-core.c  | 474
 drivers/scsi/pdc_adma.c     |   4
 drivers/scsi/sata_mv.c      |   3
 drivers/scsi/sata_nv.c      |   4
 drivers/scsi/sata_promise.c |  13
 drivers/scsi/sata_qstor.c   |  11
 drivers/scsi/sata_sx4.c     |   7
 drivers/scsi/sata_vsc.c     |   6
 8 files changed, 372 insertions, 150 deletions
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index f53d7b8ac33f..ab8b7354452c 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -73,6 +73,7 @@ static int ata_choose_xfer_mode(const struct ata_port *ap,
 				u8 *xfer_mode_out,
 				unsigned int *xfer_shift_out);
 static void __ata_qc_complete(struct ata_queued_cmd *qc);
+static void ata_pio_error(struct ata_port *ap);
 
 static unsigned int ata_unique_id = 1;
 static struct workqueue_struct *ata_wq;
@@ -1334,6 +1335,9 @@ retry:
 	ap->cdb_len = (unsigned int) rc;
 	ap->host->max_cmd_len = (unsigned char) ap->cdb_len;
 
+	if (ata_id_cdb_intr(dev->id))
+		dev->flags |= ATA_DFLAG_CDB_INTR;
+
 	/* print device info to dmesg */
 	printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n",
 	       ap->id, device,
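ATA_DFLAG_CDB_INTR is set once at device-configuration time and is what ata_qc_issue_prot() and ata_host_intr() later consult to decide whether the device will raise INTRQ when it is ready for the command packet. The information comes from IDENTIFY PACKET DEVICE word 0; a minimal sketch of the test behind ata_id_cdb_intr(), with an illustrative helper name (the bit meanings below are stated as an assumption from the ATAPI DRQ-handling field, not quoted from this patch):

#include <linux/types.h>

/* IDENTIFY PACKET DEVICE word 0, bits 6:5 (DRQ handling):
 *   00b - device sets DRQ within 3 ms of the PACKET command (no CDB interrupt)
 *   01b - device asserts INTRQ when it is ready to accept the CDB
 *   10b - accelerated DRQ (within 50 us)
 * Sketch only; the in-tree test is the ata_id_cdb_intr() macro.
 */
static inline int id_signals_cdb_intr(const u16 *id)
{
	return (id[0] & 0x0060) == 0x0020;	/* bits 6:5 == 01b */
}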
@@ -2693,7 +2697,6 @@ void ata_poll_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat)
 	unsigned long flags;
 
 	spin_lock_irqsave(&ap->host_set->lock, flags);
-	ap->flags &= ~ATA_FLAG_NOINTR;
 	ata_irq_on(ap);
 	ata_qc_complete(qc, drv_stat);
 	spin_unlock_irqrestore(&ap->host_set->lock, flags);
@@ -2957,7 +2960,23 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
 	page = nth_page(page, (offset >> PAGE_SHIFT));
 	offset %= PAGE_SIZE;
 
-	buf = kmap(page) + offset;
+	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
+
+	if (PageHighMem(page)) {
+		unsigned long flags;
+
+		local_irq_save(flags);
+		buf = kmap_atomic(page, KM_IRQ0);
+
+		/* do the actual data transfer */
+		ata_data_xfer(ap, buf + offset, ATA_SECT_SIZE, do_write);
+
+		kunmap_atomic(buf, KM_IRQ0);
+		local_irq_restore(flags);
+	} else {
+		buf = page_address(page);
+		ata_data_xfer(ap, buf + offset, ATA_SECT_SIZE, do_write);
+	}
 
 	qc->cursect++;
 	qc->cursg_ofs++;
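Both PIO data paths (ata_pio_sector() above and __atapi_pio_bytes() further down) stop using kmap(), which may sleep, because with interrupt-driven PIO the same transfer code now also runs from the interrupt handler. Highmem pages are therefore mapped with kmap_atomic() while local interrupts are disabled (the KM_IRQ0 slot must not be re-entered), and lowmem pages are addressed directly through page_address(). A condensed sketch of the pattern, assuming the two-argument kmap_atomic() of this kernel generation and libata's internal declarations; the helper name is illustrative:

static void pio_xfer_page(struct ata_port *ap, struct page *page,
			  unsigned int offset, unsigned int len, int do_write)
{
	unsigned char *buf;

	if (PageHighMem(page)) {
		unsigned long flags;

		local_irq_save(flags);		/* protect the per-CPU KM_IRQ0 slot */
		buf = kmap_atomic(page, KM_IRQ0);
		ata_data_xfer(ap, buf + offset, len, do_write);
		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		buf = page_address(page);	/* lowmem is permanently mapped */
		ata_data_xfer(ap, buf + offset, len, do_write);
	}
}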
@@ -2966,14 +2985,114 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
 		qc->cursg++;
 		qc->cursg_ofs = 0;
 	}
+}
 
-	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
+/**
+ *	atapi_send_cdb - Write CDB bytes to hardware
+ *	@ap: Port to which ATAPI device is attached.
+ *	@qc: Taskfile currently active
+ *
+ *	When device has indicated its readiness to accept
+ *	a CDB, this function is called.  Send the CDB.
+ *
+ *	LOCKING:
+ *	caller.
+ */
+
+static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
+{
+	/* send SCSI cdb */
+	DPRINTK("send cdb\n");
+	assert(ap->cdb_len >= 12);
+
+	ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1);
+	ata_altstatus(ap); /* flush */
+
+	switch (qc->tf.protocol) {
+	case ATA_PROT_ATAPI:
+		ap->hsm_task_state = HSM_ST;
+		break;
+	case ATA_PROT_ATAPI_NODATA:
+		ap->hsm_task_state = HSM_ST_LAST;
+		break;
+	case ATA_PROT_ATAPI_DMA:
+		ap->hsm_task_state = HSM_ST_LAST;
+		/* initiate bmdma */
+		ap->ops->bmdma_start(qc);
+		break;
+	}
+}
+
+/**
+ *	ata_dataout_task - Write first data block to hardware
+ *	@_data: Port to which ATA/ATAPI device is attached.
+ *
+ *	When device has indicated its readiness to accept
+ *	the data, this function sends out the CDB or
+ *	the first data block by PIO.
+ *	After this,
+ *	  - If polling, ata_pio_task() handles the rest.
+ *	  - Otherwise, interrupt handler takes over.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep)
+ */
+
+static void ata_dataout_task(void *_data)
+{
+	struct ata_port *ap = _data;
+	struct ata_queued_cmd *qc;
+	u8 status;
+	unsigned long flags;
+
+	qc = ata_qc_from_tag(ap, ap->active_tag);
+	assert(qc != NULL);
+	assert(qc->flags & ATA_QCFLAG_ACTIVE);
+
+	/* sleep-wait for BSY to clear */
+	DPRINTK("busy wait\n");
+	if (ata_busy_sleep(ap, ATA_TMOUT_DATAOUT_QUICK, ATA_TMOUT_DATAOUT))
+		goto err_out;
 
-	/* do the actual data transfer */
-	do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
-	ata_data_xfer(ap, buf, ATA_SECT_SIZE, do_write);
+	/* make sure DRQ is set */
+	status = ata_chk_status(ap);
+	if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ)
+		goto err_out;
+
+	/* Send the CDB (atapi) or the first data block (ata pio out).
+	 * During the state transition, interrupt handler shouldn't
+	 * be invoked before the data transfer is complete and
+	 * hsm_task_state is changed. Hence, the following locking.
+	 */
+	spin_lock_irqsave(&ap->host_set->lock, flags);
+
+	if (qc->tf.protocol == ATA_PROT_PIO) {
+		/* PIO data out protocol.
+		 * send first data block.
+		 */
+
+		/* ata_pio_sector() might change the state to HSM_ST_LAST.
+		 * so, the state is changed here before ata_pio_sector().
+		 */
+		ap->hsm_task_state = HSM_ST;
+		ata_pio_sector(qc);
+		ata_altstatus(ap); /* flush */
+	} else
+		/* send CDB */
+		atapi_send_cdb(ap, qc);
+
+	/* if polling, ata_pio_task() handles the rest.
+	 * otherwise, interrupt handler takes over from here.
+	 */
+	if (qc->tf.flags & ATA_TFLAG_POLLING)
+		queue_work(ata_wq, &ap->pio_task);
+
+	spin_unlock_irqrestore(&ap->host_set->lock, flags);
 
-	kunmap(page);
+	return;
+
+err_out:
+	ata_pio_error(ap);
 }
 
 /**
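Once the CDB has been written, the next host state machine state depends only on the taskfile protocol, and ata_dataout_task() then either keeps polling (ATA_TFLAG_POLLING set) or leaves the remaining transitions to the interrupt handler. A condensed sketch of the transition atapi_send_cdb() performs; the helper below is illustrative and not part of the patch:

/* State after the CDB has been sent, per protocol (HSM_* names as used above). */
static int hsm_state_after_cdb(unsigned int protocol)
{
	switch (protocol) {
	case ATA_PROT_ATAPI:		/* a PIO data phase follows */
		return HSM_ST;
	case ATA_PROT_ATAPI_NODATA:	/* only the completion interrupt is left */
	case ATA_PROT_ATAPI_DMA:	/* the bmdma engine moves the data */
		return HSM_ST_LAST;
	default:
		return HSM_ST_ERR;	/* not reached for non-ATAPI protocols */
	}
}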
@@ -3039,7 +3158,23 @@ next_sg:
 	/* don't cross page boundaries */
 	count = min(count, (unsigned int)PAGE_SIZE - offset);
 
-	buf = kmap(page) + offset;
+	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
+
+	if (PageHighMem(page)) {
+		unsigned long flags;
+
+		local_irq_save(flags);
+		buf = kmap_atomic(page, KM_IRQ0);
+
+		/* do the actual data transfer */
+		ata_data_xfer(ap, buf + offset, count, do_write);
+
+		kunmap_atomic(buf, KM_IRQ0);
+		local_irq_restore(flags);
+	} else {
+		buf = page_address(page);
+		ata_data_xfer(ap, buf + offset, count, do_write);
+	}
 
 	bytes -= count;
 	qc->curbytes += count;
@@ -3050,13 +3185,6 @@ next_sg:
 		qc->cursg_ofs = 0;
 	}
 
-	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
-
-	/* do the actual data transfer */
-	ata_data_xfer(ap, buf, count, do_write);
-
-	kunmap(page);
-
 	if (bytes)
 		goto next_sg;
 }
@@ -3093,6 +3221,8 @@ static void atapi_pio_bytes(struct ata_queued_cmd *qc)
 	if (do_write != i_write)
 		goto err_out;
 
+	VPRINTK("ata%u: xfering %d bytes\n", ap->id, bytes);
+
 	__atapi_pio_bytes(qc, bytes);
 
 	return;
@@ -3293,6 +3423,8 @@ static void ata_qc_timeout(struct ata_queued_cmd *qc)
 		printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x host_stat 0x%x\n",
 		       ap->id, qc->tf.command, drv_stat, host_stat);
 
+		ap->hsm_task_state = HSM_ST_IDLE;
+
 		/* complete taskfile transaction */
 		ata_qc_complete(qc, drv_stat);
 		break;
@@ -3578,43 +3710,103 @@ int ata_qc_issue_prot(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
 
+	/* Use polling pio if the LLD doesn't handle
+	 * interrupt driven pio and atapi CDB interrupt.
+	 */
+	if (ap->flags & ATA_FLAG_PIO_POLLING) {
+		switch (qc->tf.protocol) {
+		case ATA_PROT_PIO:
+		case ATA_PROT_ATAPI:
+		case ATA_PROT_ATAPI_NODATA:
+			qc->tf.flags |= ATA_TFLAG_POLLING;
+			break;
+		case ATA_PROT_ATAPI_DMA:
+			if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
+				BUG();
+			break;
+		default:
+			break;
+		}
+	}
+
+	/* select the device */
 	ata_dev_select(ap, qc->dev->devno, 1, 0);
 
+	/* start the command */
 	switch (qc->tf.protocol) {
 	case ATA_PROT_NODATA:
+		if (qc->tf.flags & ATA_TFLAG_POLLING)
+			ata_qc_set_polling(qc);
+
 		ata_tf_to_host_nolock(ap, &qc->tf);
+		ap->hsm_task_state = HSM_ST_LAST;
+
+		if (qc->tf.flags & ATA_TFLAG_POLLING)
+			queue_work(ata_wq, &ap->pio_task);
+
 		break;
 
 	case ATA_PROT_DMA:
+		assert(!(qc->tf.flags & ATA_TFLAG_POLLING));
+
 		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
 		ap->ops->bmdma_setup(qc);	 /* set up bmdma */
 		ap->ops->bmdma_start(qc);	 /* initiate bmdma */
+		ap->hsm_task_state = HSM_ST_LAST;
 		break;
 
-	case ATA_PROT_PIO: /* load tf registers, initiate polling pio */
-		ata_qc_set_polling(qc);
-		ata_tf_to_host_nolock(ap, &qc->tf);
-		ap->hsm_task_state = HSM_ST;
-		queue_work(ata_wq, &ap->pio_task);
-		break;
+	case ATA_PROT_PIO:
+		if (qc->tf.flags & ATA_TFLAG_POLLING)
+			ata_qc_set_polling(qc);
 
-	case ATA_PROT_ATAPI:
-		ata_qc_set_polling(qc);
 		ata_tf_to_host_nolock(ap, &qc->tf);
-		queue_work(ata_wq, &ap->packet_task);
+
+		if (qc->tf.flags & ATA_TFLAG_WRITE) {
+			/* PIO data out protocol */
+			ap->hsm_task_state = HSM_ST_FIRST;
+			queue_work(ata_wq, &ap->dataout_task);
+
+			/* always send first data block using
+			 * the ata_dataout_task() codepath.
+			 */
+		} else {
+			/* PIO data in protocol */
+			ap->hsm_task_state = HSM_ST;
+
+			if (qc->tf.flags & ATA_TFLAG_POLLING)
+				queue_work(ata_wq, &ap->pio_task);
+
+			/* if polling, ata_pio_task() handles the rest.
+			 * otherwise, interrupt handler takes over from here.
+			 */
+		}
+
 		break;
 
+	case ATA_PROT_ATAPI:
 	case ATA_PROT_ATAPI_NODATA:
-		ap->flags |= ATA_FLAG_NOINTR;
+		if (qc->tf.flags & ATA_TFLAG_POLLING)
+			ata_qc_set_polling(qc);
+
 		ata_tf_to_host_nolock(ap, &qc->tf);
-		queue_work(ata_wq, &ap->packet_task);
+		ap->hsm_task_state = HSM_ST_FIRST;
+
+		/* send cdb by polling if no cdb interrupt */
+		if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
+		    (qc->tf.flags & ATA_TFLAG_POLLING))
+			queue_work(ata_wq, &ap->dataout_task);
 		break;
 
 	case ATA_PROT_ATAPI_DMA:
-		ap->flags |= ATA_FLAG_NOINTR;
+		assert(!(qc->tf.flags & ATA_TFLAG_POLLING));
+
 		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
 		ap->ops->bmdma_setup(qc);	 /* set up bmdma */
-		queue_work(ata_wq, &ap->packet_task);
+		ap->hsm_task_state = HSM_ST_FIRST;
+
+		/* send cdb by polling if no cdb interrupt */
+		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
+			queue_work(ata_wq, &ap->dataout_task);
 		break;
 
 	default:
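The reworked ata_qc_issue_prot() decides, per protocol, who delivers the CDB or the first data block: the ata_dataout_task() work item (process context, may sleep while waiting for BSY to clear) or the interrupt handler once the device asserts INTRQ. A condensed sketch of that dispatch rule; the helper is illustrative only:

/* Does this command need ata_dataout_task() queued after the taskfile is written? */
static int needs_dataout_task(const struct ata_queued_cmd *qc)
{
	switch (qc->tf.protocol) {
	case ATA_PROT_PIO:
		/* PIO writes always push the first block from process context */
		return (qc->tf.flags & ATA_TFLAG_WRITE) != 0;
	case ATA_PROT_ATAPI:
	case ATA_PROT_ATAPI_NODATA:
		/* poll for DRQ unless the device interrupts for the CDB */
		return !(qc->dev->flags & ATA_DFLAG_CDB_INTR) ||
		       (qc->tf.flags & ATA_TFLAG_POLLING);
	case ATA_PROT_ATAPI_DMA:
		return !(qc->dev->flags & ATA_DFLAG_CDB_INTR);
	default:
		return 0;	/* NODATA and DMA never use it */
	}
}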
@@ -3875,47 +4067,142 @@ void ata_bmdma_stop(struct ata_queued_cmd *qc)
 inline unsigned int ata_host_intr (struct ata_port *ap,
 				   struct ata_queued_cmd *qc)
 {
-	u8 status, host_stat;
-
-	switch (qc->tf.protocol) {
+	u8 status, host_stat = 0;
 
-	case ATA_PROT_DMA:
-	case ATA_PROT_ATAPI_DMA:
-	case ATA_PROT_ATAPI:
-		/* check status of DMA engine */
-		host_stat = ap->ops->bmdma_status(ap);
-		VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
+	VPRINTK("ata%u: protocol %d task_state %d\n",
+		ap->id, qc->tf.protocol, ap->hsm_task_state);
 
-		/* if it's not our irq... */
-		if (!(host_stat & ATA_DMA_INTR))
+	/* Check whether we are expecting interrupt in this state */
+	switch (ap->hsm_task_state) {
+	case HSM_ST_FIRST:
+		/* Check the ATA_DFLAG_CDB_INTR flag is enough here.
+		 * The flag was turned on only for atapi devices.
+		 * No need to check is_atapi_taskfile(&qc->tf) again.
+		 */
+		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
 			goto idle_irq;
+		break;
+	case HSM_ST_LAST:
+		if (qc->tf.protocol == ATA_PROT_DMA ||
+		    qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
+			/* check status of DMA engine */
+			host_stat = ap->ops->bmdma_status(ap);
+			VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
+
+			/* if it's not our irq... */
+			if (!(host_stat & ATA_DMA_INTR))
+				goto idle_irq;
+
+			/* before we do anything else, clear DMA-Start bit */
+			ap->ops->bmdma_stop(qc);
+		}
+		break;
+	case HSM_ST:
+		break;
+	default:
+		goto idle_irq;
+	}
 
-		/* before we do anything else, clear DMA-Start bit */
-		ap->ops->bmdma_stop(qc);
+	/* check altstatus */
+	status = ata_altstatus(ap);
+	if (status & ATA_BUSY)
+		goto idle_irq;
 
-		/* fall through */
+	/* check main status, clearing INTRQ */
+	status = ata_chk_status(ap);
+	if (unlikely(status & ATA_BUSY))
+		goto idle_irq;
 
-	case ATA_PROT_ATAPI_NODATA:
-	case ATA_PROT_NODATA:
-		/* check altstatus */
-		status = ata_altstatus(ap);
-		if (status & ATA_BUSY)
-			goto idle_irq;
+	DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
+		ap->id, qc->tf.protocol, ap->hsm_task_state, status);
 
-		/* check main status, clearing INTRQ */
-		status = ata_chk_status(ap);
-		if (unlikely(status & ATA_BUSY))
-			goto idle_irq;
-		DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
-			ap->id, qc->tf.protocol, status);
+	/* ack bmdma irq events */
+	ap->ops->irq_clear(ap);
 
-		/* ack bmdma irq events */
-		ap->ops->irq_clear(ap);
+	/* check error */
+	if (unlikely((status & ATA_ERR) || (host_stat & ATA_DMA_ERR)))
+		ap->hsm_task_state = HSM_ST_ERR;
+
+fsm_start:
+	switch (ap->hsm_task_state) {
+	case HSM_ST_FIRST:
+		/* Some pre-ATAPI-4 devices assert INTRQ
+		 * at this state when ready to receive CDB.
+		 */
+
+		/* check device status */
+		if (unlikely((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ)) {
+			/* Wrong status. Let EH handle this */
+			ap->hsm_task_state = HSM_ST_ERR;
+			goto fsm_start;
+		}
+
+		atapi_send_cdb(ap, qc);
+
+		break;
+
+	case HSM_ST:
+		/* complete command or read/write the data register */
+		if (qc->tf.protocol == ATA_PROT_ATAPI) {
+			/* ATAPI PIO protocol */
+			if ((status & ATA_DRQ) == 0) {
+				/* no more data to transfer */
+				ap->hsm_task_state = HSM_ST_LAST;
+				goto fsm_start;
+			}
+
+			atapi_pio_bytes(qc);
+
+			if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
+				/* bad ireason reported by device */
+				goto fsm_start;
+
+		} else {
+			/* ATA PIO protocol */
+			if (unlikely((status & ATA_DRQ) == 0)) {
+				/* handle BSY=0, DRQ=0 as error */
+				ap->hsm_task_state = HSM_ST_ERR;
+				goto fsm_start;
+			}
+
+			ata_pio_sector(qc);
+
+			if (ap->hsm_task_state == HSM_ST_LAST &&
+			    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
+				/* all data read */
+				ata_altstatus(ap);
+				status = ata_chk_status(ap);
+				goto fsm_start;
+			}
+		}
+
+		ata_altstatus(ap); /* flush */
+		break;
+
+	case HSM_ST_LAST:
+		if (unlikely(status & ATA_DRQ)) {
+			/* handle DRQ=1 as error */
+			ap->hsm_task_state = HSM_ST_ERR;
+			goto fsm_start;
+		}
+
+		/* no more data to transfer */
+		DPRINTK("ata%u: command complete, drv_stat 0x%x\n",
+			ap->id, status);
+
+		ap->hsm_task_state = HSM_ST_IDLE;
 
 		/* complete taskfile transaction */
 		ata_qc_complete(qc, status);
 		break;
 
+	case HSM_ST_ERR:
+		printk(KERN_ERR "ata%u: command error, drv_stat 0x%x host_stat 0x%x\n",
+		       ap->id, status, host_stat);
+
+		ap->hsm_task_state = HSM_ST_IDLE;
+		ata_qc_complete(qc, status | ATA_ERR);
+		break;
 	default:
 		goto idle_irq;
 	}
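ata_host_intr() now filters interrupts by host state machine state rather than by protocol: an IRQ is only acted upon if the current state can legitimately produce one, and everything else falls through to idle_irq as spurious. A condensed sketch of that gate (the bmdma status check done for HSM_ST_LAST is omitted); the helper is illustrative, not part of the tree:

static int irq_expected(const struct ata_port *ap, const struct ata_queued_cmd *qc)
{
	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* only devices that interrupt for the CDB may raise an IRQ here */
		return (qc->dev->flags & ATA_DFLAG_CDB_INTR) != 0;
	case HSM_ST:		/* data-phase interrupt */
	case HSM_ST_LAST:	/* completion interrupt (DMA also checks bmdma status) */
		return 1;
	default:
		return 0;	/* idle or polling: treat as spurious */
	}
}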
@@ -3966,11 +4253,11 @@ irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
 
 		ap = host_set->ports[i];
 		if (ap &&
-		    !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) {
+		    !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
 			struct ata_queued_cmd *qc;
 
 			qc = ata_qc_from_tag(ap, ap->active_tag);
-			if (qc && (!(qc->tf.ctl & ATA_NIEN)) &&
+			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
 			    (qc->flags & ATA_QCFLAG_ACTIVE))
 				handled |= ata_host_intr(ap, qc);
 		}
@@ -3982,75 +4269,6 @@ irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
 }
 
 /**
- *	atapi_packet_task - Write CDB bytes to hardware
- *	@_data: Port to which ATAPI device is attached.
- *
- *	When device has indicated its readiness to accept
- *	a CDB, this function is called.  Send the CDB.
- *	If DMA is to be performed, exit immediately.
- *	Otherwise, we are in polling mode, so poll
- *	status under operation succeeds or fails.
- *
- *	LOCKING:
- *	Kernel thread context (may sleep)
- */
-
-static void atapi_packet_task(void *_data)
-{
-	struct ata_port *ap = _data;
-	struct ata_queued_cmd *qc;
-	u8 status;
-
-	qc = ata_qc_from_tag(ap, ap->active_tag);
-	assert(qc != NULL);
-	assert(qc->flags & ATA_QCFLAG_ACTIVE);
-
-	/* sleep-wait for BSY to clear */
-	DPRINTK("busy wait\n");
-	if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB))
-		goto err_out;
-
-	/* make sure DRQ is set */
-	status = ata_chk_status(ap);
-	if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ)
-		goto err_out;
-
-	/* send SCSI cdb */
-	DPRINTK("send cdb\n");
-	assert(ap->cdb_len >= 12);
-
-	if (qc->tf.protocol == ATA_PROT_ATAPI_DMA ||
-	    qc->tf.protocol == ATA_PROT_ATAPI_NODATA) {
-		unsigned long flags;
-
-		/* Once we're done issuing command and kicking bmdma,
-		 * irq handler takes over.  To not lose irq, we need
-		 * to clear NOINTR flag before sending cdb, but
-		 * interrupt handler shouldn't be invoked before we're
-		 * finished.  Hence, the following locking.
-		 */
-		spin_lock_irqsave(&ap->host_set->lock, flags);
-		ap->flags &= ~ATA_FLAG_NOINTR;
-		ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1);
-		if (qc->tf.protocol == ATA_PROT_ATAPI_DMA)
-			ap->ops->bmdma_start(qc);	/* initiate bmdma */
-		spin_unlock_irqrestore(&ap->host_set->lock, flags);
-	} else {
-		ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1);
-
-		/* PIO commands are handled by polling */
-		ap->hsm_task_state = HSM_ST;
-		queue_work(ata_wq, &ap->pio_task);
-	}
-
-	return;
-
-err_out:
-	ata_poll_qc_complete(qc, ATA_ERR);
-}
-
-
-/**
  *	ata_port_start - Set port up for dma.
  *	@ap: Port to initialize
  *
@@ -4170,7 +4388,7 @@ static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
 	ap->active_tag = ATA_TAG_POISON;
 	ap->last_ctl = 0xFF;
 
-	INIT_WORK(&ap->packet_task, atapi_packet_task, ap);
+	INIT_WORK(&ap->dataout_task, ata_dataout_task, ap);
 	INIT_WORK(&ap->pio_task, ata_pio_task, ap);
 
 	for (i = 0; i < ATA_MAX_DEVICES; i++)
diff --git a/drivers/scsi/pdc_adma.c b/drivers/scsi/pdc_adma.c
index 9820f272f889..1701e318a9f0 100644
--- a/drivers/scsi/pdc_adma.c
+++ b/drivers/scsi/pdc_adma.c
@@ -481,13 +481,13 @@ static inline unsigned int adma_intr_mmio(struct ata_host_set *host_set)
 	for (port_no = 0; port_no < host_set->n_ports; ++port_no) {
 		struct ata_port *ap;
 		ap = host_set->ports[port_no];
-		if (ap && (!(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR)))) {
+		if (ap && (!(ap->flags & ATA_FLAG_PORT_DISABLED))) {
 			struct ata_queued_cmd *qc;
 			struct adma_port_priv *pp = ap->private_data;
 			if (!pp || pp->state != adma_state_mmio)
 				continue;
 			qc = ata_qc_from_tag(ap, ap->active_tag);
-			if (qc && (!(qc->tf.ctl & ATA_NIEN))) {
+			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
 
 				/* check main status, clearing INTRQ */
 				u8 status = ata_chk_status(ap);
diff --git a/drivers/scsi/sata_mv.c b/drivers/scsi/sata_mv.c
index 422e0b6f603a..c40e9843b454 100644
--- a/drivers/scsi/sata_mv.c
+++ b/drivers/scsi/sata_mv.c
@@ -87,7 +87,8 @@ enum {
 	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */
 	MV_FLAG_GLBL_SFT_RST	= (1 << 28),  /* Global Soft Reset support */
 	MV_COMMON_FLAGS		= (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
-				   ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO),
+				   ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
+				   ATA_FLAG_PIO_POLLING),
 	MV_6XXX_FLAGS		= (MV_FLAG_IRQ_COALESCE |
 				   MV_FLAG_GLBL_SFT_RST),
 
diff --git a/drivers/scsi/sata_nv.c b/drivers/scsi/sata_nv.c
index 1a56d6c79ddd..73c2c5a29049 100644
--- a/drivers/scsi/sata_nv.c
+++ b/drivers/scsi/sata_nv.c
@@ -304,11 +304,11 @@ static irqreturn_t nv_interrupt (int irq, void *dev_instance,
 
 		ap = host_set->ports[i];
 		if (ap &&
-		    !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) {
+		    !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
 			struct ata_queued_cmd *qc;
 
 			qc = ata_qc_from_tag(ap, ap->active_tag);
-			if (qc && (!(qc->tf.ctl & ATA_NIEN)))
+			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
 				handled += ata_host_intr(ap, qc);
 		}
 
diff --git a/drivers/scsi/sata_promise.c b/drivers/scsi/sata_promise.c
index eee93b0016df..9ca07becc49a 100644
--- a/drivers/scsi/sata_promise.c
+++ b/drivers/scsi/sata_promise.c
@@ -162,7 +162,8 @@ static struct ata_port_info pdc_port_info[] = {
 	{
 		.sht		= &pdc_ata_sht,
 		.host_flags	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
-				  ATA_FLAG_SRST | ATA_FLAG_MMIO,
+				  ATA_FLAG_SRST | ATA_FLAG_MMIO |
+				  ATA_FLAG_PIO_POLLING,
 		.pio_mask	= 0x1f, /* pio0-4 */
 		.mwdma_mask	= 0x07, /* mwdma0-2 */
 		.udma_mask	= 0x7f, /* udma0-6 ; FIXME */
@@ -173,7 +174,8 @@ static struct ata_port_info pdc_port_info[] = {
 	{
 		.sht		= &pdc_ata_sht,
 		.host_flags	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
-				  ATA_FLAG_SRST | ATA_FLAG_MMIO,
+				  ATA_FLAG_SRST | ATA_FLAG_MMIO |
+				  ATA_FLAG_PIO_POLLING,
 		.pio_mask	= 0x1f, /* pio0-4 */
 		.mwdma_mask	= 0x07, /* mwdma0-2 */
 		.udma_mask	= 0x7f, /* udma0-6 ; FIXME */
@@ -184,7 +186,8 @@ static struct ata_port_info pdc_port_info[] = {
 	{
 		.sht		= &pdc_ata_sht,
 		.host_flags	= ATA_FLAG_NO_LEGACY | ATA_FLAG_SRST |
-				  ATA_FLAG_MMIO | ATA_FLAG_SLAVE_POSS,
+				  ATA_FLAG_MMIO | ATA_FLAG_SLAVE_POSS |
+				  ATA_FLAG_PIO_POLLING,
 		.pio_mask	= 0x1f, /* pio0-4 */
 		.mwdma_mask	= 0x07, /* mwdma0-2 */
 		.udma_mask	= 0x7f, /* udma0-6 ; FIXME */
@@ -493,11 +496,11 @@ static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *r
 		ap = host_set->ports[i];
 		tmp = mask & (1 << (i + 1));
 		if (tmp && ap &&
-		    !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) {
+		    !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
 			struct ata_queued_cmd *qc;
 
 			qc = ata_qc_from_tag(ap, ap->active_tag);
-			if (qc && (!(qc->tf.ctl & ATA_NIEN)))
+			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
 				handled += pdc_host_intr(ap, qc);
 		}
 	}
diff --git a/drivers/scsi/sata_qstor.c b/drivers/scsi/sata_qstor.c
index 250dafa6bc36..6e0c749b6cb4 100644
--- a/drivers/scsi/sata_qstor.c
+++ b/drivers/scsi/sata_qstor.c
@@ -177,7 +177,7 @@ static struct ata_port_info qs_port_info[] = {
 		.host_flags	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
 				  ATA_FLAG_SATA_RESET |
 				  //FIXME ATA_FLAG_SRST |
-				  ATA_FLAG_MMIO,
+				  ATA_FLAG_MMIO | ATA_FLAG_PIO_POLLING,
 		.pio_mask	= 0x10, /* pio4 */
 		.udma_mask	= 0x7f, /* udma0-6 */
 		.port_ops	= &qs_ata_ops,
@@ -391,14 +391,13 @@ static inline unsigned int qs_intr_pkt(struct ata_host_set *host_set)
 			DPRINTK("SFF=%08x%08x: sCHAN=%u sHST=%d sDST=%02x\n",
 					sff1, sff0, port_no, sHST, sDST);
 			handled = 1;
-			if (ap && !(ap->flags &
-				(ATA_FLAG_PORT_DISABLED|ATA_FLAG_NOINTR))) {
+			if (ap && !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
 				struct ata_queued_cmd *qc;
 				struct qs_port_priv *pp = ap->private_data;
 				if (!pp || pp->state != qs_state_pkt)
 					continue;
 				qc = ata_qc_from_tag(ap, ap->active_tag);
-				if (qc && (!(qc->tf.ctl & ATA_NIEN))) {
+				if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
 					switch (sHST) {
 					case 0: /* sucessful CPB */
 					case 3: /* device error */
@@ -424,13 +423,13 @@ static inline unsigned int qs_intr_mmio(struct ata_host_set *host_set)
 		struct ata_port *ap;
 		ap = host_set->ports[port_no];
 		if (ap &&
-		    !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) {
+		    !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
 			struct ata_queued_cmd *qc;
 			struct qs_port_priv *pp = ap->private_data;
 			if (!pp || pp->state != qs_state_mmio)
 				continue;
 			qc = ata_qc_from_tag(ap, ap->active_tag);
-			if (qc && (!(qc->tf.ctl & ATA_NIEN))) {
+			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
 
 				/* check main status, clearing INTRQ */
 				u8 status = ata_chk_status(ap);
diff --git a/drivers/scsi/sata_sx4.c b/drivers/scsi/sata_sx4.c
index af08f4f650c1..1908c588ebb9 100644
--- a/drivers/scsi/sata_sx4.c
+++ b/drivers/scsi/sata_sx4.c
@@ -219,7 +219,8 @@ static struct ata_port_info pdc_port_info[] = {
 	{
 		.sht		= &pdc_sata_sht,
 		.host_flags	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
-				  ATA_FLAG_SRST | ATA_FLAG_MMIO,
+				  ATA_FLAG_SRST | ATA_FLAG_MMIO |
+				  ATA_FLAG_PIO_POLLING,
 		.pio_mask	= 0x1f, /* pio0-4 */
 		.mwdma_mask	= 0x07, /* mwdma0-2 */
 		.udma_mask	= 0x7f, /* udma0-6 ; FIXME */
@@ -832,11 +833,11 @@ static irqreturn_t pdc20621_interrupt (int irq, void *dev_instance, struct pt_re
 		tmp = mask & (1 << i);
 		VPRINTK("seq %u, port_no %u, ap %p, tmp %x\n", i, port_no, ap, tmp);
 		if (tmp && ap &&
-		    !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) {
+		    !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
 			struct ata_queued_cmd *qc;
 
 			qc = ata_qc_from_tag(ap, ap->active_tag);
-			if (qc && (!(qc->tf.ctl & ATA_NIEN)))
+			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
 				handled += pdc20621_host_intr(ap, qc, (i > 4),
 							      mmio_base);
 		}
diff --git a/drivers/scsi/sata_vsc.c b/drivers/scsi/sata_vsc.c
index 5af05fdf8544..028d4ab15c01 100644
--- a/drivers/scsi/sata_vsc.c
+++ b/drivers/scsi/sata_vsc.c
@@ -193,12 +193,12 @@ static irqreturn_t vsc_sata_interrupt (int irq, void *dev_instance,
 			struct ata_port *ap;
 
 			ap = host_set->ports[i];
-			if (ap && !(ap->flags &
-				    (ATA_FLAG_PORT_DISABLED|ATA_FLAG_NOINTR))) {
+			if (ap &&
+			    !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
 				struct ata_queued_cmd *qc;
 
 				qc = ata_qc_from_tag(ap, ap->active_tag);
-				if (qc && (!(qc->tf.ctl & ATA_NIEN)))
+				if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
 					handled += ata_host_intr(ap, qc);
 			}
 		}
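Every low-level driver touched above gets the same two-part conversion: the port-wide ATA_FLAG_NOINTR gate disappears, and the interrupt handler instead skips commands issued in polling mode, now tracked per taskfile via ATA_TFLAG_POLLING (the old test peeked at the nIEN bit in qc->tf.ctl). A condensed sketch of the resulting test, following libata-core's ata_interrupt(); the helper name is illustrative and assumes libata's internal declarations:

static int port_irq_is_for_qc(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;

	if (ap->flags & ATA_FLAG_PORT_DISABLED)
		return 0;

	qc = ata_qc_from_tag(ap, ap->active_tag);
	return qc && !(qc->tf.flags & ATA_TFLAG_POLLING) &&
	       (qc->flags & ATA_QCFLAG_ACTIVE);
}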