-rw-r--r--  drivers/scsi/libata-core.c   | 550
-rw-r--r--  drivers/scsi/pdc_adma.c      |   8
-rw-r--r--  drivers/scsi/sata_mv.c       |   3
-rw-r--r--  drivers/scsi/sata_nv.c       |   4
-rw-r--r--  drivers/scsi/sata_promise.c  |  13
-rw-r--r--  drivers/scsi/sata_qstor.c    |  11
-rw-r--r--  drivers/scsi/sata_sx4.c      |   7
-rw-r--r--  drivers/scsi/sata_vsc.c      |   6
-rw-r--r--  include/linux/ata.h          |  11
-rw-r--r--  include/linux/libata.h       |  29
10 files changed, 462 insertions(+), 180 deletions(-)
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index a74b4071a662..3f9b53cc5d12 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -74,6 +74,7 @@ static int ata_choose_xfer_mode(const struct ata_port *ap,
74 u8 *xfer_mode_out, 74 u8 *xfer_mode_out,
75 unsigned int *xfer_shift_out); 75 unsigned int *xfer_shift_out);
76static void __ata_qc_complete(struct ata_queued_cmd *qc); 76static void __ata_qc_complete(struct ata_queued_cmd *qc);
77static void ata_pio_error(struct ata_port *ap);
77 78
78static unsigned int ata_unique_id = 1; 79static unsigned int ata_unique_id = 1;
79static struct workqueue_struct *ata_wq; 80static struct workqueue_struct *ata_wq;
@@ -1260,6 +1261,12 @@ retry:
1260 1261
1261 } 1262 }
1262 1263
1264 if (dev->id[59] & 0x100) {
1265 dev->multi_count = dev->id[59] & 0xff;
1266 DPRINTK("ata%u: dev %u multi count %u\n",
1267 ap->id, device, dev->multi_count);
1268 }
1269
1263 ap->host->max_cmd_len = 16; 1270 ap->host->max_cmd_len = 16;
1264 } 1271 }
1265 1272
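For reference, the hunk above picks up the drive's current READ/WRITE MULTIPLE setting from IDENTIFY data word 59: bit 8 marks the low byte as valid and bits 7:0 give the number of sectors transferred per DRQ block. A stand-alone sketch of the same decode (the sample id[] contents are invented):

#include <stdio.h>
#include <stdint.h>

/* Decode the READ/WRITE MULTIPLE block size from IDENTIFY word 59.
 * Returns 0 if the drive does not report the field as valid. */
static unsigned int multi_count_from_id(const uint16_t *id)
{
	if (id[59] & 0x100)		/* bit 8: current setting is valid */
		return id[59] & 0xff;	/* bits 7:0: sectors per DRQ block */
	return 0;
}

int main(void)
{
	uint16_t id[256] = { 0 };

	id[59] = 0x0110;	/* hypothetical drive: valid, 16 sectors per block */
	printf("multi count: %u\n", multi_count_from_id(id));
	return 0;
}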
@@ -1276,6 +1283,9 @@ retry:
1276 ap->cdb_len = (unsigned int) rc; 1283 ap->cdb_len = (unsigned int) rc;
1277 ap->host->max_cmd_len = (unsigned char) ap->cdb_len; 1284 ap->host->max_cmd_len = (unsigned char) ap->cdb_len;
1278 1285
1286 if (ata_id_cdb_intr(dev->id))
1287 dev->flags |= ATA_DFLAG_CDB_INTR;
1288
1279 /* print device info to dmesg */ 1289 /* print device info to dmesg */
1280 printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n", 1290 printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n",
1281 ap->id, device, 1291 ap->id, device,
@@ -2725,7 +2735,6 @@ void ata_poll_qc_complete(struct ata_queued_cmd *qc, unsigned int err_mask)
2725 unsigned long flags; 2735 unsigned long flags;
2726 2736
2727 spin_lock_irqsave(&ap->host_set->lock, flags); 2737 spin_lock_irqsave(&ap->host_set->lock, flags);
2728 ap->flags &= ~ATA_FLAG_NOINTR;
2729 ata_irq_on(ap); 2738 ata_irq_on(ap);
2730 ata_qc_complete(qc, err_mask); 2739 ata_qc_complete(qc, err_mask);
2731 spin_unlock_irqrestore(&ap->host_set->lock, flags); 2740 spin_unlock_irqrestore(&ap->host_set->lock, flags);
@@ -2786,7 +2795,8 @@ static unsigned long ata_pio_poll(struct ata_port *ap)
2786 * None. (executing in kernel thread context) 2795 * None. (executing in kernel thread context)
2787 * 2796 *
2788 * RETURNS: 2797 * RETURNS:
2789 * Non-zero if qc completed, zero otherwise. 2798 * Zero if qc completed.
2799 * Non-zero if has next.
2790 */ 2800 */
2791 2801
2792static int ata_pio_complete (struct ata_port *ap) 2802static int ata_pio_complete (struct ata_port *ap)
@@ -2799,7 +2809,7 @@ static int ata_pio_complete (struct ata_port *ap)
2799 * we enter, BSY will be cleared in a chk-status or two. If not, 2809 * we enter, BSY will be cleared in a chk-status or two. If not,
2800 * the drive is probably seeking or something. Snooze for a couple 2810 * the drive is probably seeking or something. Snooze for a couple
2801 * msecs, then chk-status again. If still busy, fall back to 2811 * msecs, then chk-status again. If still busy, fall back to
2802 * HSM_ST_POLL state. 2812 * HSM_ST_LAST_POLL state.
2803 */ 2813 */
2804 drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10); 2814 drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10);
2805 if (drv_stat & (ATA_BUSY | ATA_DRQ)) { 2815 if (drv_stat & (ATA_BUSY | ATA_DRQ)) {
@@ -2808,14 +2818,14 @@ static int ata_pio_complete (struct ata_port *ap)
2808 if (drv_stat & (ATA_BUSY | ATA_DRQ)) { 2818 if (drv_stat & (ATA_BUSY | ATA_DRQ)) {
2809 ap->hsm_task_state = HSM_ST_LAST_POLL; 2819 ap->hsm_task_state = HSM_ST_LAST_POLL;
2810 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO; 2820 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
2811 return 0; 2821 return 1;
2812 } 2822 }
2813 } 2823 }
2814 2824
2815 drv_stat = ata_wait_idle(ap); 2825 drv_stat = ata_wait_idle(ap);
2816 if (!ata_ok(drv_stat)) { 2826 if (!ata_ok(drv_stat)) {
2817 ap->hsm_task_state = HSM_ST_ERR; 2827 ap->hsm_task_state = HSM_ST_ERR;
2818 return 0; 2828 return 1;
2819 } 2829 }
2820 2830
2821 qc = ata_qc_from_tag(ap, ap->active_tag); 2831 qc = ata_qc_from_tag(ap, ap->active_tag);
@@ -2827,7 +2837,7 @@ static int ata_pio_complete (struct ata_port *ap)
2827 2837
2828 /* another command may start at this point */ 2838 /* another command may start at this point */
2829 2839
2830 return 1; 2840 return 0;
2831} 2841}
2832 2842
2833 2843
@@ -2988,7 +2998,23 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
2988 page = nth_page(page, (offset >> PAGE_SHIFT)); 2998 page = nth_page(page, (offset >> PAGE_SHIFT));
2989 offset %= PAGE_SIZE; 2999 offset %= PAGE_SIZE;
2990 3000
2991 buf = kmap(page) + offset; 3001 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3002
3003 if (PageHighMem(page)) {
3004 unsigned long flags;
3005
3006 local_irq_save(flags);
3007 buf = kmap_atomic(page, KM_IRQ0);
3008
3009 /* do the actual data transfer */
3010 ata_data_xfer(ap, buf + offset, ATA_SECT_SIZE, do_write);
3011
3012 kunmap_atomic(buf, KM_IRQ0);
3013 local_irq_restore(flags);
3014 } else {
3015 buf = page_address(page);
3016 ata_data_xfer(ap, buf + offset, ATA_SECT_SIZE, do_write);
3017 }
2992 3018
2993 qc->cursect++; 3019 qc->cursect++;
2994 qc->cursg_ofs++; 3020 qc->cursg_ofs++;
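The kmap()/kunmap() pair is dropped above because this path can now run from the interrupt handler, where sleeping is not allowed: highmem pages get a short-lived kmap_atomic() mapping under local_irq_save(), and lowmem pages are addressed directly through page_address(). Schematically, with the kernel helpers replaced by stand-ins so the sketch compiles as plain C (none of these names are the libata API):

#include <stddef.h>
#include <stdint.h>

#define SECT_SIZE 512

struct fake_page { void *direct_map; int highmem; };	/* stand-in for struct page */

static void pio_xfer(uint8_t *buf, size_t len, int write) { (void)buf; (void)len; (void)write; }
static void *map_atomic(struct fake_page *p) { return p->direct_map; }	/* kmap_atomic() stand-in */
static void unmap_atomic(void *addr) { (void)addr; }			/* kunmap_atomic() stand-in */

/* Transfer one sector; safe with interrupts disabled because the highmem
 * branch uses an atomic (non-sleeping) mapping. */
static void transfer_sector(struct fake_page *page, size_t offset, int do_write)
{
	uint8_t *buf;

	if (page->highmem) {
		buf = map_atomic(page);		/* short-lived, per-CPU mapping */
		pio_xfer(buf + offset, SECT_SIZE, do_write);
		unmap_atomic(buf);
	} else {
		buf = page->direct_map;		/* lowmem: permanently mapped */
		pio_xfer(buf + offset, SECT_SIZE, do_write);
	}
}

int main(void)
{
	static uint8_t backing[4096];
	struct fake_page page = { .direct_map = backing, .highmem = 0 };

	transfer_sector(&page, 0, 1);
	return 0;
}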
@@ -2997,14 +3023,151 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
2997 qc->cursg++; 3023 qc->cursg++;
2998 qc->cursg_ofs = 0; 3024 qc->cursg_ofs = 0;
2999 } 3025 }
3026}
3000 3027
3001 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read"); 3028/**
3029 * ata_pio_sectors - Transfer one or many 512-byte sectors.
3030 * @qc: Command on going
3031 *
3032 * Transfer one or many ATA_SECT_SIZE of data from/to the
3033 * ATA device for the DRQ request.
3034 *
3035 * LOCKING:
3036 * Inherited from caller.
3037 */
3038
3039static void ata_pio_sectors(struct ata_queued_cmd *qc)
3040{
3041 if (is_multi_taskfile(&qc->tf)) {
3042 /* READ/WRITE MULTIPLE */
3043 unsigned int nsect;
3044
3045 assert(qc->dev->multi_count);
3046
3047 nsect = min(qc->nsect - qc->cursect, qc->dev->multi_count);
3048 while (nsect--)
3049 ata_pio_sector(qc);
3050 } else
3051 ata_pio_sector(qc);
3052}
3002 3053
3003 /* do the actual data transfer */ 3054/**
3004 do_write = (qc->tf.flags & ATA_TFLAG_WRITE); 3055 * atapi_send_cdb - Write CDB bytes to hardware
3005 ata_data_xfer(ap, buf, ATA_SECT_SIZE, do_write); 3056 * @ap: Port to which ATAPI device is attached.
3057 * @qc: Taskfile currently active
3058 *
3059 * When device has indicated its readiness to accept
3060 * a CDB, this function is called. Send the CDB.
3061 *
3062 * LOCKING:
3063 * caller.
3064 */
3006 3065
3007 kunmap(page); 3066static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
3067{
3068 /* send SCSI cdb */
3069 DPRINTK("send cdb\n");
3070 assert(ap->cdb_len >= 12);
3071
3072 ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1);
3073 ata_altstatus(ap); /* flush */
3074
3075 switch (qc->tf.protocol) {
3076 case ATA_PROT_ATAPI:
3077 ap->hsm_task_state = HSM_ST;
3078 break;
3079 case ATA_PROT_ATAPI_NODATA:
3080 ap->hsm_task_state = HSM_ST_LAST;
3081 break;
3082 case ATA_PROT_ATAPI_DMA:
3083 ap->hsm_task_state = HSM_ST_LAST;
3084 /* initiate bmdma */
3085 ap->ops->bmdma_start(qc);
3086 break;
3087 }
3088}
3089
3090/**
3091 * ata_pio_first_block - Write first data block to hardware
3092 * @ap: Port to which ATA/ATAPI device is attached.
3093 *
3094 * When device has indicated its readiness to accept
3095 * the data, this function sends out the CDB or
3096 * the first data block by PIO.
3097 * After this,
3098 * - If polling, ata_pio_task() handles the rest.
3099 * - Otherwise, interrupt handler takes over.
3100 *
3101 * LOCKING:
3102 * Kernel thread context (may sleep)
3103 *
3104 * RETURNS:
3105 * Zero if irq handler takes over
3106 * Non-zero if has next (polling).
3107 */
3108
3109static int ata_pio_first_block(struct ata_port *ap)
3110{
3111 struct ata_queued_cmd *qc;
3112 u8 status;
3113 unsigned long flags;
3114 int has_next;
3115
3116 qc = ata_qc_from_tag(ap, ap->active_tag);
3117 assert(qc != NULL);
3118 assert(qc->flags & ATA_QCFLAG_ACTIVE);
3119
3120 /* if polling, we will stay in the work queue after sending the data.
3121 * otherwise, interrupt handler takes over after sending the data.
3122 */
3123 has_next = (qc->tf.flags & ATA_TFLAG_POLLING);
3124
3125 /* sleep-wait for BSY to clear */
3126 DPRINTK("busy wait\n");
3127 if (ata_busy_sleep(ap, ATA_TMOUT_DATAOUT_QUICK, ATA_TMOUT_DATAOUT)) {
3128 ap->hsm_task_state = HSM_ST_TMOUT;
3129 goto err_out;
3130 }
3131
3132 /* make sure DRQ is set */
3133 status = ata_chk_status(ap);
3134 if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ) {
3135 /* device status error */
3136 ap->hsm_task_state = HSM_ST_ERR;
3137 goto err_out;
3138 }
3139
3140 /* Send the CDB (atapi) or the first data block (ata pio out).
3141 * During the state transition, interrupt handler shouldn't
3142 * be invoked before the data transfer is complete and
3143 * hsm_task_state is changed. Hence, the following locking.
3144 */
3145 spin_lock_irqsave(&ap->host_set->lock, flags);
3146
3147 if (qc->tf.protocol == ATA_PROT_PIO) {
3148 /* PIO data out protocol.
3149 * send first data block.
3150 */
3151
3152 /* ata_pio_sectors() might change the state to HSM_ST_LAST.
3153 * so, the state is changed here before ata_pio_sectors().
3154 */
3155 ap->hsm_task_state = HSM_ST;
3156 ata_pio_sectors(qc);
3157 ata_altstatus(ap); /* flush */
3158 } else
3159 /* send CDB */
3160 atapi_send_cdb(ap, qc);
3161
3162 spin_unlock_irqrestore(&ap->host_set->lock, flags);
3163
3164 /* if polling, ata_pio_task() handles the rest.
3165 * otherwise, interrupt handler takes over from here.
3166 */
3167 return has_next;
3168
3169err_out:
3170 return 1; /* has next */
3008} 3171}
3009 3172
3010/** 3173/**
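The new ata_pio_sectors() above turns one DRQ event into either a single sector or a block of up to dev->multi_count sectors, clamped to what remains of the command. The chunking arithmetic on its own, as a compilable sketch (the sector counts are made up):

#include <stdio.h>

/* How many sectors to move on this DRQ event: the READ/WRITE MULTIPLE
 * block size, clamped to the sectors still outstanding. */
static unsigned int drq_block_sectors(unsigned int nsect, unsigned int cursect,
				      unsigned int multi_count)
{
	unsigned int remaining = nsect - cursect;

	return remaining < multi_count ? remaining : multi_count;
}

int main(void)
{
	unsigned int nsect = 37, cursect = 0, multi = 16;

	while (cursect < nsect) {
		unsigned int n = drq_block_sectors(nsect, cursect, multi);

		printf("DRQ block: %u sectors (done %u/%u)\n", n, cursect, nsect);
		cursect += n;	/* one ata_pio_sector() call per sector */
	}
	return 0;
}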
@@ -3070,7 +3233,23 @@ next_sg:
3070 /* don't cross page boundaries */ 3233 /* don't cross page boundaries */
3071 count = min(count, (unsigned int)PAGE_SIZE - offset); 3234 count = min(count, (unsigned int)PAGE_SIZE - offset);
3072 3235
3073 buf = kmap(page) + offset; 3236 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3237
3238 if (PageHighMem(page)) {
3239 unsigned long flags;
3240
3241 local_irq_save(flags);
3242 buf = kmap_atomic(page, KM_IRQ0);
3243
3244 /* do the actual data transfer */
3245 ata_data_xfer(ap, buf + offset, count, do_write);
3246
3247 kunmap_atomic(buf, KM_IRQ0);
3248 local_irq_restore(flags);
3249 } else {
3250 buf = page_address(page);
3251 ata_data_xfer(ap, buf + offset, count, do_write);
3252 }
3074 3253
3075 bytes -= count; 3254 bytes -= count;
3076 qc->curbytes += count; 3255 qc->curbytes += count;
@@ -3081,13 +3260,6 @@ next_sg:
3081 qc->cursg_ofs = 0; 3260 qc->cursg_ofs = 0;
3082 } 3261 }
3083 3262
3084 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3085
3086 /* do the actual data transfer */
3087 ata_data_xfer(ap, buf, count, do_write);
3088
3089 kunmap(page);
3090
3091 if (bytes) 3263 if (bytes)
3092 goto next_sg; 3264 goto next_sg;
3093} 3265}
@@ -3124,6 +3296,8 @@ static void atapi_pio_bytes(struct ata_queued_cmd *qc)
3124 if (do_write != i_write) 3296 if (do_write != i_write)
3125 goto err_out; 3297 goto err_out;
3126 3298
3299 VPRINTK("ata%u: xfering %d bytes\n", ap->id, bytes);
3300
3127 __atapi_pio_bytes(qc, bytes); 3301 __atapi_pio_bytes(qc, bytes);
3128 3302
3129 return; 3303 return;
@@ -3184,8 +3358,10 @@ static void ata_pio_block(struct ata_port *ap)
3184 return; 3358 return;
3185 } 3359 }
3186 3360
3187 ata_pio_sector(qc); 3361 ata_pio_sectors(qc);
3188 } 3362 }
3363
3364 ata_altstatus(ap); /* flush */
3189} 3365}
3190 3366
3191static void ata_pio_error(struct ata_port *ap) 3367static void ata_pio_error(struct ata_port *ap)
@@ -3206,22 +3382,23 @@ static void ata_pio_task(void *_data)
3206{ 3382{
3207 struct ata_port *ap = _data; 3383 struct ata_port *ap = _data;
3208 unsigned long timeout; 3384 unsigned long timeout;
3209 int qc_completed; 3385 int has_next;
3210 3386
3211fsm_start: 3387fsm_start:
3212 timeout = 0; 3388 timeout = 0;
3213 qc_completed = 0; 3389 has_next = 1;
3214 3390
3215 switch (ap->hsm_task_state) { 3391 switch (ap->hsm_task_state) {
3216 case HSM_ST_IDLE: 3392 case HSM_ST_FIRST:
3217 return; 3393 has_next = ata_pio_first_block(ap);
3394 break;
3218 3395
3219 case HSM_ST: 3396 case HSM_ST:
3220 ata_pio_block(ap); 3397 ata_pio_block(ap);
3221 break; 3398 break;
3222 3399
3223 case HSM_ST_LAST: 3400 case HSM_ST_LAST:
3224 qc_completed = ata_pio_complete(ap); 3401 has_next = ata_pio_complete(ap);
3225 break; 3402 break;
3226 3403
3227 case HSM_ST_POLL: 3404 case HSM_ST_POLL:
@@ -3233,11 +3410,15 @@ fsm_start:
3233 case HSM_ST_ERR: 3410 case HSM_ST_ERR:
3234 ata_pio_error(ap); 3411 ata_pio_error(ap);
3235 return; 3412 return;
3413
3414 default:
3415 BUG();
3416 return;
3236 } 3417 }
3237 3418
3238 if (timeout) 3419 if (timeout)
3239 queue_delayed_work(ata_wq, &ap->pio_task, timeout); 3420 queue_delayed_work(ata_wq, &ap->pio_task, timeout);
3240 else if (!qc_completed) 3421 else if (has_next)
3241 goto fsm_start; 3422 goto fsm_start;
3242} 3423}
3243 3424
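ata_pio_task() is now driven by a has_next flag rather than qc_completed: each state handler says whether the poller should immediately run another step, re-arm with a delay, or stop and leave the rest to the interrupt or error handler. A miniature model of that loop (states and handlers are simplified stand-ins, not the driver's):

#include <stdio.h>

enum hsm_state { ST_FIRST, ST, ST_LAST, ST_IDLE };

/* Each step returns non-zero when the poller should keep going. */
static int step(enum hsm_state *state)
{
	switch (*state) {
	case ST_FIRST:			/* send CDB / first data block */
		*state = ST;
		return 1;		/* polling: keep running */
	case ST:			/* move one DRQ block */
		*state = ST_LAST;
		return 1;
	case ST_LAST:			/* final status read, qc completed */
		*state = ST_IDLE;
		return 0;		/* nothing further to do */
	default:
		return 0;
	}
}

int main(void)
{
	enum hsm_state state = ST_FIRST;

	while (step(&state))	/* the "goto fsm_start" loop, in miniature */
		printf("state now %d\n", state);
	return 0;
}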
@@ -3321,6 +3502,8 @@ static void ata_qc_timeout(struct ata_queued_cmd *qc)
3321 printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x host_stat 0x%x\n", 3502 printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x host_stat 0x%x\n",
3322 ap->id, qc->tf.command, drv_stat, host_stat); 3503 ap->id, qc->tf.command, drv_stat, host_stat);
3323 3504
3505 ap->hsm_task_state = HSM_ST_IDLE;
3506
3324 /* complete taskfile transaction */ 3507 /* complete taskfile transaction */
3325 ata_qc_complete(qc, ac_err_mask(drv_stat)); 3508 ata_qc_complete(qc, ac_err_mask(drv_stat));
3326 break; 3509 break;
@@ -3606,43 +3789,103 @@ int ata_qc_issue_prot(struct ata_queued_cmd *qc)
3606{ 3789{
3607 struct ata_port *ap = qc->ap; 3790 struct ata_port *ap = qc->ap;
3608 3791
3792 /* Use polling pio if the LLD doesn't handle
3793 * interrupt driven pio and atapi CDB interrupt.
3794 */
3795 if (ap->flags & ATA_FLAG_PIO_POLLING) {
3796 switch (qc->tf.protocol) {
3797 case ATA_PROT_PIO:
3798 case ATA_PROT_ATAPI:
3799 case ATA_PROT_ATAPI_NODATA:
3800 qc->tf.flags |= ATA_TFLAG_POLLING;
3801 break;
3802 case ATA_PROT_ATAPI_DMA:
3803 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
3804 BUG();
3805 break;
3806 default:
3807 break;
3808 }
3809 }
3810
3811 /* select the device */
3609 ata_dev_select(ap, qc->dev->devno, 1, 0); 3812 ata_dev_select(ap, qc->dev->devno, 1, 0);
3610 3813
3814 /* start the command */
3611 switch (qc->tf.protocol) { 3815 switch (qc->tf.protocol) {
3612 case ATA_PROT_NODATA: 3816 case ATA_PROT_NODATA:
3817 if (qc->tf.flags & ATA_TFLAG_POLLING)
3818 ata_qc_set_polling(qc);
3819
3613 ata_tf_to_host(ap, &qc->tf); 3820 ata_tf_to_host(ap, &qc->tf);
3821 ap->hsm_task_state = HSM_ST_LAST;
3822
3823 if (qc->tf.flags & ATA_TFLAG_POLLING)
3824 queue_work(ata_wq, &ap->pio_task);
3825
3614 break; 3826 break;
3615 3827
3616 case ATA_PROT_DMA: 3828 case ATA_PROT_DMA:
3829 assert(!(qc->tf.flags & ATA_TFLAG_POLLING));
3830
3617 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */ 3831 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
3618 ap->ops->bmdma_setup(qc); /* set up bmdma */ 3832 ap->ops->bmdma_setup(qc); /* set up bmdma */
3619 ap->ops->bmdma_start(qc); /* initiate bmdma */ 3833 ap->ops->bmdma_start(qc); /* initiate bmdma */
3834 ap->hsm_task_state = HSM_ST_LAST;
3620 break; 3835 break;
3621 3836
3622 case ATA_PROT_PIO: /* load tf registers, initiate polling pio */ 3837 case ATA_PROT_PIO:
3623 ata_qc_set_polling(qc); 3838 if (qc->tf.flags & ATA_TFLAG_POLLING)
3624 ata_tf_to_host(ap, &qc->tf); 3839 ata_qc_set_polling(qc);
3625 ap->hsm_task_state = HSM_ST;
3626 queue_work(ata_wq, &ap->pio_task);
3627 break;
3628 3840
3629 case ATA_PROT_ATAPI:
3630 ata_qc_set_polling(qc);
3631 ata_tf_to_host(ap, &qc->tf); 3841 ata_tf_to_host(ap, &qc->tf);
3632 queue_work(ata_wq, &ap->packet_task); 3842
3843 if (qc->tf.flags & ATA_TFLAG_WRITE) {
3844 /* PIO data out protocol */
3845 ap->hsm_task_state = HSM_ST_FIRST;
3846 queue_work(ata_wq, &ap->pio_task);
3847
3848 /* always send first data block using
3849 * the ata_pio_task() codepath.
3850 */
3851 } else {
3852 /* PIO data in protocol */
3853 ap->hsm_task_state = HSM_ST;
3854
3855 if (qc->tf.flags & ATA_TFLAG_POLLING)
3856 queue_work(ata_wq, &ap->pio_task);
3857
3858 /* if polling, ata_pio_task() handles the rest.
3859 * otherwise, interrupt handler takes over from here.
3860 */
3861 }
3862
3633 break; 3863 break;
3634 3864
3865 case ATA_PROT_ATAPI:
3635 case ATA_PROT_ATAPI_NODATA: 3866 case ATA_PROT_ATAPI_NODATA:
3636 ap->flags |= ATA_FLAG_NOINTR; 3867 if (qc->tf.flags & ATA_TFLAG_POLLING)
3868 ata_qc_set_polling(qc);
3869
3637 ata_tf_to_host(ap, &qc->tf); 3870 ata_tf_to_host(ap, &qc->tf);
3638 queue_work(ata_wq, &ap->packet_task); 3871 ap->hsm_task_state = HSM_ST_FIRST;
3872
3873 /* send cdb by polling if no cdb interrupt */
3874 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
3875 (qc->tf.flags & ATA_TFLAG_POLLING))
3876 queue_work(ata_wq, &ap->pio_task);
3639 break; 3877 break;
3640 3878
3641 case ATA_PROT_ATAPI_DMA: 3879 case ATA_PROT_ATAPI_DMA:
3642 ap->flags |= ATA_FLAG_NOINTR; 3880 assert(!(qc->tf.flags & ATA_TFLAG_POLLING));
3881
3643 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */ 3882 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
3644 ap->ops->bmdma_setup(qc); /* set up bmdma */ 3883 ap->ops->bmdma_setup(qc); /* set up bmdma */
3645 queue_work(ata_wq, &ap->packet_task); 3884 ap->hsm_task_state = HSM_ST_FIRST;
3885
3886 /* send cdb by polling if no cdb interrupt */
3887 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
3888 queue_work(ata_wq, &ap->pio_task);
3646 break; 3889 break;
3647 3890
3648 default: 3891 default:
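The reworked ata_qc_issue_prot() above makes one decision up front: on a controller flagged ATA_FLAG_PIO_POLLING the taskfile is marked ATA_TFLAG_POLLING and the state machine is driven from the work queue; otherwise the interrupt handler advances it. A condensed model of that decision (abbreviated names, not the libata types):

#include <stdio.h>

enum proto { PROT_NODATA, PROT_DMA, PROT_PIO, PROT_ATAPI, PROT_ATAPI_NODATA, PROT_ATAPI_DMA };

/* Returns 1 when the command must be run by polling from the work queue,
 * 0 when the interrupt handler drives the state machine. */
static int use_polling(enum proto p, int lld_pio_polling)
{
	if (!lld_pio_polling)
		return 0;		/* controller delivers PIO/CDB interrupts */

	switch (p) {
	case PROT_PIO:
	case PROT_ATAPI:
	case PROT_ATAPI_NODATA:
		return 1;		/* run the whole command from the work queue */
	default:
		return 0;		/* DMA protocols stay interrupt driven */
	}
}

int main(void)
{
	printf("PIO on a polling-only controller: %s\n",
	       use_polling(PROT_PIO, 1) ? "polled" : "irq driven");
	printf("DMA on the same controller:       %s\n",
	       use_polling(PROT_DMA, 1) ? "polled" : "irq driven");
	return 0;
}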
@@ -3903,47 +4146,142 @@ void ata_bmdma_stop(struct ata_queued_cmd *qc)
3903inline unsigned int ata_host_intr (struct ata_port *ap, 4146inline unsigned int ata_host_intr (struct ata_port *ap,
3904 struct ata_queued_cmd *qc) 4147 struct ata_queued_cmd *qc)
3905{ 4148{
3906 u8 status, host_stat; 4149 u8 status, host_stat = 0;
3907
3908 switch (qc->tf.protocol) {
3909 4150
3910 case ATA_PROT_DMA: 4151 VPRINTK("ata%u: protocol %d task_state %d\n",
3911 case ATA_PROT_ATAPI_DMA: 4152 ap->id, qc->tf.protocol, ap->hsm_task_state);
3912 case ATA_PROT_ATAPI:
3913 /* check status of DMA engine */
3914 host_stat = ap->ops->bmdma_status(ap);
3915 VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
3916 4153
3917 /* if it's not our irq... */ 4154 /* Check whether we are expecting interrupt in this state */
3918 if (!(host_stat & ATA_DMA_INTR)) 4155 switch (ap->hsm_task_state) {
4156 case HSM_ST_FIRST:
4157 /* Check the ATA_DFLAG_CDB_INTR flag is enough here.
4158 * The flag was turned on only for atapi devices.
4159 * No need to check is_atapi_taskfile(&qc->tf) again.
4160 */
4161 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
3919 goto idle_irq; 4162 goto idle_irq;
4163 break;
4164 case HSM_ST_LAST:
4165 if (qc->tf.protocol == ATA_PROT_DMA ||
4166 qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
4167 /* check status of DMA engine */
4168 host_stat = ap->ops->bmdma_status(ap);
4169 VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
4170
4171 /* if it's not our irq... */
4172 if (!(host_stat & ATA_DMA_INTR))
4173 goto idle_irq;
4174
4175 /* before we do anything else, clear DMA-Start bit */
4176 ap->ops->bmdma_stop(qc);
4177 }
4178 break;
4179 case HSM_ST:
4180 break;
4181 default:
4182 goto idle_irq;
4183 }
3920 4184
3921 /* before we do anything else, clear DMA-Start bit */ 4185 /* check altstatus */
3922 ap->ops->bmdma_stop(qc); 4186 status = ata_altstatus(ap);
4187 if (status & ATA_BUSY)
4188 goto idle_irq;
3923 4189
3924 /* fall through */ 4190 /* check main status, clearing INTRQ */
4191 status = ata_chk_status(ap);
4192 if (unlikely(status & ATA_BUSY))
4193 goto idle_irq;
3925 4194
3926 case ATA_PROT_ATAPI_NODATA: 4195 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
3927 case ATA_PROT_NODATA: 4196 ap->id, qc->tf.protocol, ap->hsm_task_state, status);
3928 /* check altstatus */
3929 status = ata_altstatus(ap);
3930 if (status & ATA_BUSY)
3931 goto idle_irq;
3932 4197
3933 /* check main status, clearing INTRQ */ 4198 /* ack bmdma irq events */
3934 status = ata_chk_status(ap); 4199 ap->ops->irq_clear(ap);
3935 if (unlikely(status & ATA_BUSY))
3936 goto idle_irq;
3937 DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
3938 ap->id, qc->tf.protocol, status);
3939 4200
3940 /* ack bmdma irq events */ 4201 /* check error */
3941 ap->ops->irq_clear(ap); 4202 if (unlikely((status & ATA_ERR) || (host_stat & ATA_DMA_ERR)))
4203 ap->hsm_task_state = HSM_ST_ERR;
4204
4205fsm_start:
4206 switch (ap->hsm_task_state) {
4207 case HSM_ST_FIRST:
4208 /* Some pre-ATAPI-4 devices assert INTRQ
4209 * at this state when ready to receive CDB.
4210 */
4211
4212 /* check device status */
4213 if (unlikely((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ)) {
4214 /* Wrong status. Let EH handle this */
4215 ap->hsm_task_state = HSM_ST_ERR;
4216 goto fsm_start;
4217 }
4218
4219 atapi_send_cdb(ap, qc);
4220
4221 break;
4222
4223 case HSM_ST:
4224 /* complete command or read/write the data register */
4225 if (qc->tf.protocol == ATA_PROT_ATAPI) {
4226 /* ATAPI PIO protocol */
4227 if ((status & ATA_DRQ) == 0) {
4228 /* no more data to transfer */
4229 ap->hsm_task_state = HSM_ST_LAST;
4230 goto fsm_start;
4231 }
4232
4233 atapi_pio_bytes(qc);
4234
4235 if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
4236 /* bad ireason reported by device */
4237 goto fsm_start;
4238
4239 } else {
4240 /* ATA PIO protocol */
4241 if (unlikely((status & ATA_DRQ) == 0)) {
4242 /* handle BSY=0, DRQ=0 as error */
4243 ap->hsm_task_state = HSM_ST_ERR;
4244 goto fsm_start;
4245 }
4246
4247 ata_pio_sectors(qc);
4248
4249 if (ap->hsm_task_state == HSM_ST_LAST &&
4250 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
4251 /* all data read */
4252 ata_altstatus(ap);
4253 status = ata_chk_status(ap);
4254 goto fsm_start;
4255 }
4256 }
4257
4258 ata_altstatus(ap); /* flush */
4259 break;
4260
4261 case HSM_ST_LAST:
4262 if (unlikely(status & ATA_DRQ)) {
4263 /* handle DRQ=1 as error */
4264 ap->hsm_task_state = HSM_ST_ERR;
4265 goto fsm_start;
4266 }
4267
4268 /* no more data to transfer */
4269 DPRINTK("ata%u: command complete, drv_stat 0x%x\n",
4270 ap->id, status);
4271
4272 ap->hsm_task_state = HSM_ST_IDLE;
3942 4273
3943 /* complete taskfile transaction */ 4274 /* complete taskfile transaction */
3944 ata_qc_complete(qc, ac_err_mask(status)); 4275 ata_qc_complete(qc, ac_err_mask(status));
3945 break; 4276 break;
3946 4277
4278 case HSM_ST_ERR:
4279 printk(KERN_ERR "ata%u: command error, drv_stat 0x%x host_stat 0x%x\n",
4280 ap->id, status, host_stat);
4281
4282 ap->hsm_task_state = HSM_ST_IDLE;
4283 ata_qc_complete(qc, status | ATA_ERR);
4284 break;
3947 default: 4285 default:
3948 goto idle_irq; 4286 goto idle_irq;
3949 } 4287 }
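ata_host_intr() now filters interrupts against the expected HSM state before touching the status register: an IRQ in HSM_ST_FIRST only makes sense for ATAPI devices that interrupt for the CDB, HSM_ST_LAST additionally requires (and acknowledges) a BMDMA completion for DMA protocols, and anything else is treated as a spurious interrupt. A stripped-down sketch of that gate (plain C stand-ins, not the driver structures):

#include <stdio.h>

enum hsm_state { ST_IDLE, ST_FIRST, ST, ST_LAST };

/* Decide whether an interrupt is expected in the current state.
 * Returns 1 to handle it, 0 to treat it as "not ours" (idle_irq). */
static int irq_expected(enum hsm_state state, int dev_cdb_intr,
			int protocol_is_dma, int bmdma_intr)
{
	switch (state) {
	case ST_FIRST:
		return dev_cdb_intr;		/* only ATAPI CDB-interrupt devices */
	case ST_LAST:
		if (protocol_is_dma)
			return bmdma_intr;	/* must be our DMA completion */
		return 1;			/* non-DMA: command completion IRQ */
	case ST:
		return 1;			/* mid-transfer DRQ interrupt */
	default:
		return 0;			/* idle or error: treat as spurious */
	}
}

int main(void)
{
	printf("IRQ while idle handled? %d\n", irq_expected(ST_IDLE, 0, 0, 0));
	printf("IRQ awaiting CDB, device interrupts for CDB? %d\n",
	       irq_expected(ST_FIRST, 1, 0, 0));
	return 0;
}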
@@ -3994,11 +4332,11 @@ irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
3994 4332
3995 ap = host_set->ports[i]; 4333 ap = host_set->ports[i];
3996 if (ap && 4334 if (ap &&
3997 !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) { 4335 !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
3998 struct ata_queued_cmd *qc; 4336 struct ata_queued_cmd *qc;
3999 4337
4000 qc = ata_qc_from_tag(ap, ap->active_tag); 4338 qc = ata_qc_from_tag(ap, ap->active_tag);
4001 if (qc && (!(qc->tf.ctl & ATA_NIEN)) && 4339 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
4002 (qc->flags & ATA_QCFLAG_ACTIVE)) 4340 (qc->flags & ATA_QCFLAG_ACTIVE))
4003 handled |= ata_host_intr(ap, qc); 4341 handled |= ata_host_intr(ap, qc);
4004 } 4342 }
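With ATA_FLAG_NOINTR gone, the shared interrupt handler decides per command: an active qc that is not marked ATA_TFLAG_POLLING belongs to the interrupt path. The same predicate as a tiny compilable sketch (the struct and flag names are illustrative):

#include <stdio.h>

#define QCFLAG_ACTIVE	(1 << 1)
#define TFLAG_POLLING	(1 << 5)

struct cmd { unsigned int qc_flags; unsigned int tf_flags; };

/* Should the interrupt handler process this queued command? */
static int irq_owns_cmd(const struct cmd *qc)
{
	return qc && (qc->qc_flags & QCFLAG_ACTIVE) &&
	       !(qc->tf_flags & TFLAG_POLLING);
}

int main(void)
{
	struct cmd polled = { QCFLAG_ACTIVE, TFLAG_POLLING };
	struct cmd irq    = { QCFLAG_ACTIVE, 0 };

	printf("polled cmd -> %d, irq cmd -> %d\n",
	       irq_owns_cmd(&polled), irq_owns_cmd(&irq));
	return 0;
}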
@@ -4010,77 +4348,6 @@ irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
4010} 4348}
4011 4349
4012/** 4350/**
4013 * atapi_packet_task - Write CDB bytes to hardware
4014 * @_data: Port to which ATAPI device is attached.
4015 *
4016 * When device has indicated its readiness to accept
4017 * a CDB, this function is called. Send the CDB.
4018 * If DMA is to be performed, exit immediately.
4019 * Otherwise, we are in polling mode, so poll
4020 * status under operation succeeds or fails.
4021 *
4022 * LOCKING:
4023 * Kernel thread context (may sleep)
4024 */
4025
4026static void atapi_packet_task(void *_data)
4027{
4028 struct ata_port *ap = _data;
4029 struct ata_queued_cmd *qc;
4030 u8 status;
4031
4032 qc = ata_qc_from_tag(ap, ap->active_tag);
4033 assert(qc != NULL);
4034 assert(qc->flags & ATA_QCFLAG_ACTIVE);
4035
4036 /* sleep-wait for BSY to clear */
4037 DPRINTK("busy wait\n");
4038 if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB))
4039 goto err_out_status;
4040
4041 /* make sure DRQ is set */
4042 status = ata_chk_status(ap);
4043 if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ)
4044 goto err_out;
4045
4046 /* send SCSI cdb */
4047 DPRINTK("send cdb\n");
4048 assert(ap->cdb_len >= 12);
4049
4050 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA ||
4051 qc->tf.protocol == ATA_PROT_ATAPI_NODATA) {
4052 unsigned long flags;
4053
4054 /* Once we're done issuing command and kicking bmdma,
4055 * irq handler takes over. To not lose irq, we need
4056 * to clear NOINTR flag before sending cdb, but
4057 * interrupt handler shouldn't be invoked before we're
4058 * finished. Hence, the following locking.
4059 */
4060 spin_lock_irqsave(&ap->host_set->lock, flags);
4061 ap->flags &= ~ATA_FLAG_NOINTR;
4062 ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1);
4063 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA)
4064 ap->ops->bmdma_start(qc); /* initiate bmdma */
4065 spin_unlock_irqrestore(&ap->host_set->lock, flags);
4066 } else {
4067 ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1);
4068
4069 /* PIO commands are handled by polling */
4070 ap->hsm_task_state = HSM_ST;
4071 queue_work(ata_wq, &ap->pio_task);
4072 }
4073
4074 return;
4075
4076err_out_status:
4077 status = ata_chk_status(ap);
4078err_out:
4079 ata_poll_qc_complete(qc, __ac_err_mask(status));
4080}
4081
4082
4083/**
4084 * ata_port_start - Set port up for dma. 4351 * ata_port_start - Set port up for dma.
4085 * @ap: Port to initialize 4352 * @ap: Port to initialize
4086 * 4353 *
@@ -4206,7 +4473,6 @@ static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
4206 ap->active_tag = ATA_TAG_POISON; 4473 ap->active_tag = ATA_TAG_POISON;
4207 ap->last_ctl = 0xFF; 4474 ap->last_ctl = 0xFF;
4208 4475
4209 INIT_WORK(&ap->packet_task, atapi_packet_task, ap);
4210 INIT_WORK(&ap->pio_task, ata_pio_task, ap); 4476 INIT_WORK(&ap->pio_task, ata_pio_task, ap);
4211 4477
4212 for (i = 0; i < ATA_MAX_DEVICES; i++) 4478 for (i = 0; i < ATA_MAX_DEVICES; i++)
diff --git a/drivers/scsi/pdc_adma.c b/drivers/scsi/pdc_adma.c
index 78b4ff117af6..6b1c901c5e4b 100644
--- a/drivers/scsi/pdc_adma.c
+++ b/drivers/scsi/pdc_adma.c
@@ -457,13 +457,13 @@ static inline unsigned int adma_intr_pkt(struct ata_host_set *host_set)
457 continue; 457 continue;
458 handled = 1; 458 handled = 1;
459 adma_enter_reg_mode(ap); 459 adma_enter_reg_mode(ap);
460 if (ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR)) 460 if (ap->flags & ATA_FLAG_PORT_DISABLED)
461 continue; 461 continue;
462 pp = ap->private_data; 462 pp = ap->private_data;
463 if (!pp || pp->state != adma_state_pkt) 463 if (!pp || pp->state != adma_state_pkt)
464 continue; 464 continue;
465 qc = ata_qc_from_tag(ap, ap->active_tag); 465 qc = ata_qc_from_tag(ap, ap->active_tag);
466 if (qc && (!(qc->tf.ctl & ATA_NIEN))) { 466 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
467 unsigned int err_mask = 0; 467 unsigned int err_mask = 0;
468 468
469 if ((status & (aPERR | aPSD | aUIRQ))) 469 if ((status & (aPERR | aPSD | aUIRQ)))
@@ -484,13 +484,13 @@ static inline unsigned int adma_intr_mmio(struct ata_host_set *host_set)
484 for (port_no = 0; port_no < host_set->n_ports; ++port_no) { 484 for (port_no = 0; port_no < host_set->n_ports; ++port_no) {
485 struct ata_port *ap; 485 struct ata_port *ap;
486 ap = host_set->ports[port_no]; 486 ap = host_set->ports[port_no];
487 if (ap && (!(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR)))) { 487 if (ap && (!(ap->flags & ATA_FLAG_PORT_DISABLED))) {
488 struct ata_queued_cmd *qc; 488 struct ata_queued_cmd *qc;
489 struct adma_port_priv *pp = ap->private_data; 489 struct adma_port_priv *pp = ap->private_data;
490 if (!pp || pp->state != adma_state_mmio) 490 if (!pp || pp->state != adma_state_mmio)
491 continue; 491 continue;
492 qc = ata_qc_from_tag(ap, ap->active_tag); 492 qc = ata_qc_from_tag(ap, ap->active_tag);
493 if (qc && (!(qc->tf.ctl & ATA_NIEN))) { 493 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
494 494
495 /* check main status, clearing INTRQ */ 495 /* check main status, clearing INTRQ */
496 u8 status = ata_check_status(ap); 496 u8 status = ata_check_status(ap);
diff --git a/drivers/scsi/sata_mv.c b/drivers/scsi/sata_mv.c
index 93d55233af7b..f8976e3f6ada 100644
--- a/drivers/scsi/sata_mv.c
+++ b/drivers/scsi/sata_mv.c
@@ -88,7 +88,8 @@ enum {
88 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */ 88 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
89 MV_FLAG_GLBL_SFT_RST = (1 << 28), /* Global Soft Reset support */ 89 MV_FLAG_GLBL_SFT_RST = (1 << 28), /* Global Soft Reset support */
90 MV_COMMON_FLAGS = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 90 MV_COMMON_FLAGS = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
91 ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO), 91 ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
92 ATA_FLAG_PIO_POLLING),
92 MV_6XXX_FLAGS = (MV_FLAG_IRQ_COALESCE | 93 MV_6XXX_FLAGS = (MV_FLAG_IRQ_COALESCE |
93 MV_FLAG_GLBL_SFT_RST), 94 MV_FLAG_GLBL_SFT_RST),
94 95
diff --git a/drivers/scsi/sata_nv.c b/drivers/scsi/sata_nv.c
index 37a4fae95ed4..5b7f7808add9 100644
--- a/drivers/scsi/sata_nv.c
+++ b/drivers/scsi/sata_nv.c
@@ -304,11 +304,11 @@ static irqreturn_t nv_interrupt (int irq, void *dev_instance,
304 304
305 ap = host_set->ports[i]; 305 ap = host_set->ports[i];
306 if (ap && 306 if (ap &&
307 !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) { 307 !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
308 struct ata_queued_cmd *qc; 308 struct ata_queued_cmd *qc;
309 309
310 qc = ata_qc_from_tag(ap, ap->active_tag); 310 qc = ata_qc_from_tag(ap, ap->active_tag);
311 if (qc && (!(qc->tf.ctl & ATA_NIEN))) 311 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
312 handled += ata_host_intr(ap, qc); 312 handled += ata_host_intr(ap, qc);
313 } 313 }
314 314
diff --git a/drivers/scsi/sata_promise.c b/drivers/scsi/sata_promise.c
index 9edc9d91efc3..ac5b9cbebdd8 100644
--- a/drivers/scsi/sata_promise.c
+++ b/drivers/scsi/sata_promise.c
@@ -163,7 +163,8 @@ static struct ata_port_info pdc_port_info[] = {
163 { 163 {
164 .sht = &pdc_ata_sht, 164 .sht = &pdc_ata_sht,
165 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 165 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
166 ATA_FLAG_SRST | ATA_FLAG_MMIO, 166 ATA_FLAG_SRST | ATA_FLAG_MMIO |
167 ATA_FLAG_PIO_POLLING,
167 .pio_mask = 0x1f, /* pio0-4 */ 168 .pio_mask = 0x1f, /* pio0-4 */
168 .mwdma_mask = 0x07, /* mwdma0-2 */ 169 .mwdma_mask = 0x07, /* mwdma0-2 */
169 .udma_mask = 0x7f, /* udma0-6 ; FIXME */ 170 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
@@ -174,7 +175,8 @@ static struct ata_port_info pdc_port_info[] = {
174 { 175 {
175 .sht = &pdc_ata_sht, 176 .sht = &pdc_ata_sht,
176 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 177 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
177 ATA_FLAG_SRST | ATA_FLAG_MMIO, 178 ATA_FLAG_SRST | ATA_FLAG_MMIO |
179 ATA_FLAG_PIO_POLLING,
178 .pio_mask = 0x1f, /* pio0-4 */ 180 .pio_mask = 0x1f, /* pio0-4 */
179 .mwdma_mask = 0x07, /* mwdma0-2 */ 181 .mwdma_mask = 0x07, /* mwdma0-2 */
180 .udma_mask = 0x7f, /* udma0-6 ; FIXME */ 182 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
@@ -185,7 +187,8 @@ static struct ata_port_info pdc_port_info[] = {
185 { 187 {
186 .sht = &pdc_ata_sht, 188 .sht = &pdc_ata_sht,
187 .host_flags = ATA_FLAG_NO_LEGACY | ATA_FLAG_SRST | 189 .host_flags = ATA_FLAG_NO_LEGACY | ATA_FLAG_SRST |
188 ATA_FLAG_MMIO | ATA_FLAG_SLAVE_POSS, 190 ATA_FLAG_MMIO | ATA_FLAG_SLAVE_POSS |
191 ATA_FLAG_PIO_POLLING,
189 .pio_mask = 0x1f, /* pio0-4 */ 192 .pio_mask = 0x1f, /* pio0-4 */
190 .mwdma_mask = 0x07, /* mwdma0-2 */ 193 .mwdma_mask = 0x07, /* mwdma0-2 */
191 .udma_mask = 0x7f, /* udma0-6 ; FIXME */ 194 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
@@ -496,11 +499,11 @@ static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *r
496 ap = host_set->ports[i]; 499 ap = host_set->ports[i];
497 tmp = mask & (1 << (i + 1)); 500 tmp = mask & (1 << (i + 1));
498 if (tmp && ap && 501 if (tmp && ap &&
499 !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) { 502 !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
500 struct ata_queued_cmd *qc; 503 struct ata_queued_cmd *qc;
501 504
502 qc = ata_qc_from_tag(ap, ap->active_tag); 505 qc = ata_qc_from_tag(ap, ap->active_tag);
503 if (qc && (!(qc->tf.ctl & ATA_NIEN))) 506 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
504 handled += pdc_host_intr(ap, qc); 507 handled += pdc_host_intr(ap, qc);
505 } 508 }
506 } 509 }
diff --git a/drivers/scsi/sata_qstor.c b/drivers/scsi/sata_qstor.c
index d274ab235781..0e3468a8b73d 100644
--- a/drivers/scsi/sata_qstor.c
+++ b/drivers/scsi/sata_qstor.c
@@ -177,7 +177,7 @@ static struct ata_port_info qs_port_info[] = {
177 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 177 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
178 ATA_FLAG_SATA_RESET | 178 ATA_FLAG_SATA_RESET |
179 //FIXME ATA_FLAG_SRST | 179 //FIXME ATA_FLAG_SRST |
180 ATA_FLAG_MMIO, 180 ATA_FLAG_MMIO | ATA_FLAG_PIO_POLLING,
181 .pio_mask = 0x10, /* pio4 */ 181 .pio_mask = 0x10, /* pio4 */
182 .udma_mask = 0x7f, /* udma0-6 */ 182 .udma_mask = 0x7f, /* udma0-6 */
183 .port_ops = &qs_ata_ops, 183 .port_ops = &qs_ata_ops,
@@ -393,14 +393,13 @@ static inline unsigned int qs_intr_pkt(struct ata_host_set *host_set)
393 DPRINTK("SFF=%08x%08x: sCHAN=%u sHST=%d sDST=%02x\n", 393 DPRINTK("SFF=%08x%08x: sCHAN=%u sHST=%d sDST=%02x\n",
394 sff1, sff0, port_no, sHST, sDST); 394 sff1, sff0, port_no, sHST, sDST);
395 handled = 1; 395 handled = 1;
396 if (ap && !(ap->flags & 396 if (ap && !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
397 (ATA_FLAG_PORT_DISABLED|ATA_FLAG_NOINTR))) {
398 struct ata_queued_cmd *qc; 397 struct ata_queued_cmd *qc;
399 struct qs_port_priv *pp = ap->private_data; 398 struct qs_port_priv *pp = ap->private_data;
400 if (!pp || pp->state != qs_state_pkt) 399 if (!pp || pp->state != qs_state_pkt)
401 continue; 400 continue;
402 qc = ata_qc_from_tag(ap, ap->active_tag); 401 qc = ata_qc_from_tag(ap, ap->active_tag);
403 if (qc && (!(qc->tf.ctl & ATA_NIEN))) { 402 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
404 switch (sHST) { 403 switch (sHST) {
405 case 0: /* successful CPB */ 404 case 0: /* successful CPB */
406 case 3: /* device error */ 405 case 3: /* device error */
@@ -427,13 +426,13 @@ static inline unsigned int qs_intr_mmio(struct ata_host_set *host_set)
427 struct ata_port *ap; 426 struct ata_port *ap;
428 ap = host_set->ports[port_no]; 427 ap = host_set->ports[port_no];
429 if (ap && 428 if (ap &&
430 !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) { 429 !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
431 struct ata_queued_cmd *qc; 430 struct ata_queued_cmd *qc;
432 struct qs_port_priv *pp = ap->private_data; 431 struct qs_port_priv *pp = ap->private_data;
433 if (!pp || pp->state != qs_state_mmio) 432 if (!pp || pp->state != qs_state_mmio)
434 continue; 433 continue;
435 qc = ata_qc_from_tag(ap, ap->active_tag); 434 qc = ata_qc_from_tag(ap, ap->active_tag);
436 if (qc && (!(qc->tf.ctl & ATA_NIEN))) { 435 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
437 436
438 /* check main status, clearing INTRQ */ 437 /* check main status, clearing INTRQ */
439 u8 status = ata_check_status(ap); 438 u8 status = ata_check_status(ap);
diff --git a/drivers/scsi/sata_sx4.c b/drivers/scsi/sata_sx4.c
index d5a38784352b..c42b2d3eeb6c 100644
--- a/drivers/scsi/sata_sx4.c
+++ b/drivers/scsi/sata_sx4.c
@@ -220,7 +220,8 @@ static struct ata_port_info pdc_port_info[] = {
220 { 220 {
221 .sht = &pdc_sata_sht, 221 .sht = &pdc_sata_sht,
222 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 222 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
223 ATA_FLAG_SRST | ATA_FLAG_MMIO, 223 ATA_FLAG_SRST | ATA_FLAG_MMIO |
224 ATA_FLAG_PIO_POLLING,
224 .pio_mask = 0x1f, /* pio0-4 */ 225 .pio_mask = 0x1f, /* pio0-4 */
225 .mwdma_mask = 0x07, /* mwdma0-2 */ 226 .mwdma_mask = 0x07, /* mwdma0-2 */
226 .udma_mask = 0x7f, /* udma0-6 ; FIXME */ 227 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
@@ -832,11 +833,11 @@ static irqreturn_t pdc20621_interrupt (int irq, void *dev_instance, struct pt_re
832 tmp = mask & (1 << i); 833 tmp = mask & (1 << i);
833 VPRINTK("seq %u, port_no %u, ap %p, tmp %x\n", i, port_no, ap, tmp); 834 VPRINTK("seq %u, port_no %u, ap %p, tmp %x\n", i, port_no, ap, tmp);
834 if (tmp && ap && 835 if (tmp && ap &&
835 !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) { 836 !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
836 struct ata_queued_cmd *qc; 837 struct ata_queued_cmd *qc;
837 838
838 qc = ata_qc_from_tag(ap, ap->active_tag); 839 qc = ata_qc_from_tag(ap, ap->active_tag);
839 if (qc && (!(qc->tf.ctl & ATA_NIEN))) 840 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
840 handled += pdc20621_host_intr(ap, qc, (i > 4), 841 handled += pdc20621_host_intr(ap, qc, (i > 4),
841 mmio_base); 842 mmio_base);
842 } 843 }
diff --git a/drivers/scsi/sata_vsc.c b/drivers/scsi/sata_vsc.c
index ce8a2fd7da84..e819b2b4f298 100644
--- a/drivers/scsi/sata_vsc.c
+++ b/drivers/scsi/sata_vsc.c
@@ -201,12 +201,12 @@ static irqreturn_t vsc_sata_interrupt (int irq, void *dev_instance,
201 struct ata_port *ap; 201 struct ata_port *ap;
202 202
203 ap = host_set->ports[i]; 203 ap = host_set->ports[i];
204 if (ap && !(ap->flags & 204 if (ap &&
205 (ATA_FLAG_PORT_DISABLED|ATA_FLAG_NOINTR))) { 205 !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
206 struct ata_queued_cmd *qc; 206 struct ata_queued_cmd *qc;
207 207
208 qc = ata_qc_from_tag(ap, ap->active_tag); 208 qc = ata_qc_from_tag(ap, ap->active_tag);
209 if (qc && (!(qc->tf.ctl & ATA_NIEN))) 209 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
210 handled += ata_host_intr(ap, qc); 210 handled += ata_host_intr(ap, qc);
211 } 211 }
212 } 212 }
diff --git a/include/linux/ata.h b/include/linux/ata.h
index d2873b732bb1..f512104a1a3f 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -192,6 +192,7 @@ enum {
192 ATA_TFLAG_DEVICE = (1 << 2), /* enable r/w to device reg */ 192 ATA_TFLAG_DEVICE = (1 << 2), /* enable r/w to device reg */
193 ATA_TFLAG_WRITE = (1 << 3), /* data dir: host->dev==1 (write) */ 193 ATA_TFLAG_WRITE = (1 << 3), /* data dir: host->dev==1 (write) */
194 ATA_TFLAG_LBA = (1 << 4), /* enable LBA */ 194 ATA_TFLAG_LBA = (1 << 4), /* enable LBA */
195 ATA_TFLAG_POLLING = (1 << 5), /* set nIEN to 1 and use polling */
195}; 196};
196 197
197enum ata_tf_protocols { 198enum ata_tf_protocols {
@@ -261,6 +262,8 @@ struct ata_taskfile {
261 ((u64) (id)[(n) + 1] << 16) | \ 262 ((u64) (id)[(n) + 1] << 16) | \
262 ((u64) (id)[(n) + 0]) ) 263 ((u64) (id)[(n) + 0]) )
263 264
265#define ata_id_cdb_intr(id) (((id)[0] & 0x60) == 0x20)
266
264static inline int ata_id_current_chs_valid(const u16 *id) 267static inline int ata_id_current_chs_valid(const u16 *id)
265{ 268{
266 /* For ATA-1 devices, if the INITIALIZE DEVICE PARAMETERS command 269 /* For ATA-1 devices, if the INITIALIZE DEVICE PARAMETERS command
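The new ata_id_cdb_intr() macro tests bits 6:5 of IDENTIFY PACKET DEVICE word 0; the value 01b means the device asserts INTRQ when it is ready to accept the CDB instead of just raising DRQ. The same bit test as a stand-alone sketch (the sample identify word is invented):

#include <stdio.h>
#include <stdint.h>

/* Word 0, bits 6:5 of IDENTIFY PACKET DEVICE data:
 *   00b - device sets DRQ within 3 ms of receiving PACKET
 *   01b - device asserts INTRQ when ready to accept the CDB
 *   10b - device sets DRQ within 50 us of receiving PACKET
 */
static int id_cdb_intr(const uint16_t *id)
{
	return (id[0] & 0x60) == 0x20;
}

int main(void)
{
	uint16_t id[256] = { 0 };

	id[0] = 0x85a0;		/* hypothetical ATAPI device, bits 6:5 = 01b */
	printf("CDB interrupt: %s\n", id_cdb_intr(id) ? "yes" : "no");
	return 0;
}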
@@ -290,6 +293,14 @@ static inline int is_atapi_taskfile(const struct ata_taskfile *tf)
290 (tf->protocol == ATA_PROT_ATAPI_DMA); 293 (tf->protocol == ATA_PROT_ATAPI_DMA);
291} 294}
292 295
296static inline int is_multi_taskfile(struct ata_taskfile *tf)
297{
298 return (tf->command == ATA_CMD_READ_MULTI) ||
299 (tf->command == ATA_CMD_WRITE_MULTI) ||
300 (tf->command == ATA_CMD_READ_MULTI_EXT) ||
301 (tf->command == ATA_CMD_WRITE_MULTI_EXT);
302}
303
293static inline int ata_ok(u8 status) 304static inline int ata_ok(u8 status)
294{ 305{
295 return ((status & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | ATA_ERR)) 306 return ((status & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | ATA_ERR))
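is_multi_taskfile() is what lets ata_pio_sectors() choose between a single-sector DRQ block and a multi_count-sized one. The same opcode test as a compilable sketch, with the MULTIPLE-family command codes written out (values per the ATA command set):

#include <stdio.h>
#include <stdint.h>

/* ATA command opcodes for the MULTIPLE family. */
enum {
	CMD_READ_MULTI      = 0xC4,
	CMD_WRITE_MULTI     = 0xC5,
	CMD_READ_MULTI_EXT  = 0x29,
	CMD_WRITE_MULTI_EXT = 0x39,
};

static int is_multi_command(uint8_t command)
{
	return command == CMD_READ_MULTI ||
	       command == CMD_WRITE_MULTI ||
	       command == CMD_READ_MULTI_EXT ||
	       command == CMD_WRITE_MULTI_EXT;
}

int main(void)
{
	printf("0xC4 multi? %d\n", is_multi_command(0xC4));	/* 1 */
	printf("0x20 multi? %d\n", is_multi_command(0x20));	/* 0: READ SECTOR(S) */
	return 0;
}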
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 6f0752219f64..70ae140dbf23 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -98,6 +98,7 @@ enum {
98 ATA_DFLAG_PIO = (1 << 1), /* device currently in PIO mode */ 98 ATA_DFLAG_PIO = (1 << 1), /* device currently in PIO mode */
99 ATA_DFLAG_LOCK_SECTORS = (1 << 2), /* don't adjust max_sectors */ 99 ATA_DFLAG_LOCK_SECTORS = (1 << 2), /* don't adjust max_sectors */
100 ATA_DFLAG_LBA = (1 << 3), /* device supports LBA */ 100 ATA_DFLAG_LBA = (1 << 3), /* device supports LBA */
101 ATA_DFLAG_CDB_INTR = (1 << 4), /* device asserts INTRQ when ready for CDB */
101 102
102 ATA_DEV_UNKNOWN = 0, /* unknown device */ 103 ATA_DEV_UNKNOWN = 0, /* unknown device */
103 ATA_DEV_ATA = 1, /* ATA device */ 104 ATA_DEV_ATA = 1, /* ATA device */
@@ -116,8 +117,8 @@ enum {
116 ATA_FLAG_MMIO = (1 << 6), /* use MMIO, not PIO */ 117 ATA_FLAG_MMIO = (1 << 6), /* use MMIO, not PIO */
117 ATA_FLAG_SATA_RESET = (1 << 7), /* use COMRESET */ 118 ATA_FLAG_SATA_RESET = (1 << 7), /* use COMRESET */
118 ATA_FLAG_PIO_DMA = (1 << 8), /* PIO cmds via DMA */ 119 ATA_FLAG_PIO_DMA = (1 << 8), /* PIO cmds via DMA */
119 ATA_FLAG_NOINTR = (1 << 9), /* FIXME: Remove this once 120 ATA_FLAG_PIO_POLLING = (1 << 9), /* use polling PIO if LLD
120 * proper HSM is in place. */ 121 * doesn't handle PIO interrupts */
121 122
122 ATA_QCFLAG_ACTIVE = (1 << 1), /* cmd not yet ack'd to scsi lyer */ 123 ATA_QCFLAG_ACTIVE = (1 << 1), /* cmd not yet ack'd to scsi lyer */
123 ATA_QCFLAG_SG = (1 << 3), /* have s/g table? */ 124 ATA_QCFLAG_SG = (1 << 3), /* have s/g table? */
@@ -129,8 +130,8 @@ enum {
129 ATA_TMOUT_PIO = 30 * HZ, 130 ATA_TMOUT_PIO = 30 * HZ,
130 ATA_TMOUT_BOOT = 30 * HZ, /* hueristic */ 131 ATA_TMOUT_BOOT = 30 * HZ, /* hueristic */
131 ATA_TMOUT_BOOT_QUICK = 7 * HZ, /* hueristic */ 132 ATA_TMOUT_BOOT_QUICK = 7 * HZ, /* hueristic */
132 ATA_TMOUT_CDB = 30 * HZ, 133 ATA_TMOUT_DATAOUT = 30 * HZ,
133 ATA_TMOUT_CDB_QUICK = 5 * HZ, 134 ATA_TMOUT_DATAOUT_QUICK = 5 * HZ,
134 135
135 /* ATA bus states */ 136 /* ATA bus states */
136 BUS_UNKNOWN = 0, 137 BUS_UNKNOWN = 0,
@@ -166,14 +167,16 @@ enum {
166}; 167};
167 168
168enum hsm_task_states { 169enum hsm_task_states {
169 HSM_ST_UNKNOWN, 170 HSM_ST_UNKNOWN, /* state unknown */
170 HSM_ST_IDLE, 171 HSM_ST_IDLE, /* no command on going */
171 HSM_ST_POLL, 172 HSM_ST_POLL, /* same as HSM_ST, waits longer */
172 HSM_ST_TMOUT, 173 HSM_ST_TMOUT, /* timeout */
173 HSM_ST, 174 HSM_ST, /* (waiting the device to) transfer data */
174 HSM_ST_LAST, 175 HSM_ST_LAST, /* (waiting the device to) complete command */
175 HSM_ST_LAST_POLL, 176 HSM_ST_LAST_POLL, /* same as HSM_ST_LAST, waits longer */
176 HSM_ST_ERR, 177 HSM_ST_ERR, /* error */
178 HSM_ST_FIRST, /* (waiting the device to)
179 write CDB or first data block */
177}; 180};
178 181
179enum ata_completion_errors { 182enum ata_completion_errors {
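Read as a state machine, the usual progression of a non-DMA command is HSM_ST_FIRST (CDB or first data block) -> HSM_ST (one DRQ block at a time) -> HSM_ST_LAST (final status) -> HSM_ST_IDLE, with the *_POLL states as slower detours and TMOUT/ERR as exits to error handling. A compact sketch of those good-path transitions (a reading aid only, not kernel code):

#include <stdio.h>

enum hsm { H_IDLE, H_FIRST, H_ST, H_LAST, H_POLL, H_LAST_POLL, H_TMOUT, H_ERR, H_MAX };

static const char *name[H_MAX] = {
	[H_IDLE] = "HSM_ST_IDLE",   [H_FIRST] = "HSM_ST_FIRST",
	[H_ST] = "HSM_ST",          [H_LAST] = "HSM_ST_LAST",
	[H_POLL] = "HSM_ST_POLL",   [H_LAST_POLL] = "HSM_ST_LAST_POLL",
	[H_TMOUT] = "HSM_ST_TMOUT", [H_ERR] = "HSM_ST_ERR",
};

/* Typical "good path" successor for each state. */
static const enum hsm next_ok[H_MAX] = {
	[H_FIRST] = H_ST,	/* CDB / first block sent, data phase follows */
	[H_ST] = H_LAST,	/* last DRQ block moved, wait for final status */
	[H_LAST] = H_IDLE,	/* command completed */
	[H_POLL] = H_ST,	/* device finally ready, back to data phase */
	[H_LAST_POLL] = H_LAST,	/* device finally idle, read final status */
};

int main(void)
{
	enum hsm s = H_FIRST;

	while (s != H_IDLE) {
		printf("%s -> %s\n", name[s], name[next_ok[s]]);
		s = next_ok[s];
	}
	return 0;
}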
@@ -343,8 +346,6 @@ struct ata_port {
343 struct ata_host_stats stats; 346 struct ata_host_stats stats;
344 struct ata_host_set *host_set; 347 struct ata_host_set *host_set;
345 348
346 struct work_struct packet_task;
347
348 struct work_struct pio_task; 349 struct work_struct pio_task;
349 unsigned int hsm_task_state; 350 unsigned int hsm_task_state;
350 unsigned long pio_task_timeout; 351 unsigned long pio_task_timeout;