-rw-r--r--  drivers/scsi/libata-core.c   550
-rw-r--r--  drivers/scsi/pdc_adma.c        8
-rw-r--r--  drivers/scsi/sata_mv.c         3
-rw-r--r--  drivers/scsi/sata_nv.c         4
-rw-r--r--  drivers/scsi/sata_promise.c   13
-rw-r--r--  drivers/scsi/sata_qstor.c     11
-rw-r--r--  drivers/scsi/sata_sx4.c        7
-rw-r--r--  drivers/scsi/sata_vsc.c        6
-rw-r--r--  include/linux/ata.h           11
-rw-r--r--  include/linux/libata.h        29
10 files changed, 462 insertions, 180 deletions
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index 665ae79e1fd6..aae3a331d753 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -74,6 +74,7 @@ static int ata_choose_xfer_mode(const struct ata_port *ap,
                                 u8 *xfer_mode_out,
                                 unsigned int *xfer_shift_out);
 static void __ata_qc_complete(struct ata_queued_cmd *qc);
+static void ata_pio_error(struct ata_port *ap);
 
 static unsigned int ata_unique_id = 1;
 static struct workqueue_struct *ata_wq;
@@ -1283,6 +1284,12 @@ retry:
 
         }
 
+        if (dev->id[59] & 0x100) {
+            dev->multi_count = dev->id[59] & 0xff;
+            DPRINTK("ata%u: dev %u multi count %u\n",
+                    ap->id, device, dev->multi_count);
+        }
+
         ap->host->max_cmd_len = 16;
     }
 
@@ -1299,6 +1306,9 @@ retry:
         ap->cdb_len = (unsigned int) rc;
         ap->host->max_cmd_len = (unsigned char) ap->cdb_len;
 
+        if (ata_id_cdb_intr(dev->id))
+            dev->flags |= ATA_DFLAG_CDB_INTR;
+
         /* print device info to dmesg */
         printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n",
                ap->id, device,
@@ -2771,7 +2781,6 @@ void ata_poll_qc_complete(struct ata_queued_cmd *qc, unsigned int err_mask)
     unsigned long flags;
 
     spin_lock_irqsave(&ap->host_set->lock, flags);
-    ap->flags &= ~ATA_FLAG_NOINTR;
     ata_irq_on(ap);
     ata_qc_complete(qc, err_mask);
     spin_unlock_irqrestore(&ap->host_set->lock, flags);
@@ -2832,7 +2841,8 @@ static unsigned long ata_pio_poll(struct ata_port *ap)
  * None. (executing in kernel thread context)
  *
  * RETURNS:
- * Non-zero if qc completed, zero otherwise.
+ * Zero if qc completed.
+ * Non-zero if has next.
  */
 
 static int ata_pio_complete (struct ata_port *ap)
@@ -2845,7 +2855,7 @@ static int ata_pio_complete (struct ata_port *ap)
      * we enter, BSY will be cleared in a chk-status or two. If not,
      * the drive is probably seeking or something. Snooze for a couple
      * msecs, then chk-status again. If still busy, fall back to
-     * HSM_ST_POLL state.
+     * HSM_ST_LAST_POLL state.
      */
     drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10);
     if (drv_stat & (ATA_BUSY | ATA_DRQ)) {
@@ -2854,14 +2864,14 @@ static int ata_pio_complete (struct ata_port *ap)
         if (drv_stat & (ATA_BUSY | ATA_DRQ)) {
             ap->hsm_task_state = HSM_ST_LAST_POLL;
             ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
-            return 0;
+            return 1;
         }
     }
 
     drv_stat = ata_wait_idle(ap);
     if (!ata_ok(drv_stat)) {
         ap->hsm_task_state = HSM_ST_ERR;
-        return 0;
+        return 1;
     }
 
     qc = ata_qc_from_tag(ap, ap->active_tag);
@@ -2873,7 +2883,7 @@ static int ata_pio_complete (struct ata_port *ap)
 
     /* another command may start at this point */
 
-    return 1;
+    return 0;
 }
 
 
@@ -3034,7 +3044,23 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
     page = nth_page(page, (offset >> PAGE_SHIFT));
     offset %= PAGE_SIZE;
 
-    buf = kmap(page) + offset;
+    DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
+
+    if (PageHighMem(page)) {
+        unsigned long flags;
+
+        local_irq_save(flags);
+        buf = kmap_atomic(page, KM_IRQ0);
+
+        /* do the actual data transfer */
+        ata_data_xfer(ap, buf + offset, ATA_SECT_SIZE, do_write);
+
+        kunmap_atomic(buf, KM_IRQ0);
+        local_irq_restore(flags);
+    } else {
+        buf = page_address(page);
+        ata_data_xfer(ap, buf + offset, ATA_SECT_SIZE, do_write);
+    }
 
     qc->cursect++;
     qc->cursg_ofs++;
@@ -3043,14 +3069,151 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
         qc->cursg++;
         qc->cursg_ofs = 0;
     }
+}
 
-    DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
+/**
+ * ata_pio_sectors - Transfer one or many 512-byte sectors.
+ * @qc: Command on going
+ *
+ * Transfer one or many ATA_SECT_SIZE of data from/to the
+ * ATA device for the DRQ request.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+
+static void ata_pio_sectors(struct ata_queued_cmd *qc)
+{
+    if (is_multi_taskfile(&qc->tf)) {
+        /* READ/WRITE MULTIPLE */
+        unsigned int nsect;
+
+        assert(qc->dev->multi_count);
+
+        nsect = min(qc->nsect - qc->cursect, qc->dev->multi_count);
+        while (nsect--)
+            ata_pio_sector(qc);
+    } else
+        ata_pio_sector(qc);
+}
 
-    /* do the actual data transfer */
-    do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
-    ata_data_xfer(ap, buf, ATA_SECT_SIZE, do_write);
+/**
+ * atapi_send_cdb - Write CDB bytes to hardware
+ * @ap: Port to which ATAPI device is attached.
+ * @qc: Taskfile currently active
+ *
+ * When device has indicated its readiness to accept
+ * a CDB, this function is called.  Send the CDB.
+ *
+ * LOCKING:
+ * caller.
+ */
 
-    kunmap(page);
+static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
+{
+    /* send SCSI cdb */
+    DPRINTK("send cdb\n");
+    assert(ap->cdb_len >= 12);
+
+    ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1);
+    ata_altstatus(ap); /* flush */
+
+    switch (qc->tf.protocol) {
+    case ATA_PROT_ATAPI:
+        ap->hsm_task_state = HSM_ST;
+        break;
+    case ATA_PROT_ATAPI_NODATA:
+        ap->hsm_task_state = HSM_ST_LAST;
+        break;
+    case ATA_PROT_ATAPI_DMA:
+        ap->hsm_task_state = HSM_ST_LAST;
+        /* initiate bmdma */
+        ap->ops->bmdma_start(qc);
+        break;
+    }
+}
+
+/**
+ * ata_pio_first_block - Write first data block to hardware
+ * @ap: Port to which ATA/ATAPI device is attached.
+ *
+ * When device has indicated its readiness to accept
+ * the data, this function sends out the CDB or
+ * the first data block by PIO.
+ * After this,
+ *   - If polling, ata_pio_task() handles the rest.
+ *   - Otherwise, interrupt handler takes over.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep)
+ *
+ * RETURNS:
+ * Zero if irq handler takes over
+ * Non-zero if has next (polling).
+ */
+
+static int ata_pio_first_block(struct ata_port *ap)
+{
+    struct ata_queued_cmd *qc;
+    u8 status;
+    unsigned long flags;
+    int has_next;
+
+    qc = ata_qc_from_tag(ap, ap->active_tag);
+    assert(qc != NULL);
+    assert(qc->flags & ATA_QCFLAG_ACTIVE);
+
+    /* if polling, we will stay in the work queue after sending the data.
+     * otherwise, interrupt handler takes over after sending the data.
+     */
+    has_next = (qc->tf.flags & ATA_TFLAG_POLLING);
+
+    /* sleep-wait for BSY to clear */
+    DPRINTK("busy wait\n");
+    if (ata_busy_sleep(ap, ATA_TMOUT_DATAOUT_QUICK, ATA_TMOUT_DATAOUT)) {
+        ap->hsm_task_state = HSM_ST_TMOUT;
+        goto err_out;
+    }
+
+    /* make sure DRQ is set */
+    status = ata_chk_status(ap);
+    if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ) {
+        /* device status error */
+        ap->hsm_task_state = HSM_ST_ERR;
+        goto err_out;
+    }
+
+    /* Send the CDB (atapi) or the first data block (ata pio out).
+     * During the state transition, interrupt handler shouldn't
+     * be invoked before the data transfer is complete and
+     * hsm_task_state is changed. Hence, the following locking.
+     */
+    spin_lock_irqsave(&ap->host_set->lock, flags);
+
+    if (qc->tf.protocol == ATA_PROT_PIO) {
+        /* PIO data out protocol.
+         * send first data block.
+         */
+
+        /* ata_pio_sectors() might change the state to HSM_ST_LAST.
+         * so, the state is changed here before ata_pio_sectors().
+         */
+        ap->hsm_task_state = HSM_ST;
+        ata_pio_sectors(qc);
+        ata_altstatus(ap); /* flush */
+    } else
+        /* send CDB */
+        atapi_send_cdb(ap, qc);
+
+    spin_unlock_irqrestore(&ap->host_set->lock, flags);
+
+    /* if polling, ata_pio_task() handles the rest.
+     * otherwise, interrupt handler takes over from here.
+     */
+    return has_next;
+
+err_out:
+    return 1; /* has next */
 }
 
 /**
@@ -3116,7 +3279,23 @@ next_sg:
     /* don't cross page boundaries */
     count = min(count, (unsigned int)PAGE_SIZE - offset);
 
-    buf = kmap(page) + offset;
+    DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
+
+    if (PageHighMem(page)) {
+        unsigned long flags;
+
+        local_irq_save(flags);
+        buf = kmap_atomic(page, KM_IRQ0);
+
+        /* do the actual data transfer */
+        ata_data_xfer(ap, buf + offset, count, do_write);
+
+        kunmap_atomic(buf, KM_IRQ0);
+        local_irq_restore(flags);
+    } else {
+        buf = page_address(page);
+        ata_data_xfer(ap, buf + offset, count, do_write);
+    }
 
     bytes -= count;
     qc->curbytes += count;
@@ -3127,13 +3306,6 @@ next_sg:
         qc->cursg_ofs = 0;
     }
 
-    DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
-
-    /* do the actual data transfer */
-    ata_data_xfer(ap, buf, count, do_write);
-
-    kunmap(page);
-
     if (bytes)
         goto next_sg;
 }
@@ -3170,6 +3342,8 @@ static void atapi_pio_bytes(struct ata_queued_cmd *qc)
     if (do_write != i_write)
         goto err_out;
 
+    VPRINTK("ata%u: xfering %d bytes\n", ap->id, bytes);
+
     __atapi_pio_bytes(qc, bytes);
 
     return;
@@ -3230,8 +3404,10 @@ static void ata_pio_block(struct ata_port *ap)
             return;
         }
 
-        ata_pio_sector(qc);
+        ata_pio_sectors(qc);
     }
+
+    ata_altstatus(ap); /* flush */
 }
 
 static void ata_pio_error(struct ata_port *ap)
@@ -3252,22 +3428,23 @@ static void ata_pio_task(void *_data)
 {
     struct ata_port *ap = _data;
     unsigned long timeout;
-    int qc_completed;
+    int has_next;
 
 fsm_start:
     timeout = 0;
-    qc_completed = 0;
+    has_next = 1;
 
     switch (ap->hsm_task_state) {
-    case HSM_ST_IDLE:
-        return;
+    case HSM_ST_FIRST:
+        has_next = ata_pio_first_block(ap);
+        break;
 
     case HSM_ST:
         ata_pio_block(ap);
         break;
 
     case HSM_ST_LAST:
-        qc_completed = ata_pio_complete(ap);
+        has_next = ata_pio_complete(ap);
         break;
 
     case HSM_ST_POLL:
@@ -3279,11 +3456,15 @@ fsm_start:
     case HSM_ST_ERR:
         ata_pio_error(ap);
         return;
+
+    default:
+        BUG();
+        return;
     }
 
     if (timeout)
         queue_delayed_work(ata_wq, &ap->pio_task, timeout);
-    else if (!qc_completed)
+    else if (has_next)
         goto fsm_start;
 }
 
@@ -3346,6 +3527,8 @@ static void ata_qc_timeout(struct ata_queued_cmd *qc)
         printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x host_stat 0x%x\n",
                ap->id, qc->tf.command, drv_stat, host_stat);
 
+        ap->hsm_task_state = HSM_ST_IDLE;
+
         /* complete taskfile transaction */
         ata_qc_complete(qc, ac_err_mask(drv_stat));
         break;
@@ -3625,43 +3808,103 @@ int ata_qc_issue_prot(struct ata_queued_cmd *qc)
 {
     struct ata_port *ap = qc->ap;
 
+    /* Use polling pio if the LLD doesn't handle
+     * interrupt driven pio and atapi CDB interrupt.
+     */
+    if (ap->flags & ATA_FLAG_PIO_POLLING) {
+        switch (qc->tf.protocol) {
+        case ATA_PROT_PIO:
+        case ATA_PROT_ATAPI:
+        case ATA_PROT_ATAPI_NODATA:
+            qc->tf.flags |= ATA_TFLAG_POLLING;
+            break;
+        case ATA_PROT_ATAPI_DMA:
+            if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
+                BUG();
+            break;
+        default:
+            break;
+        }
+    }
+
+    /* select the device */
     ata_dev_select(ap, qc->dev->devno, 1, 0);
 
+    /* start the command */
     switch (qc->tf.protocol) {
     case ATA_PROT_NODATA:
+        if (qc->tf.flags & ATA_TFLAG_POLLING)
+            ata_qc_set_polling(qc);
+
         ata_tf_to_host(ap, &qc->tf);
+        ap->hsm_task_state = HSM_ST_LAST;
+
+        if (qc->tf.flags & ATA_TFLAG_POLLING)
+            queue_work(ata_wq, &ap->pio_task);
+
         break;
 
     case ATA_PROT_DMA:
+        assert(!(qc->tf.flags & ATA_TFLAG_POLLING));
+
         ap->ops->tf_load(ap, &qc->tf);  /* load tf registers */
         ap->ops->bmdma_setup(qc);       /* set up bmdma */
         ap->ops->bmdma_start(qc);       /* initiate bmdma */
+        ap->hsm_task_state = HSM_ST_LAST;
         break;
 
-    case ATA_PROT_PIO: /* load tf registers, initiate polling pio */
-        ata_qc_set_polling(qc);
-        ata_tf_to_host(ap, &qc->tf);
-        ap->hsm_task_state = HSM_ST;
-        queue_work(ata_wq, &ap->pio_task);
-        break;
+    case ATA_PROT_PIO:
+        if (qc->tf.flags & ATA_TFLAG_POLLING)
+            ata_qc_set_polling(qc);
 
-    case ATA_PROT_ATAPI:
-        ata_qc_set_polling(qc);
         ata_tf_to_host(ap, &qc->tf);
-        queue_work(ata_wq, &ap->packet_task);
+
+        if (qc->tf.flags & ATA_TFLAG_WRITE) {
+            /* PIO data out protocol */
+            ap->hsm_task_state = HSM_ST_FIRST;
+            queue_work(ata_wq, &ap->pio_task);
+
+            /* always send first data block using
+             * the ata_pio_task() codepath.
+             */
+        } else {
+            /* PIO data in protocol */
+            ap->hsm_task_state = HSM_ST;
+
+            if (qc->tf.flags & ATA_TFLAG_POLLING)
+                queue_work(ata_wq, &ap->pio_task);
+
+            /* if polling, ata_pio_task() handles the rest.
+             * otherwise, interrupt handler takes over from here.
+             */
+        }
+
         break;
 
+    case ATA_PROT_ATAPI:
     case ATA_PROT_ATAPI_NODATA:
-        ap->flags |= ATA_FLAG_NOINTR;
+        if (qc->tf.flags & ATA_TFLAG_POLLING)
+            ata_qc_set_polling(qc);
+
         ata_tf_to_host(ap, &qc->tf);
-        queue_work(ata_wq, &ap->packet_task);
+        ap->hsm_task_state = HSM_ST_FIRST;
+
+        /* send cdb by polling if no cdb interrupt */
+        if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
+            (qc->tf.flags & ATA_TFLAG_POLLING))
+            queue_work(ata_wq, &ap->pio_task);
         break;
 
     case ATA_PROT_ATAPI_DMA:
-        ap->flags |= ATA_FLAG_NOINTR;
+        assert(!(qc->tf.flags & ATA_TFLAG_POLLING));
+
         ap->ops->tf_load(ap, &qc->tf);  /* load tf registers */
         ap->ops->bmdma_setup(qc);       /* set up bmdma */
-        queue_work(ata_wq, &ap->packet_task);
+        ap->hsm_task_state = HSM_ST_FIRST;
+
+        /* send cdb by polling if no cdb interrupt */
+        if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
+            queue_work(ata_wq, &ap->pio_task);
         break;
 
     default:
@@ -3922,47 +4165,142 @@ void ata_bmdma_stop(struct ata_queued_cmd *qc)
 inline unsigned int ata_host_intr (struct ata_port *ap,
                                    struct ata_queued_cmd *qc)
 {
-    u8 status, host_stat;
-
-    switch (qc->tf.protocol) {
+    u8 status, host_stat = 0;
 
-    case ATA_PROT_DMA:
-    case ATA_PROT_ATAPI_DMA:
-    case ATA_PROT_ATAPI:
-        /* check status of DMA engine */
-        host_stat = ap->ops->bmdma_status(ap);
-        VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
+    VPRINTK("ata%u: protocol %d task_state %d\n",
+            ap->id, qc->tf.protocol, ap->hsm_task_state);
 
-        /* if it's not our irq... */
-        if (!(host_stat & ATA_DMA_INTR))
+    /* Check whether we are expecting interrupt in this state */
+    switch (ap->hsm_task_state) {
+    case HSM_ST_FIRST:
+        /* Check the ATA_DFLAG_CDB_INTR flag is enough here.
+         * The flag was turned on only for atapi devices.
+         * No need to check is_atapi_taskfile(&qc->tf) again.
+         */
+        if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
             goto idle_irq;
+        break;
+    case HSM_ST_LAST:
+        if (qc->tf.protocol == ATA_PROT_DMA ||
+            qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
+            /* check status of DMA engine */
+            host_stat = ap->ops->bmdma_status(ap);
+            VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
+
+            /* if it's not our irq... */
+            if (!(host_stat & ATA_DMA_INTR))
+                goto idle_irq;
+
+            /* before we do anything else, clear DMA-Start bit */
+            ap->ops->bmdma_stop(qc);
+        }
+        break;
+    case HSM_ST:
+        break;
+    default:
+        goto idle_irq;
+    }
 
-        /* before we do anything else, clear DMA-Start bit */
-        ap->ops->bmdma_stop(qc);
+    /* check altstatus */
+    status = ata_altstatus(ap);
+    if (status & ATA_BUSY)
+        goto idle_irq;
 
-        /* fall through */
+    /* check main status, clearing INTRQ */
+    status = ata_chk_status(ap);
+    if (unlikely(status & ATA_BUSY))
+        goto idle_irq;
 
-    case ATA_PROT_ATAPI_NODATA:
-    case ATA_PROT_NODATA:
-        /* check altstatus */
-        status = ata_altstatus(ap);
-        if (status & ATA_BUSY)
-            goto idle_irq;
+    DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
+            ap->id, qc->tf.protocol, ap->hsm_task_state, status);
 
-        /* check main status, clearing INTRQ */
-        status = ata_chk_status(ap);
-        if (unlikely(status & ATA_BUSY))
-            goto idle_irq;
-        DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
-                ap->id, qc->tf.protocol, status);
+    /* ack bmdma irq events */
+    ap->ops->irq_clear(ap);
 
-        /* ack bmdma irq events */
-        ap->ops->irq_clear(ap);
+    /* check error */
+    if (unlikely((status & ATA_ERR) || (host_stat & ATA_DMA_ERR)))
+        ap->hsm_task_state = HSM_ST_ERR;
+
+fsm_start:
+    switch (ap->hsm_task_state) {
+    case HSM_ST_FIRST:
+        /* Some pre-ATAPI-4 devices assert INTRQ
+         * at this state when ready to receive CDB.
+         */
+
+        /* check device status */
+        if (unlikely((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ)) {
+            /* Wrong status. Let EH handle this */
+            ap->hsm_task_state = HSM_ST_ERR;
+            goto fsm_start;
+        }
+
+        atapi_send_cdb(ap, qc);
+
+        break;
+
+    case HSM_ST:
+        /* complete command or read/write the data register */
+        if (qc->tf.protocol == ATA_PROT_ATAPI) {
+            /* ATAPI PIO protocol */
+            if ((status & ATA_DRQ) == 0) {
+                /* no more data to transfer */
+                ap->hsm_task_state = HSM_ST_LAST;
+                goto fsm_start;
+            }
+
+            atapi_pio_bytes(qc);
+
+            if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
+                /* bad ireason reported by device */
+                goto fsm_start;
+
+        } else {
+            /* ATA PIO protocol */
+            if (unlikely((status & ATA_DRQ) == 0)) {
+                /* handle BSY=0, DRQ=0 as error */
+                ap->hsm_task_state = HSM_ST_ERR;
+                goto fsm_start;
+            }
+
+            ata_pio_sectors(qc);
+
+            if (ap->hsm_task_state == HSM_ST_LAST &&
+                (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
+                /* all data read */
+                ata_altstatus(ap);
+                status = ata_chk_status(ap);
+                goto fsm_start;
+            }
+        }
+
+        ata_altstatus(ap); /* flush */
+        break;
+
+    case HSM_ST_LAST:
+        if (unlikely(status & ATA_DRQ)) {
+            /* handle DRQ=1 as error */
+            ap->hsm_task_state = HSM_ST_ERR;
+            goto fsm_start;
+        }
+
+        /* no more data to transfer */
+        DPRINTK("ata%u: command complete, drv_stat 0x%x\n",
+                ap->id, status);
+
+        ap->hsm_task_state = HSM_ST_IDLE;
 
         /* complete taskfile transaction */
         ata_qc_complete(qc, ac_err_mask(status));
         break;
 
+    case HSM_ST_ERR:
+        printk(KERN_ERR "ata%u: command error, drv_stat 0x%x host_stat 0x%x\n",
+               ap->id, status, host_stat);
+
+        ap->hsm_task_state = HSM_ST_IDLE;
+        ata_qc_complete(qc, status | ATA_ERR);
+        break;
     default:
         goto idle_irq;
     }
@@ -4013,11 +4351,11 @@ irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
 
         ap = host_set->ports[i];
         if (ap &&
-            !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) {
+            !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
             struct ata_queued_cmd *qc;
 
             qc = ata_qc_from_tag(ap, ap->active_tag);
-            if (qc && (!(qc->tf.ctl & ATA_NIEN)) &&
+            if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
                 (qc->flags & ATA_QCFLAG_ACTIVE))
                 handled |= ata_host_intr(ap, qc);
         }
@@ -4029,77 +4367,6 @@ irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
 }
 
 /**
- * atapi_packet_task - Write CDB bytes to hardware
- * @_data: Port to which ATAPI device is attached.
- *
- * When device has indicated its readiness to accept
- * a CDB, this function is called.  Send the CDB.
- * If DMA is to be performed, exit immediately.
- * Otherwise, we are in polling mode, so poll
- * status under operation succeeds or fails.
- *
- * LOCKING:
- * Kernel thread context (may sleep)
- */
-
-static void atapi_packet_task(void *_data)
-{
-    struct ata_port *ap = _data;
-    struct ata_queued_cmd *qc;
-    u8 status;
-
-    qc = ata_qc_from_tag(ap, ap->active_tag);
-    assert(qc != NULL);
-    assert(qc->flags & ATA_QCFLAG_ACTIVE);
-
-    /* sleep-wait for BSY to clear */
-    DPRINTK("busy wait\n");
-    if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB))
-        goto err_out_status;
-
-    /* make sure DRQ is set */
-    status = ata_chk_status(ap);
-    if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ)
-        goto err_out;
-
-    /* send SCSI cdb */
-    DPRINTK("send cdb\n");
-    assert(ap->cdb_len >= 12);
-
-    if (qc->tf.protocol == ATA_PROT_ATAPI_DMA ||
-        qc->tf.protocol == ATA_PROT_ATAPI_NODATA) {
-        unsigned long flags;
-
-        /* Once we're done issuing command and kicking bmdma,
-         * irq handler takes over.  To not lose irq, we need
-         * to clear NOINTR flag before sending cdb, but
-         * interrupt handler shouldn't be invoked before we're
-         * finished.  Hence, the following locking.
-         */
-        spin_lock_irqsave(&ap->host_set->lock, flags);
-        ap->flags &= ~ATA_FLAG_NOINTR;
-        ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1);
-        if (qc->tf.protocol == ATA_PROT_ATAPI_DMA)
-            ap->ops->bmdma_start(qc);    /* initiate bmdma */
-        spin_unlock_irqrestore(&ap->host_set->lock, flags);
-    } else {
-        ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1);
-
-        /* PIO commands are handled by polling */
-        ap->hsm_task_state = HSM_ST;
-        queue_work(ata_wq, &ap->pio_task);
-    }
-
-    return;
-
-err_out_status:
-    status = ata_chk_status(ap);
-err_out:
-    ata_poll_qc_complete(qc, __ac_err_mask(status));
-}
-
-
-/**
  * ata_port_start - Set port up for dma.
  * @ap: Port to initialize
  *
@@ -4225,7 +4492,6 @@ static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
     ap->active_tag = ATA_TAG_POISON;
     ap->last_ctl = 0xFF;
 
-    INIT_WORK(&ap->packet_task, atapi_packet_task, ap);
     INIT_WORK(&ap->pio_task, ata_pio_task, ap);
 
     for (i = 0; i < ATA_MAX_DEVICES; i++)
diff --git a/drivers/scsi/pdc_adma.c b/drivers/scsi/pdc_adma.c
index f557f17ca00c..e254f1e1bb1c 100644
--- a/drivers/scsi/pdc_adma.c
+++ b/drivers/scsi/pdc_adma.c
@@ -457,13 +457,13 @@ static inline unsigned int adma_intr_pkt(struct ata_host_set *host_set)
             continue;
         handled = 1;
         adma_enter_reg_mode(ap);
-        if (ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))
+        if (ap->flags & ATA_FLAG_PORT_DISABLED)
             continue;
         pp = ap->private_data;
         if (!pp || pp->state != adma_state_pkt)
             continue;
         qc = ata_qc_from_tag(ap, ap->active_tag);
-        if (qc && (!(qc->tf.ctl & ATA_NIEN))) {
+        if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
             unsigned int err_mask = 0;
 
             if ((status & (aPERR | aPSD | aUIRQ)))
@@ -484,13 +484,13 @@ static inline unsigned int adma_intr_mmio(struct ata_host_set *host_set)
     for (port_no = 0; port_no < host_set->n_ports; ++port_no) {
         struct ata_port *ap;
         ap = host_set->ports[port_no];
-        if (ap && (!(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR)))) {
+        if (ap && (!(ap->flags & ATA_FLAG_PORT_DISABLED))) {
             struct ata_queued_cmd *qc;
             struct adma_port_priv *pp = ap->private_data;
             if (!pp || pp->state != adma_state_mmio)
                 continue;
             qc = ata_qc_from_tag(ap, ap->active_tag);
-            if (qc && (!(qc->tf.ctl & ATA_NIEN))) {
+            if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
 
                 /* check main status, clearing INTRQ */
                 u8 status = ata_check_status(ap);
diff --git a/drivers/scsi/sata_mv.c b/drivers/scsi/sata_mv.c
index ac184e60797e..9687646d73e1 100644
--- a/drivers/scsi/sata_mv.c
+++ b/drivers/scsi/sata_mv.c
@@ -85,7 +85,8 @@ enum {
     MV_FLAG_DUAL_HC         = (1 << 30),  /* two SATA Host Controllers */
     MV_FLAG_IRQ_COALESCE    = (1 << 29),  /* IRQ coalescing capability */
     MV_COMMON_FLAGS         = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
-                               ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO),
+                               ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
+                               ATA_FLAG_PIO_POLLING),
     MV_6XXX_FLAGS           = MV_FLAG_IRQ_COALESCE,
 
     CRQB_FLAG_READ          = (1 << 0),
diff --git a/drivers/scsi/sata_nv.c b/drivers/scsi/sata_nv.c
index 4954896dfdb9..8fdb2336f6f3 100644
--- a/drivers/scsi/sata_nv.c
+++ b/drivers/scsi/sata_nv.c
@@ -304,11 +304,11 @@ static irqreturn_t nv_interrupt (int irq, void *dev_instance,
 
         ap = host_set->ports[i];
         if (ap &&
-            !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) {
+            !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
             struct ata_queued_cmd *qc;
 
             qc = ata_qc_from_tag(ap, ap->active_tag);
-            if (qc && (!(qc->tf.ctl & ATA_NIEN)))
+            if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
                 handled += ata_host_intr(ap, qc);
         }
 
diff --git a/drivers/scsi/sata_promise.c b/drivers/scsi/sata_promise.c
index 8a8e3e3ef0ed..e9ffc27a0493 100644
--- a/drivers/scsi/sata_promise.c
+++ b/drivers/scsi/sata_promise.c
@@ -163,7 +163,8 @@ static struct ata_port_info pdc_port_info[] = {
     {
         .sht            = &pdc_ata_sht,
         .host_flags     = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
-                          ATA_FLAG_SRST | ATA_FLAG_MMIO,
+                          ATA_FLAG_SRST | ATA_FLAG_MMIO |
+                          ATA_FLAG_PIO_POLLING,
         .pio_mask       = 0x1f, /* pio0-4 */
         .mwdma_mask     = 0x07, /* mwdma0-2 */
         .udma_mask      = 0x7f, /* udma0-6 ; FIXME */
@@ -174,7 +175,8 @@ static struct ata_port_info pdc_port_info[] = {
     {
         .sht            = &pdc_ata_sht,
         .host_flags     = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
-                          ATA_FLAG_SRST | ATA_FLAG_MMIO,
+                          ATA_FLAG_SRST | ATA_FLAG_MMIO |
+                          ATA_FLAG_PIO_POLLING,
         .pio_mask       = 0x1f, /* pio0-4 */
         .mwdma_mask     = 0x07, /* mwdma0-2 */
         .udma_mask      = 0x7f, /* udma0-6 ; FIXME */
@@ -185,7 +187,8 @@ static struct ata_port_info pdc_port_info[] = {
     {
         .sht            = &pdc_ata_sht,
         .host_flags     = ATA_FLAG_NO_LEGACY | ATA_FLAG_SRST |
-                          ATA_FLAG_MMIO | ATA_FLAG_SLAVE_POSS,
+                          ATA_FLAG_MMIO | ATA_FLAG_SLAVE_POSS |
+                          ATA_FLAG_PIO_POLLING,
         .pio_mask       = 0x1f, /* pio0-4 */
         .mwdma_mask     = 0x07, /* mwdma0-2 */
         .udma_mask      = 0x7f, /* udma0-6 ; FIXME */
@@ -496,11 +499,11 @@ static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *r
         ap = host_set->ports[i];
         tmp = mask & (1 << (i + 1));
         if (tmp && ap &&
-            !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) {
+            !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
             struct ata_queued_cmd *qc;
 
             qc = ata_qc_from_tag(ap, ap->active_tag);
-            if (qc && (!(qc->tf.ctl & ATA_NIEN)))
+            if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
                 handled += pdc_host_intr(ap, qc);
         }
     }
diff --git a/drivers/scsi/sata_qstor.c b/drivers/scsi/sata_qstor.c
index a8987f5ff5cc..17a39ed62306 100644
--- a/drivers/scsi/sata_qstor.c
+++ b/drivers/scsi/sata_qstor.c
@@ -177,7 +177,7 @@ static struct ata_port_info qs_port_info[] = {
         .host_flags     = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
                           ATA_FLAG_SATA_RESET |
                           //FIXME ATA_FLAG_SRST |
-                          ATA_FLAG_MMIO,
+                          ATA_FLAG_MMIO | ATA_FLAG_PIO_POLLING,
         .pio_mask       = 0x10, /* pio4 */
         .udma_mask      = 0x7f, /* udma0-6 */
         .port_ops       = &qs_ata_ops,
@@ -396,14 +396,13 @@ static inline unsigned int qs_intr_pkt(struct ata_host_set *host_set)
             DPRINTK("SFF=%08x%08x: sCHAN=%u sHST=%d sDST=%02x\n",
                     sff1, sff0, port_no, sHST, sDST);
             handled = 1;
-            if (ap && !(ap->flags &
-                (ATA_FLAG_PORT_DISABLED|ATA_FLAG_NOINTR))) {
+            if (ap && !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
                 struct ata_queued_cmd *qc;
                 struct qs_port_priv *pp = ap->private_data;
                 if (!pp || pp->state != qs_state_pkt)
                     continue;
                 qc = ata_qc_from_tag(ap, ap->active_tag);
-                if (qc && (!(qc->tf.ctl & ATA_NIEN))) {
+                if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
                     switch (sHST) {
                     case 0: /* successful CPB */
                     case 3: /* device error */
@@ -430,13 +429,13 @@ static inline unsigned int qs_intr_mmio(struct ata_host_set *host_set)
         struct ata_port *ap;
         ap = host_set->ports[port_no];
         if (ap &&
-            !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) {
+            !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
             struct ata_queued_cmd *qc;
             struct qs_port_priv *pp = ap->private_data;
             if (!pp || pp->state != qs_state_mmio)
                 continue;
             qc = ata_qc_from_tag(ap, ap->active_tag);
-            if (qc && (!(qc->tf.ctl & ATA_NIEN))) {
+            if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
 
                 /* check main status, clearing INTRQ */
                 u8 status = ata_check_status(ap);
diff --git a/drivers/scsi/sata_sx4.c b/drivers/scsi/sata_sx4.c
index dcc3ad9a9d6e..2eea6de12d70 100644
--- a/drivers/scsi/sata_sx4.c
+++ b/drivers/scsi/sata_sx4.c
@@ -220,7 +220,8 @@ static struct ata_port_info pdc_port_info[] = {
     {
         .sht            = &pdc_sata_sht,
         .host_flags     = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
-                          ATA_FLAG_SRST | ATA_FLAG_MMIO,
+                          ATA_FLAG_SRST | ATA_FLAG_MMIO |
+                          ATA_FLAG_PIO_POLLING,
         .pio_mask       = 0x1f, /* pio0-4 */
         .mwdma_mask     = 0x07, /* mwdma0-2 */
         .udma_mask      = 0x7f, /* udma0-6 ; FIXME */
@@ -832,11 +833,11 @@ static irqreturn_t pdc20621_interrupt (int irq, void *dev_instance, struct pt_re
         tmp = mask & (1 << i);
         VPRINTK("seq %u, port_no %u, ap %p, tmp %x\n", i, port_no, ap, tmp);
         if (tmp && ap &&
-            !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) {
+            !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
             struct ata_queued_cmd *qc;
 
             qc = ata_qc_from_tag(ap, ap->active_tag);
-            if (qc && (!(qc->tf.ctl & ATA_NIEN)))
+            if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
                 handled += pdc20621_host_intr(ap, qc, (i > 4),
                                               mmio_base);
         }
diff --git a/drivers/scsi/sata_vsc.c b/drivers/scsi/sata_vsc.c
index fcfa486965b4..f566e17246f0 100644
--- a/drivers/scsi/sata_vsc.c
+++ b/drivers/scsi/sata_vsc.c
@@ -201,12 +201,12 @@ static irqreturn_t vsc_sata_interrupt (int irq, void *dev_instance,
         struct ata_port *ap;
 
         ap = host_set->ports[i];
-        if (ap && !(ap->flags &
-            (ATA_FLAG_PORT_DISABLED|ATA_FLAG_NOINTR))) {
+        if (ap &&
+            !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
             struct ata_queued_cmd *qc;
 
             qc = ata_qc_from_tag(ap, ap->active_tag);
-            if (qc && (!(qc->tf.ctl & ATA_NIEN)))
+            if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
                 handled += ata_host_intr(ap, qc);
         }
     }
diff --git a/include/linux/ata.h b/include/linux/ata.h
index d2873b732bb1..f512104a1a3f 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -192,6 +192,7 @@ enum {
     ATA_TFLAG_DEVICE        = (1 << 2), /* enable r/w to device reg */
     ATA_TFLAG_WRITE         = (1 << 3), /* data dir: host->dev==1 (write) */
     ATA_TFLAG_LBA           = (1 << 4), /* enable LBA */
+    ATA_TFLAG_POLLING       = (1 << 5), /* set nIEN to 1 and use polling */
 };
 
 enum ata_tf_protocols {
@@ -261,6 +262,8 @@ struct ata_taskfile {
       ((u64) (id)[(n) + 1] << 16) | \
       ((u64) (id)[(n) + 0]) )
 
+#define ata_id_cdb_intr(id) (((id)[0] & 0x60) == 0x20)
+
 static inline int ata_id_current_chs_valid(const u16 *id)
 {
     /* For ATA-1 devices, if the INITIALIZE DEVICE PARAMETERS command
@@ -290,6 +293,14 @@ static inline int is_atapi_taskfile(const struct ata_taskfile *tf)
            (tf->protocol == ATA_PROT_ATAPI_DMA);
 }
 
+static inline int is_multi_taskfile(struct ata_taskfile *tf)
+{
+    return (tf->command == ATA_CMD_READ_MULTI) ||
+           (tf->command == ATA_CMD_WRITE_MULTI) ||
+           (tf->command == ATA_CMD_READ_MULTI_EXT) ||
+           (tf->command == ATA_CMD_WRITE_MULTI_EXT);
+}
+
 static inline int ata_ok(u8 status)
 {
     return ((status & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | ATA_ERR))
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 83a83babff84..2929282beb40 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -101,6 +101,7 @@ enum {
     ATA_DFLAG_PIO           = (1 << 1), /* device currently in PIO mode */
     ATA_DFLAG_LOCK_SECTORS  = (1 << 2), /* don't adjust max_sectors */
     ATA_DFLAG_LBA           = (1 << 3), /* device supports LBA */
+    ATA_DFLAG_CDB_INTR      = (1 << 4), /* device asserts INTRQ when ready for CDB */
 
     ATA_DEV_UNKNOWN         = 0,    /* unknown device */
     ATA_DEV_ATA             = 1,    /* ATA device */
@@ -119,8 +120,8 @@ enum {
     ATA_FLAG_MMIO           = (1 << 6), /* use MMIO, not PIO */
     ATA_FLAG_SATA_RESET     = (1 << 7), /* use COMRESET */
     ATA_FLAG_PIO_DMA        = (1 << 8), /* PIO cmds via DMA */
-    ATA_FLAG_NOINTR         = (1 << 9), /* FIXME: Remove this once
-                                         * proper HSM is in place. */
+    ATA_FLAG_PIO_POLLING    = (1 << 9), /* use polling PIO if LLD
+                                         * doesn't handle PIO interrupts */
     ATA_FLAG_DEBUGMSG       = (1 << 10),
 
     ATA_QCFLAG_ACTIVE       = (1 << 1), /* cmd not yet ack'd to scsi lyer */
@@ -133,8 +134,8 @@ enum {
     ATA_TMOUT_PIO           = 30 * HZ,
     ATA_TMOUT_BOOT          = 30 * HZ, /* hueristic */
     ATA_TMOUT_BOOT_QUICK    = 7 * HZ,  /* hueristic */
-    ATA_TMOUT_CDB           = 30 * HZ,
-    ATA_TMOUT_CDB_QUICK     = 5 * HZ,
+    ATA_TMOUT_DATAOUT       = 30 * HZ,
+    ATA_TMOUT_DATAOUT_QUICK = 5 * HZ,
 
     /* ATA bus states */
     BUS_UNKNOWN             = 0,
@@ -170,14 +171,16 @@ enum {
 };
 
 enum hsm_task_states {
-    HSM_ST_UNKNOWN,
-    HSM_ST_IDLE,
-    HSM_ST_POLL,
-    HSM_ST_TMOUT,
-    HSM_ST,
-    HSM_ST_LAST,
-    HSM_ST_LAST_POLL,
-    HSM_ST_ERR,
+    HSM_ST_UNKNOWN,     /* state unknown */
+    HSM_ST_IDLE,        /* no command on going */
+    HSM_ST_POLL,        /* same as HSM_ST, waits longer */
+    HSM_ST_TMOUT,       /* timeout */
+    HSM_ST,             /* (waiting the device to) transfer data */
+    HSM_ST_LAST,        /* (waiting the device to) complete command */
+    HSM_ST_LAST_POLL,   /* same as HSM_ST_LAST, waits longer */
+    HSM_ST_ERR,         /* error */
+    HSM_ST_FIRST,       /* (waiting the device to)
+                           write CDB or first data block */
 };
 
 enum ata_completion_errors {
@@ -345,8 +348,6 @@ struct ata_port {
     struct ata_host_stats   stats;
     struct ata_host_set     *host_set;
 
-    struct work_struct      packet_task;
-
     struct work_struct      pio_task;
     unsigned int            hsm_task_state;
     unsigned long           pio_task_timeout;