 drivers/scsi/libata-core.c  | 593
 drivers/scsi/pdc_adma.c     |   8
 drivers/scsi/sata_mv.c      |   7
 drivers/scsi/sata_nv.c      |   4
 drivers/scsi/sata_promise.c |   7
 drivers/scsi/sata_qstor.c   |  11
 drivers/scsi/sata_sx4.c     |   6
 drivers/scsi/sata_vsc.c     |   6
 include/linux/ata.h         |  11
 include/linux/libata.h      |  27
 10 files changed, 485 insertions(+), 195 deletions(-)
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index cef85e515c4c..3fd55ef5410f 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -70,6 +70,7 @@ static int fgb(u32 bitmap);
 static int ata_choose_xfer_mode(const struct ata_port *ap,
 				u8 *xfer_mode_out,
 				unsigned int *xfer_shift_out);
+static void ata_pio_error(struct ata_port *ap);
 
 static unsigned int ata_unique_id = 1;
 static struct workqueue_struct *ata_wq;
@@ -213,7 +214,7 @@ int ata_rwcmd_protocol(struct ata_queued_cmd *qc)
 	} else if (lba48 && (qc->ap->flags & ATA_FLAG_PIO_LBA48)) {
 		/* Unable to use DMA due to host limitation */
 		tf->protocol = ATA_PROT_PIO;
-		index = dev->multi_count ? 0 : 4;
+		index = dev->multi_count ? 0 : 8;
 	} else {
 		tf->protocol = ATA_PROT_DMA;
 		index = 16;
@@ -675,13 +676,6 @@ static unsigned int ata_pio_modes(const struct ata_device *adev)
 }
 
 static inline void
-ata_queue_packet_task(struct ata_port *ap)
-{
-	if (!(ap->flags & ATA_FLAG_FLUSH_PIO_TASK))
-		queue_work(ata_wq, &ap->packet_task);
-}
-
-static inline void
 ata_queue_pio_task(struct ata_port *ap)
 {
 	if (!(ap->flags & ATA_FLAG_FLUSH_PIO_TASK))
@@ -696,10 +690,10 @@ ata_queue_delayed_pio_task(struct ata_port *ap, unsigned long delay)
 }
 
 /**
- * ata_flush_pio_tasks - Flush pio_task and packet_task
+ * ata_flush_pio_tasks - Flush pio_task
  * @ap: the target ata_port
  *
- * After this function completes, pio_task and packet_task are
+ * After this function completes, pio_task is
  * guranteed not to be running or scheduled.
  *
  * LOCKING:
@@ -726,7 +720,6 @@ static void ata_flush_pio_tasks(struct ata_port *ap)
 	 * Cancel and flush.
 	 */
 	tmp |= cancel_delayed_work(&ap->pio_task);
-	tmp |= cancel_delayed_work(&ap->packet_task);
 	if (!tmp) {
 		DPRINTK("flush #2\n");
 		flush_workqueue(ata_wq);
@@ -1052,6 +1045,12 @@ retry:
 
 		}
 
+		if (dev->id[59] & 0x100) {
+			dev->multi_count = dev->id[59] & 0xff;
+			DPRINTK("ata%u: dev %u multi count %u\n",
+				ap->id, device, dev->multi_count);
+		}
+
 		ap->host->max_cmd_len = 16;
 	}
 
@@ -1068,6 +1067,9 @@ retry:
 		ap->cdb_len = (unsigned int) rc;
 		ap->host->max_cmd_len = (unsigned char) ap->cdb_len;
 
+		if (ata_id_cdb_intr(dev->id))
+			dev->flags |= ATA_DFLAG_CDB_INTR;
+
 		/* print device info to dmesg */
 		printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n",
 		       ap->id, device,
@@ -2899,7 +2901,6 @@ void ata_poll_qc_complete(struct ata_queued_cmd *qc)
 	unsigned long flags;
 
 	spin_lock_irqsave(&ap->host_set->lock, flags);
-	ap->flags &= ~ATA_FLAG_NOINTR;
 	ata_irq_on(ap);
 	ata_qc_complete(qc);
 	spin_unlock_irqrestore(&ap->host_set->lock, flags);
@@ -2965,7 +2966,8 @@ static unsigned long ata_pio_poll(struct ata_port *ap)
  * None. (executing in kernel thread context)
  *
  * RETURNS:
- * Non-zero if qc completed, zero otherwise.
+ * Zero if qc completed.
+ * Non-zero if has next.
  */
 
 static int ata_pio_complete (struct ata_port *ap)
@@ -2978,7 +2980,7 @@ static int ata_pio_complete (struct ata_port *ap)
 	 * we enter, BSY will be cleared in a chk-status or two. If not,
 	 * the drive is probably seeking or something. Snooze for a couple
 	 * msecs, then chk-status again. If still busy, fall back to
-	 * HSM_ST_POLL state.
+	 * HSM_ST_LAST_POLL state.
 	 */
 	drv_stat = ata_busy_wait(ap, ATA_BUSY, 10);
 	if (drv_stat & ATA_BUSY) {
@@ -2987,7 +2989,7 @@ static int ata_pio_complete (struct ata_port *ap)
 		if (drv_stat & ATA_BUSY) {
 			ap->hsm_task_state = HSM_ST_LAST_POLL;
 			ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
-			return 0;
+			return 1;
 		}
 	}
 
@@ -2998,7 +3000,7 @@ static int ata_pio_complete (struct ata_port *ap)
 	if (!ata_ok(drv_stat)) {
 		qc->err_mask |= __ac_err_mask(drv_stat);
 		ap->hsm_task_state = HSM_ST_ERR;
-		return 0;
+		return 1;
 	}
 
 	ap->hsm_task_state = HSM_ST_IDLE;
@@ -3008,7 +3010,7 @@ static int ata_pio_complete (struct ata_port *ap)
 
 	/* another command may start at this point */
 
-	return 1;
+	return 0;
 }
 
 
@@ -3180,7 +3182,23 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
 	page = nth_page(page, (offset >> PAGE_SHIFT));
 	offset %= PAGE_SIZE;
 
-	buf = kmap(page) + offset;
+	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
+
+	if (PageHighMem(page)) {
+		unsigned long flags;
+
+		local_irq_save(flags);
+		buf = kmap_atomic(page, KM_IRQ0);
+
+		/* do the actual data transfer */
+		ata_data_xfer(ap, buf + offset, ATA_SECT_SIZE, do_write);
+
+		kunmap_atomic(buf, KM_IRQ0);
+		local_irq_restore(flags);
+	} else {
+		buf = page_address(page);
+		ata_data_xfer(ap, buf + offset, ATA_SECT_SIZE, do_write);
+	}
 
 	qc->cursect++;
 	qc->cursg_ofs++;
@@ -3189,14 +3207,153 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
 		qc->cursg++;
 		qc->cursg_ofs = 0;
 	}
+}
 
-	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
+/**
+ * ata_pio_sectors - Transfer one or many 512-byte sectors.
+ * @qc: Command on going
+ *
+ * Transfer one or many ATA_SECT_SIZE of data from/to the
+ * ATA device for the DRQ request.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+
+static void ata_pio_sectors(struct ata_queued_cmd *qc)
+{
+	if (is_multi_taskfile(&qc->tf)) {
+		/* READ/WRITE MULTIPLE */
+		unsigned int nsect;
+
+		assert(qc->dev->multi_count);
+
+		nsect = min(qc->nsect - qc->cursect, qc->dev->multi_count);
+		while (nsect--)
+			ata_pio_sector(qc);
+	} else
+		ata_pio_sector(qc);
+}
+
+/**
+ * atapi_send_cdb - Write CDB bytes to hardware
+ * @ap: Port to which ATAPI device is attached.
+ * @qc: Taskfile currently active
+ *
+ * When device has indicated its readiness to accept
+ * a CDB, this function is called. Send the CDB.
+ *
+ * LOCKING:
+ * caller.
+ */
+
+static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
+{
+	/* send SCSI cdb */
+	DPRINTK("send cdb\n");
+	assert(ap->cdb_len >= 12);
+
+	ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1);
+	ata_altstatus(ap); /* flush */
+
+	switch (qc->tf.protocol) {
+	case ATA_PROT_ATAPI:
+		ap->hsm_task_state = HSM_ST;
+		break;
+	case ATA_PROT_ATAPI_NODATA:
+		ap->hsm_task_state = HSM_ST_LAST;
+		break;
+	case ATA_PROT_ATAPI_DMA:
+		ap->hsm_task_state = HSM_ST_LAST;
+		/* initiate bmdma */
+		ap->ops->bmdma_start(qc);
+		break;
+	}
+}
+
+/**
+ * ata_pio_first_block - Write first data block to hardware
+ * @ap: Port to which ATA/ATAPI device is attached.
+ *
+ * When device has indicated its readiness to accept
+ * the data, this function sends out the CDB or
+ * the first data block by PIO.
+ * After this,
+ *   - If polling, ata_pio_task() handles the rest.
+ *   - Otherwise, interrupt handler takes over.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep)
+ *
+ * RETURNS:
+ * Zero if irq handler takes over
+ * Non-zero if has next (polling).
+ */
+
+static int ata_pio_first_block(struct ata_port *ap)
+{
+	struct ata_queued_cmd *qc;
+	u8 status;
+	unsigned long flags;
+	int has_next;
+
+	qc = ata_qc_from_tag(ap, ap->active_tag);
+	assert(qc != NULL);
+	assert(qc->flags & ATA_QCFLAG_ACTIVE);
+
+	/* if polling, we will stay in the work queue after sending the data.
+	 * otherwise, interrupt handler takes over after sending the data.
+	 */
+	has_next = (qc->tf.flags & ATA_TFLAG_POLLING);
+
+	/* sleep-wait for BSY to clear */
+	DPRINTK("busy wait\n");
+	if (ata_busy_sleep(ap, ATA_TMOUT_DATAOUT_QUICK, ATA_TMOUT_DATAOUT)) {
+		qc->err_mask |= AC_ERR_TIMEOUT;
+		ap->hsm_task_state = HSM_ST_TMOUT;
+		goto err_out;
+	}
 
-	/* do the actual data transfer */
-	do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
-	ata_data_xfer(ap, buf, ATA_SECT_SIZE, do_write);
+	/* make sure DRQ is set */
+	status = ata_chk_status(ap);
+	if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ) {
+		/* device status error */
+		qc->err_mask |= AC_ERR_HSM;
+		ap->hsm_task_state = HSM_ST_ERR;
+		goto err_out;
+	}
+
+	/* Send the CDB (atapi) or the first data block (ata pio out).
+	 * During the state transition, interrupt handler shouldn't
+	 * be invoked before the data transfer is complete and
+	 * hsm_task_state is changed. Hence, the following locking.
+	 */
+	spin_lock_irqsave(&ap->host_set->lock, flags);
 
-	kunmap(page);
+	if (qc->tf.protocol == ATA_PROT_PIO) {
+		/* PIO data out protocol.
+		 * send first data block.
+		 */
+
+		/* ata_pio_sectors() might change the state to HSM_ST_LAST.
+		 * so, the state is changed here before ata_pio_sectors().
+		 */
+		ap->hsm_task_state = HSM_ST;
+		ata_pio_sectors(qc);
+		ata_altstatus(ap); /* flush */
+	} else
+		/* send CDB */
+		atapi_send_cdb(ap, qc);
+
+	spin_unlock_irqrestore(&ap->host_set->lock, flags);
+
+	/* if polling, ata_pio_task() handles the rest.
+	 * otherwise, interrupt handler takes over from here.
+	 */
+	return has_next;
+
+err_out:
+	return 1; /* has next */
 }
 
 /**
@@ -3262,7 +3419,23 @@ next_sg:
 	/* don't cross page boundaries */
 	count = min(count, (unsigned int)PAGE_SIZE - offset);
 
-	buf = kmap(page) + offset;
+	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
+
+	if (PageHighMem(page)) {
+		unsigned long flags;
+
+		local_irq_save(flags);
+		buf = kmap_atomic(page, KM_IRQ0);
+
+		/* do the actual data transfer */
+		ata_data_xfer(ap, buf + offset, count, do_write);
+
+		kunmap_atomic(buf, KM_IRQ0);
+		local_irq_restore(flags);
+	} else {
+		buf = page_address(page);
+		ata_data_xfer(ap, buf + offset, count, do_write);
+	}
 
 	bytes -= count;
 	qc->curbytes += count;
@@ -3273,13 +3446,6 @@ next_sg:
 		qc->cursg_ofs = 0;
 	}
 
-	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
-
-	/* do the actual data transfer */
-	ata_data_xfer(ap, buf, count, do_write);
-
-	kunmap(page);
-
 	if (bytes)
 		goto next_sg;
 }
@@ -3316,6 +3482,8 @@ static void atapi_pio_bytes(struct ata_queued_cmd *qc)
 	if (do_write != i_write)
 		goto err_out;
 
+	VPRINTK("ata%u: xfering %d bytes\n", ap->id, bytes);
+
 	__atapi_pio_bytes(qc, bytes);
 
 	return;
@@ -3386,19 +3554,22 @@ static void ata_pio_block(struct ata_port *ap)
 			return;
 		}
 
-		ata_pio_sector(qc);
+		ata_pio_sectors(qc);
 	}
+
+	ata_altstatus(ap); /* flush */
 }
 
 static void ata_pio_error(struct ata_port *ap)
 {
 	struct ata_queued_cmd *qc;
 
-	printk(KERN_WARNING "ata%u: PIO error\n", ap->id);
-
 	qc = ata_qc_from_tag(ap, ap->active_tag);
 	WARN_ON(qc == NULL);
 
+	if (qc->tf.command != ATA_CMD_PACKET)
+		printk(KERN_WARNING "ata%u: PIO error\n", ap->id);
+
 	/* make sure qc->err_mask is available to
 	 * know what's wrong and recover
 	 */
@@ -3413,22 +3584,23 @@ static void ata_pio_task(void *_data)
 {
 	struct ata_port *ap = _data;
 	unsigned long timeout;
-	int qc_completed;
+	int has_next;
 
 fsm_start:
 	timeout = 0;
-	qc_completed = 0;
+	has_next = 1;
 
 	switch (ap->hsm_task_state) {
-	case HSM_ST_IDLE:
-		return;
+	case HSM_ST_FIRST:
+		has_next = ata_pio_first_block(ap);
+		break;
 
 	case HSM_ST:
 		ata_pio_block(ap);
 		break;
 
 	case HSM_ST_LAST:
-		qc_completed = ata_pio_complete(ap);
+		has_next = ata_pio_complete(ap);
 		break;
 
 	case HSM_ST_POLL:
@@ -3440,11 +3612,15 @@ fsm_start:
 	case HSM_ST_ERR:
 		ata_pio_error(ap);
 		return;
+
+	default:
+		BUG();
+		return;
 	}
 
 	if (timeout)
 		ata_queue_delayed_pio_task(ap, timeout);
-	else if (!qc_completed)
+	else if (has_next)
 		goto fsm_start;
 }
 
@@ -3502,8 +3678,10 @@ static void ata_qc_timeout(struct ata_queued_cmd *qc)
 		printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x host_stat 0x%x\n",
 		       ap->id, qc->tf.command, drv_stat, host_stat);
 
+		ap->hsm_task_state = HSM_ST_IDLE;
+
 		/* complete taskfile transaction */
-		qc->err_mask |= ac_err_mask(drv_stat);
+		qc->err_mask |= AC_ERR_TIMEOUT;
 		break;
 	}
 
@@ -3730,43 +3908,104 @@ unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
 
+	/* Use polling pio if the LLD doesn't handle
+	 * interrupt driven pio and atapi CDB interrupt.
+	 */
+	if (ap->flags & ATA_FLAG_PIO_POLLING) {
+		switch (qc->tf.protocol) {
+		case ATA_PROT_PIO:
+		case ATA_PROT_ATAPI:
+		case ATA_PROT_ATAPI_NODATA:
+			qc->tf.flags |= ATA_TFLAG_POLLING;
+			break;
+		case ATA_PROT_ATAPI_DMA:
+			if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
+				BUG();
+			break;
+		default:
+			break;
+		}
+	}
+
+	/* select the device */
 	ata_dev_select(ap, qc->dev->devno, 1, 0);
 
+	/* start the command */
 	switch (qc->tf.protocol) {
 	case ATA_PROT_NODATA:
+		if (qc->tf.flags & ATA_TFLAG_POLLING)
+			ata_qc_set_polling(qc);
+
 		ata_tf_to_host(ap, &qc->tf);
+		ap->hsm_task_state = HSM_ST_LAST;
+
+		if (qc->tf.flags & ATA_TFLAG_POLLING)
+			ata_queue_pio_task(ap);
+
 		break;
 
 	case ATA_PROT_DMA:
+		assert(!(qc->tf.flags & ATA_TFLAG_POLLING));
+
 		ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
 		ap->ops->bmdma_setup(qc); /* set up bmdma */
 		ap->ops->bmdma_start(qc); /* initiate bmdma */
+		ap->hsm_task_state = HSM_ST_LAST;
 		break;
 
-	case ATA_PROT_PIO: /* load tf registers, initiate polling pio */
-		ata_qc_set_polling(qc);
-		ata_tf_to_host(ap, &qc->tf);
-		ap->hsm_task_state = HSM_ST;
-		ata_queue_pio_task(ap);
-		break;
+	case ATA_PROT_PIO:
+		if (qc->tf.flags & ATA_TFLAG_POLLING)
+			ata_qc_set_polling(qc);
 
-	case ATA_PROT_ATAPI:
-		ata_qc_set_polling(qc);
 		ata_tf_to_host(ap, &qc->tf);
-		ata_queue_packet_task(ap);
+
+		if (qc->tf.flags & ATA_TFLAG_WRITE) {
+			/* PIO data out protocol */
+			ap->hsm_task_state = HSM_ST_FIRST;
+			ata_queue_pio_task(ap);
+
+			/* always send first data block using
+			 * the ata_pio_task() codepath.
+			 */
+		} else {
+			/* PIO data in protocol */
+			ap->hsm_task_state = HSM_ST;
+
+			if (qc->tf.flags & ATA_TFLAG_POLLING)
+				ata_queue_pio_task(ap);
+
+			/* if polling, ata_pio_task() handles the rest.
+			 * otherwise, interrupt handler takes over from here.
+			 */
+		}
+
 		break;
 
+	case ATA_PROT_ATAPI:
 	case ATA_PROT_ATAPI_NODATA:
-		ap->flags |= ATA_FLAG_NOINTR;
+		if (qc->tf.flags & ATA_TFLAG_POLLING)
+			ata_qc_set_polling(qc);
+
 		ata_tf_to_host(ap, &qc->tf);
-		ata_queue_packet_task(ap);
+
+		ap->hsm_task_state = HSM_ST_FIRST;
+
+		/* send cdb by polling if no cdb interrupt */
+		if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
+		    (qc->tf.flags & ATA_TFLAG_POLLING))
+			ata_queue_pio_task(ap);
 		break;
 
 	case ATA_PROT_ATAPI_DMA:
-		ap->flags |= ATA_FLAG_NOINTR;
+		assert(!(qc->tf.flags & ATA_TFLAG_POLLING));
+
 		ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
 		ap->ops->bmdma_setup(qc); /* set up bmdma */
-		ata_queue_packet_task(ap);
+		ap->hsm_task_state = HSM_ST_FIRST;
+
+		/* send cdb by polling if no cdb interrupt */
+		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
+			ata_queue_pio_task(ap);
 		break;
 
 	default:
@@ -4027,48 +4266,160 @@ void ata_bmdma_stop(struct ata_queued_cmd *qc)
 inline unsigned int ata_host_intr (struct ata_port *ap,
 				   struct ata_queued_cmd *qc)
 {
-	u8 status, host_stat;
+	u8 status, host_stat = 0;
 
-	switch (qc->tf.protocol) {
+	VPRINTK("ata%u: protocol %d task_state %d\n",
+		ap->id, qc->tf.protocol, ap->hsm_task_state);
 
-	case ATA_PROT_DMA:
-	case ATA_PROT_ATAPI_DMA:
-	case ATA_PROT_ATAPI:
-		/* check status of DMA engine */
-		host_stat = ap->ops->bmdma_status(ap);
-		VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
-
-		/* if it's not our irq... */
-		if (!(host_stat & ATA_DMA_INTR))
+	/* Check whether we are expecting interrupt in this state */
+	switch (ap->hsm_task_state) {
+	case HSM_ST_FIRST:
+		/* Check the ATA_DFLAG_CDB_INTR flag is enough here.
+		 * The flag was turned on only for atapi devices.
+		 * No need to check is_atapi_taskfile(&qc->tf) again.
+		 */
+		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
 			goto idle_irq;
+		break;
+	case HSM_ST_LAST:
+		if (qc->tf.protocol == ATA_PROT_DMA ||
+		    qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
+			/* check status of DMA engine */
+			host_stat = ap->ops->bmdma_status(ap);
+			VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
+
+			/* if it's not our irq... */
+			if (!(host_stat & ATA_DMA_INTR))
+				goto idle_irq;
+
+			/* before we do anything else, clear DMA-Start bit */
+			ap->ops->bmdma_stop(qc);
+
+			if (unlikely(host_stat & ATA_DMA_ERR)) {
+				/* error when transfering data to/from memory */
+				qc->err_mask |= AC_ERR_HOST_BUS;
+				ap->hsm_task_state = HSM_ST_ERR;
+			}
+		}
+		break;
+	case HSM_ST:
+		break;
+	default:
+		goto idle_irq;
+	}
 
-		/* before we do anything else, clear DMA-Start bit */
-		ap->ops->bmdma_stop(qc);
+	/* check altstatus */
+	status = ata_altstatus(ap);
+	if (status & ATA_BUSY)
+		goto idle_irq;
 
-		/* fall through */
+	/* check main status, clearing INTRQ */
+	status = ata_chk_status(ap);
+	if (unlikely(status & ATA_BUSY))
+		goto idle_irq;
 
-	case ATA_PROT_ATAPI_NODATA:
-	case ATA_PROT_NODATA:
-		/* check altstatus */
-		status = ata_altstatus(ap);
-		if (status & ATA_BUSY)
-			goto idle_irq;
+	DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
+		ap->id, qc->tf.protocol, ap->hsm_task_state, status);
 
-		/* check main status, clearing INTRQ */
-		status = ata_chk_status(ap);
-		if (unlikely(status & ATA_BUSY))
-			goto idle_irq;
-		DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
-			ap->id, qc->tf.protocol, status);
+	/* ack bmdma irq events */
+	ap->ops->irq_clear(ap);
 
-		/* ack bmdma irq events */
-		ap->ops->irq_clear(ap);
+	/* check error */
+	if (unlikely(status & (ATA_ERR | ATA_DF))) {
+		qc->err_mask |= AC_ERR_DEV;
+		ap->hsm_task_state = HSM_ST_ERR;
+	}
+
+fsm_start:
+	switch (ap->hsm_task_state) {
+	case HSM_ST_FIRST:
+		/* Some pre-ATAPI-4 devices assert INTRQ
+		 * at this state when ready to receive CDB.
+		 */
+
+		/* check device status */
+		if (unlikely((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ)) {
+			/* Wrong status. Let EH handle this */
+			qc->err_mask |= AC_ERR_HSM;
+			ap->hsm_task_state = HSM_ST_ERR;
+			goto fsm_start;
+		}
+
+		atapi_send_cdb(ap, qc);
+
+		break;
+
+	case HSM_ST:
+		/* complete command or read/write the data register */
+		if (qc->tf.protocol == ATA_PROT_ATAPI) {
+			/* ATAPI PIO protocol */
+			if ((status & ATA_DRQ) == 0) {
+				/* no more data to transfer */
+				ap->hsm_task_state = HSM_ST_LAST;
+				goto fsm_start;
+			}
+
+			atapi_pio_bytes(qc);
+
+			if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
+				/* bad ireason reported by device */
+				goto fsm_start;
+
+		} else {
+			/* ATA PIO protocol */
+			if (unlikely((status & ATA_DRQ) == 0)) {
+				/* handle BSY=0, DRQ=0 as error */
+				qc->err_mask |= AC_ERR_HSM;
+				ap->hsm_task_state = HSM_ST_ERR;
+				goto fsm_start;
+			}
+
+			ata_pio_sectors(qc);
+
+			if (ap->hsm_task_state == HSM_ST_LAST &&
+			    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
+				/* all data read */
+				ata_altstatus(ap);
+				status = ata_chk_status(ap);
+				goto fsm_start;
+			}
+		}
+
+		ata_altstatus(ap); /* flush */
+		break;
+
+	case HSM_ST_LAST:
+		if (unlikely(status & ATA_DRQ)) {
+			/* handle DRQ=1 as error */
+			qc->err_mask |= AC_ERR_HSM;
+			ap->hsm_task_state = HSM_ST_ERR;
+			goto fsm_start;
+		}
+
+		/* no more data to transfer */
+		DPRINTK("ata%u: command complete, drv_stat 0x%x\n",
+			ap->id, status);
+
+		ap->hsm_task_state = HSM_ST_IDLE;
 
 		/* complete taskfile transaction */
 		qc->err_mask |= ac_err_mask(status);
 		ata_qc_complete(qc);
 		break;
 
+	case HSM_ST_ERR:
+		if (qc->tf.command != ATA_CMD_PACKET)
+			printk(KERN_ERR "ata%u: command error, drv_stat 0x%x host_stat 0x%x\n",
+			       ap->id, status, host_stat);
+
+		/* make sure qc->err_mask is available to
+		 * know what's wrong and recover
+		 */
+		assert(qc->err_mask);
+
+		ap->hsm_task_state = HSM_ST_IDLE;
+		ata_qc_complete(qc);
+		break;
+
 	default:
 		goto idle_irq;
 	}
@@ -4119,11 +4470,11 @@ irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
 
 		ap = host_set->ports[i];
 		if (ap &&
-		    !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) {
+		    !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
 			struct ata_queued_cmd *qc;
 
 			qc = ata_qc_from_tag(ap, ap->active_tag);
-			if (qc && (!(qc->tf.ctl & ATA_NIEN)) &&
+			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
 			    (qc->flags & ATA_QCFLAG_ACTIVE))
 				handled |= ata_host_intr(ap, qc);
 		}
@@ -4134,79 +4485,6 @@ irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
 	return IRQ_RETVAL(handled);
 }
 
-/**
- * atapi_packet_task - Write CDB bytes to hardware
- * @_data: Port to which ATAPI device is attached.
- *
- * When device has indicated its readiness to accept
- * a CDB, this function is called. Send the CDB.
- * If DMA is to be performed, exit immediately.
- * Otherwise, we are in polling mode, so poll
- * status under operation succeeds or fails.
- *
- * LOCKING:
- * Kernel thread context (may sleep)
- */
-
-static void atapi_packet_task(void *_data)
-{
-	struct ata_port *ap = _data;
-	struct ata_queued_cmd *qc;
-	u8 status;
-
-	qc = ata_qc_from_tag(ap, ap->active_tag);
-	WARN_ON(qc == NULL);
-	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
-
-	/* sleep-wait for BSY to clear */
-	DPRINTK("busy wait\n");
-	if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB)) {
-		qc->err_mask |= AC_ERR_TIMEOUT;
-		goto err_out;
-	}
-
-	/* make sure DRQ is set */
-	status = ata_chk_status(ap);
-	if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ) {
-		qc->err_mask |= AC_ERR_HSM;
-		goto err_out;
-	}
-
-	/* send SCSI cdb */
-	DPRINTK("send cdb\n");
-	WARN_ON(ap->cdb_len < 12);
-
-	if (qc->tf.protocol == ATA_PROT_ATAPI_DMA ||
-	    qc->tf.protocol == ATA_PROT_ATAPI_NODATA) {
-		unsigned long flags;
-
-		/* Once we're done issuing command and kicking bmdma,
-		 * irq handler takes over. To not lose irq, we need
-		 * to clear NOINTR flag before sending cdb, but
-		 * interrupt handler shouldn't be invoked before we're
-		 * finished. Hence, the following locking.
-		 */
-		spin_lock_irqsave(&ap->host_set->lock, flags);
-		ap->flags &= ~ATA_FLAG_NOINTR;
-		ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1);
-		if (qc->tf.protocol == ATA_PROT_ATAPI_DMA)
-			ap->ops->bmdma_start(qc); /* initiate bmdma */
-		spin_unlock_irqrestore(&ap->host_set->lock, flags);
-	} else {
-		ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1);
-
-		/* PIO commands are handled by polling */
-		ap->hsm_task_state = HSM_ST;
-		ata_queue_pio_task(ap);
-	}
-
-	return;
-
-err_out:
-	ata_poll_qc_complete(qc);
-}
-
-
 /*
  * Execute a 'simple' command, that only consists of the opcode 'cmd' itself,
  * without filling any other registers
@@ -4426,7 +4704,6 @@ static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
 	ap->active_tag = ATA_TAG_POISON;
 	ap->last_ctl = 0xFF;
 
-	INIT_WORK(&ap->packet_task, atapi_packet_task, ap);
 	INIT_WORK(&ap->pio_task, ata_pio_task, ap);
 	INIT_LIST_HEAD(&ap->eh_done_q);
 
diff --git a/drivers/scsi/pdc_adma.c b/drivers/scsi/pdc_adma.c
index d0ad3ebe9685..0f7e45a39fd9 100644
--- a/drivers/scsi/pdc_adma.c
+++ b/drivers/scsi/pdc_adma.c
@@ -458,13 +458,13 @@ static inline unsigned int adma_intr_pkt(struct ata_host_set *host_set)
 			continue;
 		handled = 1;
 		adma_enter_reg_mode(ap);
-		if (ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))
+		if (ap->flags & ATA_FLAG_PORT_DISABLED)
 			continue;
 		pp = ap->private_data;
 		if (!pp || pp->state != adma_state_pkt)
 			continue;
 		qc = ata_qc_from_tag(ap, ap->active_tag);
-		if (qc && (!(qc->tf.ctl & ATA_NIEN))) {
+		if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
 			if ((status & (aPERR | aPSD | aUIRQ)))
 				qc->err_mask |= AC_ERR_OTHER;
 			else if (pp->pkt[0] != cDONE)
@@ -483,13 +483,13 @@ static inline unsigned int adma_intr_mmio(struct ata_host_set *host_set)
 	for (port_no = 0; port_no < host_set->n_ports; ++port_no) {
 		struct ata_port *ap;
 		ap = host_set->ports[port_no];
-		if (ap && (!(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR)))) {
+		if (ap && (!(ap->flags & ATA_FLAG_PORT_DISABLED))) {
 			struct ata_queued_cmd *qc;
 			struct adma_port_priv *pp = ap->private_data;
 			if (!pp || pp->state != adma_state_mmio)
 				continue;
 			qc = ata_qc_from_tag(ap, ap->active_tag);
-			if (qc && (!(qc->tf.ctl & ATA_NIEN))) {
+			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
 
 				/* check main status, clearing INTRQ */
 				u8 status = ata_check_status(ap);
diff --git a/drivers/scsi/sata_mv.c b/drivers/scsi/sata_mv.c
index 732cb64a4d1b..d35460ff5275 100644
--- a/drivers/scsi/sata_mv.c
+++ b/drivers/scsi/sata_mv.c
@@ -87,7 +87,7 @@ enum {
 	MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
 	MV_COMMON_FLAGS = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
 			   ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
-			   ATA_FLAG_NO_ATAPI),
+			   ATA_FLAG_PIO_POLLING),
 	MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,
 
 	CRQB_FLAG_READ = (1 << 0),
@@ -1388,8 +1388,7 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
 			handled++;
 		}
 
-		if (ap &&
-		    (ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR)))
+		if (ap && (ap->flags & ATA_FLAG_PORT_DISABLED))
 			continue;
 
 		err_mask = ac_err_mask(ata_status);
@@ -1410,7 +1409,7 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
 			VPRINTK("port %u IRQ found for qc, "
 				"ata_status 0x%x\n", port,ata_status);
 			/* mark qc status appropriately */
-			if (!(qc->tf.ctl & ATA_NIEN)) {
+			if (!(qc->tf.flags & ATA_TFLAG_POLLING)) {
 				qc->err_mask |= err_mask;
 				ata_qc_complete(qc);
 			}
diff --git a/drivers/scsi/sata_nv.c b/drivers/scsi/sata_nv.c
index cdfeb9aa600b..94dc2e1a8f30 100644
--- a/drivers/scsi/sata_nv.c
+++ b/drivers/scsi/sata_nv.c
@@ -310,11 +310,11 @@ static irqreturn_t nv_interrupt (int irq, void *dev_instance,
 
 		ap = host_set->ports[i];
 		if (ap &&
-		    !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) {
+		    !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
 			struct ata_queued_cmd *qc;
 
 			qc = ata_qc_from_tag(ap, ap->active_tag);
-			if (qc && (!(qc->tf.ctl & ATA_NIEN)))
+			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
 				handled += ata_host_intr(ap, qc);
 			else
 				// No request pending? Clear interrupt status
diff --git a/drivers/scsi/sata_promise.c b/drivers/scsi/sata_promise.c
index c9dfd9370919..a88b563ebcc4 100644
--- a/drivers/scsi/sata_promise.c
+++ b/drivers/scsi/sata_promise.c
@@ -76,7 +76,8 @@ enum {
 	PDC_RESET = (1 << 11), /* HDMA reset */
 
 	PDC_COMMON_FLAGS = ATA_FLAG_NO_LEGACY | ATA_FLAG_SRST |
-			   ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI,
+			   ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
+			   ATA_FLAG_PIO_POLLING,
 };
 
 
@@ -534,11 +535,11 @@ static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *r
 		ap = host_set->ports[i];
 		tmp = mask & (1 << (i + 1));
 		if (tmp && ap &&
-		    !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) {
+		    !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
 			struct ata_queued_cmd *qc;
 
 			qc = ata_qc_from_tag(ap, ap->active_tag);
-			if (qc && (!(qc->tf.ctl & ATA_NIEN)))
+			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
 				handled += pdc_host_intr(ap, qc);
 		}
 	}
diff --git a/drivers/scsi/sata_qstor.c b/drivers/scsi/sata_qstor.c
index 955131b43206..5730167d2e74 100644
--- a/drivers/scsi/sata_qstor.c
+++ b/drivers/scsi/sata_qstor.c
@@ -178,7 +178,7 @@ static const struct ata_port_info qs_port_info[] = {
 		.host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
 			      ATA_FLAG_SATA_RESET |
 			      //FIXME ATA_FLAG_SRST |
-			      ATA_FLAG_MMIO,
+			      ATA_FLAG_MMIO | ATA_FLAG_PIO_POLLING,
 		.pio_mask = 0x10, /* pio4 */
 		.udma_mask = 0x7f, /* udma0-6 */
 		.port_ops = &qs_ata_ops,
@@ -397,14 +397,13 @@ static inline unsigned int qs_intr_pkt(struct ata_host_set *host_set)
 			DPRINTK("SFF=%08x%08x: sCHAN=%u sHST=%d sDST=%02x\n",
 				sff1, sff0, port_no, sHST, sDST);
 			handled = 1;
-			if (ap && !(ap->flags &
-				(ATA_FLAG_PORT_DISABLED|ATA_FLAG_NOINTR))) {
+			if (ap && !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
 				struct ata_queued_cmd *qc;
 				struct qs_port_priv *pp = ap->private_data;
 				if (!pp || pp->state != qs_state_pkt)
 					continue;
 				qc = ata_qc_from_tag(ap, ap->active_tag);
-				if (qc && (!(qc->tf.ctl & ATA_NIEN))) {
+				if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
 					switch (sHST) {
 					case 0: /* successful CPB */
 					case 3: /* device error */
@@ -431,13 +430,13 @@ static inline unsigned int qs_intr_mmio(struct ata_host_set *host_set)
 		struct ata_port *ap;
 		ap = host_set->ports[port_no];
 		if (ap &&
-		    !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) {
+		    !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
 			struct ata_queued_cmd *qc;
 			struct qs_port_priv *pp = ap->private_data;
 			if (!pp || pp->state != qs_state_mmio)
 				continue;
 			qc = ata_qc_from_tag(ap, ap->active_tag);
-			if (qc && (!(qc->tf.ctl & ATA_NIEN))) {
+			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
 
 				/* check main status, clearing INTRQ */
 				u8 status = ata_check_status(ap);
diff --git a/drivers/scsi/sata_sx4.c b/drivers/scsi/sata_sx4.c
index e158f7a34d62..04465fb86e1d 100644
--- a/drivers/scsi/sata_sx4.c
+++ b/drivers/scsi/sata_sx4.c
@@ -221,7 +221,7 @@ static const struct ata_port_info pdc_port_info[] = {
 		.sht = &pdc_sata_sht,
 		.host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
 			      ATA_FLAG_SRST | ATA_FLAG_MMIO |
-			      ATA_FLAG_NO_ATAPI,
+			      ATA_FLAG_PIO_POLLING,
 		.pio_mask = 0x1f, /* pio0-4 */
 		.mwdma_mask = 0x07, /* mwdma0-2 */
 		.udma_mask = 0x7f, /* udma0-6 ; FIXME */
@@ -836,11 +836,11 @@ static irqreturn_t pdc20621_interrupt (int irq, void *dev_instance, struct pt_re
 		tmp = mask & (1 << i);
 		VPRINTK("seq %u, port_no %u, ap %p, tmp %x\n", i, port_no, ap, tmp);
 		if (tmp && ap &&
-		    !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) {
+		    !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
 			struct ata_queued_cmd *qc;
 
 			qc = ata_qc_from_tag(ap, ap->active_tag);
-			if (qc && (!(qc->tf.ctl & ATA_NIEN)))
+			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
 				handled += pdc20621_host_intr(ap, qc, (i > 4),
 							      mmio_base);
 		}
diff --git a/drivers/scsi/sata_vsc.c b/drivers/scsi/sata_vsc.c
index cf1f8a61bda0..4cfc03018ca3 100644
--- a/drivers/scsi/sata_vsc.c
+++ b/drivers/scsi/sata_vsc.c
@@ -201,12 +201,12 @@ static irqreturn_t vsc_sata_interrupt (int irq, void *dev_instance,
 		struct ata_port *ap;
 
 		ap = host_set->ports[i];
-		if (ap && !(ap->flags &
-			    (ATA_FLAG_PORT_DISABLED|ATA_FLAG_NOINTR))) {
+		if (ap &&
+		    !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
 			struct ata_queued_cmd *qc;
 
 			qc = ata_qc_from_tag(ap, ap->active_tag);
-			if (qc && (!(qc->tf.ctl & ATA_NIEN)))
+			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
 				handled += ata_host_intr(ap, qc);
 		}
 	}
diff --git a/include/linux/ata.h b/include/linux/ata.h
index a8155ca4947f..8e88efc565be 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -197,6 +197,7 @@ enum {
 	ATA_TFLAG_WRITE = (1 << 3), /* data dir: host->dev==1 (write) */
 	ATA_TFLAG_LBA = (1 << 4), /* enable LBA */
 	ATA_TFLAG_FUA = (1 << 5), /* enable FUA */
+	ATA_TFLAG_POLLING = (1 << 6), /* set nIEN to 1 and use polling */
 };
 
 enum ata_tf_protocols {
@@ -267,6 +268,8 @@ struct ata_taskfile {
 	  ((u64) (id)[(n) + 1] << 16) | \
 	  ((u64) (id)[(n) + 0]) )
 
+#define ata_id_cdb_intr(id) (((id)[0] & 0x60) == 0x20)
+
 static inline int ata_id_current_chs_valid(const u16 *id)
 {
 	/* For ATA-1 devices, if the INITIALIZE DEVICE PARAMETERS command
@@ -296,6 +299,14 @@ static inline int is_atapi_taskfile(const struct ata_taskfile *tf)
 	       (tf->protocol == ATA_PROT_ATAPI_DMA);
 }
 
+static inline int is_multi_taskfile(struct ata_taskfile *tf)
+{
+	return (tf->command == ATA_CMD_READ_MULTI) ||
+	       (tf->command == ATA_CMD_WRITE_MULTI) ||
+	       (tf->command == ATA_CMD_READ_MULTI_EXT) ||
+	       (tf->command == ATA_CMD_WRITE_MULTI_EXT);
+}
+
 static inline int ata_ok(u8 status)
 {
 	return ((status & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | ATA_ERR))
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 83a1f2ead861..9873f4c54f1e 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -124,6 +124,7 @@ enum {
 	ATA_DFLAG_PIO = (1 << 1), /* device currently in PIO mode */
 	ATA_DFLAG_LOCK_SECTORS = (1 << 2), /* don't adjust max_sectors */
 	ATA_DFLAG_LBA = (1 << 3), /* device supports LBA */
+	ATA_DFLAG_CDB_INTR = (1 << 4), /* device asserts INTRQ when ready for CDB */
 
 	ATA_DEV_UNKNOWN = 0, /* unknown device */
 	ATA_DEV_ATA = 1, /* ATA device */
@@ -142,8 +143,8 @@ enum {
 	ATA_FLAG_MMIO = (1 << 6), /* use MMIO, not PIO */
 	ATA_FLAG_SATA_RESET = (1 << 7), /* (obsolete) use COMRESET */
 	ATA_FLAG_PIO_DMA = (1 << 8), /* PIO cmds via DMA */
-	ATA_FLAG_NOINTR = (1 << 9), /* FIXME: Remove this once
-				     * proper HSM is in place. */
+	ATA_FLAG_PIO_POLLING = (1 << 9), /* use polling PIO if LLD
+					  * doesn't handle PIO interrupts */
 	ATA_FLAG_DEBUGMSG = (1 << 10),
 	ATA_FLAG_NO_ATAPI = (1 << 11), /* No ATAPI support */
 
@@ -166,6 +167,8 @@ enum {
 	ATA_TMOUT_PIO = 30 * HZ,
 	ATA_TMOUT_BOOT = 30 * HZ, /* heuristic */
 	ATA_TMOUT_BOOT_QUICK = 7 * HZ, /* heuristic */
+	ATA_TMOUT_DATAOUT = 30 * HZ,
+	ATA_TMOUT_DATAOUT_QUICK = 5 * HZ,
 	ATA_TMOUT_CDB = 30 * HZ,
 	ATA_TMOUT_CDB_QUICK = 5 * HZ,
 	ATA_TMOUT_INTERNAL = 30 * HZ,
@@ -205,14 +208,16 @@ enum {
 };
 
 enum hsm_task_states {
-	HSM_ST_UNKNOWN,
-	HSM_ST_IDLE,
-	HSM_ST_POLL,
-	HSM_ST_TMOUT,
-	HSM_ST,
-	HSM_ST_LAST,
-	HSM_ST_LAST_POLL,
-	HSM_ST_ERR,
+	HSM_ST_UNKNOWN,   /* state unknown */
+	HSM_ST_IDLE,      /* no command on going */
+	HSM_ST_POLL,      /* same as HSM_ST, waits longer */
+	HSM_ST_TMOUT,     /* timeout */
+	HSM_ST,           /* (waiting the device to) transfer data */
+	HSM_ST_LAST,      /* (waiting the device to) complete command */
+	HSM_ST_LAST_POLL, /* same as HSM_ST_LAST, waits longer */
+	HSM_ST_ERR,       /* error */
+	HSM_ST_FIRST,     /* (waiting the device to)
+			     write CDB or first data block */
 };
 
 enum ata_completion_errors {
@@ -388,8 +393,6 @@ struct ata_port {
 	struct ata_host_stats stats;
 	struct ata_host_set *host_set;
 
-	struct work_struct packet_task;
-
 	struct work_struct pio_task;
 	unsigned int hsm_task_state;
 	unsigned long pio_task_timeout;
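
For reference: ATA_FLAG_PIO_POLLING, added above, is the host flag a low-level driver sets when its interrupt handler cannot service PIO data-register transfers or the ATAPI CDB interrupt; ata_qc_issue_prot() then sets ATA_TFLAG_POLLING on the taskfile and drives the HSM from ata_pio_task() instead of ata_host_intr(). Below is a minimal sketch of an LLD opting in, modelled on the sata_promise/sata_qstor hunks in this patch; the example_* identifiers are placeholders, not part of the patch:

	/* hypothetical port_info for a controller without PIO/CDB interrupt support */
	static const struct ata_port_info example_port_info = {
		.sht		= &example_sht,		/* placeholder scsi_host_template */
		.host_flags	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO |
				  ATA_FLAG_PIO_POLLING,	/* force polling PIO/CDB */
		.pio_mask	= 0x1f,			/* pio0-4 */
		.udma_mask	= 0x7f,			/* udma0-6 */
		.port_ops	= &example_ops,		/* placeholder ata_port_operations */
	};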