Diffstat (limited to 'drivers/scsi')
-rw-r--r--	drivers/scsi/libata-core.c	| 576
-rw-r--r--	drivers/scsi/pdc_adma.c		|   8
-rw-r--r--	drivers/scsi/sata_mv.c		|   7
-rw-r--r--	drivers/scsi/sata_nv.c		|   4
-rw-r--r--	drivers/scsi/sata_promise.c	|   7
-rw-r--r--	drivers/scsi/sata_qstor.c	|  11
-rw-r--r--	drivers/scsi/sata_sx4.c		|   6
-rw-r--r--	drivers/scsi/sata_vsc.c		|   6
8 files changed, 454 insertions, 171 deletions
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index b29bf0dc948a..50fb7cea94eb 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -73,6 +73,7 @@ static int fgb(u32 bitmap);
 static int ata_choose_xfer_mode(const struct ata_port *ap,
 				u8 *xfer_mode_out,
 				unsigned int *xfer_shift_out);
+static void ata_pio_error(struct ata_port *ap);
 
 static unsigned int ata_unique_id = 1;
 static struct workqueue_struct *ata_wq;
@@ -1385,6 +1386,12 @@ retry:
 
 		}
 
+		if (dev->id[59] & 0x100) {
+			dev->multi_count = dev->id[59] & 0xff;
+			DPRINTK("ata%u: dev %u multi count %u\n",
+				ap->id, device, dev->multi_count);
+		}
+
 		ap->host->max_cmd_len = 16;
 	}
 
@@ -1401,6 +1408,9 @@ retry:
 		ap->cdb_len = (unsigned int) rc;
 		ap->host->max_cmd_len = (unsigned char) ap->cdb_len;
 
+		if (ata_id_cdb_intr(dev->id))
+			dev->flags |= ATA_DFLAG_CDB_INTR;
+
 		/* print device info to dmesg */
 		printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n",
 		       ap->id, device,
@@ -2870,7 +2880,6 @@ void ata_poll_qc_complete(struct ata_queued_cmd *qc)
 	unsigned long flags;
 
 	spin_lock_irqsave(&ap->host_set->lock, flags);
-	ap->flags &= ~ATA_FLAG_NOINTR;
 	ata_irq_on(ap);
 	ata_qc_complete(qc);
 	spin_unlock_irqrestore(&ap->host_set->lock, flags);
@@ -2936,7 +2945,8 @@ static unsigned long ata_pio_poll(struct ata_port *ap)
  * None.  (executing in kernel thread context)
  *
  * RETURNS:
- * Non-zero if qc completed, zero otherwise.
+ * Zero if qc completed.
+ * Non-zero if has next.
  */
 
 static int ata_pio_complete (struct ata_port *ap)
@@ -2949,7 +2959,7 @@ static int ata_pio_complete (struct ata_port *ap)
 	 * we enter, BSY will be cleared in a chk-status or two.  If not,
 	 * the drive is probably seeking or something.  Snooze for a couple
 	 * msecs, then chk-status again.  If still busy, fall back to
-	 * HSM_ST_POLL state.
+	 * HSM_ST_LAST_POLL state.
 	 */
 	drv_stat = ata_busy_wait(ap, ATA_BUSY, 10);
 	if (drv_stat & ATA_BUSY) {
@@ -2958,7 +2968,7 @@ static int ata_pio_complete (struct ata_port *ap)
 		if (drv_stat & ATA_BUSY) {
 			ap->hsm_task_state = HSM_ST_LAST_POLL;
 			ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
-			return 0;
+			return 1;
 		}
 	}
 
@@ -2969,7 +2979,7 @@ static int ata_pio_complete (struct ata_port *ap)
 	if (!ata_ok(drv_stat)) {
 		qc->err_mask |= __ac_err_mask(drv_stat);
 		ap->hsm_task_state = HSM_ST_ERR;
-		return 0;
+		return 1;
 	}
 
 	ap->hsm_task_state = HSM_ST_IDLE;
@@ -2979,7 +2989,7 @@ static int ata_pio_complete (struct ata_port *ap)
 
 	/* another command may start at this point */
 
-	return 1;
+	return 0;
 }
 
 
@@ -3151,7 +3161,23 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
 	page = nth_page(page, (offset >> PAGE_SHIFT));
 	offset %= PAGE_SIZE;
 
-	buf = kmap(page) + offset;
+	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
+
+	if (PageHighMem(page)) {
+		unsigned long flags;
+
+		local_irq_save(flags);
+		buf = kmap_atomic(page, KM_IRQ0);
+
+		/* do the actual data transfer */
+		ata_data_xfer(ap, buf + offset, ATA_SECT_SIZE, do_write);
+
+		kunmap_atomic(buf, KM_IRQ0);
+		local_irq_restore(flags);
+	} else {
+		buf = page_address(page);
+		ata_data_xfer(ap, buf + offset, ATA_SECT_SIZE, do_write);
+	}
 
 	qc->cursect++;
 	qc->cursg_ofs++;
@@ -3160,14 +3186,153 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
 		qc->cursg++;
 		qc->cursg_ofs = 0;
 	}
+}
 
-	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
+/**
+ * ata_pio_sectors - Transfer one or many 512-byte sectors.
+ * @qc: Command on going
+ *
+ * Transfer one or many ATA_SECT_SIZE of data from/to the
+ * ATA device for the DRQ request.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+
+static void ata_pio_sectors(struct ata_queued_cmd *qc)
+{
+	if (is_multi_taskfile(&qc->tf)) {
+		/* READ/WRITE MULTIPLE */
+		unsigned int nsect;
 
-	/* do the actual data transfer */
-	do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
-	ata_data_xfer(ap, buf, ATA_SECT_SIZE, do_write);
+		assert(qc->dev->multi_count);
 
-	kunmap(page);
+		nsect = min(qc->nsect - qc->cursect, qc->dev->multi_count);
+		while (nsect--)
+			ata_pio_sector(qc);
+	} else
+		ata_pio_sector(qc);
+}
+
+/**
+ * atapi_send_cdb - Write CDB bytes to hardware
+ * @ap: Port to which ATAPI device is attached.
+ * @qc: Taskfile currently active
+ *
+ * When device has indicated its readiness to accept
+ * a CDB, this function is called.  Send the CDB.
+ *
+ * LOCKING:
+ * caller.
+ */
+
+static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
+{
+	/* send SCSI cdb */
+	DPRINTK("send cdb\n");
+	assert(ap->cdb_len >= 12);
+
+	ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1);
+	ata_altstatus(ap); /* flush */
+
+	switch (qc->tf.protocol) {
+	case ATA_PROT_ATAPI:
+		ap->hsm_task_state = HSM_ST;
+		break;
+	case ATA_PROT_ATAPI_NODATA:
+		ap->hsm_task_state = HSM_ST_LAST;
+		break;
+	case ATA_PROT_ATAPI_DMA:
+		ap->hsm_task_state = HSM_ST_LAST;
+		/* initiate bmdma */
+		ap->ops->bmdma_start(qc);
+		break;
+	}
+}
+
+/**
+ * ata_pio_first_block - Write first data block to hardware
+ * @ap: Port to which ATA/ATAPI device is attached.
+ *
+ * When device has indicated its readiness to accept
+ * the data, this function sends out the CDB or
+ * the first data block by PIO.
+ * After this,
+ *   - If polling, ata_pio_task() handles the rest.
+ *   - Otherwise, interrupt handler takes over.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep)
+ *
+ * RETURNS:
+ * Zero if irq handler takes over
+ * Non-zero if has next (polling).
+ */
+
+static int ata_pio_first_block(struct ata_port *ap)
+{
+	struct ata_queued_cmd *qc;
+	u8 status;
+	unsigned long flags;
+	int has_next;
+
+	qc = ata_qc_from_tag(ap, ap->active_tag);
+	assert(qc != NULL);
+	assert(qc->flags & ATA_QCFLAG_ACTIVE);
+
+	/* if polling, we will stay in the work queue after sending the data.
+	 * otherwise, interrupt handler takes over after sending the data.
+	 */
+	has_next = (qc->tf.flags & ATA_TFLAG_POLLING);
+
+	/* sleep-wait for BSY to clear */
+	DPRINTK("busy wait\n");
+	if (ata_busy_sleep(ap, ATA_TMOUT_DATAOUT_QUICK, ATA_TMOUT_DATAOUT)) {
+		qc->err_mask |= AC_ERR_ATA_BUS;
+		ap->hsm_task_state = HSM_ST_TMOUT;
+		goto err_out;
+	}
+
+	/* make sure DRQ is set */
+	status = ata_chk_status(ap);
+	if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ) {
+		/* device status error */
+		qc->err_mask |= AC_ERR_ATA_BUS;
+		ap->hsm_task_state = HSM_ST_ERR;
+		goto err_out;
+	}
+
+	/* Send the CDB (atapi) or the first data block (ata pio out).
+	 * During the state transition, interrupt handler shouldn't
+	 * be invoked before the data transfer is complete and
+	 * hsm_task_state is changed. Hence, the following locking.
+	 */
+	spin_lock_irqsave(&ap->host_set->lock, flags);
+
+	if (qc->tf.protocol == ATA_PROT_PIO) {
+		/* PIO data out protocol.
+		 * send first data block.
+		 */
+
+		/* ata_pio_sectors() might change the state to HSM_ST_LAST.
+		 * so, the state is changed here before ata_pio_sectors().
+		 */
+		ap->hsm_task_state = HSM_ST;
+		ata_pio_sectors(qc);
+		ata_altstatus(ap); /* flush */
+	} else
+		/* send CDB */
+		atapi_send_cdb(ap, qc);
+
+	spin_unlock_irqrestore(&ap->host_set->lock, flags);
+
+	/* if polling, ata_pio_task() handles the rest.
+	 * otherwise, interrupt handler takes over from here.
+	 */
+	return has_next;
+
+err_out:
+	return 1; /* has next */
 }
 
 /**
@@ -3233,7 +3398,23 @@ next_sg:
 	/* don't cross page boundaries */
 	count = min(count, (unsigned int)PAGE_SIZE - offset);
 
-	buf = kmap(page) + offset;
+	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
+
+	if (PageHighMem(page)) {
+		unsigned long flags;
+
+		local_irq_save(flags);
+		buf = kmap_atomic(page, KM_IRQ0);
+
+		/* do the actual data transfer */
+		ata_data_xfer(ap, buf + offset, count, do_write);
+
+		kunmap_atomic(buf, KM_IRQ0);
+		local_irq_restore(flags);
+	} else {
+		buf = page_address(page);
+		ata_data_xfer(ap, buf + offset, count, do_write);
+	}
 
 	bytes -= count;
 	qc->curbytes += count;
@@ -3244,13 +3425,6 @@ next_sg:
 		qc->cursg_ofs = 0;
 	}
 
-	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
-
-	/* do the actual data transfer */
-	ata_data_xfer(ap, buf, count, do_write);
-
-	kunmap(page);
-
 	if (bytes)
 		goto next_sg;
 }
@@ -3287,6 +3461,8 @@ static void atapi_pio_bytes(struct ata_queued_cmd *qc)
 	if (do_write != i_write)
 		goto err_out;
 
+	VPRINTK("ata%u: xfering %d bytes\n", ap->id, bytes);
+
 	__atapi_pio_bytes(qc, bytes);
 
 	return;
@@ -3357,19 +3533,22 @@ static void ata_pio_block(struct ata_port *ap)
 			return;
 		}
 
-		ata_pio_sector(qc);
+		ata_pio_sectors(qc);
 	}
+
+	ata_altstatus(ap); /* flush */
 }
 
 static void ata_pio_error(struct ata_port *ap)
 {
 	struct ata_queued_cmd *qc;
 
-	printk(KERN_WARNING "ata%u: PIO error\n", ap->id);
-
 	qc = ata_qc_from_tag(ap, ap->active_tag);
 	assert(qc != NULL);
 
+	if (qc->tf.command != ATA_CMD_PACKET)
+		printk(KERN_WARNING "ata%u: PIO error\n", ap->id);
+
 	/* make sure qc->err_mask is available to
 	 * know what's wrong and recover
 	 */
@@ -3384,22 +3563,23 @@ static void ata_pio_task(void *_data)
 {
 	struct ata_port *ap = _data;
 	unsigned long timeout;
-	int qc_completed;
+	int has_next;
 
 fsm_start:
 	timeout = 0;
-	qc_completed = 0;
+	has_next = 1;
 
 	switch (ap->hsm_task_state) {
-	case HSM_ST_IDLE:
-		return;
+	case HSM_ST_FIRST:
+		has_next = ata_pio_first_block(ap);
+		break;
 
 	case HSM_ST:
 		ata_pio_block(ap);
 		break;
 
 	case HSM_ST_LAST:
-		qc_completed = ata_pio_complete(ap);
+		has_next = ata_pio_complete(ap);
 		break;
 
 	case HSM_ST_POLL:
@@ -3411,11 +3591,15 @@ fsm_start:
 	case HSM_ST_ERR:
 		ata_pio_error(ap);
 		return;
+
+	default:
+		BUG();
+		return;
 	}
 
 	if (timeout)
 		queue_delayed_work(ata_wq, &ap->pio_task, timeout);
-	else if (!qc_completed)
+	else if (has_next)
 		goto fsm_start;
 }
 
@@ -3478,6 +3662,8 @@ static void ata_qc_timeout(struct ata_queued_cmd *qc)
 		printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x host_stat 0x%x\n",
 		       ap->id, qc->tf.command, drv_stat, host_stat);
 
+		ap->hsm_task_state = HSM_ST_IDLE;
+
 		/* complete taskfile transaction */
 		qc->err_mask |= ac_err_mask(drv_stat);
 		ata_qc_complete(qc);
@@ -3727,43 +3913,103 @@ unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
 
+	/* Use polling pio if the LLD doesn't handle
+	 * interrupt driven pio and atapi CDB interrupt.
+	 */
+	if (ap->flags & ATA_FLAG_PIO_POLLING) {
+		switch (qc->tf.protocol) {
+		case ATA_PROT_PIO:
+		case ATA_PROT_ATAPI:
+		case ATA_PROT_ATAPI_NODATA:
+			qc->tf.flags |= ATA_TFLAG_POLLING;
+			break;
+		case ATA_PROT_ATAPI_DMA:
+			if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
+				BUG();
+			break;
+		default:
+			break;
+		}
+	}
+
+	/* select the device */
 	ata_dev_select(ap, qc->dev->devno, 1, 0);
 
+	/* start the command */
 	switch (qc->tf.protocol) {
 	case ATA_PROT_NODATA:
+		if (qc->tf.flags & ATA_TFLAG_POLLING)
+			ata_qc_set_polling(qc);
+
 		ata_tf_to_host(ap, &qc->tf);
+		ap->hsm_task_state = HSM_ST_LAST;
+
+		if (qc->tf.flags & ATA_TFLAG_POLLING)
+			queue_work(ata_wq, &ap->pio_task);
+
 		break;
 
 	case ATA_PROT_DMA:
+		assert(!(qc->tf.flags & ATA_TFLAG_POLLING));
+
 		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
 		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
 		ap->ops->bmdma_start(qc);	    /* initiate bmdma */
+		ap->hsm_task_state = HSM_ST_LAST;
 		break;
 
-	case ATA_PROT_PIO: /* load tf registers, initiate polling pio */
-		ata_qc_set_polling(qc);
-		ata_tf_to_host(ap, &qc->tf);
-		ap->hsm_task_state = HSM_ST;
-		queue_work(ata_wq, &ap->pio_task);
-		break;
+	case ATA_PROT_PIO:
+		if (qc->tf.flags & ATA_TFLAG_POLLING)
+			ata_qc_set_polling(qc);
 
-	case ATA_PROT_ATAPI:
-		ata_qc_set_polling(qc);
 		ata_tf_to_host(ap, &qc->tf);
-		queue_work(ata_wq, &ap->packet_task);
+
+		if (qc->tf.flags & ATA_TFLAG_WRITE) {
+			/* PIO data out protocol */
+			ap->hsm_task_state = HSM_ST_FIRST;
+			queue_work(ata_wq, &ap->pio_task);
+
+			/* always send first data block using
+			 * the ata_pio_task() codepath.
+			 */
+		} else {
+			/* PIO data in protocol */
+			ap->hsm_task_state = HSM_ST;
+
+			if (qc->tf.flags & ATA_TFLAG_POLLING)
+				queue_work(ata_wq, &ap->pio_task);
+
+			/* if polling, ata_pio_task() handles the rest.
+			 * otherwise, interrupt handler takes over from here.
+			 */
+		}
+
 		break;
 
+	case ATA_PROT_ATAPI:
 	case ATA_PROT_ATAPI_NODATA:
-		ap->flags |= ATA_FLAG_NOINTR;
+		if (qc->tf.flags & ATA_TFLAG_POLLING)
+			ata_qc_set_polling(qc);
+
 		ata_tf_to_host(ap, &qc->tf);
-		queue_work(ata_wq, &ap->packet_task);
+		ap->hsm_task_state = HSM_ST_FIRST;
+
+		/* send cdb by polling if no cdb interrupt */
+		if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
+		    (qc->tf.flags & ATA_TFLAG_POLLING))
+			queue_work(ata_wq, &ap->pio_task);
 		break;
 
 	case ATA_PROT_ATAPI_DMA:
-		ap->flags |= ATA_FLAG_NOINTR;
+		assert(!(qc->tf.flags & ATA_TFLAG_POLLING));
+
 		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
 		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
-		queue_work(ata_wq, &ap->packet_task);
+		ap->hsm_task_state = HSM_ST_FIRST;
+
+		/* send cdb by polling if no cdb interrupt */
+		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
+			queue_work(ata_wq, &ap->pio_task);
 		break;
 
 	default:
@@ -4024,48 +4270,160 @@ void ata_bmdma_stop(struct ata_queued_cmd *qc)
 inline unsigned int ata_host_intr (struct ata_port *ap,
 				   struct ata_queued_cmd *qc)
 {
-	u8 status, host_stat;
+	u8 status, host_stat = 0;
 
-	switch (qc->tf.protocol) {
-
-	case ATA_PROT_DMA:
-	case ATA_PROT_ATAPI_DMA:
-	case ATA_PROT_ATAPI:
-		/* check status of DMA engine */
-		host_stat = ap->ops->bmdma_status(ap);
-		VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
+	VPRINTK("ata%u: protocol %d task_state %d\n",
+		ap->id, qc->tf.protocol, ap->hsm_task_state);
 
-		/* if it's not our irq... */
-		if (!(host_stat & ATA_DMA_INTR))
+	/* Check whether we are expecting interrupt in this state */
+	switch (ap->hsm_task_state) {
+	case HSM_ST_FIRST:
+		/* Check the ATA_DFLAG_CDB_INTR flag is enough here.
+		 * The flag was turned on only for atapi devices.
+		 * No need to check is_atapi_taskfile(&qc->tf) again.
+		 */
+		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
 			goto idle_irq;
+		break;
+	case HSM_ST_LAST:
+		if (qc->tf.protocol == ATA_PROT_DMA ||
+		    qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
+			/* check status of DMA engine */
+			host_stat = ap->ops->bmdma_status(ap);
+			VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
+
+			/* if it's not our irq... */
+			if (!(host_stat & ATA_DMA_INTR))
+				goto idle_irq;
+
+			/* before we do anything else, clear DMA-Start bit */
+			ap->ops->bmdma_stop(qc);
+
+			if (unlikely(host_stat & ATA_DMA_ERR)) {
+				/* error when transfering data to/from memory */
+				qc->err_mask |= AC_ERR_HOST_BUS;
+				ap->hsm_task_state = HSM_ST_ERR;
+			}
+		}
+		break;
+	case HSM_ST:
+		break;
+	default:
+		goto idle_irq;
+	}
 
-		/* before we do anything else, clear DMA-Start bit */
-		ap->ops->bmdma_stop(qc);
+	/* check altstatus */
+	status = ata_altstatus(ap);
+	if (status & ATA_BUSY)
+		goto idle_irq;
 
-		/* fall through */
+	/* check main status, clearing INTRQ */
+	status = ata_chk_status(ap);
+	if (unlikely(status & ATA_BUSY))
+		goto idle_irq;
 
-	case ATA_PROT_ATAPI_NODATA:
-	case ATA_PROT_NODATA:
-		/* check altstatus */
-		status = ata_altstatus(ap);
-		if (status & ATA_BUSY)
-			goto idle_irq;
+	DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
+		ap->id, qc->tf.protocol, ap->hsm_task_state, status);
 
-		/* check main status, clearing INTRQ */
-		status = ata_chk_status(ap);
-		if (unlikely(status & ATA_BUSY))
-			goto idle_irq;
-		DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
-			ap->id, qc->tf.protocol, status);
+	/* ack bmdma irq events */
+	ap->ops->irq_clear(ap);
 
-		/* ack bmdma irq events */
-		ap->ops->irq_clear(ap);
+	/* check error */
+	if (unlikely(status & (ATA_ERR | ATA_DF))) {
+		qc->err_mask |= AC_ERR_DEV;
+		ap->hsm_task_state = HSM_ST_ERR;
+	}
+
+fsm_start:
+	switch (ap->hsm_task_state) {
+	case HSM_ST_FIRST:
+		/* Some pre-ATAPI-4 devices assert INTRQ
+		 * at this state when ready to receive CDB.
+		 */
+
+		/* check device status */
+		if (unlikely((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ)) {
+			/* Wrong status. Let EH handle this */
+			qc->err_mask |= AC_ERR_ATA_BUS;
+			ap->hsm_task_state = HSM_ST_ERR;
+			goto fsm_start;
+		}
+
+		atapi_send_cdb(ap, qc);
+
+		break;
+
+	case HSM_ST:
+		/* complete command or read/write the data register */
+		if (qc->tf.protocol == ATA_PROT_ATAPI) {
+			/* ATAPI PIO protocol */
+			if ((status & ATA_DRQ) == 0) {
+				/* no more data to transfer */
+				ap->hsm_task_state = HSM_ST_LAST;
+				goto fsm_start;
+			}
+
+			atapi_pio_bytes(qc);
+
+			if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
+				/* bad ireason reported by device */
+				goto fsm_start;
+
+		} else {
+			/* ATA PIO protocol */
+			if (unlikely((status & ATA_DRQ) == 0)) {
+				/* handle BSY=0, DRQ=0 as error */
+				qc->err_mask |= AC_ERR_ATA_BUS;
+				ap->hsm_task_state = HSM_ST_ERR;
+				goto fsm_start;
+			}
+
+			ata_pio_sectors(qc);
+
+			if (ap->hsm_task_state == HSM_ST_LAST &&
+			    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
+				/* all data read */
+				ata_altstatus(ap);
+				status = ata_chk_status(ap);
+				goto fsm_start;
+			}
+		}
+
+		ata_altstatus(ap); /* flush */
+		break;
+
+	case HSM_ST_LAST:
+		if (unlikely(status & ATA_DRQ)) {
+			/* handle DRQ=1 as error */
+			qc->err_mask |= AC_ERR_ATA_BUS;
+			ap->hsm_task_state = HSM_ST_ERR;
+			goto fsm_start;
+		}
+
+		/* no more data to transfer */
+		DPRINTK("ata%u: command complete, drv_stat 0x%x\n",
+			ap->id, status);
+
+		ap->hsm_task_state = HSM_ST_IDLE;
 
 		/* complete taskfile transaction */
 		qc->err_mask |= ac_err_mask(status);
 		ata_qc_complete(qc);
 		break;
 
+	case HSM_ST_ERR:
+		if (qc->tf.command != ATA_CMD_PACKET)
+			printk(KERN_ERR "ata%u: command error, drv_stat 0x%x host_stat 0x%x\n",
+			       ap->id, status, host_stat);
+
+		/* make sure qc->err_mask is available to
+		 * know what's wrong and recover
+		 */
+		assert(qc->err_mask);
+
+		ap->hsm_task_state = HSM_ST_IDLE;
+		ata_qc_complete(qc);
+		break;
+
 	default:
 		goto idle_irq;
 	}
@@ -4116,11 +4474,11 @@ irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
 
 		ap = host_set->ports[i];
 		if (ap &&
-		    !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) {
+		    !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
 			struct ata_queued_cmd *qc;
 
 			qc = ata_qc_from_tag(ap, ap->active_tag);
-			if (qc && (!(qc->tf.ctl & ATA_NIEN)) &&
+			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
 			    (qc->flags & ATA_QCFLAG_ACTIVE))
 				handled |= ata_host_intr(ap, qc);
 		}
@@ -4132,79 +4490,6 @@ irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
 }
 
 /**
- * atapi_packet_task - Write CDB bytes to hardware
- * @_data: Port to which ATAPI device is attached.
- *
- * When device has indicated its readiness to accept
- * a CDB, this function is called.  Send the CDB.
- * If DMA is to be performed, exit immediately.
- * Otherwise, we are in polling mode, so poll
- * status under operation succeeds or fails.
- *
- * LOCKING:
- * Kernel thread context (may sleep)
- */
-
-static void atapi_packet_task(void *_data)
-{
-	struct ata_port *ap = _data;
-	struct ata_queued_cmd *qc;
-	u8 status;
-
-	qc = ata_qc_from_tag(ap, ap->active_tag);
-	assert(qc != NULL);
-	assert(qc->flags & ATA_QCFLAG_ACTIVE);
-
-	/* sleep-wait for BSY to clear */
-	DPRINTK("busy wait\n");
-	if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB)) {
-		qc->err_mask |= AC_ERR_TIMEOUT;
-		goto err_out;
-	}
-
-	/* make sure DRQ is set */
-	status = ata_chk_status(ap);
-	if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ) {
-		qc->err_mask |= AC_ERR_HSM;
-		goto err_out;
-	}
-
-	/* send SCSI cdb */
-	DPRINTK("send cdb\n");
-	assert(ap->cdb_len >= 12);
-
-	if (qc->tf.protocol == ATA_PROT_ATAPI_DMA ||
-	    qc->tf.protocol == ATA_PROT_ATAPI_NODATA) {
-		unsigned long flags;
-
-		/* Once we're done issuing command and kicking bmdma,
-		 * irq handler takes over.  To not lose irq, we need
-		 * to clear NOINTR flag before sending cdb, but
-		 * interrupt handler shouldn't be invoked before we're
-		 * finished.  Hence, the following locking.
-		 */
-		spin_lock_irqsave(&ap->host_set->lock, flags);
-		ap->flags &= ~ATA_FLAG_NOINTR;
-		ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1);
-		if (qc->tf.protocol == ATA_PROT_ATAPI_DMA)
-			ap->ops->bmdma_start(qc);	/* initiate bmdma */
-		spin_unlock_irqrestore(&ap->host_set->lock, flags);
-	} else {
-		ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1);
-
-		/* PIO commands are handled by polling */
-		ap->hsm_task_state = HSM_ST;
-		queue_work(ata_wq, &ap->pio_task);
-	}
-
-	return;
-
-err_out:
-	ata_poll_qc_complete(qc);
-}
-
-
-/**
  * ata_port_start - Set port up for dma.
  * @ap: Port to initialize
  *
@@ -4420,7 +4705,6 @@ static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
 	ap->active_tag = ATA_TAG_POISON;
 	ap->last_ctl = 0xFF;
 
-	INIT_WORK(&ap->packet_task, atapi_packet_task, ap);
 	INIT_WORK(&ap->pio_task, ata_pio_task, ap);
 
 	for (i = 0; i < ATA_MAX_DEVICES; i++)
diff --git a/drivers/scsi/pdc_adma.c b/drivers/scsi/pdc_adma.c
index 3a6bf58dc37b..2dca6c53868a 100644
--- a/drivers/scsi/pdc_adma.c
+++ b/drivers/scsi/pdc_adma.c
@@ -457,13 +457,13 @@ static inline unsigned int adma_intr_pkt(struct ata_host_set *host_set)
 			continue;
 		handled = 1;
 		adma_enter_reg_mode(ap);
-		if (ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))
+		if (ap->flags & ATA_FLAG_PORT_DISABLED)
 			continue;
 		pp = ap->private_data;
 		if (!pp || pp->state != adma_state_pkt)
 			continue;
 		qc = ata_qc_from_tag(ap, ap->active_tag);
-		if (qc && (!(qc->tf.ctl & ATA_NIEN))) {
+		if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
 			if ((status & (aPERR | aPSD | aUIRQ)))
 				qc->err_mask |= AC_ERR_OTHER;
 			else if (pp->pkt[0] != cDONE)
@@ -482,13 +482,13 @@ static inline unsigned int adma_intr_mmio(struct ata_host_set *host_set)
 	for (port_no = 0; port_no < host_set->n_ports; ++port_no) {
 		struct ata_port *ap;
 		ap = host_set->ports[port_no];
-		if (ap && (!(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR)))) {
+		if (ap && (!(ap->flags & ATA_FLAG_PORT_DISABLED))) {
 			struct ata_queued_cmd *qc;
 			struct adma_port_priv *pp = ap->private_data;
 			if (!pp || pp->state != adma_state_mmio)
 				continue;
 			qc = ata_qc_from_tag(ap, ap->active_tag);
-			if (qc && (!(qc->tf.ctl & ATA_NIEN))) {
+			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
 
 				/* check main status, clearing INTRQ */
 				u8 status = ata_check_status(ap);
diff --git a/drivers/scsi/sata_mv.c b/drivers/scsi/sata_mv.c
index 281223a0e45f..498d6284a2f7 100644
--- a/drivers/scsi/sata_mv.c
+++ b/drivers/scsi/sata_mv.c
@@ -87,7 +87,7 @@ enum {
 	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */
 	MV_COMMON_FLAGS		= (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
 				   ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
-				   ATA_FLAG_NO_ATAPI),
+				   ATA_FLAG_PIO_POLLING),
 	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,
 
 	CRQB_FLAG_READ		= (1 << 0),
@@ -1220,8 +1220,7 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
 			handled++;
 		}
 
-		if (ap &&
-		    (ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR)))
+		if (ap && (ap->flags & ATA_FLAG_PORT_DISABLED))
 			continue;
 
 		err_mask = ac_err_mask(ata_status);
@@ -1242,7 +1241,7 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
 			VPRINTK("port %u IRQ found for qc, "
 				"ata_status 0x%x\n", port,ata_status);
 			/* mark qc status appropriately */
-			if (!(qc->tf.ctl & ATA_NIEN)) {
+			if (!(qc->tf.flags & ATA_TFLAG_POLLING)) {
 				qc->err_mask |= err_mask;
 				ata_qc_complete(qc);
 			}
diff --git a/drivers/scsi/sata_nv.c b/drivers/scsi/sata_nv.c
index bbbb55eeb73a..945194b76998 100644
--- a/drivers/scsi/sata_nv.c
+++ b/drivers/scsi/sata_nv.c
@@ -309,11 +309,11 @@ static irqreturn_t nv_interrupt (int irq, void *dev_instance,
 
 		ap = host_set->ports[i];
 		if (ap &&
-		    !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) {
+		    !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
 			struct ata_queued_cmd *qc;
 
 			qc = ata_qc_from_tag(ap, ap->active_tag);
-			if (qc && (!(qc->tf.ctl & ATA_NIEN)))
+			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
 				handled += ata_host_intr(ap, qc);
 			else
 				// No request pending?  Clear interrupt status
diff --git a/drivers/scsi/sata_promise.c b/drivers/scsi/sata_promise.c
index bac36d5b7c3e..010e08819886 100644
--- a/drivers/scsi/sata_promise.c
+++ b/drivers/scsi/sata_promise.c
@@ -73,7 +73,8 @@ enum {
 	PDC_RESET		= (1 << 11), /* HDMA reset */
 
 	PDC_COMMON_FLAGS	= ATA_FLAG_NO_LEGACY | ATA_FLAG_SRST |
-				  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI,
+				  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
+				  ATA_FLAG_PIO_POLLING,
 };
 
 
@@ -510,11 +511,11 @@ static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *r
 		ap = host_set->ports[i];
 		tmp = mask & (1 << (i + 1));
 		if (tmp && ap &&
-		    !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) {
+		    !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
 			struct ata_queued_cmd *qc;
 
 			qc = ata_qc_from_tag(ap, ap->active_tag);
-			if (qc && (!(qc->tf.ctl & ATA_NIEN)))
+			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
 				handled += pdc_host_intr(ap, qc);
 		}
 	}
diff --git a/drivers/scsi/sata_qstor.c b/drivers/scsi/sata_qstor.c
index 2afbeb77f6fe..b2f87da75735 100644
--- a/drivers/scsi/sata_qstor.c
+++ b/drivers/scsi/sata_qstor.c
@@ -177,7 +177,7 @@ static const struct ata_port_info qs_port_info[] = {
 		.host_flags	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
 				  ATA_FLAG_SATA_RESET |
 				  //FIXME ATA_FLAG_SRST |
-				  ATA_FLAG_MMIO,
+				  ATA_FLAG_MMIO | ATA_FLAG_PIO_POLLING,
 		.pio_mask	= 0x10, /* pio4 */
 		.udma_mask	= 0x7f, /* udma0-6 */
 		.port_ops	= &qs_ata_ops,
@@ -396,14 +396,13 @@ static inline unsigned int qs_intr_pkt(struct ata_host_set *host_set)
 			DPRINTK("SFF=%08x%08x: sCHAN=%u sHST=%d sDST=%02x\n",
 					sff1, sff0, port_no, sHST, sDST);
 			handled = 1;
-			if (ap && !(ap->flags &
-				    (ATA_FLAG_PORT_DISABLED|ATA_FLAG_NOINTR))) {
+			if (ap && !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
 				struct ata_queued_cmd *qc;
 				struct qs_port_priv *pp = ap->private_data;
 				if (!pp || pp->state != qs_state_pkt)
 					continue;
 				qc = ata_qc_from_tag(ap, ap->active_tag);
-				if (qc && (!(qc->tf.ctl & ATA_NIEN))) {
+				if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
 					switch (sHST) {
 					case 0: /* successful CPB */
 					case 3: /* device error */
@@ -430,13 +429,13 @@ static inline unsigned int qs_intr_mmio(struct ata_host_set *host_set)
 		struct ata_port *ap;
 		ap = host_set->ports[port_no];
 		if (ap &&
-		    !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) {
+		    !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
 			struct ata_queued_cmd *qc;
 			struct qs_port_priv *pp = ap->private_data;
 			if (!pp || pp->state != qs_state_mmio)
 				continue;
 			qc = ata_qc_from_tag(ap, ap->active_tag);
-			if (qc && (!(qc->tf.ctl & ATA_NIEN))) {
+			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
 
 				/* check main status, clearing INTRQ */
 				u8 status = ata_check_status(ap);
diff --git a/drivers/scsi/sata_sx4.c b/drivers/scsi/sata_sx4.c
index 3175c6bb4fec..db08a8ba9ea7 100644
--- a/drivers/scsi/sata_sx4.c
+++ b/drivers/scsi/sata_sx4.c
@@ -220,7 +220,7 @@ static const struct ata_port_info pdc_port_info[] = {
 		.sht		= &pdc_sata_sht,
 		.host_flags	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
 				  ATA_FLAG_SRST | ATA_FLAG_MMIO |
-				  ATA_FLAG_NO_ATAPI,
+				  ATA_FLAG_PIO_POLLING,
 		.pio_mask	= 0x1f, /* pio0-4 */
 		.mwdma_mask	= 0x07, /* mwdma0-2 */
 		.udma_mask	= 0x7f, /* udma0-6 ; FIXME */
@@ -835,11 +835,11 @@ static irqreturn_t pdc20621_interrupt (int irq, void *dev_instance, struct pt_re
 		tmp = mask & (1 << i);
 		VPRINTK("seq %u, port_no %u, ap %p, tmp %x\n", i, port_no, ap, tmp);
 		if (tmp && ap &&
-		    !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) {
+		    !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
 			struct ata_queued_cmd *qc;
 
 			qc = ata_qc_from_tag(ap, ap->active_tag);
-			if (qc && (!(qc->tf.ctl & ATA_NIEN)))
+			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
 				handled += pdc20621_host_intr(ap, qc, (i > 4),
 							      mmio_base);
 		}
diff --git a/drivers/scsi/sata_vsc.c b/drivers/scsi/sata_vsc.c
index 2e2c3b7acb0c..3e34fedd2104 100644
--- a/drivers/scsi/sata_vsc.c
+++ b/drivers/scsi/sata_vsc.c
@@ -201,12 +201,12 @@ static irqreturn_t vsc_sata_interrupt (int irq, void *dev_instance,
 		struct ata_port *ap;
 
 		ap = host_set->ports[i];
-		if (ap && !(ap->flags &
-			    (ATA_FLAG_PORT_DISABLED|ATA_FLAG_NOINTR))) {
+		if (ap &&
+		    !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
 			struct ata_queued_cmd *qc;
 
 			qc = ata_qc_from_tag(ap, ap->active_tag);
-			if (qc && (!(qc->tf.ctl & ATA_NIEN)))
+			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
 				handled += ata_host_intr(ap, qc);
 		}
 	}