author		Albert Lee <albertcc@tw.ibm.com>	2005-09-27 05:38:03 -0400
committer	Jeff Garzik <jgarzik@pobox.com>	2005-09-28 12:07:13 -0400
commit		312f7da2824c82800ee78d6190f12854456957af (patch)
tree		68b3b18a4ddd783a5c75dba688aabf2de0682fb3
parent		14be71f4c5c5ad1e222c5202ee6d234e9c8828b7 (diff)
[PATCH] libata: interrupt driven pio for libata-core
- add HSM_ST_FIRST for the state before sending the ATAPI CDB or the first "ATA PIO data out" data block
- add ATA_TFLAG_POLLING and ATA_DFLAG_CDB_INTR flags
- remove the ATA_FLAG_NOINTR flag since the interrupt handler is now aware of the states
- modify ata_pio_sector() and atapi_pio_bytes() to work in interrupt context
- modify ata_host_intr() to handle PIO interrupts
- modify ata_qc_issue_prot() to initialize the states
- change atapi_packet_task() to send the first "ATA PIO data out" data block
- support pre-ATA4 ATAPI devices which raise an interrupt when ready to receive the CDB

Signed-off-by: Albert Lee <albertcc@tw.ibm.com>
Signed-off-by: Jeff Garzik <jgarzik@pobox.com>
-rw-r--r--	drivers/scsi/libata-core.c	333
-rw-r--r--	include/linux/ata.h		3
-rw-r--r--	include/linux/libata.h		4
3 files changed, 271 insertions(+), 69 deletions(-)
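The state flow summarized in the bullets above can be hard to see from the diff alone. The following stand-alone C sketch is not part of the patch: hsm_step(), its arguments, and the simulated main() are invented for illustration, and only the state names and the BSY/DRQ checks mirror what the patch does. It walks an interrupt-driven ATAPI PIO read through the new HSM states.

/*
 * Illustrative only: a reduced model of the host state machine (HSM)
 * flow this patch introduces.  Each call to hsm_step() plays the role
 * of one interrupt arriving while the command is in flight.
 */
#include <stdio.h>
#include <stdbool.h>

enum hsm_state { HSM_ST_IDLE, HSM_ST_FIRST, HSM_ST, HSM_ST_LAST, HSM_ST_ERR };

/* simplified device status bits, as in <linux/ata.h> */
#define ATA_BUSY	0x80
#define ATA_DRQ		0x08

/* one interrupt's worth of HSM handling, loosely following ata_host_intr() */
static enum hsm_state hsm_step(enum hsm_state state, unsigned char status, bool last_block)
{
	switch (state) {
	case HSM_ST_FIRST:
		/* device must be ready for the CDB / first data block: BSY=0, DRQ=1 */
		if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ)
			return HSM_ST_ERR;
		/* the CDB (or first data block) would be written here */
		return HSM_ST;
	case HSM_ST:
		/* DRQ must still be set while data blocks remain */
		if (!(status & ATA_DRQ))
			return HSM_ST_ERR;
		/* one data block would be transferred here */
		return last_block ? HSM_ST_LAST : HSM_ST;
	case HSM_ST_LAST:
		/* DRQ must be clear when the command completes */
		if (status & ATA_DRQ)
			return HSM_ST_ERR;
		return HSM_ST_IDLE;	/* command complete */
	default:
		return HSM_ST_ERR;
	}
}

int main(void)
{
	/* walk an ATAPI PIO data-in command through the states */
	enum hsm_state st = HSM_ST_FIRST;

	st = hsm_step(st, ATA_DRQ, false);	/* CDB accepted     -> HSM_ST      */
	st = hsm_step(st, ATA_DRQ, false);	/* first data block -> HSM_ST      */
	st = hsm_step(st, ATA_DRQ, true);	/* last data block  -> HSM_ST_LAST */
	st = hsm_step(st, 0, true);		/* completion intr  -> HSM_ST_IDLE */

	printf("final state: %s\n", st == HSM_ST_IDLE ? "HSM_ST_IDLE" : "error");
	return 0;
}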
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index c4fcdc30f18c..cc2d1308826e 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -1292,6 +1292,9 @@ retry:
 	ap->cdb_len = (unsigned int) rc;
 	ap->host->max_cmd_len = (unsigned char) ap->cdb_len;
 
+	if (ata_id_cdb_intr(dev->id))
+		dev->flags |= ATA_DFLAG_CDB_INTR;
+
 	/* print device info to dmesg */
 	printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n",
 	       ap->id, device,
@@ -2405,7 +2408,6 @@ void ata_poll_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat)
 	unsigned long flags;
 
 	spin_lock_irqsave(&ap->host_set->lock, flags);
-	ap->flags &= ~ATA_FLAG_NOINTR;
 	ata_irq_on(ap);
 	ata_qc_complete(qc, drv_stat);
 	spin_unlock_irqrestore(&ap->host_set->lock, flags);
@@ -2660,6 +2662,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
 	struct page *page;
 	unsigned int offset;
 	unsigned char *buf;
+	unsigned long flags;
 
 	if (qc->cursect == (qc->nsect - 1))
 		ap->hsm_task_state = HSM_ST_LAST;
@@ -2671,7 +2674,8 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
 	page = nth_page(page, (offset >> PAGE_SHIFT));
 	offset %= PAGE_SIZE;
 
-	buf = kmap(page) + offset;
+	local_irq_save(flags);
+	buf = kmap_atomic(page, KM_IRQ0) + offset;
 
 	qc->cursect++;
 	qc->cursg_ofs++;
@@ -2687,7 +2691,8 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
 	do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
 	ata_data_xfer(ap, buf, ATA_SECT_SIZE, do_write);
 
-	kunmap(page);
+	kunmap_atomic(buf - offset, KM_IRQ0);
+	local_irq_restore(flags);
 }
 
 /**
@@ -2710,6 +2715,7 @@ static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
 	struct page *page;
 	unsigned char *buf;
 	unsigned int offset, count;
+	unsigned long flags;
 
 	if (qc->curbytes + bytes >= qc->nbytes)
 		ap->hsm_task_state = HSM_ST_LAST;
@@ -2753,7 +2759,8 @@ next_sg:
 	/* don't cross page boundaries */
 	count = min(count, (unsigned int)PAGE_SIZE - offset);
 
-	buf = kmap(page) + offset;
+	local_irq_save(flags);
+	buf = kmap_atomic(page, KM_IRQ0) + offset;
 
 	bytes -= count;
 	qc->curbytes += count;
@@ -2769,7 +2776,8 @@ next_sg:
 	/* do the actual data transfer */
 	ata_data_xfer(ap, buf, count, do_write);
 
-	kunmap(page);
+	kunmap_atomic(buf - offset, KM_IRQ0);
+	local_irq_restore(flags);
 
 	if (bytes)
 		goto next_sg;
@@ -2808,6 +2816,8 @@ static void atapi_pio_bytes(struct ata_queued_cmd *qc)
 	if (do_write != i_write)
 		goto err_out;
 
+	VPRINTK("ata%u: xfering %d bytes\n", ap->id, bytes);
+
 	__atapi_pio_bytes(qc, bytes);
 
 	return;
@@ -3054,6 +3064,8 @@ static void ata_qc_timeout(struct ata_queued_cmd *qc)
 		printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x host_stat 0x%x\n",
 		       ap->id, qc->tf.command, drv_stat, host_stat);
 
+		ap->hsm_task_state = HSM_ST_IDLE;
+
 		/* complete taskfile transaction */
 		ata_qc_complete(qc, drv_stat);
 		break;
@@ -3344,43 +3356,96 @@ int ata_qc_issue_prot(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
 
+	/* select the device */
 	ata_dev_select(ap, qc->dev->devno, 1, 0);
 
+	/* start the command */
 	switch (qc->tf.protocol) {
 	case ATA_PROT_NODATA:
+		if (qc->tf.flags & ATA_TFLAG_POLLING)
+			ata_qc_set_polling(qc);
+
 		ata_tf_to_host_nolock(ap, &qc->tf);
+		ap->hsm_task_state = HSM_ST_LAST;
+
+		if (qc->tf.flags & ATA_TFLAG_POLLING)
+			queue_work(ata_wq, &ap->pio_task);
+
 		break;
 
 	case ATA_PROT_DMA:
+		assert(!(qc->tf.flags & ATA_TFLAG_POLLING));
+
 		ap->ops->tf_load(ap, &qc->tf);	/* load tf registers */
 		ap->ops->bmdma_setup(qc);	/* set up bmdma */
 		ap->ops->bmdma_start(qc);	/* initiate bmdma */
+		ap->hsm_task_state = HSM_ST_LAST;
 		break;
 
-	case ATA_PROT_PIO: /* load tf registers, initiate polling pio */
-		ata_qc_set_polling(qc);
+	case ATA_PROT_PIO:
+		if (qc->tf.flags & ATA_TFLAG_POLLING)
+			ata_qc_set_polling(qc);
+
 		ata_tf_to_host_nolock(ap, &qc->tf);
-		ap->hsm_task_state = HSM_ST;
-		queue_work(ata_wq, &ap->pio_task);
+
+		if (qc->tf.flags & ATA_TFLAG_POLLING) {
+			/* polling PIO */
+			ap->hsm_task_state = HSM_ST;
+			queue_work(ata_wq, &ap->pio_task);
+		} else {
+			/* interrupt driven PIO */
+			if (qc->tf.flags & ATA_TFLAG_WRITE) {
+				/* PIO data out protocol */
+				ap->hsm_task_state = HSM_ST_FIRST;
+				queue_work(ata_wq, &ap->packet_task);
+
+				/* send first data block by polling */
+			} else {
+				/* PIO data in protocol */
+				ap->hsm_task_state = HSM_ST;
+
+				/* interrupt handler takes over from here */
+			}
+		}
+
 		break;
 
 	case ATA_PROT_ATAPI:
-		ata_qc_set_polling(qc);
+		if (qc->tf.flags & ATA_TFLAG_POLLING)
+			ata_qc_set_polling(qc);
+
 		ata_tf_to_host_nolock(ap, &qc->tf);
-		queue_work(ata_wq, &ap->packet_task);
+		ap->hsm_task_state = HSM_ST_FIRST;
+
+		/* send cdb by polling if no cdb interrupt */
+		if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
+		    (qc->tf.flags & ATA_TFLAG_POLLING))
+			queue_work(ata_wq, &ap->packet_task);
 		break;
 
 	case ATA_PROT_ATAPI_NODATA:
-		ap->flags |= ATA_FLAG_NOINTR;
+		if (qc->tf.flags & ATA_TFLAG_POLLING)
+			ata_qc_set_polling(qc);
+
 		ata_tf_to_host_nolock(ap, &qc->tf);
-		queue_work(ata_wq, &ap->packet_task);
+		ap->hsm_task_state = HSM_ST_FIRST;
+
+		/* send cdb by polling if no cdb interrupt */
+		if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
+		    (qc->tf.flags & ATA_TFLAG_POLLING))
+			queue_work(ata_wq, &ap->packet_task);
 		break;
 
 	case ATA_PROT_ATAPI_DMA:
-		ap->flags |= ATA_FLAG_NOINTR;
+		assert(!(qc->tf.flags & ATA_TFLAG_POLLING));
+
 		ap->ops->tf_load(ap, &qc->tf);	/* load tf registers */
 		ap->ops->bmdma_setup(qc);	/* set up bmdma */
-		queue_work(ata_wq, &ap->packet_task);
+		ap->hsm_task_state = HSM_ST_FIRST;
+
+		/* send cdb by polling if no cdb interrupt */
+		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
+			queue_work(ata_wq, &ap->packet_task);
 		break;
 
 	default:
@@ -3623,6 +3688,42 @@ void ata_bmdma_stop(struct ata_queued_cmd *qc)
 }
 
 /**
+ * atapi_send_cdb - Write CDB bytes to hardware
+ * @ap: Port to which ATAPI device is attached.
+ * @qc: Taskfile currently active
+ *
+ * When device has indicated its readiness to accept
+ * a CDB, this function is called.  Send the CDB.
+ *
+ * LOCKING:
+ *	caller.
+ */
+
+static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
+{
+	/* send SCSI cdb */
+	DPRINTK("send cdb\n");
+	assert(ap->cdb_len >= 12);
+
+	ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1);
+	ata_altstatus(ap); /* flush */
+
+	switch (qc->tf.protocol) {
+	case ATA_PROT_ATAPI:
+		ap->hsm_task_state = HSM_ST;
+		break;
+	case ATA_PROT_ATAPI_NODATA:
+		ap->hsm_task_state = HSM_ST_LAST;
+		break;
+	case ATA_PROT_ATAPI_DMA:
+		ap->hsm_task_state = HSM_ST_LAST;
+		/* initiate bmdma */
+		ap->ops->bmdma_start(qc);
+		break;
+	}
+}
+
+/**
  * ata_host_intr - Handle host interrupt for given (port, task)
  * @ap: Port on which interrupt arrived (possibly...)
  * @qc: Taskfile currently active in engine
@@ -3641,47 +3742,142 @@ void ata_bmdma_stop(struct ata_queued_cmd *qc)
 inline unsigned int ata_host_intr (struct ata_port *ap,
 				   struct ata_queued_cmd *qc)
 {
-	u8 status, host_stat;
-
-	switch (qc->tf.protocol) {
+	u8 status, host_stat = 0;
 
-	case ATA_PROT_DMA:
-	case ATA_PROT_ATAPI_DMA:
-	case ATA_PROT_ATAPI:
-		/* check status of DMA engine */
-		host_stat = ap->ops->bmdma_status(ap);
-		VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
+	VPRINTK("ata%u: protocol %d task_state %d\n",
+		ap->id, qc->tf.protocol, ap->hsm_task_state);
 
-		/* if it's not our irq... */
-		if (!(host_stat & ATA_DMA_INTR))
+	/* Check whether we are expecting interrupt in this state */
+	switch (ap->hsm_task_state) {
+	case HSM_ST_FIRST:
+		/* Check the ATA_DFLAG_CDB_INTR flag is enough here.
+		 * The flag was turned on only for atapi devices.
+		 * No need to check is_atapi_taskfile(&qc->tf) again.
+		 */
+		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
 			goto idle_irq;
+		break;
+	case HSM_ST_LAST:
+		if (qc->tf.protocol == ATA_PROT_DMA ||
+		    qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
+			/* check status of DMA engine */
+			host_stat = ap->ops->bmdma_status(ap);
+			VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
+
+			/* if it's not our irq... */
+			if (!(host_stat & ATA_DMA_INTR))
+				goto idle_irq;
+
+			/* before we do anything else, clear DMA-Start bit */
+			ap->ops->bmdma_stop(qc);
+		}
+		break;
+	case HSM_ST:
+		break;
+	default:
+		goto idle_irq;
+	}
 
-		/* before we do anything else, clear DMA-Start bit */
-		ap->ops->bmdma_stop(qc);
+	/* check altstatus */
+	status = ata_altstatus(ap);
+	if (status & ATA_BUSY)
+		goto idle_irq;
 
-		/* fall through */
+	/* check main status, clearing INTRQ */
+	status = ata_chk_status(ap);
+	if (unlikely(status & ATA_BUSY))
+		goto idle_irq;
 
-	case ATA_PROT_ATAPI_NODATA:
-	case ATA_PROT_NODATA:
-		/* check altstatus */
-		status = ata_altstatus(ap);
-		if (status & ATA_BUSY)
-			goto idle_irq;
+	DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
+		ap->id, qc->tf.protocol, ap->hsm_task_state, status);
 
-		/* check main status, clearing INTRQ */
-		status = ata_chk_status(ap);
-		if (unlikely(status & ATA_BUSY))
-			goto idle_irq;
-		DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
-			ap->id, qc->tf.protocol, status);
+	/* ack bmdma irq events */
+	ap->ops->irq_clear(ap);
 
-		/* ack bmdma irq events */
-		ap->ops->irq_clear(ap);
+	/* check error */
+	if (unlikely((status & ATA_ERR) || (host_stat & ATA_DMA_ERR)))
+		ap->hsm_task_state = HSM_ST_ERR;
+
+fsm_start:
+	switch (ap->hsm_task_state) {
+	case HSM_ST_FIRST:
+		/* Some pre-ATAPI-4 devices assert INTRQ
+		 * at this state when ready to receive CDB.
+		 */
+
+		/* check device status */
+		if (unlikely((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ)) {
+			/* Wrong status. Let EH handle this */
+			ap->hsm_task_state = HSM_ST_ERR;
+			goto fsm_start;
+		}
+
+		atapi_send_cdb(ap, qc);
+
+		break;
+
+	case HSM_ST:
+		/* complete command or read/write the data register */
+		if (qc->tf.protocol == ATA_PROT_ATAPI) {
+			/* ATAPI PIO protocol */
+			if ((status & ATA_DRQ) == 0) {
+				/* no more data to transfer */
+				ap->hsm_task_state = HSM_ST_LAST;
+				goto fsm_start;
+			}
+
+			atapi_pio_bytes(qc);
+
+			if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
+				/* bad ireason reported by device */
+				goto fsm_start;
+
+		} else {
+			/* ATA PIO protocol */
+			if (unlikely((status & ATA_DRQ) == 0)) {
+				/* handle BSY=0, DRQ=0 as error */
+				ap->hsm_task_state = HSM_ST_ERR;
+				goto fsm_start;
+			}
+
+			ata_pio_sector(qc);
+
+			if (ap->hsm_task_state == HSM_ST_LAST &&
+			    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
+				/* all data read */
+				ata_altstatus(ap);
+				status = ata_chk_status(ap);
+				goto fsm_start;
+			}
+		}
+
+		ata_altstatus(ap); /* flush */
+		break;
+
+	case HSM_ST_LAST:
+		if (unlikely(status & ATA_DRQ)) {
+			/* handle DRQ=1 as error */
+			ap->hsm_task_state = HSM_ST_ERR;
+			goto fsm_start;
+		}
+
+		/* no more data to transfer */
+		DPRINTK("ata%u: command complete, drv_stat 0x%x\n",
+			ap->id, status);
+
+		ap->hsm_task_state = HSM_ST_IDLE;
 
 		/* complete taskfile transaction */
 		ata_qc_complete(qc, status);
 		break;
 
+	case HSM_ST_ERR:
+		printk(KERN_ERR "ata%u: command error, drv_stat 0x%x host_stat 0x%x\n",
+		       ap->id, status, host_stat);
+
+		ap->hsm_task_state = HSM_ST_IDLE;
+		ata_qc_complete(qc, status | ATA_ERR);
+		break;
 	default:
 		goto idle_irq;
 	}
@@ -3733,11 +3929,11 @@ irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
 
 		ap = host_set->ports[i];
 		if (ap &&
-		    !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) {
+		    !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
 			struct ata_queued_cmd *qc;
 
 			qc = ata_qc_from_tag(ap, ap->active_tag);
-			if (qc && (!(qc->tf.ctl & ATA_NIEN)) &&
+			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
 			    (qc->flags & ATA_QCFLAG_ACTIVE))
 				handled |= ata_host_intr(ap, qc);
 		}
@@ -3767,6 +3963,7 @@ static void atapi_packet_task(void *_data)
 	struct ata_port *ap = _data;
 	struct ata_queued_cmd *qc;
 	u8 status;
+	unsigned long flags;
 
 	qc = ata_qc_from_tag(ap, ap->active_tag);
 	assert(qc != NULL);
@@ -3782,38 +3979,40 @@ static void atapi_packet_task(void *_data)
 	if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ)
 		goto err_out;
 
-	/* send SCSI cdb */
-	DPRINTK("send cdb\n");
-	assert(ap->cdb_len >= 12);
+	/* Send the CDB (atapi) or the first data block (ata pio out).
+	 * During the state transition, interrupt handler shouldn't
+	 * be invoked before the data transfer is complete and
+	 * hsm_task_state is changed. Hence, the following locking.
+	 */
+	spin_lock_irqsave(&ap->host_set->lock, flags);
 
-	if (qc->tf.protocol == ATA_PROT_ATAPI_DMA ||
-	    qc->tf.protocol == ATA_PROT_ATAPI_NODATA) {
-		unsigned long flags;
+	if (is_atapi_taskfile(&qc->tf)) {
+		/* send CDB */
+		atapi_send_cdb(ap, qc);
 
-		/* Once we're done issuing command and kicking bmdma,
-		 * irq handler takes over. To not lose irq, we need
-		 * to clear NOINTR flag before sending cdb, but
-		 * interrupt handler shouldn't be invoked before we're
-		 * finished. Hence, the following locking.
-		 */
-		spin_lock_irqsave(&ap->host_set->lock, flags);
-		ap->flags &= ~ATA_FLAG_NOINTR;
-		ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1);
-		if (qc->tf.protocol == ATA_PROT_ATAPI_DMA)
-			ap->ops->bmdma_start(qc);	/* initiate bmdma */
-		spin_unlock_irqrestore(&ap->host_set->lock, flags);
+		if (qc->tf.flags & ATA_TFLAG_POLLING)
+			queue_work(ata_wq, &ap->pio_task);
 	} else {
-		ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1);
+		/* PIO data out protocol.
+		 * send first data block.
+		 */
 
-		/* PIO commands are handled by polling */
+		/* ata_pio_sector() might change the state to HSM_ST_LAST.
+		 * so, the state is changed here before ata_pio_sector().
+		 */
 		ap->hsm_task_state = HSM_ST;
-		queue_work(ata_wq, &ap->pio_task);
+		ata_pio_sector(qc);
+		ata_altstatus(ap); /* flush */
+
+		/* interrupt handler takes over from here */
 	}
 
+	spin_unlock_irqrestore(&ap->host_set->lock, flags);
+
 	return;
 
 err_out:
-	ata_poll_qc_complete(qc, ATA_ERR);
+	ata_pio_error(ap);
 }
 
 
diff --git a/include/linux/ata.h b/include/linux/ata.h
index a5b74efab067..6fec2f6f2d59 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -181,6 +181,7 @@ enum {
 	ATA_TFLAG_ISADDR	= (1 << 1), /* enable r/w to nsect/lba regs */
 	ATA_TFLAG_DEVICE	= (1 << 2), /* enable r/w to device reg */
 	ATA_TFLAG_WRITE		= (1 << 3), /* data dir: host->dev==1 (write) */
+	ATA_TFLAG_POLLING	= (1 << 4), /* set nIEN to 1 and use polling */
 };
 
 enum ata_tf_protocols {
@@ -250,6 +251,8 @@ struct ata_taskfile {
 	  ((u64) (id)[(n) + 1] << 16) |	\
 	  ((u64) (id)[(n) + 0]) )
 
+#define ata_id_cdb_intr(id)	(((id)[0] & 0x60) == 0x20)
+
 static inline int atapi_cdb_len(u16 *dev_id)
 {
 	u16 tmp = dev_id[0] & 0x3;
diff --git a/include/linux/libata.h b/include/linux/libata.h
index bb2d916bce44..9ac2b69df3c1 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -97,6 +97,7 @@ enum {
 	ATA_DFLAG_LBA48		= (1 << 0), /* device supports LBA48 */
 	ATA_DFLAG_PIO		= (1 << 1), /* device currently in PIO mode */
 	ATA_DFLAG_LOCK_SECTORS	= (1 << 2), /* don't adjust max_sectors */
+	ATA_DFLAG_CDB_INTR	= (1 << 3), /* device asserts INTRQ when ready for CDB */
 
 	ATA_DEV_UNKNOWN		= 0,	/* unknown device */
 	ATA_DEV_ATA		= 1,	/* ATA device */
@@ -115,8 +116,6 @@ enum {
 	ATA_FLAG_MMIO		= (1 << 6), /* use MMIO, not PIO */
 	ATA_FLAG_SATA_RESET	= (1 << 7), /* use COMRESET */
 	ATA_FLAG_PIO_DMA	= (1 << 8), /* PIO cmds via DMA */
-	ATA_FLAG_NOINTR		= (1 << 9), /* FIXME: Remove this once
-					     * proper HSM is in place. */
 
 	ATA_QCFLAG_ACTIVE	= (1 << 1), /* cmd not yet ack'd to scsi lyer */
 	ATA_QCFLAG_SG		= (1 << 3), /* have s/g table? */
@@ -165,6 +164,7 @@ enum hsm_task_states {
 	HSM_ST_LAST,
 	HSM_ST_LAST_POLL,
 	HSM_ST_ERR,
+	HSM_ST_FIRST,
 };
 
 /* forward declarations */
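A note on the ata_id_cdb_intr() test added to include/linux/ata.h: bits 6:5 of IDENTIFY PACKET DEVICE word 0 describe how the device signals readiness for the CDB, and the value 01b means the device asserts INTRQ before accepting the CDB, which is the pre-ATA4 behaviour this patch starts honouring. The stand-alone sketch below is illustrative only; the identify words are made up for the demonstration.

/*
 * Illustrative only: how the ata_id_cdb_intr() macro reads word 0 of
 * the IDENTIFY PACKET DEVICE data.  Bits 6:5 == 01b selects devices
 * that raise an interrupt when ready to receive the CDB.
 */
#include <stdio.h>

#define ata_id_cdb_intr(id)	(((id)[0] & 0x60) == 0x20)

int main(void)
{
	unsigned short intr_drq_dev[256] = { 0x85A0 };	/* word 0 bits 6:5 = 01b */
	unsigned short fast_drq_dev[256] = { 0x85C0 };	/* word 0 bits 6:5 = 10b */

	printf("intr_drq_dev: %s\n",
	       ata_id_cdb_intr(intr_drq_dev) ? "asserts INTRQ for CDB" : "no CDB interrupt");
	printf("fast_drq_dev: %s\n",
	       ata_id_cdb_intr(fast_drq_dev) ? "asserts INTRQ for CDB" : "no CDB interrupt");
	return 0;
}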