about | summary | refs | log | tree | commit | diff | stats
diff options
context:
space:
mode:
authorRobert Hancock <hancockr@shaw.ca>2006-11-26 15:20:19 -0500
committerJeff Garzik <jeff@garzik.org>2006-12-01 22:47:03 -0500
commit2dec7555e6bf2772749113ea0ad454fcdb8cf861 (patch)
tree740d37787c4968398f5cc6b18e7a47c7c51d51a6
parent099156db555aabf54dc80b40abb628ce35d90065 (diff)
[PATCH] sata_nv: fix ATAPI in ADMA mode
The attached patch against 2.6.19-rc6-mm1 fixes some problems in sata_nv with ATAPI devices on controllers running in ADMA mode.

Some of the logic in the nv_adma_bmdma_* functions was inverted, causing a bunch of warnings, and caused those functions not to work properly. Also, when an ATAPI device is connected, we need to use the legacy DMA engine. The code now disables the PCI configuration register bits for ADMA so that this works, and ensures that no ATAPI DMA commands go through until this is done.

Fixes Bugzilla http://bugzilla.kernel.org/show_bug.cgi?id=7538

Signed-off-by: Robert Hancock <hancockr@shaw.ca>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
-rw-r--r--  drivers/ata/sata_nv.c | 318
1 file changed, 188 insertions(+), 130 deletions(-)
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index a57710107619..27d2225c9083 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -49,7 +49,7 @@
49#include <linux/libata.h> 49#include <linux/libata.h>
50 50
51#define DRV_NAME "sata_nv" 51#define DRV_NAME "sata_nv"
52#define DRV_VERSION "3.1" 52#define DRV_VERSION "3.2"
53 53
54#define NV_ADMA_DMA_BOUNDARY 0xffffffffUL 54#define NV_ADMA_DMA_BOUNDARY 0xffffffffUL
55 55
@@ -165,6 +165,7 @@ enum {
165 165
166 /* port flags */ 166 /* port flags */
167 NV_ADMA_PORT_REGISTER_MODE = (1 << 0), 167 NV_ADMA_PORT_REGISTER_MODE = (1 << 0),
168 NV_ADMA_ATAPI_SETUP_COMPLETE = (1 << 1),
168 169
169}; 170};
170 171
@@ -231,6 +232,7 @@ static void nv_ck804_freeze(struct ata_port *ap);
231static void nv_ck804_thaw(struct ata_port *ap); 232static void nv_ck804_thaw(struct ata_port *ap);
232static void nv_error_handler(struct ata_port *ap); 233static void nv_error_handler(struct ata_port *ap);
233static int nv_adma_slave_config(struct scsi_device *sdev); 234static int nv_adma_slave_config(struct scsi_device *sdev);
235static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
234static void nv_adma_qc_prep(struct ata_queued_cmd *qc); 236static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
235static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc); 237static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
236static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance); 238static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
@@ -415,6 +417,7 @@ static const struct ata_port_operations nv_adma_ops = {
415 .port_disable = ata_port_disable, 417 .port_disable = ata_port_disable,
416 .tf_load = ata_tf_load, 418 .tf_load = ata_tf_load,
417 .tf_read = ata_tf_read, 419 .tf_read = ata_tf_read,
420 .check_atapi_dma = nv_adma_check_atapi_dma,
418 .exec_command = ata_exec_command, 421 .exec_command = ata_exec_command,
419 .check_status = ata_check_status, 422 .check_status = ata_check_status,
420 .dev_select = ata_std_dev_select, 423 .dev_select = ata_std_dev_select,
@@ -489,13 +492,71 @@ MODULE_VERSION(DRV_VERSION);
489 492
490static int adma_enabled = 1; 493static int adma_enabled = 1;
491 494
495static inline void __iomem *__nv_adma_ctl_block(void __iomem *mmio,
496 unsigned int port_no)
497{
498 mmio += NV_ADMA_PORT + port_no * NV_ADMA_PORT_SIZE;
499 return mmio;
500}
501
502static inline void __iomem *nv_adma_ctl_block(struct ata_port *ap)
503{
504 return __nv_adma_ctl_block(ap->host->mmio_base, ap->port_no);
505}
506
507static inline void __iomem *nv_adma_gen_block(struct ata_port *ap)
508{
509 return (ap->host->mmio_base + NV_ADMA_GEN);
510}
511
512static inline void __iomem *nv_adma_notifier_clear_block(struct ata_port *ap)
513{
514 return (nv_adma_gen_block(ap) + NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no));
515}
516
517static void nv_adma_register_mode(struct ata_port *ap)
518{
519 void __iomem *mmio = nv_adma_ctl_block(ap);
520 struct nv_adma_port_priv *pp = ap->private_data;
521 u16 tmp;
522
523 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
524 return;
525
526 tmp = readw(mmio + NV_ADMA_CTL);
527 writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
528
529 pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
530}
531
532static void nv_adma_mode(struct ata_port *ap)
533{
534 void __iomem *mmio = nv_adma_ctl_block(ap);
535 struct nv_adma_port_priv *pp = ap->private_data;
536 u16 tmp;
537
538 if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
539 return;
540
541 WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
542
543 tmp = readw(mmio + NV_ADMA_CTL);
544 writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
545
546 pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
547}
548
492static int nv_adma_slave_config(struct scsi_device *sdev) 549static int nv_adma_slave_config(struct scsi_device *sdev)
493{ 550{
494 struct ata_port *ap = ata_shost_to_port(sdev->host); 551 struct ata_port *ap = ata_shost_to_port(sdev->host);
552 struct nv_adma_port_priv *pp = ap->private_data;
553 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
495 u64 bounce_limit; 554 u64 bounce_limit;
496 unsigned long segment_boundary; 555 unsigned long segment_boundary;
497 unsigned short sg_tablesize; 556 unsigned short sg_tablesize;
498 int rc; 557 int rc;
558 int adma_enable;
559 u32 current_reg, new_reg, config_mask;
499 560
500 rc = ata_scsi_slave_config(sdev); 561 rc = ata_scsi_slave_config(sdev);
501 562
@@ -516,13 +577,40 @@ static int nv_adma_slave_config(struct scsi_device *sdev)
516 /* Subtract 1 since an extra entry may be needed for padding, see 577 /* Subtract 1 since an extra entry may be needed for padding, see
517 libata-scsi.c */ 578 libata-scsi.c */
518 sg_tablesize = LIBATA_MAX_PRD - 1; 579 sg_tablesize = LIBATA_MAX_PRD - 1;
580
581 /* Since the legacy DMA engine is in use, we need to disable ADMA
582 on the port. */
583 adma_enable = 0;
584 nv_adma_register_mode(ap);
519 } 585 }
520 else { 586 else {
521 bounce_limit = *ap->dev->dma_mask; 587 bounce_limit = *ap->dev->dma_mask;
522 segment_boundary = NV_ADMA_DMA_BOUNDARY; 588 segment_boundary = NV_ADMA_DMA_BOUNDARY;
523 sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN; 589 sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
590 adma_enable = 1;
524 } 591 }
525 592
593 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);
594
595 if(ap->port_no == 1)
596 config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
597 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
598 else
599 config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
600 NV_MCP_SATA_CFG_20_PORT0_PWB_EN;
601
602 if(adma_enable) {
603 new_reg = current_reg | config_mask;
604 pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
605 }
606 else {
607 new_reg = current_reg & ~config_mask;
608 pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
609 }
610
611 if(current_reg != new_reg)
612 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);
613
526 blk_queue_bounce_limit(sdev->request_queue, bounce_limit); 614 blk_queue_bounce_limit(sdev->request_queue, bounce_limit);
527 blk_queue_segment_boundary(sdev->request_queue, segment_boundary); 615 blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
528 blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize); 616 blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
@@ -532,7 +620,13 @@ static int nv_adma_slave_config(struct scsi_device *sdev)
532 return rc; 620 return rc;
533} 621}
534 622
535static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, u16 *cpb) 623static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
624{
625 struct nv_adma_port_priv *pp = qc->ap->private_data;
626 return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
627}
628
629static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
536{ 630{
537 unsigned int idx = 0; 631 unsigned int idx = 0;
538 632
@@ -563,33 +657,11 @@ static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, u16 *cpb)
563 return idx; 657 return idx;
564} 658}
565 659
566static inline void __iomem *__nv_adma_ctl_block(void __iomem *mmio,
567 unsigned int port_no)
568{
569 mmio += NV_ADMA_PORT + port_no * NV_ADMA_PORT_SIZE;
570 return mmio;
571}
572
573static inline void __iomem *nv_adma_ctl_block(struct ata_port *ap)
574{
575 return __nv_adma_ctl_block(ap->host->mmio_base, ap->port_no);
576}
577
578static inline void __iomem *nv_adma_gen_block(struct ata_port *ap)
579{
580 return (ap->host->mmio_base + NV_ADMA_GEN);
581}
582
583static inline void __iomem *nv_adma_notifier_clear_block(struct ata_port *ap)
584{
585 return (nv_adma_gen_block(ap) + NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no));
586}
587
588static void nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err) 660static void nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
589{ 661{
590 struct nv_adma_port_priv *pp = ap->private_data; 662 struct nv_adma_port_priv *pp = ap->private_data;
591 int complete = 0, have_err = 0; 663 int complete = 0, have_err = 0;
592 u16 flags = pp->cpb[cpb_num].resp_flags; 664 u8 flags = pp->cpb[cpb_num].resp_flags;
593 665
594 VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags); 666 VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);
595 667
@@ -634,15 +706,48 @@ static void nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
634 } 706 }
635} 707}
636 708
709static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
710{
711 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
712 int handled;
713
714 /* freeze if hotplugged */
715 if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
716 ata_port_freeze(ap);
717 return 1;
718 }
719
720 /* bail out if not our interrupt */
721 if (!(irq_stat & NV_INT_DEV))
722 return 0;
723
724 /* DEV interrupt w/ no active qc? */
725 if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
726 ata_check_status(ap);
727 return 1;
728 }
729
730 /* handle interrupt */
731 handled = ata_host_intr(ap, qc);
732 if (unlikely(!handled)) {
733 /* spurious, clear it */
734 ata_check_status(ap);
735 }
736
737 return 1;
738}
739
637static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance) 740static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
638{ 741{
639 struct ata_host *host = dev_instance; 742 struct ata_host *host = dev_instance;
640 int i, handled = 0; 743 int i, handled = 0;
744 u32 notifier_clears[2];
641 745
642 spin_lock(&host->lock); 746 spin_lock(&host->lock);
643 747
644 for (i = 0; i < host->n_ports; i++) { 748 for (i = 0; i < host->n_ports; i++) {
645 struct ata_port *ap = host->ports[i]; 749 struct ata_port *ap = host->ports[i];
750 notifier_clears[i] = 0;
646 751
647 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) { 752 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
648 struct nv_adma_port_priv *pp = ap->private_data; 753 struct nv_adma_port_priv *pp = ap->private_data;
@@ -654,30 +759,18 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
654 759
655 /* if in ATA register mode, use standard ata interrupt handler */ 760 /* if in ATA register mode, use standard ata interrupt handler */
656 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) { 761 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
657 struct ata_queued_cmd *qc; 762 u8 irq_stat = readb(host->mmio_base + NV_INT_STATUS_CK804)
658 VPRINTK("in ATA register mode\n"); 763 >> (NV_INT_PORT_SHIFT * i);
659 qc = ata_qc_from_tag(ap, ap->active_tag); 764 handled += nv_host_intr(ap, irq_stat);
660 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
661 handled += ata_host_intr(ap, qc);
662 else {
663 /* No request pending? Clear interrupt status
664 anyway, in case there's one pending. */
665 ap->ops->check_status(ap);
666 handled++;
667 }
668 continue; 765 continue;
669 } 766 }
670 767
671 notifier = readl(mmio + NV_ADMA_NOTIFIER); 768 notifier = readl(mmio + NV_ADMA_NOTIFIER);
672 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR); 769 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
770 notifier_clears[i] = notifier | notifier_error;
673 771
674 gen_ctl = readl(nv_adma_gen_block(ap) + NV_ADMA_GEN_CTL); 772 gen_ctl = readl(nv_adma_gen_block(ap) + NV_ADMA_GEN_CTL);
675 773
676 /* Seems necessary to clear notifiers even when they were 0.
677 Otherwise we seem to stop receiving further interrupts.
678 Unsure why. */
679 writel(notifier | notifier_error, nv_adma_notifier_clear_block(ap));
680
681 if( !NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier && 774 if( !NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
682 !notifier_error) 775 !notifier_error)
683 /* Nothing to do */ 776 /* Nothing to do */
@@ -730,6 +823,15 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
730 handled++; /* irq handled if we got here */ 823 handled++; /* irq handled if we got here */
731 } 824 }
732 } 825 }
826
827 if(notifier_clears[0] || notifier_clears[1]) {
828 /* Note: Both notifier clear registers must be written
829 if either is set, even if one is zero, according to NVIDIA. */
830 writel(notifier_clears[0],
831 nv_adma_notifier_clear_block(host->ports[0]));
832 writel(notifier_clears[1],
833 nv_adma_notifier_clear_block(host->ports[1]));
834 }
733 835
734 spin_unlock(&host->lock); 836 spin_unlock(&host->lock);
735 837
@@ -742,6 +844,7 @@ static void nv_adma_irq_clear(struct ata_port *ap)
742 u16 status = readw(mmio + NV_ADMA_STAT); 844 u16 status = readw(mmio + NV_ADMA_STAT);
743 u32 notifier = readl(mmio + NV_ADMA_NOTIFIER); 845 u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
744 u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR); 846 u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
847 unsigned long dma_stat_addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;
745 848
746 /* clear ADMA status */ 849 /* clear ADMA status */
747 writew(status, mmio + NV_ADMA_STAT); 850 writew(status, mmio + NV_ADMA_STAT);
@@ -749,92 +852,76 @@ static void nv_adma_irq_clear(struct ata_port *ap)
749 nv_adma_notifier_clear_block(ap)); 852 nv_adma_notifier_clear_block(ap));
750 853
751 /** clear legacy status */ 854 /** clear legacy status */
752 ap->flags &= ~ATA_FLAG_MMIO; 855 outb(inb(dma_stat_addr), dma_stat_addr);
753 ata_bmdma_irq_clear(ap);
754 ap->flags |= ATA_FLAG_MMIO;
755} 856}
756 857
757static void nv_adma_bmdma_setup(struct ata_queued_cmd *qc) 858static void nv_adma_bmdma_setup(struct ata_queued_cmd *qc)
758{ 859{
759 struct nv_adma_port_priv *pp = qc->ap->private_data; 860 struct ata_port *ap = qc->ap;
861 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
862 struct nv_adma_port_priv *pp = ap->private_data;
863 u8 dmactl;
760 864
761 if(pp->flags & NV_ADMA_PORT_REGISTER_MODE) { 865 if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
762 WARN_ON(1); 866 WARN_ON(1);
763 return; 867 return;
764 } 868 }
765 869
766 qc->ap->flags &= ~ATA_FLAG_MMIO; 870 /* load PRD table addr. */
767 ata_bmdma_setup(qc); 871 outl(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
768 qc->ap->flags |= ATA_FLAG_MMIO; 872
873 /* specify data direction, triple-check start bit is clear */
874 dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
875 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
876 if (!rw)
877 dmactl |= ATA_DMA_WR;
878
879 outb(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
880
881 /* issue r/w command */
882 ata_exec_command(ap, &qc->tf);
769} 883}
770 884
771static void nv_adma_bmdma_start(struct ata_queued_cmd *qc) 885static void nv_adma_bmdma_start(struct ata_queued_cmd *qc)
772{ 886{
773 struct nv_adma_port_priv *pp = qc->ap->private_data; 887 struct ata_port *ap = qc->ap;
888 struct nv_adma_port_priv *pp = ap->private_data;
889 u8 dmactl;
774 890
775 if(pp->flags & NV_ADMA_PORT_REGISTER_MODE) { 891 if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
776 WARN_ON(1); 892 WARN_ON(1);
777 return; 893 return;
778 } 894 }
779 895
780 qc->ap->flags &= ~ATA_FLAG_MMIO; 896 /* start host DMA transaction */
781 ata_bmdma_start(qc); 897 dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
782 qc->ap->flags |= ATA_FLAG_MMIO; 898 outb(dmactl | ATA_DMA_START,
899 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
783} 900}
784 901
785static void nv_adma_bmdma_stop(struct ata_queued_cmd *qc) 902static void nv_adma_bmdma_stop(struct ata_queued_cmd *qc)
786{ 903{
787 struct nv_adma_port_priv *pp = qc->ap->private_data; 904 struct ata_port *ap = qc->ap;
788
789 if(pp->flags & NV_ADMA_PORT_REGISTER_MODE)
790 return;
791
792 qc->ap->flags &= ~ATA_FLAG_MMIO;
793 ata_bmdma_stop(qc);
794 qc->ap->flags |= ATA_FLAG_MMIO;
795}
796
797static u8 nv_adma_bmdma_status(struct ata_port *ap)
798{
799 u8 status;
800 struct nv_adma_port_priv *pp = ap->private_data;
801
802 WARN_ON(pp->flags & NV_ADMA_PORT_REGISTER_MODE);
803
804 ap->flags &= ~ATA_FLAG_MMIO;
805 status = ata_bmdma_status(ap);
806 ap->flags |= ATA_FLAG_MMIO;
807 return status;
808}
809
810static void nv_adma_register_mode(struct ata_port *ap)
811{
812 void __iomem *mmio = nv_adma_ctl_block(ap);
813 struct nv_adma_port_priv *pp = ap->private_data; 905 struct nv_adma_port_priv *pp = ap->private_data;
814 u16 tmp;
815 906
816 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) 907 if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
817 return; 908 return;
818 909
819 tmp = readw(mmio + NV_ADMA_CTL); 910 /* clear start/stop bit */
820 writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL); 911 outb(inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START,
912 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
821 913
822 pp->flags |= NV_ADMA_PORT_REGISTER_MODE; 914 /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
915 ata_altstatus(ap); /* dummy read */
823} 916}
824 917
825static void nv_adma_mode(struct ata_port *ap) 918static u8 nv_adma_bmdma_status(struct ata_port *ap)
826{ 919{
827 void __iomem *mmio = nv_adma_ctl_block(ap);
828 struct nv_adma_port_priv *pp = ap->private_data; 920 struct nv_adma_port_priv *pp = ap->private_data;
829 u16 tmp;
830
831 if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
832 return;
833 921
834 tmp = readw(mmio + NV_ADMA_CTL); 922 WARN_ON(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE));
835 writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
836 923
837 pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE; 924 return inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
838} 925}
839 926
840static int nv_adma_port_start(struct ata_port *ap) 927static int nv_adma_port_start(struct ata_port *ap)
@@ -997,7 +1084,7 @@ static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
997 int idx, 1084 int idx,
998 struct nv_adma_prd *aprd) 1085 struct nv_adma_prd *aprd)
999{ 1086{
1000 u32 flags; 1087 u8 flags;
1001 1088
1002 memset(aprd, 0, sizeof(struct nv_adma_prd)); 1089 memset(aprd, 0, sizeof(struct nv_adma_prd));
1003 1090
@@ -1011,7 +1098,7 @@ static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
1011 1098
1012 aprd->addr = cpu_to_le64(((u64)sg_dma_address(sg))); 1099 aprd->addr = cpu_to_le64(((u64)sg_dma_address(sg)));
1013 aprd->len = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */ 1100 aprd->len = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
1014 aprd->flags = cpu_to_le32(flags); 1101 aprd->flags = flags;
1015} 1102}
1016 1103
1017static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb) 1104static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
@@ -1045,7 +1132,8 @@ static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
1045 VPRINTK("qc->flags = 0x%lx\n", qc->flags); 1132 VPRINTK("qc->flags = 0x%lx\n", qc->flags);
1046 1133
1047 if (!(qc->flags & ATA_QCFLAG_DMAMAP) || 1134 if (!(qc->flags & ATA_QCFLAG_DMAMAP) ||
1048 qc->tf.protocol == ATA_PROT_ATAPI_DMA) { 1135 (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
1136 nv_adma_register_mode(qc->ap);
1049 ata_qc_prep(qc); 1137 ata_qc_prep(qc);
1050 return; 1138 return;
1051 } 1139 }
@@ -1072,12 +1160,13 @@ static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
1072 1160
1073static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc) 1161static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
1074{ 1162{
1163 struct nv_adma_port_priv *pp = qc->ap->private_data;
1075 void __iomem *mmio = nv_adma_ctl_block(qc->ap); 1164 void __iomem *mmio = nv_adma_ctl_block(qc->ap);
1076 1165
1077 VPRINTK("ENTER\n"); 1166 VPRINTK("ENTER\n");
1078 1167
1079 if (!(qc->flags & ATA_QCFLAG_DMAMAP) || 1168 if (!(qc->flags & ATA_QCFLAG_DMAMAP) ||
1080 qc->tf.protocol == ATA_PROT_ATAPI_DMA) { 1169 (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
1081 /* use ATA register mode */ 1170 /* use ATA register mode */
1082 VPRINTK("no dmamap or ATAPI, using ATA register mode: 0x%lx\n", qc->flags); 1171 VPRINTK("no dmamap or ATAPI, using ATA register mode: 0x%lx\n", qc->flags);
1083 nv_adma_register_mode(qc->ap); 1172 nv_adma_register_mode(qc->ap);
@@ -1128,37 +1217,6 @@ static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
1128 return IRQ_RETVAL(handled); 1217 return IRQ_RETVAL(handled);
1129} 1218}
1130 1219
1131static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
1132{
1133 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
1134 int handled;
1135
1136 /* freeze if hotplugged */
1137 if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
1138 ata_port_freeze(ap);
1139 return 1;
1140 }
1141
1142 /* bail out if not our interrupt */
1143 if (!(irq_stat & NV_INT_DEV))
1144 return 0;
1145
1146 /* DEV interrupt w/ no active qc? */
1147 if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
1148 ata_check_status(ap);
1149 return 1;
1150 }
1151
1152 /* handle interrupt */
1153 handled = ata_host_intr(ap, qc);
1154 if (unlikely(!handled)) {
1155 /* spurious, clear it */
1156 ata_check_status(ap);
1157 }
1158
1159 return 1;
1160}
1161
1162static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat) 1220static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
1163{ 1221{
1164 int i, handled = 0; 1222 int i, handled = 0;