path: root/drivers/ata/sata_inic162x.c
author		Tejun Heo <htejun@gmail.com>	2008-04-30 03:35:15 -0400
committer	Jeff Garzik <jgarzik@redhat.com>	2008-05-06 11:40:56 -0400
commit		f8b0685a8ea8e3974f8953378ede2111f8d49d22 (patch)
tree		31df41c54ae8c31b82cfaa0d11c8818876d89284 /drivers/ata/sata_inic162x.c
parent		b3f677e501a494aa1582d4ff35fb3ac6f0a59b08 (diff)
sata_inic162x: kill now unused SFF related stuff
sata_inic162x now doesn't use any SFF features.  Remove all SFF related stuff.

* Mask unsolicited ATA interrupts.  This removes our primary source of
  spurious interrupts, and spurious interrupt handling can be tightened up.
  There's no need to clear ATA interrupts by reading the status register
  either.

* Don't dance with IDMA_CTL_ATA_NIEN and simplify accesses to IDMA_CTL.

* Inherit from sata_port_ops instead of ata_sff_port_ops.

* Don't initialize or use ioaddr.  There's no need to map BAR0-4 anymore.

Signed-off-by: Tejun Heo <htejun@gmail.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
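As a rough illustration of the last point (not part of the patch itself), the
BAR change comes down to the mask passed to pcim_iomap_regions().  A minimal
sketch, assuming MMIO_BAR is 5 as defined elsewhere in sata_inic162x.c:

	/* Before: 0x3f requests BARs 0-5, i.e. the SFF cmd/ctl BARs 0-4
	 * plus the MMIO BAR. */
	rc = pcim_iomap_regions(pdev, 0x3f, DRV_NAME);

	/* After: 1 << MMIO_BAR == 0x20 requests only the MMIO BAR, so
	 * BARs 0-4 are never ioremapped and the SFF ioaddr setup in
	 * inic_init_one() below can go away. */
	rc = pcim_iomap_regions(pdev, 1 << MMIO_BAR, DRV_NAME);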
Diffstat (limited to 'drivers/ata/sata_inic162x.c')
-rw-r--r--	drivers/ata/sata_inic162x.c	69
1 file changed, 18 insertions(+), 51 deletions(-)
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
index cdae435620f6..55f8e93ac48e 100644
--- a/drivers/ata/sata_inic162x.c
+++ b/drivers/ata/sata_inic162x.c
@@ -101,7 +101,7 @@ enum {
 	PIRQ_PENDING		= (1 << 7), /* port IRQ pending (STAT only) */
 
 	PIRQ_ERR		= PIRQ_OFFLINE | PIRQ_ONLINE | PIRQ_FATAL,
-	PIRQ_MASK_DEFAULT	= PIRQ_REPLY,
+	PIRQ_MASK_DEFAULT	= PIRQ_REPLY | PIRQ_ATA,
 	PIRQ_MASK_FREEZE	= 0xff,
 
 	/* PORT_PRD_CTL bits */
@@ -227,31 +227,26 @@ static void __iomem *inic_port_base(struct ata_port *ap)
 static void inic_reset_port(void __iomem *port_base)
 {
 	void __iomem *idma_ctl = port_base + PORT_IDMA_CTL;
-	u16 ctl;
 
-	ctl = readw(idma_ctl);
-	ctl &= ~(IDMA_CTL_RST_IDMA | IDMA_CTL_ATA_NIEN | IDMA_CTL_GO);
+	/* stop IDMA engine */
+	readw(idma_ctl); /* flush */
+	msleep(1);
 
 	/* mask IRQ and assert reset */
-	writew(ctl | IDMA_CTL_RST_IDMA | IDMA_CTL_ATA_NIEN, idma_ctl);
+	writew(IDMA_CTL_RST_IDMA, idma_ctl);
 	readw(idma_ctl); /* flush */
-
-	/* give it some time */
 	msleep(1);
 
 	/* release reset */
-	writew(ctl | IDMA_CTL_ATA_NIEN, idma_ctl);
+	writew(0, idma_ctl);
 
 	/* clear irq */
 	writeb(0xff, port_base + PORT_IRQ_STAT);
-
-	/* reenable ATA IRQ, turn off IDMA mode */
-	writew(ctl, idma_ctl);
 }
 
 static int inic_scr_read(struct ata_port *ap, unsigned sc_reg, u32 *val)
 {
-	void __iomem *scr_addr = ap->ioaddr.scr_addr;
+	void __iomem *scr_addr = inic_port_base(ap) + PORT_SCR;
 	void __iomem *addr;
 
 	if (unlikely(sc_reg >= ARRAY_SIZE(scr_map)))
@@ -268,7 +263,7 @@ static int inic_scr_read(struct ata_port *ap, unsigned sc_reg, u32 *val)
 
 static int inic_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val)
 {
-	void __iomem *scr_addr = ap->ioaddr.scr_addr;
+	void __iomem *scr_addr = inic_port_base(ap) + PORT_SCR;
 
 	if (unlikely(sc_reg >= ARRAY_SIZE(scr_map)))
 		return -EINVAL;
@@ -357,10 +352,8 @@ static void inic_host_intr(struct ata_port *ap)
 	if (unlikely((irq_stat & PIRQ_ERR) || (idma_stat & IDMA_STAT_ERR)))
 		inic_host_err_intr(ap, irq_stat, idma_stat);
 
-	if (unlikely(!qc)) {
-		ap->ops->sff_check_status(ap); /* clear ATA interrupt */
+	if (unlikely(!qc))
 		goto spurious;
-	}
 
 	if (likely(idma_stat & IDMA_STAT_DONE)) {
 		inic_stop_idma(ap);
@@ -377,7 +370,9 @@ static void inic_host_intr(struct ata_port *ap)
 	}
 
  spurious:
-	ap->ops->sff_check_status(ap); /* clear ATA interrupt */
+	ata_port_printk(ap, KERN_WARNING, "unhandled interrupt: "
+			"cmd=0x%x irq_stat=0x%x idma_stat=0x%x\n",
+			qc ? qc->tf.command : 0xff, irq_stat, idma_stat);
 }
 
 static irqreturn_t inic_interrupt(int irq, void *dev_instance)
@@ -568,7 +563,6 @@ static void inic_freeze(struct ata_port *ap)
 	void __iomem *port_base = inic_port_base(ap);
 
 	writeb(PIRQ_MASK_FREEZE, port_base + PORT_IRQ_MASK);
-	ap->ops->sff_check_status(ap);
 	writeb(0xff, port_base + PORT_IRQ_STAT);
 }
 
@@ -576,7 +570,6 @@ static void inic_thaw(struct ata_port *ap)
 {
 	void __iomem *port_base = inic_port_base(ap);
 
-	ap->ops->sff_check_status(ap);
 	writeb(0xff, port_base + PORT_IRQ_STAT);
 	writeb(PIRQ_MASK_DEFAULT, port_base + PORT_IRQ_MASK);
 }
@@ -599,17 +592,15 @@ static int inic_hardreset(struct ata_link *link, unsigned int *class,
 	void __iomem *port_base = inic_port_base(ap);
 	void __iomem *idma_ctl = port_base + PORT_IDMA_CTL;
 	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
-	u16 val;
 	int rc;
 
 	/* hammer it into sane state */
 	inic_reset_port(port_base);
 
-	val = readw(idma_ctl);
-	writew(val | IDMA_CTL_RST_ATA, idma_ctl);
+	writew(IDMA_CTL_RST_ATA, idma_ctl);
 	readw(idma_ctl); /* flush */
 	msleep(1);
-	writew(val & ~IDMA_CTL_RST_ATA, idma_ctl);
+	writew(0, idma_ctl);
 
 	rc = sata_link_resume(link, timing, deadline);
 	if (rc) {
@@ -641,16 +632,8 @@ static int inic_hardreset(struct ata_link *link, unsigned int *class,
 static void inic_error_handler(struct ata_port *ap)
 {
 	void __iomem *port_base = inic_port_base(ap);
-	unsigned long flags;
 
-	/* reset PIO HSM and stop DMA engine */
 	inic_reset_port(port_base);
-
-	spin_lock_irqsave(ap->lock, flags);
-	ap->hsm_task_state = HSM_ST_IDLE;
-	spin_unlock_irqrestore(ap->lock, flags);
-
-	/* PIO and DMA engines have been stopped, perform recovery */
 	ata_std_error_handler(ap);
 }
 
@@ -714,7 +697,7 @@ static int inic_port_start(struct ata_port *ap)
 }
 
 static struct ata_port_operations inic_port_ops = {
-	.inherits		= &ata_sff_port_ops,
+	.inherits		= &sata_port_ops,
 
 	.check_atapi_dma	= inic_check_atapi_dma,
 	.qc_prep		= inic_qc_prep,
@@ -723,7 +706,6 @@ static struct ata_port_operations inic_port_ops = {
 
 	.freeze			= inic_freeze,
 	.thaw			= inic_thaw,
-	.softreset		= ATA_OP_NULL,	/* softreset is broken */
 	.hardreset		= inic_hardreset,
 	.error_handler		= inic_error_handler,
 	.post_internal_cmd	= inic_post_internal_cmd,
@@ -832,34 +814,19 @@ static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (rc)
 		return rc;
 
-	rc = pcim_iomap_regions(pdev, 0x3f, DRV_NAME);
+	rc = pcim_iomap_regions(pdev, 1 << MMIO_BAR, DRV_NAME);
 	if (rc)
 		return rc;
 	host->iomap = iomap = pcim_iomap_table(pdev);
+	hpriv->cached_hctl = readw(iomap[MMIO_BAR] + HOST_CTL);
 
 	for (i = 0; i < NR_PORTS; i++) {
 		struct ata_port *ap = host->ports[i];
-		struct ata_ioports *port = &ap->ioaddr;
-		unsigned int offset = i * PORT_SIZE;
-
-		port->cmd_addr = iomap[2 * i];
-		port->altstatus_addr =
-		port->ctl_addr = (void __iomem *)
-			((unsigned long)iomap[2 * i + 1] | ATA_PCI_CTL_OFS);
-		port->scr_addr = iomap[MMIO_BAR] + offset + PORT_SCR;
-
-		ata_sff_std_ports(port);
 
 		ata_port_pbar_desc(ap, MMIO_BAR, -1, "mmio");
-		ata_port_pbar_desc(ap, MMIO_BAR, offset, "port");
-		ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx",
-			      (unsigned long long)pci_resource_start(pdev, 2 * i),
-			      (unsigned long long)pci_resource_start(pdev, (2 * i + 1)) |
-			      ATA_PCI_CTL_OFS);
+		ata_port_pbar_desc(ap, MMIO_BAR, i * PORT_SIZE, "port");
 	}
 
-	hpriv->cached_hctl = readw(iomap[MMIO_BAR] + HOST_CTL);
-
 	/* Set dma_mask.  This devices doesn't support 64bit addressing. */
 	rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
 	if (rc) {