author     Jeff Garzik <jgarzik@pobox.com>  2005-11-12 12:32:50 -0500
committer  Jeff Garzik <jgarzik@pobox.com>  2005-11-12 12:32:50 -0500
commit     8b260248d9e0e8b64bb72fd4dee03ad86984c344 (patch)
tree       c22783d6ca7df7efb03f0f3811afeaf51931b64e /drivers
parent     095fec887eaa1c38d17c0c929a6733c744a9fa1f (diff)
[libata sata_mv] trim trailing whitespace
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/scsi/sata_mv.c  76
1 file changed, 38 insertions, 38 deletions
diff --git a/drivers/scsi/sata_mv.c b/drivers/scsi/sata_mv.c
index 82d1750e779b..26e9d51e6caf 100644
--- a/drivers/scsi/sata_mv.c
+++ b/drivers/scsi/sata_mv.c
@@ -1,7 +1,7 @@
/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2005: EMC Corporation, all rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
@@ -84,7 +84,7 @@ enum {
        MV_FLAG_GLBL_SFT_RST = (1 << 28),  /* Global Soft Reset support */
        MV_COMMON_FLAGS = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
                           ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO),
        MV_6XXX_FLAGS = (MV_FLAG_IRQ_COALESCE |
                         MV_FLAG_GLBL_SFT_RST),

        chip_504x = 0,
@@ -129,7 +129,7 @@ enum {
        SELF_INT = (1 << 23),
        TWSI_INT = (1 << 24),
        HC_MAIN_RSVD = (0x7f << 25),  /* bits 31-25 */
        HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
                               PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
                               HC_MAIN_RSVD),

@@ -177,12 +177,12 @@ enum {
        EDMA_ERR_LNK_CTRL_TX = (0x1f << 21),
        EDMA_ERR_LNK_DATA_TX = (0x1f << 26),
        EDMA_ERR_TRANS_PROTO = (1 << 31),
        EDMA_ERR_FATAL = (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
                          EDMA_ERR_DEV_DCON | EDMA_ERR_CRBQ_PAR |
                          EDMA_ERR_CRPB_PAR | EDMA_ERR_INTRL_PAR |
                          EDMA_ERR_IORDY | EDMA_ERR_LNK_CTRL_RX_2 |
                          EDMA_ERR_LNK_DATA_RX |
                          EDMA_ERR_LNK_DATA_TX |
                          EDMA_ERR_TRANS_PROTO),

        EDMA_REQ_Q_BASE_HI_OFS = 0x10,
@@ -345,7 +345,7 @@ static struct ata_port_info mv_port_info[] = {
        },
        {  /* chip_608x */
                .sht = &mv_sht,
                .host_flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
                               MV_FLAG_DUAL_HC),
                .pio_mask = 0x1f,   /* pio0-4 */
                .udma_mask = 0x7f,  /* udma0-6 */
@@ -393,7 +393,7 @@ static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
        return (mv_hc_base(base, port >> MV_PORT_HC_SHIFT) +
                MV_SATAHC_ARBTR_REG_SZ +
                ((port & MV_PORT_MASK) * MV_PORT_REG_SZ));
}

@@ -456,7 +456,7 @@ static void mv_stop_dma(struct ata_port *ap)
        } else {
                assert(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
        }

        /* now properly wait for the eDMA to stop */
        for (i = 1000; i > 0; i--) {
                reg = readl(port_mmio + EDMA_CMD_OFS);
@@ -507,7 +507,7 @@ static void mv_dump_all_regs(void __iomem *mmio_base, int port,
                             struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
        void __iomem *hc_base = mv_hc_base(mmio_base,
                                           port >> MV_PORT_HC_SHIFT);
        void __iomem *port_base;
        int start_port, num_ports, p, start_hc, num_hcs, hc;
@@ -521,7 +521,7 @@ static void mv_dump_all_regs(void __iomem *mmio_base, int port,
                start_port = port;
                num_ports = num_hcs = 1;
        }
        DPRINTK("All registers for port(s) %u-%u:\n", start_port,
                num_ports > 1 ? num_ports - 1 : start_port);

        if (NULL != pdev) {
@@ -705,7 +705,7 @@ static int mv_port_start(struct ata_port *ap)
                goto err_out;
        memset(pp, 0, sizeof(*pp));

        mem = dma_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
                                 GFP_KERNEL);
        if (!mem)
                goto err_out_pp;
@@ -715,7 +715,7 @@ static int mv_port_start(struct ata_port *ap)
        if (rc)
                goto err_out_priv;

        /* First item in chunk of DMA memory:
         * 32-slot command request table (CRQB), 32 bytes each in size
         */
        pp->crqb = mem;
@@ -723,7 +723,7 @@ static int mv_port_start(struct ata_port *ap)
        mem += MV_CRQB_Q_SZ;
        mem_dma += MV_CRQB_Q_SZ;

        /* Second item:
         * 32-slot command response table (CRPB), 8 bytes each in size
         */
        pp->crpb = mem;
@@ -737,18 +737,18 @@ static int mv_port_start(struct ata_port *ap)
        pp->sg_tbl = mem;
        pp->sg_tbl_dma = mem_dma;

        writelfl(EDMA_CFG_Q_DEPTH | EDMA_CFG_RD_BRST_EXT |
                 EDMA_CFG_WR_BUFF_LEN, port_mmio + EDMA_CFG_OFS);

        writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
        writelfl(pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK,
                 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

        writelfl(0, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
        writelfl(0, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

        writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
        writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK,
                 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);

        pp->req_producer = pp->rsp_consumer = 0;
@@ -863,7 +863,7 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
        }

        /* the req producer index should be the same as we remember it */
        assert(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >>
                 EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
               pp->req_producer);

@@ -875,9 +875,9 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
        assert(MV_MAX_Q_DEPTH > qc->tag);
        flags |= qc->tag << CRQB_TAG_SHIFT;

        pp->crqb[pp->req_producer].sg_addr =
                cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
        pp->crqb[pp->req_producer].sg_addr_hi =
                cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
        pp->crqb[pp->req_producer].ctrl_flags = cpu_to_le16(flags);

@@ -900,7 +900,7 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
#ifdef LIBATA_NCQ   /* FIXME: remove this line when NCQ added */
        case ATA_CMD_FPDMA_READ:
        case ATA_CMD_FPDMA_WRITE:
                mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
                mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
                break;
#endif  /* FIXME: remove this line when NCQ added */
@@ -966,7 +966,7 @@ static int mv_qc_issue(struct ata_queued_cmd *qc)
               pp->req_producer);
        /* until we do queuing, the queue should be empty at this point */
        assert(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
               ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS) >>
                 EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

        mv_inc_q_index(&pp->req_producer);  /* now incr producer index */
@@ -1003,15 +1003,15 @@ static u8 mv_get_crpb_status(struct ata_port *ap)
        out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);

        /* the response consumer index should be the same as we remember it */
        assert(((out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
               pp->rsp_consumer);

        /* increment our consumer index... */
        pp->rsp_consumer = mv_inc_q_index(&pp->rsp_consumer);

        /* and, until we do NCQ, there should only be 1 CRPB waiting */
        assert(((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS) >>
                 EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
               pp->rsp_consumer);

        /* write out our inc'd consumer index so EDMA knows we're caught up */
@@ -1135,7 +1135,7 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
                        err_mask |= AC_ERR_OTHER;
                        handled++;
                }

                if (handled && ap) {
                        qc = ata_qc_from_tag(ap, ap->active_tag);
                        if (NULL != qc) {
@@ -1150,7 +1150,7 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
}

/**
 *      mv_interrupt -
 *      @irq: unused
 *      @dev_instance: private data; in this case the host structure
 *      @regs: unused
@@ -1160,7 +1160,7 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
 *      routine to handle.  Also check for PCI errors which are only
 *      reported here.
 *
 *      LOCKING:
 *      This routine holds the host_set lock while processing pending
 *      interrupts.
 */
@@ -1303,12 +1303,12 @@ static void mv_eng_timeout(struct ata_port *ap)

        printk(KERN_ERR "ata%u: Entering mv_eng_timeout\n",ap->id);
        DPRINTK("All regs @ start of eng_timeout\n");
        mv_dump_all_regs(ap->host_set->mmio_base, ap->port_no,
                         to_pci_dev(ap->host_set->dev));

        qc = ata_qc_from_tag(ap, ap->active_tag);
        printk(KERN_ERR "mmio_base %p ap %p qc %p scsi_cmnd %p &cmnd %p\n",
               ap->host_set->mmio_base, ap, qc, qc->scsicmd,
               &qc->scsicmd->cmnd);

        mv_err_intr(ap);
@@ -1348,17 +1348,17 @@ static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
        unsigned long shd_base = (unsigned long) port_mmio + SHD_BLK_OFS;
        unsigned serr_ofs;

        /* PIO related setup
         */
        port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
        port->error_addr =
                port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
        port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
        port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
        port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
        port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
        port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
        port->status_addr =
                port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
        /* special case: control/altstatus doesn't have ATA_REG_ address */
        port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
@@ -1374,7 +1374,7 @@ static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
        /* unmask all EDMA error interrupts */
        writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

        VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
                readl(port_mmio + EDMA_CFG_OFS),
                readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
                readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
@@ -1396,7 +1396,7 @@ static int mv_host_init(struct ata_probe_ent *probe_ent)
        void __iomem *mmio = probe_ent->mmio_base;
        void __iomem *port_mmio;

        if ((MV_FLAG_GLBL_SFT_RST & probe_ent->host_flags) &&
            mv_global_soft_reset(probe_ent->mmio_base)) {
                rc = 1;
                goto done;
@@ -1430,7 +1430,7 @@ static int mv_host_init(struct ata_probe_ent *probe_ent)
        writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);

        VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
                "PCI int cause/mask=0x%08x/0x%08x\n",
                readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
                readl(mmio + HC_MAIN_IRQ_MASK_OFS),
                readl(mmio + PCI_IRQ_CAUSE_OFS),
@@ -1470,7 +1470,7 @@ static void mv_print_info(struct ata_probe_ent *probe_ent)

        dev_printk(KERN_INFO, &pdev->dev,
                   "%u slots %u ports %s mode IRQ via %s\n",
                   (unsigned)MV_MAX_Q_DEPTH, probe_ent->n_ports,
                   scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
}

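Every hunk above is a pure whitespace fix: the removed and added lines differ only in blanks or tabs at end of line, which is why the old and new text look identical in this view. As a rough, illustrative sketch of the same kind of cleanup (not taken from this commit, and not necessarily how it was produced; the trim_ws.py name is made up), trailing whitespace can be stripped from a source file with a few lines of Python:

import sys

def trim_trailing_whitespace(path):
    # Read the whole file, drop trailing blanks/tabs from every line,
    # and rewrite the file only if something actually changed.
    # Assumes the file already ends with a newline.
    with open(path, "r", encoding="utf-8", errors="surrogateescape") as f:
        lines = f.readlines()
    cleaned = [line.rstrip(" \t\r\n") + "\n" for line in lines]
    if cleaned != lines:
        with open(path, "w", encoding="utf-8", errors="surrogateescape") as f:
            f.writelines(cleaned)

if __name__ == "__main__":
    # e.g.: python trim_ws.py drivers/scsi/sata_mv.c
    for p in sys.argv[1:]:
        trim_trailing_whitespace(p)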