author		Jeff Garzik <jgarzik@pobox.com>	2006-01-31 12:18:41 -0500
committer	Jeff Garzik <jgarzik@pobox.com>	2006-01-31 12:18:41 -0500
commit		e4e7b89280d1d666e2c09e5ad36cf071796c4c7e (patch)
tree		32f9fed9d68452fe6cf1ccca6559c0703b04f544 /drivers/scsi/sata_mv.c
parent		d6fb89bf6b8b45ec8c911570ba0852940690d846 (diff)
[libata sata_mv] add 6042 support, fix 60xx/50xx EDMA configuration
Diffstat (limited to 'drivers/scsi/sata_mv.c')
-rw-r--r--	drivers/scsi/sata_mv.c	212
1 file changed, 200 insertions(+), 12 deletions(-)
diff --git a/drivers/scsi/sata_mv.c b/drivers/scsi/sata_mv.c
index d9a554ca45c7..b55dd839ddbd 100644
--- a/drivers/scsi/sata_mv.c
+++ b/drivers/scsi/sata_mv.c
@@ -37,7 +37,7 @@
 #include <asm/io.h>
 
 #define DRV_NAME	"sata_mv"
-#define DRV_VERSION	"0.5"
+#define DRV_VERSION	"0.6"
 
 enum {
 	/* BAR's are enumerated in terms of pci_resource_start() terms */
@@ -228,7 +228,9 @@ enum {
 	MV_HP_ERRATA_50XXB2	= (1 << 2),
 	MV_HP_ERRATA_60X1B2	= (1 << 3),
 	MV_HP_ERRATA_60X1C0	= (1 << 4),
-	MV_HP_50XX		= (1 << 5),
+	MV_HP_ERRATA_XX42A0	= (1 << 5),
+	MV_HP_50XX		= (1 << 6),
+	MV_HP_GEN_IIE		= (1 << 7),
 
 	/* Port private flags (pp_flags) */
 	MV_PP_FLAG_EDMA_EN	= (1 << 0),
@@ -237,6 +239,9 @@ enum {
 
 #define IS_50XX(hpriv)		((hpriv)->hp_flags & MV_HP_50XX)
 #define IS_60XX(hpriv)		(((hpriv)->hp_flags & MV_HP_50XX) == 0)
+#define IS_GEN_I(hpriv)		IS_50XX(hpriv)
+#define IS_GEN_II(hpriv)	IS_60XX(hpriv)
+#define IS_GEN_IIE(hpriv)	((hpriv)->hp_flags & MV_HP_GEN_IIE)
 
 enum {
 	/* Our DMA boundary is determined by an ePRD being unable to handle
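
A note on these helpers: IS_GEN_II() is simply IS_60XX(), i.e. "the MV_HP_50XX
bit is clear", so it also evaluates true on the new Gen IIE (6042/7042) parts,
which never set MV_HP_50XX. The minimal user-space sketch below (not part of
the patch) demonstrates the overlap; callers that need to tell the generations
apart must test IS_GEN_IIE() first.

    /* standalone sketch of the hp_flags generation tests above */
    #include <assert.h>

    enum {
            MV_HP_50XX      = (1 << 6),
            MV_HP_GEN_IIE   = (1 << 7),
    };

    #define IS_50XX(f)      ((f) & MV_HP_50XX)
    #define IS_60XX(f)      (((f) & MV_HP_50XX) == 0)
    #define IS_GEN_I(f)     IS_50XX(f)
    #define IS_GEN_II(f)    IS_60XX(f)
    #define IS_GEN_IIE(f)   ((f) & MV_HP_GEN_IIE)

    int main(void)
    {
            unsigned int iie = MV_HP_GEN_IIE;       /* e.g. a 6042 */

            assert(IS_GEN_IIE(iie));
            assert(IS_GEN_II(iie)); /* also true: the 50XX bit is clear */
            return 0;
    }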
@@ -255,6 +260,8 @@ enum chip_type {
 	chip_5080,
 	chip_604x,
 	chip_608x,
+	chip_6042,
+	chip_7042,
 };
 
 /* Command ReQuest Block: 32B */
@@ -265,6 +272,14 @@ struct mv_crqb {
 	u16			ata_cmd[11];
 };
 
+struct mv_crqb_iie {
+	u32			addr;
+	u32			addr_hi;
+	u32			flags;
+	u32			len;
+	u32			ata_cmd[4];
+};
+
 /* Command ResPonse Block: 8B */
 struct mv_crpb {
 	u16			id;
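
Both request-block formats occupy the same 32-byte slot, which is what lets
mv_qc_prep_iie() further down reuse the existing pp->crqb queue storage with a
simple cast. A compile-time sketch of that assumption (uint32_t stands in for
the kernel's u32):

    #include <stdint.h>

    struct mv_crqb_iie {
            uint32_t addr;          /* ePRD table base, low 32 bits */
            uint32_t addr_hi;       /* ePRD table base, high 32 bits */
            uint32_t flags;         /* read/write direction, tag */
            uint32_t len;
            uint32_t ata_cmd[4];    /* packed ATA taskfile registers */
    };

    /* must match the 32B slots of the original CRQB queue */
    _Static_assert(sizeof(struct mv_crqb_iie) == 32, "CRQB slot size");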
@@ -328,6 +343,7 @@ static void mv_host_stop(struct ata_host_set *host_set);
 static int mv_port_start(struct ata_port *ap);
 static void mv_port_stop(struct ata_port *ap);
 static void mv_qc_prep(struct ata_queued_cmd *qc);
+static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
 static irqreturn_t mv_interrupt(int irq, void *dev_instance,
 				struct pt_regs *regs);
@@ -430,6 +446,33 @@ static const struct ata_port_operations mv6_ops = {
 	.host_stop		= mv_host_stop,
 };
 
+static const struct ata_port_operations mv_iie_ops = {
+	.port_disable		= ata_port_disable,
+
+	.tf_load		= ata_tf_load,
+	.tf_read		= ata_tf_read,
+	.check_status		= ata_check_status,
+	.exec_command		= ata_exec_command,
+	.dev_select		= ata_std_dev_select,
+
+	.phy_reset		= mv_phy_reset,
+
+	.qc_prep		= mv_qc_prep_iie,
+	.qc_issue		= mv_qc_issue,
+
+	.eng_timeout		= mv_eng_timeout,
+
+	.irq_handler		= mv_interrupt,
+	.irq_clear		= mv_irq_clear,
+
+	.scr_read		= mv_scr_read,
+	.scr_write		= mv_scr_write,
+
+	.port_start		= mv_port_start,
+	.port_stop		= mv_port_stop,
+	.host_stop		= mv_host_stop,
+};
+
 static const struct ata_port_info mv_port_info[] = {
 	{  /* chip_504x */
 		.sht		= &mv_sht,
@@ -467,6 +510,21 @@ static const struct ata_port_info mv_port_info[] = {
 		.udma_mask	= 0x7f,	/* udma0-6 */
 		.port_ops	= &mv6_ops,
 	},
+	{  /* chip_6042 */
+		.sht		= &mv_sht,
+		.host_flags	= (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
+		.pio_mask	= 0x1f,	/* pio0-4 */
+		.udma_mask	= 0x7f,	/* udma0-6 */
+		.port_ops	= &mv_iie_ops,
+	},
+	{  /* chip_7042 */
+		.sht		= &mv_sht,
+		.host_flags	= (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
+				   MV_FLAG_DUAL_HC),
+		.pio_mask	= 0x1f,	/* pio0-4 */
+		.udma_mask	= 0x7f,	/* udma0-6 */
+		.port_ops	= &mv_iie_ops,
+	},
 };
 
 static const struct pci_device_id mv_pci_tbl[] = {
@@ -477,6 +535,7 @@ static const struct pci_device_id mv_pci_tbl[] = {
 
 	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6040), 0, 0, chip_604x},
 	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6041), 0, 0, chip_604x},
+	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6042), 0, 0, chip_6042},
 	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6080), 0, 0, chip_608x},
 	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6081), 0, 0, chip_608x},
 
@@ -767,6 +826,33 @@ static inline void mv_priv_free(struct mv_port_priv *pp, struct device *dev)
 	dma_free_coherent(dev, MV_PORT_PRIV_DMA_SZ, pp->crpb, pp->crpb_dma);
 }
 
+static void mv_edma_cfg(struct mv_host_priv *hpriv, void __iomem *port_mmio)
+{
+	u32 cfg = readl(port_mmio + EDMA_CFG_OFS);
+
+	/* set up non-NCQ EDMA configuration */
+	cfg &= ~0x1f;		/* clear queue depth */
+	cfg &= ~EDMA_CFG_NCQ;	/* clear NCQ mode */
+	cfg &= ~(1 << 9);	/* disable equeue */
+
+	if (IS_GEN_I(hpriv))
+		cfg |= (1 << 8);	/* enab config burst size mask */
+
+	else if (IS_GEN_II(hpriv))
+		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
+
+	else if (IS_GEN_IIE(hpriv)) {
+		cfg |= (1 << 23);	/* dis RX PM port mask */
+		cfg &= ~(1 << 16);	/* dis FIS-based switching (for now) */
+		cfg &= ~(1 << 19);	/* dis 128-entry queue (for now?) */
+		cfg |= (1 << 18);	/* enab early completion */
+		cfg |= (1 << 17);	/* enab host q cache */
+		cfg |= (1 << 22);	/* enab cutthrough */
+	}
+
+	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
+}
+
 /**
  * mv_port_start - Port specific init/start routine.
  * @ap: ATA channel to manipulate
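
One subtlety in the chain above: with the macros as defined in this patch,
IS_GEN_II() is also true for Gen IIE chips (their MV_HP_50XX bit is clear), so
a 6042/7042 takes the Gen II arm and the IS_GEN_IIE() block is never reached;
testing IS_GEN_IIE() before IS_GEN_II() would select the intended settings. A
small sketch of the dispatch, with the flag values from this patch:

    #include <stdio.h>

    enum { MV_HP_50XX = 1 << 6, MV_HP_GEN_IIE = 1 << 7 };

    static const char *edma_cfg_arm(unsigned int hp_flags)
    {
            if (hp_flags & MV_HP_50XX)              /* IS_GEN_I() */
                    return "Gen I";
            else if (!(hp_flags & MV_HP_50XX))      /* IS_GEN_II() */
                    return "Gen II";
            else if (hp_flags & MV_HP_GEN_IIE)      /* IS_GEN_IIE() */
                    return "Gen IIE";
            return "none";
    }

    int main(void)
    {
            /* prints "Gen II": the Gen IIE arm is shadowed */
            printf("%s\n", edma_cfg_arm(MV_HP_GEN_IIE));
            return 0;
    }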
@@ -780,6 +866,7 @@ static inline void mv_priv_free(struct mv_port_priv *pp, struct device *dev)
 static int mv_port_start(struct ata_port *ap)
 {
 	struct device *dev = ap->host_set->dev;
+	struct mv_host_priv *hpriv = ap->host_set->private_data;
 	struct mv_port_priv *pp;
 	void __iomem *port_mmio = mv_ap_base(ap);
 	void *mem;
@@ -823,17 +910,26 @@ static int mv_port_start(struct ata_port *ap)
 	pp->sg_tbl = mem;
 	pp->sg_tbl_dma = mem_dma;
 
-	writelfl(EDMA_CFG_Q_DEPTH | EDMA_CFG_RD_BRST_EXT |
-		 EDMA_CFG_WR_BUFF_LEN, port_mmio + EDMA_CFG_OFS);
+	mv_edma_cfg(hpriv, port_mmio);
 
 	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
 	writelfl(pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK,
 		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
 
-	writelfl(0, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
-	writelfl(0, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
+	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
+		writelfl(pp->crqb_dma & 0xffffffff,
+			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
+	else
+		writelfl(0, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
 
 	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
+
+	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
+		writelfl(pp->crpb_dma & 0xffffffff,
+			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
+	else
+		writelfl(0, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
 
 	writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK,
 		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
 
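
The (x >> 16) >> 16 construct used for the *_BASE_HI registers, rather than
x >> 32, is presumably deliberate: dma_addr_t is a 32-bit type on many
configurations, and shifting a 32-bit value by its full width is undefined in
C. Two 16-bit shifts are always well defined and yield 0 for a 32-bit
address, as this user-space sketch shows:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t dma_addr_t;    /* assume a 32-bit DMA configuration */

    int main(void)
    {
            dma_addr_t crqb_dma = 0xfffe0000u;

            /* high half computed without shifting by >= the type width */
            printf("hi=%u lo=0x%08x\n",
                   (unsigned int)((crqb_dma >> 16) >> 16),
                   (unsigned int)(crqb_dma & 0xffffffff));
            return 0;
    }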
@@ -954,9 +1050,8 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
 	struct ata_taskfile *tf;
 	u16 flags = 0;
 
-	if (ATA_PROT_DMA != qc->tf.protocol) {
+	if (ATA_PROT_DMA != qc->tf.protocol)
 		return;
-	}
 
 	/* the req producer index should be the same as we remember it */
 	assert(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >>
@@ -965,9 +1060,8 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
 
 	/* Fill in command request block
 	 */
-	if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
+	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
 		flags |= CRQB_FLAG_READ;
-	}
 	assert(MV_MAX_Q_DEPTH > qc->tag);
 	flags |= qc->tag << CRQB_TAG_SHIFT;
 
@@ -1022,9 +1116,76 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
 	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
 	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */
 
-	if (!(qc->flags & ATA_QCFLAG_DMAMAP)) {
+	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
+		return;
+	mv_fill_sg(qc);
+}
+
+/**
+ * mv_qc_prep_iie - Host specific command preparation.
+ * @qc: queued command to prepare
+ *
+ * This routine simply redirects to the general purpose routine
+ * if command is not DMA.  Else, it handles prep of the CRQB
+ * (command request block), does some sanity checking, and calls
+ * the SG load routine.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct mv_port_priv *pp = ap->private_data;
+	struct mv_crqb_iie *crqb;
+	struct ata_taskfile *tf;
+	u32 flags = 0;
+
+	if (ATA_PROT_DMA != qc->tf.protocol)
+		return;
+
+	/* the req producer index should be the same as we remember it */
+	assert(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >>
+		 EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
+	       pp->req_producer);
+
+	/* Fill in Gen IIE command request block
+	 */
+	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
+		flags |= CRQB_FLAG_READ;
+
+	assert(MV_MAX_Q_DEPTH > qc->tag);
+	flags |= qc->tag << CRQB_TAG_SHIFT;
+
+	crqb = (struct mv_crqb_iie *) &pp->crqb[pp->req_producer];
+	crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
+	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
+	crqb->flags = cpu_to_le32(flags);
+
+	tf = &qc->tf;
+	crqb->ata_cmd[0] = cpu_to_le32(
+			(tf->command << 16) |
+			(tf->feature << 24)
+		);
+	crqb->ata_cmd[1] = cpu_to_le32(
+			(tf->lbal << 0) |
+			(tf->lbam << 8) |
+			(tf->lbah << 16) |
+			(tf->device << 24)
+		);
+	crqb->ata_cmd[2] = cpu_to_le32(
+			(tf->hob_lbal << 0) |
+			(tf->hob_lbam << 8) |
+			(tf->hob_lbah << 16) |
+			(tf->hob_feature << 24)
+		);
+	crqb->ata_cmd[3] = cpu_to_le32(
+			(tf->nsect << 0) |
+			(tf->hob_nsect << 8)
+		);
+
+	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
 		return;
-	}
 	mv_fill_sg(qc);
 }
 
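
The four ata_cmd words replace the register/value pairs that
mv_crqb_pack_cmd() emits for the older CRQB format. A self-contained
user-space sketch of the packing, with the field layout copied from the hunk
above (the struct here is a stand-in for the kernel's ata_taskfile):

    #include <stdint.h>
    #include <stdio.h>

    struct taskfile {       /* only the fields the packer consumes */
            uint8_t command, feature, nsect, lbal, lbam, lbah, device;
            uint8_t hob_feature, hob_nsect, hob_lbal, hob_lbam, hob_lbah;
    };

    static void pack_iie(const struct taskfile *tf, uint32_t cmd[4])
    {
            cmd[0] = ((uint32_t)tf->command << 16) |
                     ((uint32_t)tf->feature << 24);
            cmd[1] = tf->lbal | (tf->lbam << 8) | (tf->lbah << 16) |
                     ((uint32_t)tf->device << 24);
            cmd[2] = tf->hob_lbal | (tf->hob_lbam << 8) |
                     (tf->hob_lbah << 16) |
                     ((uint32_t)tf->hob_feature << 24);
            cmd[3] = tf->nsect | (tf->hob_nsect << 8);
    }

    int main(void)
    {
            struct taskfile tf = { .command = 0x25, /* READ DMA EXT */
                                   .nsect = 1, .lbal = 0x10 };
            uint32_t cmd[4];

            pack_iie(&tf, cmd);
            printf("%08x %08x %08x %08x\n", cmd[0], cmd[1], cmd[2], cmd[3]);
            return 0;
    }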
@@ -1674,6 +1835,12 @@ static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
 	m2 |= hpriv->signal[port].pre;
 	m2 &= ~(1 << 16);
 
+	/* according to mvSata 3.6.1, some IIE values are fixed */
+	if (IS_GEN_IIE(hpriv)) {
+		m2 &= ~0xC30FF01F;
+		m2 |= 0x0000900F;
+	}
+
 	writel(m2, port_mmio + PHY_MODE2);
 }
 
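
The clear-then-set pair forces the masked PHY_MODE2 bits to the fixed values
the mvSata reference prescribes, regardless of their previous state. A sketch
of the pattern as a helper, using the same constants, with a check that the
masked field really ends up at the forced value:

    #include <assert.h>
    #include <stdint.h>

    /* force the bits selected by mask to val, leave the rest untouched */
    static uint32_t force_bits(uint32_t reg, uint32_t mask, uint32_t val)
    {
            return (reg & ~mask) | (val & mask);
    }

    int main(void)
    {
            uint32_t m2 = force_bits(0xdeadbeef, 0xC30FF01F, 0x0000900F);

            assert((m2 & 0xC30FF01F) == 0x0000900F);
            return 0;
    }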
@@ -1978,6 +2145,27 @@ static int mv_chip_id(struct pci_dev *pdev, struct mv_host_priv *hpriv,
 		}
 		break;
 
+	case chip_7042:
+	case chip_6042:
+		hpriv->ops = &mv6xxx_ops;
+
+		hp_flags |= MV_HP_GEN_IIE;
+
+		switch (rev_id) {
+		case 0x0:
+			hp_flags |= MV_HP_ERRATA_XX42A0;
+			break;
+		case 0x1:
+			hp_flags |= MV_HP_ERRATA_60X1C0;
+			break;
+		default:
+			dev_printk(KERN_WARNING, &pdev->dev,
+			   "Applying 60X1C0 workarounds to unknown rev\n");
+			hp_flags |= MV_HP_ERRATA_60X1C0;
+			break;
+		}
+		break;
+
 	default:
 		printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n", board_idx);
 		return 1;