Diffstat (limited to 'drivers/scsi/sata_mv.c')

 drivers/scsi/sata_mv.c | 286
 1 file changed, 231 insertions(+), 55 deletions(-)
diff --git a/drivers/scsi/sata_mv.c b/drivers/scsi/sata_mv.c
index 33e387354388..fa901fd65085 100644
--- a/drivers/scsi/sata_mv.c
+++ b/drivers/scsi/sata_mv.c
@@ -37,7 +37,7 @@
 #include <asm/io.h>
 
 #define DRV_NAME	"sata_mv"
-#define DRV_VERSION	"0.5"
+#define DRV_VERSION	"0.6"
 
 enum {
 	/* BAR's are enumerated in terms of pci_resource_start() terms */
@@ -228,7 +228,9 @@ enum {
 	MV_HP_ERRATA_50XXB2	= (1 << 2),
 	MV_HP_ERRATA_60X1B2	= (1 << 3),
 	MV_HP_ERRATA_60X1C0	= (1 << 4),
-	MV_HP_50XX		= (1 << 5),
+	MV_HP_ERRATA_XX42A0	= (1 << 5),
+	MV_HP_50XX		= (1 << 6),
+	MV_HP_GEN_IIE		= (1 << 7),
 
 	/* Port private flags (pp_flags) */
 	MV_PP_FLAG_EDMA_EN	= (1 << 0),
@@ -237,6 +239,9 @@ enum {
 
 #define IS_50XX(hpriv)	((hpriv)->hp_flags & MV_HP_50XX)
 #define IS_60XX(hpriv)	(((hpriv)->hp_flags & MV_HP_50XX) == 0)
+#define IS_GEN_I(hpriv)		IS_50XX(hpriv)
+#define IS_GEN_II(hpriv)	IS_60XX(hpriv)
+#define IS_GEN_IIE(hpriv)	((hpriv)->hp_flags & MV_HP_GEN_IIE)
 
 enum {
 	/* Our DMA boundary is determined by an ePRD being unable to handle
@@ -255,6 +260,8 @@ enum chip_type {
 	chip_5080,
 	chip_604x,
 	chip_608x,
+	chip_6042,
+	chip_7042,
 };
 
 /* Command ReQuest Block: 32B */
@@ -265,6 +272,14 @@ struct mv_crqb {
 	u16			ata_cmd[11];
 };
 
+struct mv_crqb_iie {
+	u32			addr;
+	u32			addr_hi;
+	u32			flags;
+	u32			len;
+	u32			ata_cmd[4];
+};
+
 /* Command ResPonse Block: 8B */
 struct mv_crpb {
 	u16			id;
@@ -328,7 +343,8 @@ static void mv_host_stop(struct ata_host_set *host_set);
 static int mv_port_start(struct ata_port *ap);
 static void mv_port_stop(struct ata_port *ap);
 static void mv_qc_prep(struct ata_queued_cmd *qc);
-static int mv_qc_issue(struct ata_queued_cmd *qc);
+static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
+static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
 static irqreturn_t mv_interrupt(int irq, void *dev_instance,
 				struct pt_regs *regs);
 static void mv_eng_timeout(struct ata_port *ap);
@@ -366,7 +382,6 @@ static struct scsi_host_template mv_sht = {
 	.can_queue		= MV_USE_Q_DEPTH,
 	.this_id		= ATA_SHT_THIS_ID,
 	.sg_tablesize		= MV_MAX_SG_CT / 2,
-	.max_sectors		= ATA_MAX_SECTORS,
 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
 	.emulated		= ATA_SHT_EMULATED,
 	.use_clustering		= ATA_SHT_USE_CLUSTERING,
@@ -430,6 +445,33 @@ static const struct ata_port_operations mv6_ops = {
 	.host_stop		= mv_host_stop,
 };
 
+static const struct ata_port_operations mv_iie_ops = {
+	.port_disable		= ata_port_disable,
+
+	.tf_load		= ata_tf_load,
+	.tf_read		= ata_tf_read,
+	.check_status		= ata_check_status,
+	.exec_command		= ata_exec_command,
+	.dev_select		= ata_std_dev_select,
+
+	.phy_reset		= mv_phy_reset,
+
+	.qc_prep		= mv_qc_prep_iie,
+	.qc_issue		= mv_qc_issue,
+
+	.eng_timeout		= mv_eng_timeout,
+
+	.irq_handler		= mv_interrupt,
+	.irq_clear		= mv_irq_clear,
+
+	.scr_read		= mv_scr_read,
+	.scr_write		= mv_scr_write,
+
+	.port_start		= mv_port_start,
+	.port_stop		= mv_port_stop,
+	.host_stop		= mv_host_stop,
+};
+
 static const struct ata_port_info mv_port_info[] = {
 	{  /* chip_504x */
 		.sht		= &mv_sht,
@@ -467,6 +509,21 @@ static const struct ata_port_info mv_port_info[] = {
 		.udma_mask	= 0x7f,	/* udma0-6 */
 		.port_ops	= &mv6_ops,
 	},
+	{  /* chip_6042 */
+		.sht		= &mv_sht,
+		.host_flags	= (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
+		.pio_mask	= 0x1f,	/* pio0-4 */
+		.udma_mask	= 0x7f,	/* udma0-6 */
+		.port_ops	= &mv_iie_ops,
+	},
+	{  /* chip_7042 */
+		.sht		= &mv_sht,
+		.host_flags	= (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
+				   MV_FLAG_DUAL_HC),
+		.pio_mask	= 0x1f,	/* pio0-4 */
+		.udma_mask	= 0x7f,	/* udma0-6 */
+		.port_ops	= &mv_iie_ops,
+	},
 };
 
 static const struct pci_device_id mv_pci_tbl[] = {
@@ -477,6 +534,7 @@ static const struct pci_device_id mv_pci_tbl[] = {
 
 	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6040), 0, 0, chip_604x},
 	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6041), 0, 0, chip_604x},
+	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6042), 0, 0, chip_6042},
 	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6080), 0, 0, chip_608x},
 	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6081), 0, 0, chip_608x},
 
@@ -572,8 +630,8 @@ static void mv_irq_clear(struct ata_port *ap)
  * @base: port base address
  * @pp: port private data
  *
- * Verify the local cache of the eDMA state is accurate with an
- * assert.
+ * Verify the local cache of the eDMA state is accurate with a
+ * WARN_ON.
  *
  * LOCKING:
  * Inherited from caller.
@@ -584,15 +642,15 @@ static void mv_start_dma(void __iomem *base, struct mv_port_priv *pp)
 		writelfl(EDMA_EN, base + EDMA_CMD_OFS);
 		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
 	}
-	assert(EDMA_EN & readl(base + EDMA_CMD_OFS));
+	WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));
 }
 
 /**
  * mv_stop_dma - Disable eDMA engine
  * @ap: ATA channel to manipulate
  *
- * Verify the local cache of the eDMA state is accurate with an
- * assert.
+ * Verify the local cache of the eDMA state is accurate with a
+ * WARN_ON.
 *
 * LOCKING:
 * Inherited from caller.
@@ -610,7 +668,7 @@ static void mv_stop_dma(struct ata_port *ap)
 		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
 		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
 	} else {
-		assert(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
+		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
 	}
 
 	/* now properly wait for the eDMA to stop */
@@ -773,6 +831,33 @@ static inline void mv_priv_free(struct mv_port_priv *pp, struct device *dev)
 	dma_free_coherent(dev, MV_PORT_PRIV_DMA_SZ, pp->crpb, pp->crpb_dma);
 }
 
+static void mv_edma_cfg(struct mv_host_priv *hpriv, void __iomem *port_mmio)
+{
+	u32 cfg = readl(port_mmio + EDMA_CFG_OFS);
+
+	/* set up non-NCQ EDMA configuration */
+	cfg &= ~0x1f;		/* clear queue depth */
+	cfg &= ~EDMA_CFG_NCQ;	/* clear NCQ mode */
+	cfg &= ~(1 << 9);	/* disable equeue */
+
+	if (IS_GEN_I(hpriv))
+		cfg |= (1 << 8);	/* enab config burst size mask */
+
+	else if (IS_GEN_II(hpriv))
+		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
+
+	else if (IS_GEN_IIE(hpriv)) {
+		cfg |= (1 << 23);	/* dis RX PM port mask */
+		cfg &= ~(1 << 16);	/* dis FIS-based switching (for now) */
+		cfg &= ~(1 << 19);	/* dis 128-entry queue (for now?) */
+		cfg |= (1 << 18);	/* enab early completion */
+		cfg |= (1 << 17);	/* enab host q cache */
+		cfg |= (1 << 22);	/* enab cutthrough */
+	}
+
+	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
+}
+
 /**
  * mv_port_start - Port specific init/start routine.
  * @ap: ATA channel to manipulate
@@ -786,6 +871,7 @@ static inline void mv_priv_free(struct mv_port_priv *pp, struct device *dev)
 static int mv_port_start(struct ata_port *ap)
 {
 	struct device *dev = ap->host_set->dev;
+	struct mv_host_priv *hpriv = ap->host_set->private_data;
 	struct mv_port_priv *pp;
 	void __iomem *port_mmio = mv_ap_base(ap);
 	void *mem;
@@ -829,17 +915,26 @@ static int mv_port_start(struct ata_port *ap)
 	pp->sg_tbl = mem;
 	pp->sg_tbl_dma = mem_dma;
 
-	writelfl(EDMA_CFG_Q_DEPTH | EDMA_CFG_RD_BRST_EXT |
-		 EDMA_CFG_WR_BUFF_LEN, port_mmio + EDMA_CFG_OFS);
+	mv_edma_cfg(hpriv, port_mmio);
 
 	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
 	writelfl(pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK,
 		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
 
-	writelfl(0, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
-	writelfl(0, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
+	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
+		writelfl(pp->crqb_dma & 0xffffffff,
+			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
+	else
+		writelfl(0, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
 
 	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
+
+	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
+		writelfl(pp->crpb_dma & 0xffffffff,
+			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
+	else
+		writelfl(0, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
+
 	writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK,
 		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
 
@@ -960,21 +1055,19 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
 	struct ata_taskfile *tf;
 	u16 flags = 0;
 
-	if (ATA_PROT_DMA != qc->tf.protocol) {
+	if (ATA_PROT_DMA != qc->tf.protocol)
 		return;
-	}
 
 	/* the req producer index should be the same as we remember it */
-	assert(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >>
-		 EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
+	WARN_ON(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >>
+		 EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
 	       pp->req_producer);
 
 	/* Fill in command request block
 	 */
-	if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
+	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
 		flags |= CRQB_FLAG_READ;
-	}
-	assert(MV_MAX_Q_DEPTH > qc->tag);
+	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
 	flags |= qc->tag << CRQB_TAG_SHIFT;
 
 	pp->crqb[pp->req_producer].sg_addr =
@@ -1029,9 +1122,76 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
 	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
 	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */
 
-	if (!(qc->flags & ATA_QCFLAG_DMAMAP)) {
+	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
+		return;
+	mv_fill_sg(qc);
+}
+
+/**
+ * mv_qc_prep_iie - Host specific command preparation.
+ * @qc: queued command to prepare
+ *
+ * This routine simply redirects to the general purpose routine
+ * if command is not DMA.  Else, it handles prep of the CRQB
+ * (command request block), does some sanity checking, and calls
+ * the SG load routine.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct mv_port_priv *pp = ap->private_data;
+	struct mv_crqb_iie *crqb;
+	struct ata_taskfile *tf;
+	u32 flags = 0;
+
+	if (ATA_PROT_DMA != qc->tf.protocol)
+		return;
+
+	/* the req producer index should be the same as we remember it */
+	WARN_ON(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >>
+		 EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
+		pp->req_producer);
+
+	/* Fill in Gen IIE command request block
+	 */
+	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
+		flags |= CRQB_FLAG_READ;
+
+	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
+	flags |= qc->tag << CRQB_TAG_SHIFT;
+
+	crqb = (struct mv_crqb_iie *) &pp->crqb[pp->req_producer];
+	crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
+	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
+	crqb->flags = cpu_to_le32(flags);
+
+	tf = &qc->tf;
+	crqb->ata_cmd[0] = cpu_to_le32(
+			(tf->command << 16) |
+			(tf->feature << 24)
+		);
+	crqb->ata_cmd[1] = cpu_to_le32(
+			(tf->lbal << 0) |
+			(tf->lbam << 8) |
+			(tf->lbah << 16) |
+			(tf->device << 24)
+		);
+	crqb->ata_cmd[2] = cpu_to_le32(
+			(tf->hob_lbal << 0) |
+			(tf->hob_lbam << 8) |
+			(tf->hob_lbah << 16) |
+			(tf->hob_feature << 24)
+		);
+	crqb->ata_cmd[3] = cpu_to_le32(
+			(tf->nsect << 0) |
+			(tf->hob_nsect << 8)
+		);
+
+	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
 		return;
-	}
 	mv_fill_sg(qc);
 }
 
@@ -1047,7 +1207,7 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
  * LOCKING:
  * Inherited from caller.
  */
-static int mv_qc_issue(struct ata_queued_cmd *qc)
+static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
 {
 	void __iomem *port_mmio = mv_ap_base(qc->ap);
 	struct mv_port_priv *pp = qc->ap->private_data;
@@ -1065,12 +1225,12 @@ static int mv_qc_issue(struct ata_queued_cmd *qc)
 	in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
 
 	/* the req producer index should be the same as we remember it */
-	assert(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
+	WARN_ON(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
 	       pp->req_producer);
 	/* until we do queuing, the queue should be empty at this point */
-	assert(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
+	WARN_ON(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
 	       ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS) >>
 		 EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
 
 	mv_inc_q_index(&pp->req_producer);	/* now incr producer index */
 
@@ -1090,7 +1250,7 @@ static int mv_qc_issue(struct ata_queued_cmd *qc)
  *
  * This routine is for use when the port is in DMA mode, when it
  * will be using the CRPB (command response block) method of
- * returning command completion information.  We assert indices
+ * returning command completion information.  We check indices
  * are good, grab status, and bump the response consumer index to
  * prove that we're up to date.
  *
@@ -1102,20 +1262,23 @@ static u8 mv_get_crpb_status(struct ata_port *ap)
 	void __iomem *port_mmio = mv_ap_base(ap);
 	struct mv_port_priv *pp = ap->private_data;
 	u32 out_ptr;
+	u8 ata_status;
 
 	out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
 
 	/* the response consumer index should be the same as we remember it */
-	assert(((out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
+	WARN_ON(((out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
 	       pp->rsp_consumer);
+
+	ata_status = pp->crpb[pp->rsp_consumer].flags >> CRPB_FLAG_STATUS_SHIFT;
 
 	/* increment our consumer index... */
 	pp->rsp_consumer = mv_inc_q_index(&pp->rsp_consumer);
 
 	/* and, until we do NCQ, there should only be 1 CRPB waiting */
-	assert(((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS) >>
-		 EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
+	WARN_ON(((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS) >>
+		 EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
 	       pp->rsp_consumer);
 
 	/* write out our inc'd consumer index so EDMA knows we're caught up */
 	out_ptr &= EDMA_RSP_Q_BASE_LO_MASK;
@@ -1123,7 +1286,7 @@ static u8 mv_get_crpb_status(struct ata_port *ap)
 	writelfl(out_ptr, port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
 
 	/* Return ATA status register for completed CRPB */
-	return (pp->crpb[pp->rsp_consumer].flags >> CRPB_FLAG_STATUS_SHIFT);
+	return ata_status;
 }
 
 /**
@@ -1191,7 +1354,6 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
 	u32 hc_irq_cause;
 	int shift, port, port0, hard_port, handled;
 	unsigned int err_mask;
-	u8 ata_status = 0;
 
 	if (hc == 0) {
 		port0 = 0;
@@ -1209,8 +1371,10 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
 		hc,relevant,hc_irq_cause);
 
 	for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
+		u8 ata_status = 0;
 		struct ata_port *ap = host_set->ports[port];
 		struct mv_port_priv *pp = ap->private_data;
+
 		hard_port = port & MV_PORT_MASK;	/* range 0-3 */
 		handled = 0;	/* ensure ata_status is set if handled++ */
 
@@ -1686,6 +1850,12 @@ static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
 	m2 |= hpriv->signal[port].pre;
 	m2 &= ~(1 << 16);
 
+	/* according to mvSata 3.6.1, some IIE values are fixed */
+	if (IS_GEN_IIE(hpriv)) {
+		m2 &= ~0xC30FF01F;
+		m2 |= 0x0000900F;
+	}
+
 	writel(m2, port_mmio + PHY_MODE2);
 }
 
@@ -1851,7 +2021,6 @@ static void mv_phy_reset(struct ata_port *ap)
 static void mv_eng_timeout(struct ata_port *ap)
 {
 	struct ata_queued_cmd *qc;
-	unsigned long flags;
 
 	printk(KERN_ERR "ata%u: Entering mv_eng_timeout\n",ap->id);
 	DPRINTK("All regs @ start of eng_timeout\n");
@@ -1866,22 +2035,8 @@ static void mv_eng_timeout(struct ata_port *ap)
 	mv_err_intr(ap);
 	mv_stop_and_reset(ap);
 
-	if (!qc) {
-		printk(KERN_ERR "ata%u: BUG: timeout without command\n",
-		       ap->id);
-	} else {
-		/* hack alert!  We cannot use the supplied completion
-		 * function from inside the ->eh_strategy_handler() thread.
-		 * libata is the only user of ->eh_strategy_handler() in
-		 * any kernel, so the default scsi_done() assumes it is
-		 * not being called from the SCSI EH.
-		 */
-		spin_lock_irqsave(&ap->host_set->lock, flags);
-		qc->scsidone = scsi_finish_command;
-		qc->err_mask |= AC_ERR_OTHER;
-		ata_qc_complete(qc);
-		spin_unlock_irqrestore(&ap->host_set->lock, flags);
-	}
+	qc->err_mask |= AC_ERR_TIMEOUT;
+	ata_eh_qc_complete(qc);
 }
 
 /**
@@ -2000,6 +2155,27 @@ static int mv_chip_id(struct pci_dev *pdev, struct mv_host_priv *hpriv,
 		}
 		break;
 
+	case chip_7042:
+	case chip_6042:
+		hpriv->ops = &mv6xxx_ops;
+
+		hp_flags |= MV_HP_GEN_IIE;
+
+		switch (rev_id) {
+		case 0x0:
+			hp_flags |= MV_HP_ERRATA_XX42A0;
+			break;
+		case 0x1:
+			hp_flags |= MV_HP_ERRATA_60X1C0;
+			break;
+		default:
+			dev_printk(KERN_WARNING, &pdev->dev,
+				   "Applying 60X1C0 workarounds to unknown rev\n");
+			hp_flags |= MV_HP_ERRATA_60X1C0;
+			break;
+		}
+		break;
+
 	default:
 		printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n", board_idx);
 		return 1;