Diffstat (limited to 'drivers/ata/sata_mv.c')
 drivers/ata/sata_mv.c | 632 ++++++++++++++++-------------------------------
 1 file changed, 252 insertions(+), 380 deletions(-)
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 83584b6e1ba5..d52ce1188327 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -1,6 +1,7 @@
 /*
  * sata_mv.c - Marvell SATA support
  *
+ * Copyright 2008: Marvell Corporation, all rights reserved.
  * Copyright 2005: EMC Corporation, all rights reserved.
  * Copyright 2005 Red Hat, Inc.  All rights reserved.
  *
@@ -39,7 +40,9 @@
 
 5) Investigate problems with PCI Message Signalled Interrupts (MSI).
 
-6) Add port multiplier support (intermediate)
+6) Cache frequently-accessed registers in mv_port_priv to reduce overhead.
+
+7) Fix/reenable hot plug/unplug (should happen as a side-effect of (2) above).
 
 8) Develop a low-power-consumption strategy, and implement it.
 
@@ -61,7 +64,6 @@
 
 */
 
-
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
@@ -132,7 +134,7 @@ enum {
	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */
	/* SoC integrated controllers, no PCI interface */
	MV_FLAG_SOC		= (1 << 28),
 
	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
@@ -142,6 +144,7 @@ enum {
	CRQB_FLAG_READ		= (1 << 0),
	CRQB_TAG_SHIFT		= 1,
	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
+	CRQB_PMP_SHIFT		= 12,	/* CRQB Gen-II/IIE PMP shift */
	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
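
The new CRQB_PMP_SHIFT field routes each queued command to the right device behind a port multiplier: the PM port number is packed into the CRQB control-flags word next to the read flag and the command tag. A rough sketch of how the shifts compose (an illustrative helper mirroring what mv_qc_prep() does later in this patch, not a function in the driver):

	/* Illustrative only: build a CRQB control-flags word from the
	 * shifts above, as mv_qc_prep()/mv_qc_prep_iie() do inline.
	 */
	static u32 crqb_ctrl_flags(int is_read, unsigned int tag, unsigned int pmp)
	{
		u32 flags = 0;

		if (is_read)
			flags |= CRQB_FLAG_READ;		/* bit 0 */
		flags |= tag << CRQB_TAG_SHIFT;			/* tag from bit 1 */
		flags |= (pmp & 0xf) << CRQB_PMP_SHIFT;		/* PM port, bits 15:12 */
		/* Gen-IIE additionally repeats the tag at CRQB_HOSTQ_SHIFT (bit 17). */
		return flags;
	}
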
@@ -200,7 +203,7 @@ enum {
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
	HC_MAIN_RSVD_SOC	= (0x3fffffb << 6), /* bits 31-9, 7-6 */
	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
				   HC_MAIN_RSVD),
@@ -224,13 +227,24 @@ enum {
	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,
	SATA_FIS_IRQ_CAUSE_OFS	= 0x364,
+
+	LTMODE_OFS		= 0x30c,
+	LTMODE_BIT8		= (1 << 8),	/* unknown, but necessary */
+
	PHY_MODE3		= 0x310,
	PHY_MODE4		= 0x314,
	PHY_MODE2		= 0x330,
+	SATA_IFCTL_OFS		= 0x344,
+	SATA_IFSTAT_OFS		= 0x34c,
+	VENDOR_UNIQUE_FIS_OFS	= 0x35c,
+
+	FIS_CFG_OFS		= 0x360,
+	FIS_CFG_SINGLE_SYNC	= (1 << 16),	/* SYNC on DMA activation */
+
	MV5_PHY_MODE		= 0x74,
	MV5_LT_MODE		= 0x30,
	MV5_PHY_CTL		= 0x0C,
-	SATA_INTERFACE_CTL	= 0x050,
+	SATA_INTERFACE_CFG	= 0x050,
 
	MV_M2_PREAMP_MASK	= 0x7e0,
 
@@ -241,6 +255,8 @@ enum {
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */
+	EDMA_CFG_EDMA_FBS	= (1 << 16),	/* EDMA FIS-Based Switching */
+	EDMA_CFG_FBS		= (1 << 26),	/* FIS-Based Switching */
 
	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
@@ -283,7 +299,9 @@ enum {
	EDMA_ERR_IRQ_TRANSIENT	= EDMA_ERR_LNK_CTRL_RX_0 |
				  EDMA_ERR_LNK_CTRL_RX_1 |
				  EDMA_ERR_LNK_CTRL_RX_3 |
-				  EDMA_ERR_LNK_CTRL_TX,
+				  EDMA_ERR_LNK_CTRL_TX |
+				  /* temporary, until we fix hotplug: */
+				  (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON),
 
	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
@@ -299,6 +317,7 @@ enum {
				  EDMA_ERR_LNK_DATA_RX |
				  EDMA_ERR_LNK_DATA_TX |
				  EDMA_ERR_TRANS_PROTO,
+
	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
@@ -345,7 +364,6 @@ enum {
	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
	MV_PP_FLAG_NCQ_EN	= (1 << 1),	/* is EDMA set up for NCQ? */
-	MV_PP_FLAG_HAD_A_RESET	= (1 << 2),	/* 1st hard reset complete? */
 };
 
 #define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
@@ -465,7 +483,6 @@ struct mv_hw_ops {
	void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
 };
 
-static void mv_irq_clear(struct ata_port *ap);
 static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
 static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
 static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
@@ -475,7 +492,8 @@ static void mv_port_stop(struct ata_port *ap);
 static void mv_qc_prep(struct ata_queued_cmd *qc);
 static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
-static void mv_error_handler(struct ata_port *ap);
+static int mv_hardreset(struct ata_link *link, unsigned int *class,
+			unsigned long deadline);
 static void mv_eh_freeze(struct ata_port *ap);
 static void mv_eh_thaw(struct ata_port *ap);
 static void mv6_dev_config(struct ata_device *dev);
@@ -508,72 +526,46 @@ static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
			       void __iomem *mmio);
 static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
 static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
-static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
+static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
-static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
-			void __iomem *port_mmio, int want_ncq);
-static int __mv_stop_dma(struct ata_port *ap);
+static int mv_stop_edma(struct ata_port *ap);
+static int mv_stop_edma_engine(void __iomem *port_mmio);
+static void mv_edma_cfg(struct ata_port *ap, int want_ncq);
+
+static void mv_pmp_select(struct ata_port *ap, int pmp);
+static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
+			    unsigned long deadline);
+static int mv_softreset(struct ata_link *link, unsigned int *class,
+			unsigned long deadline);
 
 /* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
  * because we have to allow room for worst case splitting of
  * PRDs for 64K boundaries in mv_fill_sg().
  */
 static struct scsi_host_template mv5_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.can_queue		= ATA_DEF_QUEUE,
-	.this_id		= ATA_SHT_THIS_ID,
+	ATA_BASE_SHT(DRV_NAME),
	.sg_tablesize		= MV_MAX_SG_CT / 2,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= 1,
-	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
-	.slave_configure	= ata_scsi_slave_config,
-	.slave_destroy		= ata_scsi_slave_destroy,
-	.bios_param		= ata_std_bios_param,
 };
 
 static struct scsi_host_template mv6_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.change_queue_depth	= ata_scsi_change_queue_depth,
+	ATA_NCQ_SHT(DRV_NAME),
	.can_queue		= MV_MAX_Q_DEPTH - 1,
-	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= 1,
-	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
-	.slave_configure	= ata_scsi_slave_config,
-	.slave_destroy		= ata_scsi_slave_destroy,
-	.bios_param		= ata_std_bios_param,
 };
 
-static const struct ata_port_operations mv5_ops = {
-	.tf_load		= ata_tf_load,
-	.tf_read		= ata_tf_read,
-	.check_status		= ata_check_status,
-	.exec_command		= ata_exec_command,
-	.dev_select		= ata_std_dev_select,
-
-	.cable_detect		= ata_cable_sata,
+static struct ata_port_operations mv5_ops = {
+	.inherits		= &ata_sff_port_ops,
 
	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
-	.data_xfer		= ata_data_xfer,
-
-	.irq_clear		= mv_irq_clear,
-	.irq_on			= ata_irq_on,
 
-	.error_handler		= mv_error_handler,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,
+	.hardreset		= mv_hardreset,
+	.error_handler		= ata_std_error_handler, /* avoid SFF EH */
+	.post_internal_cmd	= ATA_OP_NULL,
 
	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,
@@ -582,61 +574,24 @@ static const struct ata_port_operations mv5_ops = {
	.port_stop		= mv_port_stop,
 };
 
-static const struct ata_port_operations mv6_ops = {
+static struct ata_port_operations mv6_ops = {
+	.inherits		= &mv5_ops,
+	.qc_defer		= sata_pmp_qc_defer_cmd_switch,
	.dev_config		= mv6_dev_config,
-	.tf_load		= ata_tf_load,
-	.tf_read		= ata_tf_read,
-	.check_status		= ata_check_status,
-	.exec_command		= ata_exec_command,
-	.dev_select		= ata_std_dev_select,
-
-	.cable_detect		= ata_cable_sata,
-
-	.qc_prep		= mv_qc_prep,
-	.qc_issue		= mv_qc_issue,
-	.data_xfer		= ata_data_xfer,
-
-	.irq_clear		= mv_irq_clear,
-	.irq_on			= ata_irq_on,
-
-	.error_handler		= mv_error_handler,
-	.freeze			= mv_eh_freeze,
-	.thaw			= mv_eh_thaw,
-	.qc_defer		= ata_std_qc_defer,
-
	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,
 
-	.port_start		= mv_port_start,
-	.port_stop		= mv_port_stop,
+	.pmp_hardreset		= mv_pmp_hardreset,
+	.pmp_softreset		= mv_softreset,
+	.softreset		= mv_softreset,
+	.error_handler		= sata_pmp_error_handler,
 };
 
-static const struct ata_port_operations mv_iie_ops = {
-	.tf_load		= ata_tf_load,
-	.tf_read		= ata_tf_read,
-	.check_status		= ata_check_status,
-	.exec_command		= ata_exec_command,
-	.dev_select		= ata_std_dev_select,
-
-	.cable_detect		= ata_cable_sata,
-
+static struct ata_port_operations mv_iie_ops = {
+	.inherits		= &mv6_ops,
+	.qc_defer		= ata_std_qc_defer, /* FIS-based switching */
+	.dev_config		= ATA_OP_NULL,
	.qc_prep		= mv_qc_prep_iie,
-	.qc_issue		= mv_qc_issue,
-	.data_xfer		= ata_data_xfer,
-
-	.irq_clear		= mv_irq_clear,
-	.irq_on			= ata_irq_on,
-
-	.error_handler		= mv_error_handler,
-	.freeze			= mv_eh_freeze,
-	.thaw			= mv_eh_thaw,
-	.qc_defer		= ata_std_qc_defer,
-
-	.scr_read		= mv_scr_read,
-	.scr_write		= mv_scr_write,
-
-	.port_start		= mv_port_start,
-	.port_stop		= mv_port_stop,
 };
 
 static const struct ata_port_info mv_port_info[] = {
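
The three ops tables now form an inheritance chain instead of each spelling out every method: mv5_ops inherits from libata's ata_sff_port_ops, mv6_ops from mv5_ops, and mv_iie_ops from mv6_ops. When the host registers, libata resolves each slot by walking up the .inherits chain until it finds a non-NULL entry; ATA_OP_NULL pins a slot to "no method" so nothing is inherited for it (used above to suppress the SFF post_internal_cmd, and to drop mv6's dev_config quirk on Gen-IIE). A loose sketch of the resolution rule, assuming the child-first walk libata performs when finalizing port ops:

	/* Sketch only: resolve one ops slot, child-first. */
	static void *resolve_op(const struct ata_port_operations *ops,
				size_t slot_offset)
	{
		for (; ops; ops = ops->inherits) {
			void *fn = *(void **)((char *)ops + slot_offset);
			if (fn == ATA_OP_NULL)	/* explicit "no method" */
				return NULL;
			if (fn)			/* first non-NULL wins */
				return fn;
		}
		return NULL;
	}
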
@@ -660,6 +615,7 @@ static const struct ata_port_info mv_port_info[] = {
	},
	{  /* chip_604x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
+				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
@@ -667,6 +623,7 @@ static const struct ata_port_info mv_port_info[] = {
	},
	{  /* chip_608x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
+				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
@@ -674,6 +631,7 @@ static const struct ata_port_info mv_port_info[] = {
	},
	{  /* chip_6042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
+				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
@@ -681,16 +639,19 @@ static const struct ata_port_info mv_port_info[] = {
	},
	{  /* chip_7042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
+				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_soc */
-		.flags		= MV_COMMON_FLAGS | MV_FLAG_SOC,
-		.pio_mask	= 0x1f,	/* pio0-4 */
-		.udma_mask	= ATA_UDMA6,
-		.port_ops	= &mv_iie_ops,
+		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
+				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
+				  ATA_FLAG_NCQ | MV_FLAG_SOC,
+		.pio_mask	= 0x1f,	/* pio0-4 */
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &mv_iie_ops,
	},
 };
 
@@ -789,6 +750,14 @@ static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
 }
 
+static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
+{
+	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
+	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
+
+	return hc_mmio + ofs;
+}
+
 static inline void __iomem *mv_host_base(struct ata_host *host)
 {
	struct mv_host_priv *hpriv = host->private_data;
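
mv5_phy_base() merely moves up in the file here (it is deleted from its old location further down). The arithmetic: each host controller (HC) serves MV_PORTS_PER_HC ports, and the PHY register block for hardport N sits at offset (N + 1) * 0x100 from that HC's base. A worked example, assuming the usual four-ports-per-HC layout:

	/* Global port 5 on an 8-port, dual-HC chip:
	 *   hc_mmio  = mv_hc_base_from_port(mmio, 5);	-- HC #1's registers
	 *   hardport = mv_hardport_from_port(5);	-- 5 % 4 == 1
	 *   result   = hc_mmio + (1 + 1) * 0x100;	-- hc_mmio + 0x200
	 */
	void __iomem *phy = mv5_phy_base(mmio, 5);
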
@@ -805,10 +774,6 @@ static inline int mv_get_hc_count(unsigned long port_flags)
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
 }
 
-static void mv_irq_clear(struct ata_port *ap)
-{
-}
-
 static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
@@ -868,7 +833,7 @@ static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
		if (want_ncq != using_ncq)
-			__mv_stop_dma(ap);
+			mv_stop_edma(ap);
	}
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		struct mv_host_priv *hpriv = ap->host->private_data;
@@ -889,7 +854,7 @@ static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
			 hc_mmio + HC_IRQ_CAUSE_OFS);
	}
 
-	mv_edma_cfg(pp, hpriv, port_mmio, want_ncq);
+	mv_edma_cfg(ap, want_ncq);
 
	/* clear FIS IRQ Cause */
	writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
@@ -903,58 +868,42 @@ static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
 }
 
 /**
- *      __mv_stop_dma - Disable eDMA engine
- *      @ap: ATA channel to manipulate
- *
- *      Verify the local cache of the eDMA state is accurate with a
- *      WARN_ON.
+ *      mv_stop_edma_engine - Disable eDMA engine
+ *      @port_mmio: io base address
  *
  * LOCKING:
  * Inherited from caller.
  */
-static int __mv_stop_dma(struct ata_port *ap)
+static int mv_stop_edma_engine(void __iomem *port_mmio)
 {
-	void __iomem *port_mmio = mv_ap_base(ap);
-	struct mv_port_priv *pp = ap->private_data;
-	u32 reg;
-	int i, err = 0;
+	int i;
 
-	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
-		/* Disable EDMA if active.   The disable bit auto clears.
-		 */
-		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
-		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
-	} else {
-		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
-	}
+	/* Disable eDMA.  The disable bit auto clears. */
+	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
 
-	/* now properly wait for the eDMA to stop */
-	for (i = 1000; i > 0; i--) {
-		reg = readl(port_mmio + EDMA_CMD_OFS);
+	/* Wait for the chip to confirm eDMA is off. */
+	for (i = 10000; i > 0; i--) {
+		u32 reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
-			break;
-
-		udelay(100);
-	}
-
-	if (reg & EDMA_EN) {
-		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
-		err = -EIO;
+			return 0;
+		udelay(10);
	}
-
-	return err;
+	return -EIO;
 }
 
-static int mv_stop_dma(struct ata_port *ap)
+static int mv_stop_edma(struct ata_port *ap)
 {
-	unsigned long flags;
-	int rc;
-
-	spin_lock_irqsave(&ap->host->lock, flags);
-	rc = __mv_stop_dma(ap);
-	spin_unlock_irqrestore(&ap->host->lock, flags);
+	void __iomem *port_mmio = mv_ap_base(ap);
+	struct mv_port_priv *pp = ap->private_data;
 
-	return rc;
+	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
+		return 0;
+	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
+	if (mv_stop_edma_engine(port_mmio)) {
+		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
+		return -EIO;
+	}
+	return 0;
 }
 
 #ifdef ATA_DEBUG
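
Two things change in the rewritten stop path: locking is left to the callers (the old mv_stop_dma() took the host lock around every call), and the wait loop polls ten times as often within the same overall budget, so a stopped engine is noticed within roughly 10 us instead of 100 us:

	/* Illustrative arithmetic -- worst-case wait is unchanged:
	 *   old: 1000 polls * udelay(100) = 100 ms
	 *   new: 10000 polls * udelay(10) = 100 ms
	 */
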
@@ -1078,18 +1027,50 @@ static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
 static void mv6_dev_config(struct ata_device *adev)
 {
	/*
+	 * Deal with Gen-II ("mv6") hardware quirks/restrictions:
+	 *
+	 * Gen-II does not support NCQ over a port multiplier
+	 *  (no FIS-based switching).
+	 *
	 * We don't have hob_nsect when doing NCQ commands on Gen-II.
	 * See mv_qc_prep() for more info.
	 */
-	if (adev->flags & ATA_DFLAG_NCQ)
-		if (adev->max_sectors > ATA_MAX_SECTORS)
+	if (adev->flags & ATA_DFLAG_NCQ) {
+		if (sata_pmp_attached(adev->link->ap))
+			adev->flags &= ~ATA_DFLAG_NCQ;
+		else if (adev->max_sectors > ATA_MAX_SECTORS)
			adev->max_sectors = ATA_MAX_SECTORS;
+	}
 }
 
-static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
-			void __iomem *port_mmio, int want_ncq)
+static void mv_config_fbs(void __iomem *port_mmio, int enable_fbs)
+{
+	u32 old_fcfg, new_fcfg, old_ltmode, new_ltmode;
+	/*
+	 * Various bit settings required for operation
+	 * in FIS-based switching (fbs) mode on GenIIe:
+	 */
+	old_fcfg   = readl(port_mmio + FIS_CFG_OFS);
+	old_ltmode = readl(port_mmio + LTMODE_OFS);
+	if (enable_fbs) {
+		new_fcfg   = old_fcfg   |  FIS_CFG_SINGLE_SYNC;
+		new_ltmode = old_ltmode |  LTMODE_BIT8;
+	} else { /* disable fbs */
+		new_fcfg   = old_fcfg   & ~FIS_CFG_SINGLE_SYNC;
+		new_ltmode = old_ltmode & ~LTMODE_BIT8;
+	}
+	if (new_fcfg != old_fcfg)
+		writelfl(new_fcfg, port_mmio + FIS_CFG_OFS);
+	if (new_ltmode != old_ltmode)
+		writelfl(new_ltmode, port_mmio + LTMODE_OFS);
+}
+
+static void mv_edma_cfg(struct ata_port *ap, int want_ncq)
 {
	u32 cfg;
+	struct mv_port_priv *pp    = ap->private_data;
+	struct mv_host_priv *hpriv = ap->host->private_data;
+	void __iomem *port_mmio    = mv_ap_base(ap);
 
	/* set up non-NCQ EDMA configuration */
	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */
@@ -1105,6 +1086,13 @@ static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
+
+		if (want_ncq && sata_pmp_attached(ap)) {
+			cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */
+			mv_config_fbs(port_mmio, 1);
+		} else {
+			mv_config_fbs(port_mmio, 0);
+		}
	}
 
	if (want_ncq) {
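
FIS-based switching (FBS) is what lets the EDMA engine keep NCQ commands in flight to several devices behind a port multiplier at once; without it the controller must drain one PM link before addressing another (command-based switching). That explains the pairing in this patch: Gen-IIE sets EDMA_CFG_EDMA_FBS and calls mv_config_fbs() while keeping plain ata_std_qc_defer, whereas Gen-II, which lacks FBS, uses sata_pmp_qc_defer_cmd_switch and strips NCQ behind a PM in mv6_dev_config(). The policy choice, as an illustrative fragment (have_fbs is a hypothetical flag, not driver state):

	/* Defer policy by switching method: */
	ops->qc_defer = have_fbs
		? ata_std_qc_defer		/* PM links multiplex freely */
		: sata_pmp_qc_defer_cmd_switch;	/* serialize across PM links */
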
@@ -1160,8 +1148,6 @@ static int mv_port_start(struct ata_port *ap)
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
-	void __iomem *port_mmio = mv_ap_base(ap);
-	unsigned long flags;
	int tag;
 
	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
@@ -1194,18 +1180,6 @@ static int mv_port_start(struct ata_port *ap)
			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
		}
	}
-
-	spin_lock_irqsave(&ap->host->lock, flags);
-
-	mv_edma_cfg(pp, hpriv, port_mmio, 0);
-	mv_set_edma_ptrs(port_mmio, hpriv, pp);
-
-	spin_unlock_irqrestore(&ap->host->lock, flags);
-
-	/* Don't turn on EDMA here...do it before DMA commands only.  Else
-	 * we'll be unable to send non-data, PIO, etc due to restricted access
-	 * to shadow regs.
-	 */
	return 0;
 
 out_port_free_dma_mem:
@@ -1224,7 +1198,7 @@ out_port_free_dma_mem:
  */
 static void mv_port_stop(struct ata_port *ap)
 {
-	mv_stop_dma(ap);
+	mv_stop_edma(ap);
	mv_port_free_dma_mem(ap);
 }
 
@@ -1310,6 +1284,7 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
+	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
 
	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
@@ -1394,14 +1369,14 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;
 
-	/* Fill in Gen IIE command request block
-	 */
+	/* Fill in Gen IIE command request block */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
 
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_HOSTQ_SHIFT;
+	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
 
	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
@@ -1459,12 +1434,14 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
 
	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ)) {
-		/* We're about to send a non-EDMA capable command to the
+		/*
+		 * We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
-		__mv_stop_dma(ap);
-		return ata_qc_issue_prot(qc);
+		mv_stop_edma(ap);
+		mv_pmp_select(ap, qc->dev->link->pmp);
+		return ata_sff_qc_issue(qc);
	}
 
	mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);
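
Non-EDMA commands (PIO, resets, internal taskfiles) go through libata's SFF shadow-register path, which is only usable while EDMA is off and which addresses whichever PM port is latched in the low nibble of SATA_IFCTL_OFS. Hence the issue path now stops EDMA and routes the taskfile before delegating. Sketch of the sequence for a device behind PM port 3 (pmp is 0 for a directly attached disk):

	mv_stop_edma(ap);		/* no-op if EDMA is already off */
	mv_pmp_select(ap, 3);		/* latch target into SATA_IFCTL */
	return ata_sff_qc_issue(qc);	/* ordinary taskfile issue */
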
@@ -1486,10 +1463,10 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
  *      @reset_allowed: bool: 0 == don't trigger from reset here
  *
  *      In most cases, just clear the interrupt and move on.  However,
- *      some cases require an eDMA reset, which is done right before
- *      the COMRESET in mv_phy_reset().  The SERR case requires a
- *      clear of pending errors in the SATA SERROR register.  Finally,
- *      if the port disabled DMA, update our cached copy to match.
+ *      some cases require an eDMA reset, which also performs a COMRESET.
+ *      The SERR case requires a clear of pending errors in the SATA
+ *      SERROR register.  Finally, if the port disabled DMA,
+ *      update our cached copy to match.
  *
  * LOCKING:
  * Inherited from caller.
@@ -1528,14 +1505,14 @@ static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
			EDMA_ERR_INTRL_PAR)) {
		err_mask |= AC_ERR_ATA_BUS;
-		action |= ATA_EH_HARDRESET;
+		action |= ATA_EH_RESET;
		ata_ehi_push_desc(ehi, "parity error");
	}
	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
		ata_ehi_hotplugged(ehi);
		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
			"dev disconnect" : "dev connect");
-		action |= ATA_EH_HARDRESET;
+		action |= ATA_EH_RESET;
	}
 
	if (IS_GEN_I(hpriv)) {
@@ -1559,7 +1536,7 @@ static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
		sata_scr_read(&ap->link, SCR_ERROR, &serr);
		sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
		err_mask = AC_ERR_ATA_BUS;
-		action |= ATA_EH_HARDRESET;
+		action |= ATA_EH_RESET;
	}
 }
 
@@ -1568,7 +1545,7 @@ static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
 
	if (!err_mask) {
		err_mask = AC_ERR_OTHER;
-		action |= ATA_EH_HARDRESET;
+		action |= ATA_EH_RESET;
	}
 
	ehi->serror |= serr;
@@ -1727,9 +1704,9 @@ static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
		pp = ap->private_data;
 
		shift = port << 1;		/* (port * 2) */
-		if (port >= MV_PORTS_PER_HC) {
+		if (port >= MV_PORTS_PER_HC)
			shift++;	/* skip bit 8 in the HC Main IRQ reg */
-		}
+
		have_err_bits = ((PORT0_ERR << shift) & relevant);
 
		if (unlikely(have_err_bits)) {
@@ -1784,7 +1761,7 @@ static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
			ata_ehi_push_desc(ehi,
				"PCI err cause 0x%08x", err_cause);
			err_mask = AC_ERR_HOST_BUS;
-			ehi->action = ATA_EH_HARDRESET;
+			ehi->action = ATA_EH_RESET;
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc)
				qc->err_mask |= err_mask;
@@ -1818,6 +1795,7 @@ static irqreturn_t mv_interrupt(int irq, void *dev_instance)
	void __iomem *mmio = hpriv->base;
	u32 irq_stat, irq_mask;
 
+	/* Note to self: &host->lock == &ap->host->lock == ap->lock */
	spin_lock(&host->lock);
 
	irq_stat = readl(hpriv->main_cause_reg_addr);
@@ -1851,14 +1829,6 @@ out_unlock:
	return IRQ_RETVAL(handled);
 }
 
-static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
-{
-	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
-	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
-
-	return hc_mmio + ofs;
-}
-
 static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
 {
	unsigned int ofs;
@@ -1984,9 +1954,12 @@ static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
 {
	void __iomem *port_mmio = mv_port_base(mmio, port);
 
-	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
-
-	mv_channel_reset(hpriv, mmio, port);
+	/*
+	 * The datasheet warns against setting ATA_RST when EDMA is active
+	 * (but doesn't say what the problem might be).  So we first try
+	 * to disable the EDMA engine before doing the ATA_RST operation.
+	 */
+	mv_reset_channel(hpriv, mmio, port);
 
	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
@@ -2136,6 +2109,13 @@ static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
+	/*
+	 * Temporary: wait 3 seconds before port-probing can happen,
+	 * so that we don't miss finding sleepy SilXXXX port-multipliers.
+	 * This can go away once hotplug is fully/correctly implemented.
+	 */
+	if (rc == 0)
+		msleep(3000);
 done:
	return rc;
 }
@@ -2204,14 +2184,15 @@ static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
		m4 = readl(port_mmio + PHY_MODE4);
 
		if (hp_flags & MV_HP_ERRATA_60X1B2)
-			tmp = readl(port_mmio + 0x310);
+			tmp = readl(port_mmio + PHY_MODE3);
 
+		/* workaround for errata FEr SATA#10 (part 1) */
		m4 = (m4 & ~(1 << 1)) | (1 << 0);
 
		writel(m4, port_mmio + PHY_MODE4);
 
		if (hp_flags & MV_HP_ERRATA_60X1B2)
-			writel(tmp, port_mmio + 0x310);
+			writel(tmp, port_mmio + PHY_MODE3);
	}
 
	/* Revert values of pre-emphasis and signal amps to the saved ones */
@@ -2259,9 +2240,12 @@ static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
 {
	void __iomem *port_mmio = mv_port_base(mmio, port);
 
-	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
-
-	mv_channel_reset(hpriv, mmio, port);
+	/*
+	 * The datasheet warns against setting ATA_RST when EDMA is active
+	 * (but doesn't say what the problem might be).  So we first try
+	 * to disable the EDMA engine before doing the ATA_RST operation.
+	 */
+	mv_reset_channel(hpriv, mmio, port);
 
	ZERO(0x028);		/* command */
	writel(0x101f, port_mmio + EDMA_CFG_OFS);
@@ -2318,25 +2302,39 @@ static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
	return;
 }
 
-static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
+static void mv_setup_ifctl(void __iomem *port_mmio, int want_gen2i)
+{
+	u32 ifctl = readl(port_mmio + SATA_INTERFACE_CFG);
+
+	ifctl = (ifctl & 0xf7f) | 0x9b1000;	/* from chip spec */
+	if (want_gen2i)
+		ifctl |= (1 << 7);		/* enable gen2i speed */
+	writelfl(ifctl, port_mmio + SATA_INTERFACE_CFG);
+}
+
+/*
+ * Caller must ensure that EDMA is not active,
+ * by first doing mv_stop_edma() where needed.
+ */
+static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
 {
	void __iomem *port_mmio = mv_port_base(mmio, port_no);
 
+	mv_stop_edma_engine(port_mmio);
	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
 
-	if (IS_GEN_II(hpriv)) {
-		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
-		ifctl |= (1 << 7);		/* enable gen2i speed */
-		ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
-		writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
+	if (!IS_GEN_I(hpriv)) {
+		/* Enable 3.0gb/s link speed */
+		mv_setup_ifctl(port_mmio, 1);
	}
-
-	udelay(25);		/* allow reset propagation */
-
-	/* Spec never mentions clearing the bit.  Marvell's driver does
-	 * clear the bit, however.
+	/*
+	 * Strobing ATA_RST here causes a hard reset of the SATA transport,
+	 * link, and physical layers.  It resets all SATA interface registers
+	 * (except for SATA_INTERFACE_CFG), and issues a COMRESET to the dev.
	 */
+	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
+	udelay(25);	/* allow reset propagation */
	writelfl(0, port_mmio + EDMA_CMD_OFS);
 
	hpriv->ops->phy_errata(hpriv, mmio, port_no);
@@ -2345,136 +2343,32 @@ static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
	mdelay(1);
 }
 
-/**
- *	mv_phy_reset - Perform eDMA reset followed by COMRESET
- *	@ap: ATA channel to manipulate
- *
- *	Part of this is taken from __sata_phy_reset and modified to
- *	not sleep since this routine gets called from interrupt level.
- *
- *	LOCKING:
- *	Inherited from caller.  This is coded to safe to call at
- *	interrupt level, i.e. it does not sleep.
- */
-static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
-			 unsigned long deadline)
+static void mv_pmp_select(struct ata_port *ap, int pmp)
 {
-	struct mv_port_priv *pp = ap->private_data;
-	struct mv_host_priv *hpriv = ap->host->private_data;
-	void __iomem *port_mmio = mv_ap_base(ap);
-	int retry = 5;
-	u32 sstatus;
-
-	VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
-
-#ifdef DEBUG
-	{
-		u32 sstatus, serror, scontrol;
-
-		mv_scr_read(ap, SCR_STATUS, &sstatus);
-		mv_scr_read(ap, SCR_ERROR, &serror);
-		mv_scr_read(ap, SCR_CONTROL, &scontrol);
-		DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
-			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
-	}
-#endif
-
-	/* Issue COMRESET via SControl */
-comreset_retry:
-	sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301);
-	msleep(1);
-
-	sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300);
-	msleep(20);
+	if (sata_pmp_supported(ap)) {
+		void __iomem *port_mmio = mv_ap_base(ap);
+		u32 reg = readl(port_mmio + SATA_IFCTL_OFS);
+		int old = reg & 0xf;
 
-	do {
-		sata_scr_read(&ap->link, SCR_STATUS, &sstatus);
-		if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
-			break;
-
-		msleep(1);
-	} while (time_before(jiffies, deadline));
-
-	/* work around errata */
-	if (IS_GEN_II(hpriv) &&
-	    (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
-	    (retry-- > 0))
-		goto comreset_retry;
-
-#ifdef DEBUG
-	{
-		u32 sstatus, serror, scontrol;
-
-		mv_scr_read(ap, SCR_STATUS, &sstatus);
-		mv_scr_read(ap, SCR_ERROR, &serror);
-		mv_scr_read(ap, SCR_CONTROL, &scontrol);
-		DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
-			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
-	}
-#endif
-
-	if (ata_link_offline(&ap->link)) {
-		*class = ATA_DEV_NONE;
-		return;
-	}
-
-	/* even after SStatus reflects that device is ready,
-	 * it seems to take a while for link to be fully
-	 * established (and thus Status no longer 0x80/0x7F),
-	 * so we poll a bit for that, here.
-	 */
-	retry = 20;
-	while (1) {
-		u8 drv_stat = ata_check_status(ap);
-		if ((drv_stat != 0x80) && (drv_stat != 0x7f))
-			break;
-		msleep(500);
-		if (retry-- <= 0)
-			break;
-		if (time_after(jiffies, deadline))
-			break;
+		if (old != pmp) {
+			reg = (reg & ~0xf) | pmp;
+			writelfl(reg, port_mmio + SATA_IFCTL_OFS);
+		}
	}
-
-	/* FIXME: if we passed the deadline, the following
-	 * code probably produces an invalid result
-	 */
-
-	/* finally, read device signature from TF registers */
-	*class = ata_dev_try_classify(ap->link.device, 1, NULL);
-
-	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
-
-	WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);
-
-	VPRINTK("EXIT\n");
 }
 
-static int mv_prereset(struct ata_link *link, unsigned long deadline)
+static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
+			    unsigned long deadline)
 {
-	struct ata_port *ap = link->ap;
-	struct mv_port_priv *pp = ap->private_data;
-	struct ata_eh_context *ehc = &link->eh_context;
-	int rc;
-
-	rc = mv_stop_dma(ap);
-	if (rc)
-		ehc->i.action |= ATA_EH_HARDRESET;
-
-	if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) {
-		pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
-		ehc->i.action |= ATA_EH_HARDRESET;
-	}
-
-	/* if we're about to do hardreset, nothing more to do */
-	if (ehc->i.action & ATA_EH_HARDRESET)
-		return 0;
-
-	if (ata_link_online(link))
-		rc = ata_wait_ready(ap, deadline);
-	else
-		rc = -ENODEV;
+	mv_pmp_select(link->ap, sata_srst_pmp(link));
+	return sata_std_hardreset(link, class, deadline);
+}
 
-	return rc;
+static int mv_softreset(struct ata_link *link, unsigned int *class,
+			unsigned long deadline)
+{
+	mv_pmp_select(link->ap, sata_srst_pmp(link));
+	return ata_sff_softreset(link, class, deadline);
 }
 
 static int mv_hardreset(struct ata_link *link, unsigned int *class,
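
Both new reset front-ends select the target PM port before falling through to stock libata implementations, sata_std_hardreset() and ata_sff_softreset(); sata_srst_pmp() supplies the PMP number the reset should address. How the pieces plug into the EH framework (a recap of the mv6_ops entries earlier in this patch):

	/* Gen-II/IIE error-handling plumbing:
	 *   .error_handler = sata_pmp_error_handler  -- PM-aware EH core
	 *   .hardreset     = mv_hardreset            -- whole-channel reset
	 *   .softreset     = mv_softreset            -- SRST via mv_pmp_select()
	 *   .pmp_hardreset = mv_pmp_hardreset        -- per-PM-link COMRESET
	 *   .pmp_softreset = mv_softreset            -- per-PM-link SRST
	 */
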
@@ -2482,43 +2376,34 @@ static int mv_hardreset(struct ata_link *link, unsigned int *class,
 {
	struct ata_port *ap = link->ap;
	struct mv_host_priv *hpriv = ap->host->private_data;
+	struct mv_port_priv *pp = ap->private_data;
	void __iomem *mmio = hpriv->base;
+	int rc, attempts = 0, extra = 0;
+	u32 sstatus;
+	bool online;
 
-	mv_stop_dma(ap);
-
-	mv_channel_reset(hpriv, mmio, ap->port_no);
-
-	mv_phy_reset(ap, class, deadline);
-
-	return 0;
-}
-
-static void mv_postreset(struct ata_link *link, unsigned int *classes)
-{
-	struct ata_port *ap = link->ap;
-	u32 serr;
-
-	/* print link status */
-	sata_print_link_status(link);
-
-	/* clear SError */
-	sata_scr_read(link, SCR_ERROR, &serr);
-	sata_scr_write_flush(link, SCR_ERROR, serr);
+	mv_reset_channel(hpriv, mmio, ap->port_no);
+	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
 
-	/* bail out if no device is present */
-	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
-		DPRINTK("EXIT, no device\n");
-		return;
-	}
+	/* Workaround for errata FEr SATA#10 (part 2) */
+	do {
+		const unsigned long *timing =
+				sata_ehc_deb_timing(&link->eh_context);
 
-	/* set up device control */
-	iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
-}
+		rc = sata_link_hardreset(link, timing, deadline + extra,
+					 &online, NULL);
+		if (rc)
+			return rc;
+		sata_scr_read(link, SCR_STATUS, &sstatus);
+		if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
+			/* Force 1.5gb/s link speed and try again */
+			mv_setup_ifctl(mv_ap_base(ap), 0);
+			if (time_after(jiffies + HZ, deadline))
+				extra = HZ; /* only extend it once, max */
+		}
+	} while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);
 
-static void mv_error_handler(struct ata_port *ap)
-{
-	ata_do_eh(ap, mv_prereset, ata_std_softreset,
-		  mv_hardreset, mv_postreset);
+	return rc;
 }
 
 static void mv_eh_freeze(struct ata_port *ap)
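
The loop repeats the hardreset until SStatus settles to a known-good value, and on Gen-II/IIE falls back to 1.5 Gb/s (mv_setup_ifctl(..., 0)) after five attempts stuck at 0x121, extending the deadline at most once. For reference, the SStatus values being tested, decoded per the SATA SCR0 layout (DET = bits 3:0, SPD = bits 7:4, IPM = bits 11:8):

	/* 0x000: DET=0, no device detected
	 * 0x113: DET=3, device + phy comm established, SPD=1 (1.5 Gb/s)
	 * 0x123: DET=3, device + phy comm established, SPD=2 (3.0 Gb/s)
	 * 0x121: DET=1, device sensed but no phy comm -- the stuck state
	 *        the speed fallback is aimed at
	 */
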
@@ -2812,19 +2697,6 @@ static int mv_init_host(struct ata_host *host, unsigned int board_idx)
	hpriv->ops->enable_leds(hpriv, mmio);
 
	for (port = 0; port < host->n_ports; port++) {
-		if (IS_GEN_II(hpriv)) {
-			void __iomem *port_mmio = mv_port_base(mmio, port);
-
-			u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
-			ifctl |= (1 << 7);		/* enable gen2i speed */
-			ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
-			writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
-		}
-
-		hpriv->ops->phy_errata(hpriv, mmio, port);
-	}
-
-	for (port = 0; port < host->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		void __iomem *port_mmio = mv_port_base(mmio, port);
 
@@ -3223,7 +3095,7 @@ MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
 MODULE_LICENSE("GPL");
 MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
 MODULE_VERSION(DRV_VERSION);
-MODULE_ALIAS("platform:sata_mv");
+MODULE_ALIAS("platform:" DRV_NAME);
 
 #ifdef CONFIG_PCI
 module_param(msi, int, 0444);