Diffstat (limited to 'drivers/ata/sata_mv.c')
 drivers/ata/sata_mv.c | 632 ++++++++++++++++++++-----------------------------
 1 file changed, 252 insertions(+), 380 deletions(-)
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 6ebebde8454a..05ff8c776497 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -1,6 +1,7 @@
 /*
  * sata_mv.c - Marvell SATA support
  *
+ * Copyright 2008: Marvell Corporation, all rights reserved.
  * Copyright 2005: EMC Corporation, all rights reserved.
  * Copyright 2005 Red Hat, Inc.  All rights reserved.
  *
@@ -39,7 +40,9 @@
 
 5) Investigate problems with PCI Message Signalled Interrupts (MSI).
 
-6) Add port multiplier support (intermediate)
+6) Cache frequently-accessed registers in mv_port_priv to reduce overhead.
+
+7) Fix/reenable hot plug/unplug (should happen as a side-effect of (2) above).
 
 8) Develop a low-power-consumption strategy, and implement it.
 
@@ -61,7 +64,6 @@
 
 */
 
-
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
@@ -131,7 +133,7 @@ enum {
 	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
 	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */
 	/* SoC integrated controllers, no PCI interface */
 	MV_FLAG_SOC		= (1 << 28),
 
 	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
 				  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
@@ -141,6 +143,7 @@ enum {
 	CRQB_FLAG_READ		= (1 << 0),
 	CRQB_TAG_SHIFT		= 1,
 	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
+	CRQB_PMP_SHIFT		= 12,	/* CRQB Gen-II/IIE PMP shift */
 	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
 	CRQB_CMD_ADDR_SHIFT	= 8,
 	CRQB_CMD_CS		= (0x2 << 11),
@@ -199,7 +202,7 @@ enum {
 	TWSI_INT		= (1 << 24),
 	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
 	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
 	HC_MAIN_RSVD_SOC	= (0x3fffffb << 6),	/* bits 31-9, 7-6 */
 	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
 				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
 				   HC_MAIN_RSVD),
@@ -223,13 +226,24 @@ enum {
 	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
 	SATA_ACTIVE_OFS		= 0x350,
 	SATA_FIS_IRQ_CAUSE_OFS	= 0x364,
+
+	LTMODE_OFS		= 0x30c,
+	LTMODE_BIT8		= (1 << 8),	/* unknown, but necessary */
+
 	PHY_MODE3		= 0x310,
 	PHY_MODE4		= 0x314,
 	PHY_MODE2		= 0x330,
+	SATA_IFCTL_OFS		= 0x344,
+	SATA_IFSTAT_OFS		= 0x34c,
+	VENDOR_UNIQUE_FIS_OFS	= 0x35c,
+
+	FIS_CFG_OFS		= 0x360,
+	FIS_CFG_SINGLE_SYNC	= (1 << 16),	/* SYNC on DMA activation */
+
 	MV5_PHY_MODE		= 0x74,
 	MV5_LT_MODE		= 0x30,
 	MV5_PHY_CTL		= 0x0C,
-	SATA_INTERFACE_CTL	= 0x050,
+	SATA_INTERFACE_CFG	= 0x050,
 
 	MV_M2_PREAMP_MASK	= 0x7e0,
 
@@ -240,6 +254,8 @@ enum {
 	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
 	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
 	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */
+	EDMA_CFG_EDMA_FBS	= (1 << 16),	/* EDMA FIS-Based Switching */
+	EDMA_CFG_FBS		= (1 << 26),	/* FIS-Based Switching */
 
 	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
 	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
@@ -282,7 +298,9 @@ enum {
 	EDMA_ERR_IRQ_TRANSIENT	= EDMA_ERR_LNK_CTRL_RX_0 |
 				  EDMA_ERR_LNK_CTRL_RX_1 |
 				  EDMA_ERR_LNK_CTRL_RX_3 |
-				  EDMA_ERR_LNK_CTRL_TX,
+				  EDMA_ERR_LNK_CTRL_TX |
+				 /* temporary, until we fix hotplug: */
+				 (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON),
 
 	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
 				  EDMA_ERR_PRD_PAR |
@@ -298,6 +316,7 @@ enum {
 				  EDMA_ERR_LNK_DATA_RX |
 				  EDMA_ERR_LNK_DATA_TX |
 				  EDMA_ERR_TRANS_PROTO,
+
 	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
 				  EDMA_ERR_PRD_PAR |
 				  EDMA_ERR_DEV_DCON |
@@ -344,7 +363,6 @@ enum {
 	/* Port private flags (pp_flags) */
 	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
 	MV_PP_FLAG_NCQ_EN	= (1 << 1),	/* is EDMA set up for NCQ? */
-	MV_PP_FLAG_HAD_A_RESET	= (1 << 2),	/* 1st hard reset complete? */
 };
 
 #define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
@@ -461,7 +479,6 @@ struct mv_hw_ops {
 	void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
 };
 
-static void mv_irq_clear(struct ata_port *ap);
 static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
 static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
 static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
@@ -471,7 +488,8 @@ static void mv_port_stop(struct ata_port *ap);
 static void mv_qc_prep(struct ata_queued_cmd *qc);
 static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
-static void mv_error_handler(struct ata_port *ap);
+static int mv_hardreset(struct ata_link *link, unsigned int *class,
+			unsigned long deadline);
 static void mv_eh_freeze(struct ata_port *ap);
 static void mv_eh_thaw(struct ata_port *ap);
 static void mv6_dev_config(struct ata_device *dev);
@@ -504,72 +522,46 @@ static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
 				      void __iomem *mmio);
 static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
 static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
-static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
+static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
 			     unsigned int port_no);
-static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
-			void __iomem *port_mmio, int want_ncq);
-static int __mv_stop_dma(struct ata_port *ap);
+static int mv_stop_edma(struct ata_port *ap);
+static int mv_stop_edma_engine(void __iomem *port_mmio);
+static void mv_edma_cfg(struct ata_port *ap, int want_ncq);
+
+static void mv_pmp_select(struct ata_port *ap, int pmp);
+static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
+			    unsigned long deadline);
+static int mv_softreset(struct ata_link *link, unsigned int *class,
+			unsigned long deadline);
 
 /* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
  * because we have to allow room for worst case splitting of
  * PRDs for 64K boundaries in mv_fill_sg().
  */
 static struct scsi_host_template mv5_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.can_queue		= ATA_DEF_QUEUE,
-	.this_id		= ATA_SHT_THIS_ID,
+	ATA_BASE_SHT(DRV_NAME),
 	.sg_tablesize		= MV_MAX_SG_CT / 2,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= 1,
-	.proc_name		= DRV_NAME,
 	.dma_boundary		= MV_DMA_BOUNDARY,
-	.slave_configure	= ata_scsi_slave_config,
-	.slave_destroy		= ata_scsi_slave_destroy,
-	.bios_param		= ata_std_bios_param,
 };
 
 static struct scsi_host_template mv6_sht = {
-	.module			= THIS_MODULE,
-	.name			= DRV_NAME,
-	.ioctl			= ata_scsi_ioctl,
-	.queuecommand		= ata_scsi_queuecmd,
-	.change_queue_depth	= ata_scsi_change_queue_depth,
+	ATA_NCQ_SHT(DRV_NAME),
 	.can_queue		= MV_MAX_Q_DEPTH - 1,
-	.this_id		= ATA_SHT_THIS_ID,
 	.sg_tablesize		= MV_MAX_SG_CT / 2,
-	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= 1,
-	.proc_name		= DRV_NAME,
 	.dma_boundary		= MV_DMA_BOUNDARY,
-	.slave_configure	= ata_scsi_slave_config,
-	.slave_destroy		= ata_scsi_slave_destroy,
-	.bios_param		= ata_std_bios_param,
 };
 
-static const struct ata_port_operations mv5_ops = {
-	.tf_load		= ata_tf_load,
-	.tf_read		= ata_tf_read,
-	.check_status		= ata_check_status,
-	.exec_command		= ata_exec_command,
-	.dev_select		= ata_std_dev_select,
-
-	.cable_detect		= ata_cable_sata,
+static struct ata_port_operations mv5_ops = {
+	.inherits		= &ata_sff_port_ops,
 
 	.qc_prep		= mv_qc_prep,
 	.qc_issue		= mv_qc_issue,
-	.data_xfer		= ata_data_xfer,
-
-	.irq_clear		= mv_irq_clear,
-	.irq_on			= ata_irq_on,
 
-	.error_handler		= mv_error_handler,
 	.freeze			= mv_eh_freeze,
 	.thaw			= mv_eh_thaw,
+	.hardreset		= mv_hardreset,
+	.error_handler		= ata_std_error_handler, /* avoid SFF EH */
+	.post_internal_cmd	= ATA_OP_NULL,
 
 	.scr_read		= mv5_scr_read,
 	.scr_write		= mv5_scr_write,
@@ -578,61 +570,24 @@ static const struct ata_port_operations mv5_ops = {
 	.port_stop		= mv_port_stop,
 };
 
-static const struct ata_port_operations mv6_ops = {
+static struct ata_port_operations mv6_ops = {
+	.inherits		= &mv5_ops,
+	.qc_defer		= sata_pmp_qc_defer_cmd_switch,
 	.dev_config		= mv6_dev_config,
-	.tf_load		= ata_tf_load,
-	.tf_read		= ata_tf_read,
-	.check_status		= ata_check_status,
-	.exec_command		= ata_exec_command,
-	.dev_select		= ata_std_dev_select,
-
-	.cable_detect		= ata_cable_sata,
-
-	.qc_prep		= mv_qc_prep,
-	.qc_issue		= mv_qc_issue,
-	.data_xfer		= ata_data_xfer,
-
-	.irq_clear		= mv_irq_clear,
-	.irq_on			= ata_irq_on,
-
-	.error_handler		= mv_error_handler,
-	.freeze			= mv_eh_freeze,
-	.thaw			= mv_eh_thaw,
-	.qc_defer		= ata_std_qc_defer,
-
 	.scr_read		= mv_scr_read,
 	.scr_write		= mv_scr_write,
 
-	.port_start		= mv_port_start,
-	.port_stop		= mv_port_stop,
+	.pmp_hardreset		= mv_pmp_hardreset,
+	.pmp_softreset		= mv_softreset,
+	.softreset		= mv_softreset,
+	.error_handler		= sata_pmp_error_handler,
 };
 
-static const struct ata_port_operations mv_iie_ops = {
-	.tf_load		= ata_tf_load,
-	.tf_read		= ata_tf_read,
-	.check_status		= ata_check_status,
-	.exec_command		= ata_exec_command,
-	.dev_select		= ata_std_dev_select,
-
-	.cable_detect		= ata_cable_sata,
-
+static struct ata_port_operations mv_iie_ops = {
+	.inherits		= &mv6_ops,
+	.qc_defer		= ata_std_qc_defer, /* FIS-based switching */
+	.dev_config		= ATA_OP_NULL,
 	.qc_prep		= mv_qc_prep_iie,
-	.qc_issue		= mv_qc_issue,
-	.data_xfer		= ata_data_xfer,
-
-	.irq_clear		= mv_irq_clear,
-	.irq_on			= ata_irq_on,
-
-	.error_handler		= mv_error_handler,
-	.freeze			= mv_eh_freeze,
-	.thaw			= mv_eh_thaw,
-	.qc_defer		= ata_std_qc_defer,
-
-	.scr_read		= mv_scr_read,
-	.scr_write		= mv_scr_write,
-
-	.port_start		= mv_port_start,
-	.port_stop		= mv_port_stop,
 };
 
 static const struct ata_port_info mv_port_info[] = {
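
The rewritten tables above lean on libata's ops inheritance: at host registration, any method slot left NULL in mv6_ops or mv_iie_ops is filled in from the parent named by .inherits (mv5_ops, and ultimately ata_sff_port_ops), so each table now lists only its overrides. A rough user-space sketch of that fill-from-parent resolution (the struct and names below are illustrative, not libata's actual API):

    #include <stdio.h>

    /* Hypothetical ops table, loosely modeled on struct ata_port_operations. */
    struct ops {
    	const struct ops *inherits;
    	void (*freeze)(void);
    	void (*thaw)(void);
    };

    static void base_freeze(void) { puts("base freeze"); }
    static void base_thaw(void)   { puts("base thaw"); }
    static void fancy_thaw(void)  { puts("fancy thaw"); }

    static const struct ops base_ops  = { NULL, base_freeze, base_thaw };
    static const struct ops fancy_ops = { &base_ops, NULL, fancy_thaw };

    /* Walk the .inherits chain, filling any slot the child left NULL. */
    static void finalize(struct ops *dst)
    {
    	for (const struct ops *p = dst->inherits; p; p = p->inherits) {
    		if (!dst->freeze)
    			dst->freeze = p->freeze;
    		if (!dst->thaw)
    			dst->thaw = p->thaw;
    	}
    }

    int main(void)
    {
    	struct ops resolved = fancy_ops;

    	finalize(&resolved);
    	resolved.freeze();	/* inherited: prints "base freeze" */
    	resolved.thaw();	/* overridden: prints "fancy thaw" */
    	return 0;
    }
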
@@ -656,6 +611,7 @@ static const struct ata_port_info mv_port_info[] = {
 	},
 	{  /* chip_604x */
 		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
+				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
 				  ATA_FLAG_NCQ,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.udma_mask	= ATA_UDMA6,
@@ -663,6 +619,7 @@ static const struct ata_port_info mv_port_info[] = {
 	},
 	{  /* chip_608x */
 		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
+				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
 				  ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.udma_mask	= ATA_UDMA6,
@@ -670,6 +627,7 @@ static const struct ata_port_info mv_port_info[] = {
 	},
 	{  /* chip_6042 */
 		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
+				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
 				  ATA_FLAG_NCQ,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.udma_mask	= ATA_UDMA6,
@@ -677,16 +635,19 @@ static const struct ata_port_info mv_port_info[] = {
 	},
 	{  /* chip_7042 */
 		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
+				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
 				  ATA_FLAG_NCQ,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &mv_iie_ops,
 	},
 	{  /* chip_soc */
-		.flags		= MV_COMMON_FLAGS | MV_FLAG_SOC,
-		.pio_mask	= 0x1f,	/* pio0-4 */
-		.udma_mask	= ATA_UDMA6,
-		.port_ops	= &mv_iie_ops,
+		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
+				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
+				  ATA_FLAG_NCQ | MV_FLAG_SOC,
+		.pio_mask	= 0x1f,	/* pio0-4 */
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &mv_iie_ops,
 	},
 };
 
@@ -785,6 +746,14 @@ static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
 		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
 }
 
+static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
+{
+	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
+	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
+
+	return hc_mmio + ofs;
+}
+
 static inline void __iomem *mv_host_base(struct ata_host *host)
 {
 	struct mv_host_priv *hpriv = host->private_data;
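
mv5_phy_base() (moved up to sit with the other base-address helpers) locates a port's PHY window relative to its host-controller block: hardport 0 at hc_mmio + 0x100, hardport 1 at +0x200, and so on. A standalone check of the arithmetic, using plain integers in place of __iomem pointers and assuming mv_hardport_from_port() reduces to port % 4:

    #include <stdio.h>
    #include <stdint.h>

    #define MV_PORTS_PER_HC	4

    /* Mirrors the mv5 PHY window math: hardport N lives at
     * hc_base + (N + 1) * 0x100.  Integers stand in for __iomem. */
    static uintptr_t mv5_phy_base(uintptr_t hc_base, unsigned int port)
    {
    	unsigned int hardport = port % MV_PORTS_PER_HC;	/* assumed mapping */

    	return hc_base + (hardport + 1) * 0x100UL;
    }

    int main(void)
    {
    	for (unsigned int port = 0; port < 4; port++)
    		printf("hardport %u -> 0x%lx\n", port,
    		       (unsigned long)mv5_phy_base(0x20000, port));
    	return 0;
    }
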
@@ -801,10 +770,6 @@ static inline int mv_get_hc_count(unsigned long port_flags)
 	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
 }
 
-static void mv_irq_clear(struct ata_port *ap)
-{
-}
-
 static void mv_set_edma_ptrs(void __iomem *port_mmio,
 			     struct mv_host_priv *hpriv,
 			     struct mv_port_priv *pp)
@@ -864,7 +829,7 @@ static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
 	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
 		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
 		if (want_ncq != using_ncq)
-			__mv_stop_dma(ap);
+			mv_stop_edma(ap);
 	}
 	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
 		struct mv_host_priv *hpriv = ap->host->private_data;
@@ -885,7 +850,7 @@ static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
 				 hc_mmio + HC_IRQ_CAUSE_OFS);
 		}
 
-		mv_edma_cfg(pp, hpriv, port_mmio, want_ncq);
+		mv_edma_cfg(ap, want_ncq);
 
 		/* clear FIS IRQ Cause */
 		writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
@@ -899,58 +864,42 @@ static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
 }
 
 /**
- *	__mv_stop_dma - Disable eDMA engine
- *	@ap: ATA channel to manipulate
- *
- *	Verify the local cache of the eDMA state is accurate with a
- *	WARN_ON.
+ *	mv_stop_edma_engine - Disable eDMA engine
+ *	@port_mmio: io base address
  *
  *	LOCKING:
  *	Inherited from caller.
  */
-static int __mv_stop_dma(struct ata_port *ap)
+static int mv_stop_edma_engine(void __iomem *port_mmio)
 {
-	void __iomem *port_mmio = mv_ap_base(ap);
-	struct mv_port_priv *pp	= ap->private_data;
-	u32 reg;
-	int i, err = 0;
+	int i;
 
-	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
-		/* Disable EDMA if active.   The disable bit auto clears.
-		 */
-		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
-		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
-	} else {
-		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
-	}
+	/* Disable eDMA.  The disable bit auto clears. */
+	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
 
-	/* now properly wait for the eDMA to stop */
-	for (i = 1000; i > 0; i--) {
-		reg = readl(port_mmio + EDMA_CMD_OFS);
+	/* Wait for the chip to confirm eDMA is off. */
+	for (i = 10000; i > 0; i--) {
+		u32 reg = readl(port_mmio + EDMA_CMD_OFS);
 		if (!(reg & EDMA_EN))
-			break;
-
-		udelay(100);
-	}
-
-	if (reg & EDMA_EN) {
-		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
-		err = -EIO;
+			return 0;
+		udelay(10);
 	}
-
-	return err;
+	return -EIO;
 }
 
-static int mv_stop_dma(struct ata_port *ap)
+static int mv_stop_edma(struct ata_port *ap)
 {
-	unsigned long flags;
-	int rc;
-
-	spin_lock_irqsave(&ap->host->lock, flags);
-	rc = __mv_stop_dma(ap);
-	spin_unlock_irqrestore(&ap->host->lock, flags);
+	void __iomem *port_mmio = mv_ap_base(ap);
+	struct mv_port_priv *pp = ap->private_data;
 
-	return rc;
+	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
+		return 0;
+	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
+	if (mv_stop_edma_engine(port_mmio)) {
+		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
+		return -EIO;
+	}
+	return 0;
 }
 
 #ifdef ATA_DEBUG
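
The replacement loop polls EDMA_EN 10000 times at 10us intervals, roughly the same ~100ms budget as the old 1000 x 100us loop but with finer granularity, so the common fast case returns sooner. A user-space sketch of the same bounded-poll idiom (the fake device below stands in for readl()/udelay() on real MMIO):

    #include <stdio.h>

    #define BUSY_BIT (1u << 0)

    /* Fake device that goes idle after a few polls; read_status() and
     * delay_us() stand in for readl() and udelay(). */
    static unsigned int fake_status = BUSY_BIT;
    static int polls;

    static unsigned int read_status(void)
    {
    	if (++polls >= 3)
    		fake_status &= ~BUSY_BIT;	/* device goes idle */
    	return fake_status;
    }

    static void delay_us(unsigned int us)
    {
    	(void)us;	/* no-op in this model */
    }

    /* The bounded-poll idiom: 10000 tries x 10us = ~100ms budget. */
    static int wait_not_busy(void)
    {
    	for (int i = 10000; i > 0; i--) {
    		if (!(read_status() & BUSY_BIT))
    			return 0;		/* confirmed idle */
    		delay_us(10);
    	}
    	return -1;				/* timed out */
    }

    int main(void)
    {
    	printf("wait_not_busy() = %d after %d polls\n",
    	       wait_not_busy(), polls);
    	return 0;
    }
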
@@ -1074,18 +1023,50 @@ static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
 static void mv6_dev_config(struct ata_device *adev)
 {
 	/*
+	 * Deal with Gen-II ("mv6") hardware quirks/restrictions:
+	 *
+	 * Gen-II does not support NCQ over a port multiplier
+	 *  (no FIS-based switching).
+	 *
 	 * We don't have hob_nsect when doing NCQ commands on Gen-II.
 	 * See mv_qc_prep() for more info.
 	 */
-	if (adev->flags & ATA_DFLAG_NCQ)
-		if (adev->max_sectors > ATA_MAX_SECTORS)
+	if (adev->flags & ATA_DFLAG_NCQ) {
+		if (sata_pmp_attached(adev->link->ap))
+			adev->flags &= ~ATA_DFLAG_NCQ;
+		else if (adev->max_sectors > ATA_MAX_SECTORS)
 			adev->max_sectors = ATA_MAX_SECTORS;
+	}
 }
 
-static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
-			void __iomem *port_mmio, int want_ncq)
+static void mv_config_fbs(void __iomem *port_mmio, int enable_fbs)
+{
+	u32 old_fcfg, new_fcfg, old_ltmode, new_ltmode;
+	/*
+	 * Various bit settings required for operation
+	 * in FIS-based switching (fbs) mode on GenIIe:
+	 */
+	old_fcfg   = readl(port_mmio + FIS_CFG_OFS);
+	old_ltmode = readl(port_mmio + LTMODE_OFS);
+	if (enable_fbs) {
+		new_fcfg   = old_fcfg   |  FIS_CFG_SINGLE_SYNC;
+		new_ltmode = old_ltmode |  LTMODE_BIT8;
+	} else { /* disable fbs */
+		new_fcfg   = old_fcfg   & ~FIS_CFG_SINGLE_SYNC;
+		new_ltmode = old_ltmode & ~LTMODE_BIT8;
+	}
+	if (new_fcfg != old_fcfg)
+		writelfl(new_fcfg, port_mmio + FIS_CFG_OFS);
+	if (new_ltmode != old_ltmode)
+		writelfl(new_ltmode, port_mmio + LTMODE_OFS);
+}
+
+static void mv_edma_cfg(struct ata_port *ap, int want_ncq)
 {
 	u32 cfg;
+	struct mv_port_priv *pp    = ap->private_data;
+	struct mv_host_priv *hpriv = ap->host->private_data;
+	void __iomem *port_mmio    = mv_ap_base(ap);
 
 	/* set up non-NCQ EDMA configuration */
 	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */
@@ -1101,6 +1082,13 @@ static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
 		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
 		cfg |= (1 << 18);	/* enab early completion */
 		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
+
+		if (want_ncq && sata_pmp_attached(ap)) {
+			cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */
+			mv_config_fbs(port_mmio, 1);
+		} else {
+			mv_config_fbs(port_mmio, 0);
+		}
 	}
 
 	if (want_ncq) {
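
Note that mv_config_fbs() above writes FIS_CFG_OFS and LTMODE_OFS only when a computed value actually differs from what was read back, sparing redundant flushing writes on the common path. A minimal sketch of that read-modify-write-if-changed idiom, with made-up register names:

    #include <stdio.h>

    #define FEATURE_BIT (1u << 16)

    /* One fake 32-bit register; a driver would use readl()/writelfl()
     * on real MMIO instead of this backing store. */
    static unsigned int reg;

    static unsigned int reg_read(void)
    {
    	return reg;
    }

    static void reg_write(unsigned int val)
    {
    	reg = val;
    	puts("write issued");	/* each flushing write is visible here */
    }

    /* Only touch the register when the computed value differs. */
    static void set_feature(int enable)
    {
    	unsigned int old = reg_read();
    	unsigned int new = enable ? (old | FEATURE_BIT)
    				  : (old & ~FEATURE_BIT);

    	if (new != old)
    		reg_write(new);	/* skipped when already in desired state */
    }

    int main(void)
    {
    	set_feature(1);		/* write issued */
    	set_feature(1);		/* no-op: bit already set */
    	set_feature(0);		/* write issued */
    	return 0;
    }
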
@@ -1156,8 +1144,6 @@ static int mv_port_start(struct ata_port *ap)
 	struct device *dev = ap->host->dev;
 	struct mv_host_priv *hpriv = ap->host->private_data;
 	struct mv_port_priv *pp;
-	void __iomem *port_mmio = mv_ap_base(ap);
-	unsigned long flags;
 	int tag;
 
 	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
@@ -1190,18 +1176,6 @@ static int mv_port_start(struct ata_port *ap)
 			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
 		}
 	}
-
-	spin_lock_irqsave(&ap->host->lock, flags);
-
-	mv_edma_cfg(pp, hpriv, port_mmio, 0);
-	mv_set_edma_ptrs(port_mmio, hpriv, pp);
-
-	spin_unlock_irqrestore(&ap->host->lock, flags);
-
-	/* Don't turn on EDMA here...do it before DMA commands only.  Else
-	 * we'll be unable to send non-data, PIO, etc due to restricted access
-	 * to shadow regs.
-	 */
 	return 0;
 
 out_port_free_dma_mem:
@@ -1220,7 +1194,7 @@ out_port_free_dma_mem:
  */
 static void mv_port_stop(struct ata_port *ap)
 {
-	mv_stop_dma(ap);
+	mv_stop_edma(ap);
 	mv_port_free_dma_mem(ap);
 }
 
@@ -1306,6 +1280,7 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
 		flags |= CRQB_FLAG_READ;
 	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
 	flags |= qc->tag << CRQB_TAG_SHIFT;
+	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
 
 	/* get current queue index from software */
 	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
@@ -1390,14 +1365,14 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
 	    (qc->tf.protocol != ATA_PROT_NCQ))
 		return;
 
-	/* Fill in Gen IIE command request block
-	 */
+	/* Fill in Gen IIE command request block */
 	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
 		flags |= CRQB_FLAG_READ;
 
 	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
 	flags |= qc->tag << CRQB_TAG_SHIFT;
 	flags |= qc->tag << CRQB_HOSTQ_SHIFT;
+	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
 
 	/* get current queue index from software */
 	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
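
With these two hunks, both CRQB builders pack the target's 4-bit PMP port number into bits 15:12 of the flags word, next to the tag in bits 5:1 (plus, on IIE, the host-queue tag at bit 17). A standalone illustration of the packing, reusing the shift constants defined earlier in this patch (plain ints stand in for the real qc):

    #include <stdio.h>

    #define CRQB_FLAG_READ		(1u << 0)
    #define CRQB_TAG_SHIFT		1
    #define CRQB_PMP_SHIFT		12	/* CRQB Gen-II/IIE PMP shift */
    #define CRQB_HOSTQ_SHIFT	17	/* CRQB Gen-II/IIE HostQueTag shift */

    /* Pack tag and PMP port the way mv_qc_prep_iie() does; tag and
     * pmp are plain ints here rather than fields of a real qc. */
    static unsigned int crqb_flags(unsigned int tag, unsigned int pmp,
    			       int is_read)
    {
    	unsigned int flags = 0;

    	if (is_read)
    		flags |= CRQB_FLAG_READ;
    	flags |= tag << CRQB_TAG_SHIFT;
    	flags |= tag << CRQB_HOSTQ_SHIFT;
    	flags |= (pmp & 0xf) << CRQB_PMP_SHIFT;	/* 4-bit PMP port */
    	return flags;
    }

    int main(void)
    {
    	/* tag 5, PMP port 3, read: prints 0x000a300b */
    	printf("flags = 0x%08x\n", crqb_flags(5, 3, 1));
    	return 0;
    }
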
@@ -1455,12 +1430,14 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
 
 	if ((qc->tf.protocol != ATA_PROT_DMA) &&
 	    (qc->tf.protocol != ATA_PROT_NCQ)) {
-		/* We're about to send a non-EDMA capable command to the
+		/*
+		 * We're about to send a non-EDMA capable command to the
 		 * port.  Turn off EDMA so there won't be problems accessing
 		 * shadow block, etc registers.
 		 */
-		__mv_stop_dma(ap);
-		return ata_qc_issue_prot(qc);
+		mv_stop_edma(ap);
+		mv_pmp_select(ap, qc->dev->link->pmp);
+		return ata_sff_qc_issue(qc);
 	}
 
 	mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);
@@ -1482,10 +1459,10 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
  * @reset_allowed: bool: 0 == don't trigger from reset here
  *
  * In most cases, just clear the interrupt and move on.  However,
- * some cases require an eDMA reset, which is done right before
- * the COMRESET in mv_phy_reset().  The SERR case requires a
- * clear of pending errors in the SATA SERROR register.  Finally,
- * if the port disabled DMA, update our cached copy to match.
+ * some cases require an eDMA reset, which also performs a COMRESET.
+ * The SERR case requires a clear of pending errors in the SATA
+ * SERROR register.  Finally, if the port disabled DMA,
+ * update our cached copy to match.
  *
  * LOCKING:
  * Inherited from caller.
@@ -1524,14 +1501,14 @@ static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
 			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
 			EDMA_ERR_INTRL_PAR)) {
 		err_mask |= AC_ERR_ATA_BUS;
-		action |= ATA_EH_HARDRESET;
+		action |= ATA_EH_RESET;
 		ata_ehi_push_desc(ehi, "parity error");
 	}
 	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
 		ata_ehi_hotplugged(ehi);
 		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
 			"dev disconnect" : "dev connect");
-		action |= ATA_EH_HARDRESET;
+		action |= ATA_EH_RESET;
 	}
 
 	if (IS_GEN_I(hpriv)) {
@@ -1555,7 +1532,7 @@ static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
 			sata_scr_read(&ap->link, SCR_ERROR, &serr);
 			sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
 			err_mask = AC_ERR_ATA_BUS;
-			action |= ATA_EH_HARDRESET;
+			action |= ATA_EH_RESET;
 		}
 	}
 
@@ -1564,7 +1541,7 @@ static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
 
 	if (!err_mask) {
 		err_mask = AC_ERR_OTHER;
-		action |= ATA_EH_HARDRESET;
+		action |= ATA_EH_RESET;
 	}
 
 	ehi->serror |= serr;
@@ -1723,9 +1700,9 @@ static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
 		pp = ap->private_data;
 
 		shift = port << 1;		/* (port * 2) */
-		if (port >= MV_PORTS_PER_HC) {
+		if (port >= MV_PORTS_PER_HC)
 			shift++;	/* skip bit 8 in the HC Main IRQ reg */
-		}
+
 		have_err_bits = ((PORT0_ERR << shift) & relevant);
 
 		if (unlikely(have_err_bits)) {
@@ -1780,7 +1757,7 @@ static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
 			ata_ehi_push_desc(ehi,
 				"PCI err cause 0x%08x", err_cause);
 			err_mask = AC_ERR_HOST_BUS;
-			ehi->action = ATA_EH_HARDRESET;
+			ehi->action = ATA_EH_RESET;
 			qc = ata_qc_from_tag(ap, ap->link.active_tag);
 			if (qc)
 				qc->err_mask |= err_mask;
@@ -1814,6 +1791,7 @@ static irqreturn_t mv_interrupt(int irq, void *dev_instance)
 	void __iomem *mmio = hpriv->base;
 	u32 irq_stat, irq_mask;
 
+	/* Note to self: &host->lock == &ap->host->lock == ap->lock */
 	spin_lock(&host->lock);
 
 	irq_stat = readl(hpriv->main_cause_reg_addr);
@@ -1847,14 +1825,6 @@ out_unlock:
 	return IRQ_RETVAL(handled);
 }
 
-static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
-{
-	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
-	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
-
-	return hc_mmio + ofs;
-}
-
 static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
 {
 	unsigned int ofs;
@@ -1980,9 +1950,12 @@ static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
 {
 	void __iomem *port_mmio = mv_port_base(mmio, port);
 
-	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
-
-	mv_channel_reset(hpriv, mmio, port);
+	/*
+	 * The datasheet warns against setting ATA_RST when EDMA is active
+	 * (but doesn't say what the problem might be).  So we first try
+	 * to disable the EDMA engine before doing the ATA_RST operation.
+	 */
+	mv_reset_channel(hpriv, mmio, port);
 
 	ZERO(0x028);	/* command */
 	writel(0x11f, port_mmio + EDMA_CFG_OFS);
@@ -2132,6 +2105,13 @@ static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
 		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
 		rc = 1;
 	}
+	/*
+	 * Temporary: wait 3 seconds before port-probing can happen,
+	 * so that we don't miss finding sleepy SilXXXX port-multipliers.
+	 * This can go away once hotplug is fully/correctly implemented.
+	 */
+	if (rc == 0)
+		msleep(3000);
 done:
 	return rc;
 }
@@ -2200,14 +2180,15 @@ static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
 		m4 = readl(port_mmio + PHY_MODE4);
 
 		if (hp_flags & MV_HP_ERRATA_60X1B2)
-			tmp = readl(port_mmio + 0x310);
+			tmp = readl(port_mmio + PHY_MODE3);
 
+		/* workaround for errata FEr SATA#10 (part 1) */
 		m4 = (m4 & ~(1 << 1)) | (1 << 0);
 
 		writel(m4, port_mmio + PHY_MODE4);
 
 		if (hp_flags & MV_HP_ERRATA_60X1B2)
-			writel(tmp, port_mmio + 0x310);
+			writel(tmp, port_mmio + PHY_MODE3);
 	}
 
 	/* Revert values of pre-emphasis and signal amps to the saved ones */
@@ -2255,9 +2236,12 @@ static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
 {
 	void __iomem *port_mmio = mv_port_base(mmio, port);
 
-	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
-
-	mv_channel_reset(hpriv, mmio, port);
+	/*
+	 * The datasheet warns against setting ATA_RST when EDMA is active
+	 * (but doesn't say what the problem might be).  So we first try
+	 * to disable the EDMA engine before doing the ATA_RST operation.
+	 */
+	mv_reset_channel(hpriv, mmio, port);
 
 	ZERO(0x028);		/* command */
 	writel(0x101f, port_mmio + EDMA_CFG_OFS);
@@ -2314,25 +2298,39 @@ static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
 	return;
 }
 
-static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
+static void mv_setup_ifctl(void __iomem *port_mmio, int want_gen2i)
+{
+	u32 ifctl = readl(port_mmio + SATA_INTERFACE_CFG);
+
+	ifctl = (ifctl & 0xf7f) | 0x9b1000;	/* from chip spec */
+	if (want_gen2i)
+		ifctl |= (1 << 7);		/* enable gen2i speed */
+	writelfl(ifctl, port_mmio + SATA_INTERFACE_CFG);
+}
+
+/*
+ * Caller must ensure that EDMA is not active,
+ * by first doing mv_stop_edma() where needed.
+ */
+static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
 			     unsigned int port_no)
 {
 	void __iomem *port_mmio = mv_port_base(mmio, port_no);
 
+	mv_stop_edma_engine(port_mmio);
 	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
 
-	if (IS_GEN_II(hpriv)) {
-		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
-		ifctl |= (1 << 7);		/* enable gen2i speed */
-		ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
-		writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
+	if (!IS_GEN_I(hpriv)) {
+		/* Enable 3.0gb/s link speed */
+		mv_setup_ifctl(port_mmio, 1);
 	}
-
-	udelay(25);		/* allow reset propagation */
-
-	/* Spec never mentions clearing the bit.  Marvell's driver does
-	 * clear the bit, however.
+	/*
+	 * Strobing ATA_RST here causes a hard reset of the SATA transport,
+	 * link, and physical layers.  It resets all SATA interface registers
+	 * (except for SATA_INTERFACE_CFG), and issues a COMRESET to the dev.
 	 */
+	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
+	udelay(25);	/* allow reset propagation */
 	writelfl(0, port_mmio + EDMA_CMD_OFS);
 
 	hpriv->ops->phy_errata(hpriv, mmio, port_no);
@@ -2341,136 +2339,32 @@ static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
 	mdelay(1);
 }
 
-/**
- *	mv_phy_reset - Perform eDMA reset followed by COMRESET
- *	@ap: ATA channel to manipulate
- *
- *	Part of this is taken from __sata_phy_reset and modified to
- *	not sleep since this routine gets called from interrupt level.
- *
- *	LOCKING:
- *	Inherited from caller.  This is coded to safe to call at
- *	interrupt level, i.e. it does not sleep.
- */
-static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
-			 unsigned long deadline)
+static void mv_pmp_select(struct ata_port *ap, int pmp)
 {
-	struct mv_port_priv *pp	= ap->private_data;
-	struct mv_host_priv *hpriv = ap->host->private_data;
-	void __iomem *port_mmio = mv_ap_base(ap);
-	int retry = 5;
-	u32 sstatus;
-
-	VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
-
-#ifdef DEBUG
-	{
-		u32 sstatus, serror, scontrol;
-
-		mv_scr_read(ap, SCR_STATUS, &sstatus);
-		mv_scr_read(ap, SCR_ERROR, &serror);
-		mv_scr_read(ap, SCR_CONTROL, &scontrol);
-		DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
-			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
-	}
-#endif
-
-	/* Issue COMRESET via SControl */
-comreset_retry:
-	sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301);
-	msleep(1);
-
-	sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300);
-	msleep(20);
+	if (sata_pmp_supported(ap)) {
+		void __iomem *port_mmio = mv_ap_base(ap);
+		u32 reg = readl(port_mmio + SATA_IFCTL_OFS);
+		int old = reg & 0xf;
 
-	do {
-		sata_scr_read(&ap->link, SCR_STATUS, &sstatus);
-		if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
-			break;
-
-		msleep(1);
-	} while (time_before(jiffies, deadline));
-
-	/* work around errata */
-	if (IS_GEN_II(hpriv) &&
-	    (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
-	    (retry-- > 0))
-		goto comreset_retry;
-
-#ifdef DEBUG
-	{
-		u32 sstatus, serror, scontrol;
-
-		mv_scr_read(ap, SCR_STATUS, &sstatus);
-		mv_scr_read(ap, SCR_ERROR, &serror);
-		mv_scr_read(ap, SCR_CONTROL, &scontrol);
-		DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
-			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
-	}
-#endif
-
-	if (ata_link_offline(&ap->link)) {
-		*class = ATA_DEV_NONE;
-		return;
-	}
-
-	/* even after SStatus reflects that device is ready,
-	 * it seems to take a while for link to be fully
-	 * established (and thus Status no longer 0x80/0x7F),
-	 * so we poll a bit for that, here.
-	 */
-	retry = 20;
-	while (1) {
-		u8 drv_stat = ata_check_status(ap);
-		if ((drv_stat != 0x80) && (drv_stat != 0x7f))
-			break;
-		msleep(500);
-		if (retry-- <= 0)
-			break;
-		if (time_after(jiffies, deadline))
-			break;
+		if (old != pmp) {
+			reg = (reg & ~0xf) | pmp;
+			writelfl(reg, port_mmio + SATA_IFCTL_OFS);
+		}
 	}
-
-	/* FIXME: if we passed the deadline, the following
-	 * code probably produces an invalid result
-	 */
-
-	/* finally, read device signature from TF registers */
-	*class = ata_dev_try_classify(ap->link.device, 1, NULL);
-
-	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
-
-	WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);
-
-	VPRINTK("EXIT\n");
 }
 
-static int mv_prereset(struct ata_link *link, unsigned long deadline)
+static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
+			    unsigned long deadline)
 {
-	struct ata_port *ap = link->ap;
-	struct mv_port_priv *pp	= ap->private_data;
-	struct ata_eh_context *ehc = &link->eh_context;
-	int rc;
-
-	rc = mv_stop_dma(ap);
-	if (rc)
-		ehc->i.action |= ATA_EH_HARDRESET;
-
-	if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) {
-		pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
-		ehc->i.action |= ATA_EH_HARDRESET;
-	}
-
-	/* if we're about to do hardreset, nothing more to do */
-	if (ehc->i.action & ATA_EH_HARDRESET)
-		return 0;
-
-	if (ata_link_online(link))
-		rc = ata_wait_ready(ap, deadline);
-	else
-		rc = -ENODEV;
+	mv_pmp_select(link->ap, sata_srst_pmp(link));
+	return sata_std_hardreset(link, class, deadline);
+}
 
-	return rc;
+static int mv_softreset(struct ata_link *link, unsigned int *class,
+			unsigned long deadline)
+{
+	mv_pmp_select(link->ap, sata_srst_pmp(link));
+	return ata_sff_softreset(link, class, deadline);
 }
 
 static int mv_hardreset(struct ata_link *link, unsigned int *class,
@@ -2478,43 +2372,34 @@ static int mv_hardreset(struct ata_link *link, unsigned int *class,
 {
 	struct ata_port *ap = link->ap;
 	struct mv_host_priv *hpriv = ap->host->private_data;
+	struct mv_port_priv *pp = ap->private_data;
 	void __iomem *mmio = hpriv->base;
+	int rc, attempts = 0, extra = 0;
+	u32 sstatus;
+	bool online;
 
-	mv_stop_dma(ap);
-
-	mv_channel_reset(hpriv, mmio, ap->port_no);
-
-	mv_phy_reset(ap, class, deadline);
-
-	return 0;
-}
-
-static void mv_postreset(struct ata_link *link, unsigned int *classes)
-{
-	struct ata_port *ap = link->ap;
-	u32 serr;
-
-	/* print link status */
-	sata_print_link_status(link);
-
-	/* clear SError */
-	sata_scr_read(link, SCR_ERROR, &serr);
-	sata_scr_write_flush(link, SCR_ERROR, serr);
+	mv_reset_channel(hpriv, mmio, ap->port_no);
+	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
 
-	/* bail out if no device is present */
-	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
-		DPRINTK("EXIT, no device\n");
-		return;
-	}
+	/* Workaround for errata FEr SATA#10 (part 2) */
+	do {
+		const unsigned long *timing =
+				sata_ehc_deb_timing(&link->eh_context);
 
-	/* set up device control */
-	iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
-}
+		rc = sata_link_hardreset(link, timing, deadline + extra,
+					 &online, NULL);
+		if (rc)
+			return rc;
+		sata_scr_read(link, SCR_STATUS, &sstatus);
+		if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
+			/* Force 1.5gb/s link speed and try again */
+			mv_setup_ifctl(mv_ap_base(ap), 0);
+			if (time_after(jiffies + HZ, deadline))
+				extra = HZ; /* only extend it once, max */
+		}
+	} while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);
 
-static void mv_error_handler(struct ata_port *ap)
-{
-	ata_do_eh(ap, mv_prereset, ata_std_softreset,
-		  mv_hardreset, mv_postreset);
+	return rc;
 }
 
 static void mv_eh_freeze(struct ata_port *ap)
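
The loop in the new mv_hardreset() keeps resetting until SStatus settles at 0x0 (no device), 0x113, or 0x123 (link up), and on Gen-II/IIE after five attempts it drops to 1.5 Gb/s via mv_setup_ifctl() and extends the deadline once by HZ. A toy model of that extend-once retry policy (plain counters stand in for jiffies):

    #include <stdio.h>

    #define HZ 100	/* ticks per second, as in the kernel */

    /* Fake reset that only succeeds on the 7th attempt. */
    static int try_reset(int attempt)
    {
    	return attempt >= 7;
    }

    int main(void)
    {
    	int now = 0, deadline = 50, extra = 0, attempts = 0;

    	while (!try_reset(++attempts)) {
    		now += 10;			/* each attempt costs time */
    		if (attempts >= 5 && now + HZ > deadline)
    			extra = HZ;		/* extend it once, max */
    		if (now > deadline + extra) {
    			puts("gave up");
    			return 1;
    		}
    	}
    	printf("reset ok after %d attempts (extra=%d)\n", attempts, extra);
    	return 0;
    }
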
@@ -2808,19 +2693,6 @@ static int mv_init_host(struct ata_host *host, unsigned int board_idx)
 	hpriv->ops->enable_leds(hpriv, mmio);
 
 	for (port = 0; port < host->n_ports; port++) {
-		if (IS_GEN_II(hpriv)) {
-			void __iomem *port_mmio = mv_port_base(mmio, port);
-
-			u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
-			ifctl |= (1 << 7);		/* enable gen2i speed */
-			ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
-			writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
-		}
-
-		hpriv->ops->phy_errata(hpriv, mmio, port);
-	}
-
-	for (port = 0; port < host->n_ports; port++) {
 		struct ata_port *ap = host->ports[port];
 		void __iomem *port_mmio = mv_port_base(mmio, port);
 
@@ -3192,7 +3064,7 @@ MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
 MODULE_LICENSE("GPL");
 MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
 MODULE_VERSION(DRV_VERSION);
-MODULE_ALIAS("platform:sata_mv");
+MODULE_ALIAS("platform:" DRV_NAME);
 
 #ifdef CONFIG_PCI
 module_param(msi, int, 0444);