Diffstat (limited to 'drivers/ata/sata_mv.c')
-rw-r--r-- | drivers/ata/sata_mv.c | 868 |
1 file changed, 739 insertions, 129 deletions
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 74b1080d116d..a377226b81c8 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -1,10 +1,13 @@ | |||
1 | /* | 1 | /* |
2 | * sata_mv.c - Marvell SATA support | 2 | * sata_mv.c - Marvell SATA support |
3 | * | 3 | * |
4 | * Copyright 2008: Marvell Corporation, all rights reserved. | 4 | * Copyright 2008-2009: Marvell Corporation, all rights reserved. |
5 | * Copyright 2005: EMC Corporation, all rights reserved. | 5 | * Copyright 2005: EMC Corporation, all rights reserved. |
6 | * Copyright 2005 Red Hat, Inc. All rights reserved. | 6 | * Copyright 2005 Red Hat, Inc. All rights reserved. |
7 | * | 7 | * |
8 | * Originally written by Brett Russ. | ||
9 | * Extensive overhaul and enhancement by Mark Lord <mlord@pobox.com>. | ||
10 | * | ||
8 | * Please ALWAYS copy linux-ide@vger.kernel.org on emails. | 11 | * Please ALWAYS copy linux-ide@vger.kernel.org on emails. |
9 | * | 12 | * |
10 | * This program is free software; you can redistribute it and/or modify | 13 | * This program is free software; you can redistribute it and/or modify |
@@ -25,20 +28,13 @@ | |||
25 | /* | 28 | /* |
26 | * sata_mv TODO list: | 29 | * sata_mv TODO list: |
27 | * | 30 | * |
28 | * --> Errata workaround for NCQ device errors. | ||
29 | * | ||
30 | * --> More errata workarounds for PCI-X. | 31 | * --> More errata workarounds for PCI-X. |
31 | * | 32 | * |
32 | * --> Complete a full errata audit for all chipsets to identify others. | 33 | * --> Complete a full errata audit for all chipsets to identify others. |
33 | * | 34 | * |
34 | * --> ATAPI support (Marvell claims the 60xx/70xx chips can do it). | ||
35 | * | ||
36 | * --> Develop a low-power-consumption strategy, and implement it. | 35 | * --> Develop a low-power-consumption strategy, and implement it. |
37 | * | 36 | * |
38 | * --> [Experiment, low priority] Investigate interrupt coalescing. | 37 | * --> Add sysfs attributes for per-chip / per-HC IRQ coalescing thresholds. |
39 | * Quite often, especially with PCI Message Signalled Interrupts (MSI), | ||
40 | * the overhead reduced by interrupt mitigation is quite often not | ||
41 | * worth the latency cost. | ||
42 | * | 38 | * |
43 | * --> [Experiment, Marvell value added] Is it possible to use target | 39 | * --> [Experiment, Marvell value added] Is it possible to use target |
44 | * mode to cross-connect two Linux boxes with Marvell cards? If so, | 40 | * mode to cross-connect two Linux boxes with Marvell cards? If so, |
@@ -68,7 +64,27 @@ | |||
68 | #include <linux/libata.h> | 64 | #include <linux/libata.h> |
69 | 65 | ||
70 | #define DRV_NAME "sata_mv" | 66 | #define DRV_NAME "sata_mv" |
71 | #define DRV_VERSION "1.25" | 67 | #define DRV_VERSION "1.27" |
68 | |||
69 | /* | ||
70 | * module options | ||
71 | */ | ||
72 | |||
73 | static int msi; | ||
74 | #ifdef CONFIG_PCI | ||
75 | module_param(msi, int, S_IRUGO); | ||
76 | MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)"); | ||
77 | #endif | ||
78 | |||
79 | static int irq_coalescing_io_count; | ||
80 | module_param(irq_coalescing_io_count, int, S_IRUGO); | ||
81 | MODULE_PARM_DESC(irq_coalescing_io_count, | ||
82 | "IRQ coalescing I/O count threshold (0..255)"); | ||
83 | |||
84 | static int irq_coalescing_usecs; | ||
85 | module_param(irq_coalescing_usecs, int, S_IRUGO); | ||
86 | MODULE_PARM_DESC(irq_coalescing_usecs, | ||
87 | "IRQ coalescing time threshold in usecs"); | ||
72 | 88 | ||
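Note: all three parameters above are declared S_IRUGO, i.e. visible in sysfs but not writable at runtime, so they must be given at module load time, e.g. "modprobe sata_mv irq_coalescing_io_count=4 irq_coalescing_usecs=100" (the values 4 and 100 are purely illustrative). They take effect when mv_init_host() hands them to mv_set_irq_coalescing(), later in this patch.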
73 | enum { | 89 | enum { |
74 | /* BAR's are enumerated in terms of pci_resource_start() terms */ | 90 | /* BAR's are enumerated in terms of pci_resource_start() terms */ |
@@ -79,13 +95,32 @@ enum { | |||
79 | MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */ | 95 | MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */ |
80 | MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */ | 96 | MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */ |
81 | 97 | ||
98 | /* For use with both IRQ coalescing methods ("all ports" or "per-HC") */ | ||
99 | COAL_CLOCKS_PER_USEC = 150, /* for calculating COAL_TIMEs */ | ||
100 | MAX_COAL_TIME_THRESHOLD = ((1 << 24) - 1), /* internal clocks count */ | ||
101 | MAX_COAL_IO_COUNT = 255, /* completed I/O count */ | ||
102 | |||
82 | MV_PCI_REG_BASE = 0, | 103 | MV_PCI_REG_BASE = 0, |
83 | MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */ | 104 | |
84 | MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08), | 105 | /* |
85 | MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88), | 106 | * Per-chip ("all ports") interrupt coalescing feature. |
86 | MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c), | 107 | * This is only for GEN_II / GEN_IIE hardware. |
87 | MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc), | 108 | * |
88 | MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0), | 109 | * Coalescing defers the interrupt until either the IO_THRESHOLD |
110 | * (count of completed I/Os) is met, or the TIME_THRESHOLD is met. | ||
111 | */ | ||
112 | MV_COAL_REG_BASE = 0x18000, | ||
113 | MV_IRQ_COAL_CAUSE = (MV_COAL_REG_BASE + 0x08), | ||
114 | ALL_PORTS_COAL_IRQ = (1 << 4), /* all ports irq event */ | ||
115 | |||
116 | MV_IRQ_COAL_IO_THRESHOLD = (MV_COAL_REG_BASE + 0xcc), | ||
117 | MV_IRQ_COAL_TIME_THRESHOLD = (MV_COAL_REG_BASE + 0xd0), | ||
118 | |||
119 | /* | ||
120 | * Registers for the (unused here) transaction coalescing feature: | ||
121 | */ | ||
122 | MV_TRAN_COAL_CAUSE_LO = (MV_COAL_REG_BASE + 0x88), | ||
123 | MV_TRAN_COAL_CAUSE_HI = (MV_COAL_REG_BASE + 0x8c), | ||
89 | 124 | ||
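Note: with COAL_CLOCKS_PER_USEC = 150, the 24-bit time threshold saturates at MAX_COAL_TIME_THRESHOLD / COAL_CLOCKS_PER_USEC = 16777215 / 150 ≈ 111848 usecs, so roughly 112 ms is the longest coalescing window the hardware can express. mv_set_irq_coalescing() below clamps irq_coalescing_usecs to this limit.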
90 | MV_SATAHC0_REG_BASE = 0x20000, | 125 | MV_SATAHC0_REG_BASE = 0x20000, |
91 | MV_FLASH_CTL_OFS = 0x1046c, | 126 | MV_FLASH_CTL_OFS = 0x1046c, |
@@ -117,17 +152,16 @@ enum { | |||
117 | 152 | ||
118 | /* Host Flags */ | 153 | /* Host Flags */ |
119 | MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */ | 154 | MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */ |
120 | MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */ | ||
121 | 155 | ||
122 | MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | | 156 | MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | |
123 | ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI | | 157 | ATA_FLAG_MMIO | ATA_FLAG_PIO_POLLING, |
124 | ATA_FLAG_PIO_POLLING, | 158 | |
159 | MV_GEN_I_FLAGS = MV_COMMON_FLAGS | ATA_FLAG_NO_ATAPI, | ||
125 | 160 | ||
126 | MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE, | 161 | MV_GEN_II_FLAGS = MV_COMMON_FLAGS | ATA_FLAG_NCQ | |
162 | ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA, | ||
127 | 163 | ||
128 | MV_GENIIE_FLAGS = MV_COMMON_FLAGS | MV_6XXX_FLAGS | | 164 | MV_GEN_IIE_FLAGS = MV_GEN_II_FLAGS | ATA_FLAG_AN, |
129 | ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA | | ||
130 | ATA_FLAG_NCQ | ATA_FLAG_AN, | ||
131 | 165 | ||
132 | CRQB_FLAG_READ = (1 << 0), | 166 | CRQB_FLAG_READ = (1 << 0), |
133 | CRQB_TAG_SHIFT = 1, | 167 | CRQB_TAG_SHIFT = 1, |
@@ -180,16 +214,18 @@ enum { | |||
180 | PCI_HC_MAIN_IRQ_MASK_OFS = 0x1d64, | 214 | PCI_HC_MAIN_IRQ_MASK_OFS = 0x1d64, |
181 | SOC_HC_MAIN_IRQ_CAUSE_OFS = 0x20020, | 215 | SOC_HC_MAIN_IRQ_CAUSE_OFS = 0x20020, |
182 | SOC_HC_MAIN_IRQ_MASK_OFS = 0x20024, | 216 | SOC_HC_MAIN_IRQ_MASK_OFS = 0x20024, |
183 | ERR_IRQ = (1 << 0), /* shift by port # */ | 217 | ERR_IRQ = (1 << 0), /* shift by (2 * port #) */ |
184 | DONE_IRQ = (1 << 1), /* shift by port # */ | 218 | DONE_IRQ = (1 << 1), /* shift by (2 * port #) */ |
185 | HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */ | 219 | HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */ |
186 | HC_SHIFT = 9, /* bits 9-17 = HC1's ports */ | 220 | HC_SHIFT = 9, /* bits 9-17 = HC1's ports */ |
221 | DONE_IRQ_0_3 = 0x000000aa, /* DONE_IRQ ports 0,1,2,3 */ | ||
222 | DONE_IRQ_4_7 = (DONE_IRQ_0_3 << HC_SHIFT), /* 4,5,6,7 */ | ||
187 | PCI_ERR = (1 << 18), | 223 | PCI_ERR = (1 << 18), |
188 | TRAN_LO_DONE = (1 << 19), /* 6xxx: IRQ coalescing */ | 224 | TRAN_COAL_LO_DONE = (1 << 19), /* transaction coalescing */ |
189 | TRAN_HI_DONE = (1 << 20), /* 6xxx: IRQ coalescing */ | 225 | TRAN_COAL_HI_DONE = (1 << 20), /* transaction coalescing */ |
190 | PORTS_0_3_COAL_DONE = (1 << 8), | 226 | PORTS_0_3_COAL_DONE = (1 << 8), /* HC0 IRQ coalescing */ |
191 | PORTS_4_7_COAL_DONE = (1 << 17), | 227 | PORTS_4_7_COAL_DONE = (1 << 17), /* HC1 IRQ coalescing */ |
192 | PORTS_0_7_COAL_DONE = (1 << 21), /* 6xxx: IRQ coalescing */ | 228 | ALL_PORTS_COAL_DONE = (1 << 21), /* GEN_II(E) IRQ coalescing */ |
193 | GPIO_INT = (1 << 22), | 229 | GPIO_INT = (1 << 22), |
194 | SELF_INT = (1 << 23), | 230 | SELF_INT = (1 << 23), |
195 | TWSI_INT = (1 << 24), | 231 | TWSI_INT = (1 << 24), |
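Note: a worked example of the DONE_IRQ/ERR_IRQ layout above. The helper below is hypothetical, for illustration only (the driver computes the shift inline):

        /* Each port owns two adjacent cause/mask bits, shifted by 2 * port.
         * HC1's block starts at bit 9 (HC_SHIFT), hence one extra shift
         * for ports 4-7.
         */
        static inline u32 mv_done_irq_bit(unsigned int port)
        {
                unsigned int shift = 2 * port + (port >= MV_PORTS_PER_HC ? 1 : 0);

                return DONE_IRQ << shift;
        }

ORing the result over ports 0-3 yields bits 1,3,5,7 = 0x000000aa (DONE_IRQ_0_3); over ports 4-7 it yields exactly DONE_IRQ_4_7 = 0xaa << 9.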
@@ -205,6 +241,21 @@ enum { | |||
205 | HC_COAL_IRQ = (1 << 4), /* IRQ coalescing */ | 241 | HC_COAL_IRQ = (1 << 4), /* IRQ coalescing */ |
206 | DEV_IRQ = (1 << 8), /* shift by port # */ | 242 | DEV_IRQ = (1 << 8), /* shift by port # */ |
207 | 243 | ||
244 | /* | ||
245 | * Per-HC (Host-Controller) interrupt coalescing feature. | ||
246 | * This is present on all chip generations. | ||
247 | * | ||
248 | * Coalescing defers the interrupt until either the IO_THRESHOLD | ||
249 | * (count of completed I/Os) is met, or the TIME_THRESHOLD is met. | ||
250 | */ | ||
251 | HC_IRQ_COAL_IO_THRESHOLD_OFS = 0x000c, | ||
252 | HC_IRQ_COAL_TIME_THRESHOLD_OFS = 0x0010, | ||
253 | |||
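Note: unlike the chip-wide thresholds at MV_COAL_REG_BASE, these two offsets are relative to each HC's own register base, so a dual-HC chip carries two independent sets. mv_set_irq_coalescing() below simply programs both HCs with identical values.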
254 | SOC_LED_CTRL_OFS = 0x2c, | ||
255 | SOC_LED_CTRL_BLINK = (1 << 0), /* Active LED blink */ | ||
256 | SOC_LED_CTRL_ACT_PRESENCE = (1 << 2), /* Multiplex dev presence */ | ||
257 | /* with dev activity LED */ | ||
258 | |||
208 | /* Shadow block registers */ | 259 | /* Shadow block registers */ |
209 | SHD_BLK_OFS = 0x100, | 260 | SHD_BLK_OFS = 0x100, |
210 | SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */ | 261 | SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */ |
@@ -346,6 +397,12 @@ enum { | |||
346 | EDMA_ARB_CFG_OFS = 0x38, | 397 | EDMA_ARB_CFG_OFS = 0x38, |
347 | 398 | ||
348 | EDMA_HALTCOND_OFS = 0x60, /* GenIIe halt conditions */ | 399 | EDMA_HALTCOND_OFS = 0x60, /* GenIIe halt conditions */ |
400 | EDMA_UNKNOWN_RSVD_OFS = 0x6C, /* GenIIe unknown/reserved */ | ||
401 | |||
402 | BMDMA_CMD_OFS = 0x224, /* bmdma command register */ | ||
403 | BMDMA_STATUS_OFS = 0x228, /* bmdma status register */ | ||
404 | BMDMA_PRD_LOW_OFS = 0x22c, /* bmdma PRD addr 31:0 */ | ||
405 | BMDMA_PRD_HIGH_OFS = 0x230, /* bmdma PRD addr 63:32 */ | ||
349 | 406 | ||
350 | /* Host private flags (hp_flags) */ | 407 | /* Host private flags (hp_flags) */ |
351 | MV_HP_FLAG_MSI = (1 << 0), | 408 | MV_HP_FLAG_MSI = (1 << 0), |
@@ -359,12 +416,14 @@ enum { | |||
359 | MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */ | 416 | MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */ |
360 | MV_HP_CUT_THROUGH = (1 << 10), /* can use EDMA cut-through */ | 417 | MV_HP_CUT_THROUGH = (1 << 10), /* can use EDMA cut-through */ |
361 | MV_HP_FLAG_SOC = (1 << 11), /* SystemOnChip, no PCI */ | 418 | MV_HP_FLAG_SOC = (1 << 11), /* SystemOnChip, no PCI */ |
419 | MV_HP_QUIRK_LED_BLINK_EN = (1 << 12), /* is led blinking enabled? */ | ||
362 | 420 | ||
363 | /* Port private flags (pp_flags) */ | 421 | /* Port private flags (pp_flags) */ |
364 | MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */ | 422 | MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */ |
365 | MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */ | 423 | MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */ |
366 | MV_PP_FLAG_FBS_EN = (1 << 2), /* is EDMA set up for FBS? */ | 424 | MV_PP_FLAG_FBS_EN = (1 << 2), /* is EDMA set up for FBS? */ |
367 | MV_PP_FLAG_DELAYED_EH = (1 << 3), /* delayed dev err handling */ | 425 | MV_PP_FLAG_DELAYED_EH = (1 << 3), /* delayed dev err handling */ |
426 | MV_PP_FLAG_FAKE_ATA_BUSY = (1 << 4), /* ignore initial ATA_DRDY */ | ||
368 | }; | 427 | }; |
369 | 428 | ||
370 | #define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I) | 429 | #define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I) |
@@ -433,6 +492,18 @@ struct mv_sg { | |||
433 | __le32 reserved; | 492 | __le32 reserved; |
434 | }; | 493 | }; |
435 | 494 | ||
495 | /* | ||
496 | * We keep a local cache of a few frequently accessed port | ||
497 | * registers here, to avoid having to read them (very slow) | ||
498 | * when switching between EDMA and non-EDMA modes. | ||
499 | */ | ||
500 | struct mv_cached_regs { | ||
501 | u32 fiscfg; | ||
502 | u32 ltmode; | ||
503 | u32 haltcond; | ||
504 | u32 unknown_rsvd; | ||
505 | }; | ||
506 | |||
436 | struct mv_port_priv { | 507 | struct mv_port_priv { |
437 | struct mv_crqb *crqb; | 508 | struct mv_crqb *crqb; |
438 | dma_addr_t crqb_dma; | 509 | dma_addr_t crqb_dma; |
@@ -445,6 +516,7 @@ struct mv_port_priv { | |||
445 | unsigned int resp_idx; | 516 | unsigned int resp_idx; |
446 | 517 | ||
447 | u32 pp_flags; | 518 | u32 pp_flags; |
519 | struct mv_cached_regs cached; | ||
448 | unsigned int delayed_eh_pmp_map; | 520 | unsigned int delayed_eh_pmp_map; |
449 | }; | 521 | }; |
450 | 522 | ||
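Note: the caching scheme relies on two invariants. Every write to these four registers goes through mv_write_cached_reg() (added below), and the cache is re-primed from the hardware via mv_save_cached_regs() wherever a reset may have clobbered the registers; in this patch both mv_port_start() and mv_hardreset() do so.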
@@ -535,7 +607,7 @@ static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio, | |||
535 | unsigned int port_no); | 607 | unsigned int port_no); |
536 | static int mv_stop_edma(struct ata_port *ap); | 608 | static int mv_stop_edma(struct ata_port *ap); |
537 | static int mv_stop_edma_engine(void __iomem *port_mmio); | 609 | static int mv_stop_edma_engine(void __iomem *port_mmio); |
538 | static void mv_edma_cfg(struct ata_port *ap, int want_ncq); | 610 | static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma); |
539 | 611 | ||
540 | static void mv_pmp_select(struct ata_port *ap, int pmp); | 612 | static void mv_pmp_select(struct ata_port *ap, int pmp); |
541 | static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class, | 613 | static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class, |
@@ -546,6 +618,14 @@ static void mv_pmp_error_handler(struct ata_port *ap); | |||
546 | static void mv_process_crpb_entries(struct ata_port *ap, | 618 | static void mv_process_crpb_entries(struct ata_port *ap, |
547 | struct mv_port_priv *pp); | 619 | struct mv_port_priv *pp); |
548 | 620 | ||
621 | static void mv_sff_irq_clear(struct ata_port *ap); | ||
622 | static int mv_check_atapi_dma(struct ata_queued_cmd *qc); | ||
623 | static void mv_bmdma_setup(struct ata_queued_cmd *qc); | ||
624 | static void mv_bmdma_start(struct ata_queued_cmd *qc); | ||
625 | static void mv_bmdma_stop(struct ata_queued_cmd *qc); | ||
626 | static u8 mv_bmdma_status(struct ata_port *ap); | ||
627 | static u8 mv_sff_check_status(struct ata_port *ap); | ||
628 | |||
549 | /* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below | 629 | /* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below |
550 | * because we have to allow room for worst case splitting of | 630 | * because we have to allow room for worst case splitting of |
551 | * PRDs for 64K boundaries in mv_fill_sg(). | 631 | * PRDs for 64K boundaries in mv_fill_sg(). |
@@ -566,6 +646,8 @@ static struct scsi_host_template mv6_sht = { | |||
566 | static struct ata_port_operations mv5_ops = { | 646 | static struct ata_port_operations mv5_ops = { |
567 | .inherits = &ata_sff_port_ops, | 647 | .inherits = &ata_sff_port_ops, |
568 | 648 | ||
649 | .lost_interrupt = ATA_OP_NULL, | ||
650 | |||
569 | .qc_defer = mv_qc_defer, | 651 | .qc_defer = mv_qc_defer, |
570 | .qc_prep = mv_qc_prep, | 652 | .qc_prep = mv_qc_prep, |
571 | .qc_issue = mv_qc_issue, | 653 | .qc_issue = mv_qc_issue, |
@@ -593,6 +675,14 @@ static struct ata_port_operations mv6_ops = { | |||
593 | .pmp_softreset = mv_softreset, | 675 | .pmp_softreset = mv_softreset, |
594 | .softreset = mv_softreset, | 676 | .softreset = mv_softreset, |
595 | .error_handler = mv_pmp_error_handler, | 677 | .error_handler = mv_pmp_error_handler, |
678 | |||
679 | .sff_check_status = mv_sff_check_status, | ||
680 | .sff_irq_clear = mv_sff_irq_clear, | ||
681 | .check_atapi_dma = mv_check_atapi_dma, | ||
682 | .bmdma_setup = mv_bmdma_setup, | ||
683 | .bmdma_start = mv_bmdma_start, | ||
684 | .bmdma_stop = mv_bmdma_stop, | ||
685 | .bmdma_status = mv_bmdma_status, | ||
596 | }; | 686 | }; |
597 | 687 | ||
598 | static struct ata_port_operations mv_iie_ops = { | 688 | static struct ata_port_operations mv_iie_ops = { |
@@ -603,53 +693,49 @@ static struct ata_port_operations mv_iie_ops = { | |||
603 | 693 | ||
604 | static const struct ata_port_info mv_port_info[] = { | 694 | static const struct ata_port_info mv_port_info[] = { |
605 | { /* chip_504x */ | 695 | { /* chip_504x */ |
606 | .flags = MV_COMMON_FLAGS, | 696 | .flags = MV_GEN_I_FLAGS, |
607 | .pio_mask = 0x1f, /* pio0-4 */ | 697 | .pio_mask = 0x1f, /* pio0-4 */ |
608 | .udma_mask = ATA_UDMA6, | 698 | .udma_mask = ATA_UDMA6, |
609 | .port_ops = &mv5_ops, | 699 | .port_ops = &mv5_ops, |
610 | }, | 700 | }, |
611 | { /* chip_508x */ | 701 | { /* chip_508x */ |
612 | .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC, | 702 | .flags = MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC, |
613 | .pio_mask = 0x1f, /* pio0-4 */ | 703 | .pio_mask = 0x1f, /* pio0-4 */ |
614 | .udma_mask = ATA_UDMA6, | 704 | .udma_mask = ATA_UDMA6, |
615 | .port_ops = &mv5_ops, | 705 | .port_ops = &mv5_ops, |
616 | }, | 706 | }, |
617 | { /* chip_5080 */ | 707 | { /* chip_5080 */ |
618 | .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC, | 708 | .flags = MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC, |
619 | .pio_mask = 0x1f, /* pio0-4 */ | 709 | .pio_mask = 0x1f, /* pio0-4 */ |
620 | .udma_mask = ATA_UDMA6, | 710 | .udma_mask = ATA_UDMA6, |
621 | .port_ops = &mv5_ops, | 711 | .port_ops = &mv5_ops, |
622 | }, | 712 | }, |
623 | { /* chip_604x */ | 713 | { /* chip_604x */ |
624 | .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS | | 714 | .flags = MV_GEN_II_FLAGS, |
625 | ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA | | ||
626 | ATA_FLAG_NCQ, | ||
627 | .pio_mask = 0x1f, /* pio0-4 */ | 715 | .pio_mask = 0x1f, /* pio0-4 */ |
628 | .udma_mask = ATA_UDMA6, | 716 | .udma_mask = ATA_UDMA6, |
629 | .port_ops = &mv6_ops, | 717 | .port_ops = &mv6_ops, |
630 | }, | 718 | }, |
631 | { /* chip_608x */ | 719 | { /* chip_608x */ |
632 | .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS | | 720 | .flags = MV_GEN_II_FLAGS | MV_FLAG_DUAL_HC, |
633 | ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA | | ||
634 | ATA_FLAG_NCQ | MV_FLAG_DUAL_HC, | ||
635 | .pio_mask = 0x1f, /* pio0-4 */ | 721 | .pio_mask = 0x1f, /* pio0-4 */ |
636 | .udma_mask = ATA_UDMA6, | 722 | .udma_mask = ATA_UDMA6, |
637 | .port_ops = &mv6_ops, | 723 | .port_ops = &mv6_ops, |
638 | }, | 724 | }, |
639 | { /* chip_6042 */ | 725 | { /* chip_6042 */ |
640 | .flags = MV_GENIIE_FLAGS, | 726 | .flags = MV_GEN_IIE_FLAGS, |
641 | .pio_mask = 0x1f, /* pio0-4 */ | 727 | .pio_mask = 0x1f, /* pio0-4 */ |
642 | .udma_mask = ATA_UDMA6, | 728 | .udma_mask = ATA_UDMA6, |
643 | .port_ops = &mv_iie_ops, | 729 | .port_ops = &mv_iie_ops, |
644 | }, | 730 | }, |
645 | { /* chip_7042 */ | 731 | { /* chip_7042 */ |
646 | .flags = MV_GENIIE_FLAGS, | 732 | .flags = MV_GEN_IIE_FLAGS, |
647 | .pio_mask = 0x1f, /* pio0-4 */ | 733 | .pio_mask = 0x1f, /* pio0-4 */ |
648 | .udma_mask = ATA_UDMA6, | 734 | .udma_mask = ATA_UDMA6, |
649 | .port_ops = &mv_iie_ops, | 735 | .port_ops = &mv_iie_ops, |
650 | }, | 736 | }, |
651 | { /* chip_soc */ | 737 | { /* chip_soc */ |
652 | .flags = MV_GENIIE_FLAGS, | 738 | .flags = MV_GEN_IIE_FLAGS, |
653 | .pio_mask = 0x1f, /* pio0-4 */ | 739 | .pio_mask = 0x1f, /* pio0-4 */ |
654 | .udma_mask = ATA_UDMA6, | 740 | .udma_mask = ATA_UDMA6, |
655 | .port_ops = &mv_iie_ops, | 741 | .port_ops = &mv_iie_ops, |
@@ -794,6 +880,44 @@ static inline int mv_get_hc_count(unsigned long port_flags) | |||
794 | return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1); | 880 | return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1); |
795 | } | 881 | } |
796 | 882 | ||
883 | /** | ||
884 | * mv_save_cached_regs - (re-)initialize cached port registers | ||
885 | * @ap: the port whose registers we are caching | ||
886 | * | ||
887 | * Initialize the local cache of port registers, | ||
888 | * so that reading them over and over again can | ||
889 | * be avoided on the hotter paths of this driver. | ||
890 | * This saves a few microseconds each time we switch | ||
891 | * to/from EDMA mode to perform (eg.) a drive cache flush. | ||
892 | */ | ||
893 | static void mv_save_cached_regs(struct ata_port *ap) | ||
894 | { | ||
895 | void __iomem *port_mmio = mv_ap_base(ap); | ||
896 | struct mv_port_priv *pp = ap->private_data; | ||
897 | |||
898 | pp->cached.fiscfg = readl(port_mmio + FISCFG_OFS); | ||
899 | pp->cached.ltmode = readl(port_mmio + LTMODE_OFS); | ||
900 | pp->cached.haltcond = readl(port_mmio + EDMA_HALTCOND_OFS); | ||
901 | pp->cached.unknown_rsvd = readl(port_mmio + EDMA_UNKNOWN_RSVD_OFS); | ||
902 | } | ||
903 | |||
904 | /** | ||
905 | * mv_write_cached_reg - write to a cached port register | ||
906 | * @addr: hardware address of the register | ||
907 | * @old: pointer to cached value of the register | ||
908 | * @new: new value for the register | ||
909 | * | ||
910 | * Write a new value to a cached register, | ||
911 | * but only if the value is different from before. | ||
912 | */ | ||
913 | static inline void mv_write_cached_reg(void __iomem *addr, u32 *old, u32 new) | ||
914 | { | ||
915 | if (new != *old) { | ||
916 | *old = new; | ||
917 | writel(new, addr); | ||
918 | } | ||
919 | } | ||
920 | |||
797 | static void mv_set_edma_ptrs(void __iomem *port_mmio, | 921 | static void mv_set_edma_ptrs(void __iomem *port_mmio, |
798 | struct mv_host_priv *hpriv, | 922 | struct mv_host_priv *hpriv, |
799 | struct mv_port_priv *pp) | 923 | struct mv_port_priv *pp) |
@@ -825,6 +949,23 @@ static void mv_set_edma_ptrs(void __iomem *port_mmio, | |||
825 | port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); | 949 | port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); |
826 | } | 950 | } |
827 | 951 | ||
952 | static void mv_write_main_irq_mask(u32 mask, struct mv_host_priv *hpriv) | ||
953 | { | ||
954 | /* | ||
955 | * When writing to the main_irq_mask in hardware, | ||
956 | * we must ensure exclusivity between the interrupt coalescing bits | ||
957 | * and the corresponding individual port DONE_IRQ bits. | ||
958 | * | ||
959 | * Note that this register is really an "IRQ enable" register, | ||
960 | * not an "IRQ mask" register as Marvell's naming might suggest. | ||
961 | */ | ||
962 | if (mask & (ALL_PORTS_COAL_DONE | PORTS_0_3_COAL_DONE)) | ||
963 | mask &= ~DONE_IRQ_0_3; | ||
964 | if (mask & (ALL_PORTS_COAL_DONE | PORTS_4_7_COAL_DONE)) | ||
965 | mask &= ~DONE_IRQ_4_7; | ||
966 | writelfl(mask, hpriv->main_irq_mask_addr); | ||
967 | } | ||
968 | |||
828 | static void mv_set_main_irq_mask(struct ata_host *host, | 969 | static void mv_set_main_irq_mask(struct ata_host *host, |
829 | u32 disable_bits, u32 enable_bits) | 970 | u32 disable_bits, u32 enable_bits) |
830 | { | 971 | { |
@@ -835,7 +976,7 @@ static void mv_set_main_irq_mask(struct ata_host *host, | |||
835 | new_mask = (old_mask & ~disable_bits) | enable_bits; | 976 | new_mask = (old_mask & ~disable_bits) | enable_bits; |
836 | if (new_mask != old_mask) { | 977 | if (new_mask != old_mask) { |
837 | hpriv->main_irq_mask = new_mask; | 978 | hpriv->main_irq_mask = new_mask; |
838 | writelfl(new_mask, hpriv->main_irq_mask_addr); | 979 | mv_write_main_irq_mask(new_mask, hpriv); |
839 | } | 980 | } |
840 | } | 981 | } |
841 | 982 | ||
@@ -852,8 +993,94 @@ static void mv_enable_port_irqs(struct ata_port *ap, | |||
852 | mv_set_main_irq_mask(ap->host, disable_bits, enable_bits); | 993 | mv_set_main_irq_mask(ap->host, disable_bits, enable_bits); |
853 | } | 994 | } |
854 | 995 | ||
996 | static void mv_clear_and_enable_port_irqs(struct ata_port *ap, | ||
997 | void __iomem *port_mmio, | ||
998 | unsigned int port_irqs) | ||
999 | { | ||
1000 | struct mv_host_priv *hpriv = ap->host->private_data; | ||
1001 | int hardport = mv_hardport_from_port(ap->port_no); | ||
1002 | void __iomem *hc_mmio = mv_hc_base_from_port( | ||
1003 | mv_host_base(ap->host), ap->port_no); | ||
1004 | u32 hc_irq_cause; | ||
1005 | |||
1006 | /* clear EDMA event indicators, if any */ | ||
1007 | writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); | ||
1008 | |||
1009 | /* clear pending irq events */ | ||
1010 | hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport); | ||
1011 | writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS); | ||
1012 | |||
1013 | /* clear FIS IRQ Cause */ | ||
1014 | if (IS_GEN_IIE(hpriv)) | ||
1015 | writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS); | ||
1016 | |||
1017 | mv_enable_port_irqs(ap, port_irqs); | ||
1018 | } | ||
1019 | |||
1020 | static void mv_set_irq_coalescing(struct ata_host *host, | ||
1021 | unsigned int count, unsigned int usecs) | ||
1022 | { | ||
1023 | struct mv_host_priv *hpriv = host->private_data; | ||
1024 | void __iomem *mmio = hpriv->base, *hc_mmio; | ||
1025 | u32 coal_enable = 0; | ||
1026 | unsigned long flags; | ||
1027 | unsigned int clks, is_dual_hc = hpriv->n_ports > MV_PORTS_PER_HC; | ||
1028 | const u32 coal_disable = PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE | | ||
1029 | ALL_PORTS_COAL_DONE; | ||
1030 | |||
1031 | /* Disable IRQ coalescing if either threshold is zero */ | ||
1032 | if (!usecs || !count) { | ||
1033 | clks = count = 0; | ||
1034 | } else { | ||
1035 | /* Respect maximum limits of the hardware */ | ||
1036 | clks = usecs * COAL_CLOCKS_PER_USEC; | ||
1037 | if (clks > MAX_COAL_TIME_THRESHOLD) | ||
1038 | clks = MAX_COAL_TIME_THRESHOLD; | ||
1039 | if (count > MAX_COAL_IO_COUNT) | ||
1040 | count = MAX_COAL_IO_COUNT; | ||
1041 | } | ||
1042 | |||
1043 | spin_lock_irqsave(&host->lock, flags); | ||
1044 | mv_set_main_irq_mask(host, coal_disable, 0); | ||
1045 | |||
1046 | if (is_dual_hc && !IS_GEN_I(hpriv)) { | ||
1047 | /* | ||
1048 | * GEN_II/GEN_IIE with dual host controllers: | ||
1049 | * one set of global thresholds for the entire chip. | ||
1050 | */ | ||
1051 | writel(clks, mmio + MV_IRQ_COAL_TIME_THRESHOLD); | ||
1052 | writel(count, mmio + MV_IRQ_COAL_IO_THRESHOLD); | ||
1053 | /* clear leftover coal IRQ bit */ | ||
1054 | writel(~ALL_PORTS_COAL_IRQ, mmio + MV_IRQ_COAL_CAUSE); | ||
1055 | if (count) | ||
1056 | coal_enable = ALL_PORTS_COAL_DONE; | ||
1057 | clks = count = 0; /* force clearing of regular regs below */ | ||
1058 | } | ||
1059 | |||
1060 | /* | ||
1061 | * All chips: independent thresholds for each HC on the chip. | ||
1062 | */ | ||
1063 | hc_mmio = mv_hc_base_from_port(mmio, 0); | ||
1064 | writel(clks, hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD_OFS); | ||
1065 | writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD_OFS); | ||
1066 | writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE_OFS); | ||
1067 | if (count) | ||
1068 | coal_enable |= PORTS_0_3_COAL_DONE; | ||
1069 | if (is_dual_hc) { | ||
1070 | hc_mmio = mv_hc_base_from_port(mmio, MV_PORTS_PER_HC); | ||
1071 | writel(clks, hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD_OFS); | ||
1072 | writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD_OFS); | ||
1073 | writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE_OFS); | ||
1074 | if (count) | ||
1075 | coal_enable |= PORTS_4_7_COAL_DONE; | ||
1076 | } | ||
1077 | |||
1078 | mv_set_main_irq_mask(host, 0, coal_enable); | ||
1079 | spin_unlock_irqrestore(&host->lock, flags); | ||
1080 | } | ||
1081 | |||
855 | /** | 1082 | /** |
856 | * mv_start_dma - Enable eDMA engine | 1083 | * mv_start_edma - Enable eDMA engine |
857 | * @base: port base address | 1084 | * @base: port base address |
858 | * @pp: port private data | 1085 | * @pp: port private data |
859 | * | 1086 | * |
@@ -863,7 +1090,7 @@ static void mv_enable_port_irqs(struct ata_port *ap, | |||
863 | * LOCKING: | 1090 | * LOCKING: |
864 | * Inherited from caller. | 1091 | * Inherited from caller. |
865 | */ | 1092 | */ |
866 | static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio, | 1093 | static void mv_start_edma(struct ata_port *ap, void __iomem *port_mmio, |
867 | struct mv_port_priv *pp, u8 protocol) | 1094 | struct mv_port_priv *pp, u8 protocol) |
868 | { | 1095 | { |
869 | int want_ncq = (protocol == ATA_PROT_NCQ); | 1096 | int want_ncq = (protocol == ATA_PROT_NCQ); |
@@ -875,26 +1102,11 @@ static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio, | |||
875 | } | 1102 | } |
876 | if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) { | 1103 | if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) { |
877 | struct mv_host_priv *hpriv = ap->host->private_data; | 1104 | struct mv_host_priv *hpriv = ap->host->private_data; |
878 | int hardport = mv_hardport_from_port(ap->port_no); | ||
879 | void __iomem *hc_mmio = mv_hc_base_from_port( | ||
880 | mv_host_base(ap->host), ap->port_no); | ||
881 | u32 hc_irq_cause; | ||
882 | |||
883 | /* clear EDMA event indicators, if any */ | ||
884 | writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); | ||
885 | 1105 | ||
886 | /* clear pending irq events */ | 1106 | mv_edma_cfg(ap, want_ncq, 1); |
887 | hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport); | ||
888 | writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS); | ||
889 | |||
890 | mv_edma_cfg(ap, want_ncq); | ||
891 | |||
892 | /* clear FIS IRQ Cause */ | ||
893 | if (IS_GEN_IIE(hpriv)) | ||
894 | writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS); | ||
895 | 1107 | ||
896 | mv_set_edma_ptrs(port_mmio, hpriv, pp); | 1108 | mv_set_edma_ptrs(port_mmio, hpriv, pp); |
897 | mv_enable_port_irqs(ap, DONE_IRQ|ERR_IRQ); | 1109 | mv_clear_and_enable_port_irqs(ap, port_mmio, DONE_IRQ|ERR_IRQ); |
898 | 1110 | ||
899 | writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS); | 1111 | writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS); |
900 | pp->pp_flags |= MV_PP_FLAG_EDMA_EN; | 1112 | pp->pp_flags |= MV_PP_FLAG_EDMA_EN; |
@@ -952,6 +1164,7 @@ static int mv_stop_edma(struct ata_port *ap) | |||
952 | { | 1164 | { |
953 | void __iomem *port_mmio = mv_ap_base(ap); | 1165 | void __iomem *port_mmio = mv_ap_base(ap); |
954 | struct mv_port_priv *pp = ap->private_data; | 1166 | struct mv_port_priv *pp = ap->private_data; |
1167 | int err = 0; | ||
955 | 1168 | ||
956 | if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) | 1169 | if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) |
957 | return 0; | 1170 | return 0; |
@@ -959,9 +1172,10 @@ static int mv_stop_edma(struct ata_port *ap) | |||
959 | mv_wait_for_edma_empty_idle(ap); | 1172 | mv_wait_for_edma_empty_idle(ap); |
960 | if (mv_stop_edma_engine(port_mmio)) { | 1173 | if (mv_stop_edma_engine(port_mmio)) { |
961 | ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n"); | 1174 | ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n"); |
962 | return -EIO; | 1175 | err = -EIO; |
963 | } | 1176 | } |
964 | return 0; | 1177 | mv_edma_cfg(ap, 0, 0); |
1178 | return err; | ||
965 | } | 1179 | } |
966 | 1180 | ||
967 | #ifdef ATA_DEBUG | 1181 | #ifdef ATA_DEBUG |
@@ -1130,35 +1344,33 @@ static int mv_qc_defer(struct ata_queued_cmd *qc) | |||
1130 | return ATA_DEFER_PORT; | 1344 | return ATA_DEFER_PORT; |
1131 | } | 1345 | } |
1132 | 1346 | ||
1133 | static void mv_config_fbs(void __iomem *port_mmio, int want_ncq, int want_fbs) | 1347 | static void mv_config_fbs(struct ata_port *ap, int want_ncq, int want_fbs) |
1134 | { | 1348 | { |
1135 | u32 new_fiscfg, old_fiscfg; | 1349 | struct mv_port_priv *pp = ap->private_data; |
1136 | u32 new_ltmode, old_ltmode; | 1350 | void __iomem *port_mmio; |
1137 | u32 new_haltcond, old_haltcond; | ||
1138 | 1351 | ||
1139 | old_fiscfg = readl(port_mmio + FISCFG_OFS); | 1352 | u32 fiscfg, *old_fiscfg = &pp->cached.fiscfg; |
1140 | old_ltmode = readl(port_mmio + LTMODE_OFS); | 1353 | u32 ltmode, *old_ltmode = &pp->cached.ltmode; |
1141 | old_haltcond = readl(port_mmio + EDMA_HALTCOND_OFS); | 1354 | u32 haltcond, *old_haltcond = &pp->cached.haltcond; |
1142 | 1355 | ||
1143 | new_fiscfg = old_fiscfg & ~(FISCFG_SINGLE_SYNC | FISCFG_WAIT_DEV_ERR); | 1356 | ltmode = *old_ltmode & ~LTMODE_BIT8; |
1144 | new_ltmode = old_ltmode & ~LTMODE_BIT8; | 1357 | haltcond = *old_haltcond | EDMA_ERR_DEV; |
1145 | new_haltcond = old_haltcond | EDMA_ERR_DEV; | ||
1146 | 1358 | ||
1147 | if (want_fbs) { | 1359 | if (want_fbs) { |
1148 | new_fiscfg = old_fiscfg | FISCFG_SINGLE_SYNC; | 1360 | fiscfg = *old_fiscfg | FISCFG_SINGLE_SYNC; |
1149 | new_ltmode = old_ltmode | LTMODE_BIT8; | 1361 | ltmode = *old_ltmode | LTMODE_BIT8; |
1150 | if (want_ncq) | 1362 | if (want_ncq) |
1151 | new_haltcond &= ~EDMA_ERR_DEV; | 1363 | haltcond &= ~EDMA_ERR_DEV; |
1152 | else | 1364 | else |
1153 | new_fiscfg |= FISCFG_WAIT_DEV_ERR; | 1365 | fiscfg |= FISCFG_WAIT_DEV_ERR; |
1366 | } else { | ||
1367 | fiscfg = *old_fiscfg & ~(FISCFG_SINGLE_SYNC | FISCFG_WAIT_DEV_ERR); | ||
1154 | } | 1368 | } |
1155 | 1369 | ||
1156 | if (new_fiscfg != old_fiscfg) | 1370 | port_mmio = mv_ap_base(ap); |
1157 | writelfl(new_fiscfg, port_mmio + FISCFG_OFS); | 1371 | mv_write_cached_reg(port_mmio + FISCFG_OFS, old_fiscfg, fiscfg); |
1158 | if (new_ltmode != old_ltmode) | 1372 | mv_write_cached_reg(port_mmio + LTMODE_OFS, old_ltmode, ltmode); |
1159 | writelfl(new_ltmode, port_mmio + LTMODE_OFS); | 1373 | mv_write_cached_reg(port_mmio + EDMA_HALTCOND_OFS, old_haltcond, haltcond); |
1160 | if (new_haltcond != old_haltcond) | ||
1161 | writelfl(new_haltcond, port_mmio + EDMA_HALTCOND_OFS); | ||
1162 | } | 1374 | } |
1163 | 1375 | ||
1164 | static void mv_60x1_errata_sata25(struct ata_port *ap, int want_ncq) | 1376 | static void mv_60x1_errata_sata25(struct ata_port *ap, int want_ncq) |
@@ -1176,7 +1388,86 @@ static void mv_60x1_errata_sata25(struct ata_port *ap, int want_ncq) | |||
1176 | writel(new, hpriv->base + MV_GPIO_PORT_CTL_OFS); | 1388 | writel(new, hpriv->base + MV_GPIO_PORT_CTL_OFS); |
1177 | } | 1389 | } |
1178 | 1390 | ||
1179 | static void mv_edma_cfg(struct ata_port *ap, int want_ncq) | 1391 | /** |
1392 | * mv_bmdma_enable - set a magic bit on GEN_IIE to allow bmdma | ||
1393 | * @ap: Port being initialized | ||
1394 | * | ||
1395 | * There are two DMA modes on these chips: basic DMA, and EDMA. | ||
1396 | * | ||
1397 | * Bit-0 of the "EDMA RESERVED" register enables/disables use | ||
1398 | * of basic DMA on the GEN_IIE versions of the chips. | ||
1399 | * | ||
1400 | * This bit survives EDMA resets, and must be set for basic DMA | ||
1401 | * to function, and should be cleared when EDMA is active. | ||
1402 | */ | ||
1403 | static void mv_bmdma_enable_iie(struct ata_port *ap, int enable_bmdma) | ||
1404 | { | ||
1405 | struct mv_port_priv *pp = ap->private_data; | ||
1406 | u32 new, *old = &pp->cached.unknown_rsvd; | ||
1407 | |||
1408 | if (enable_bmdma) | ||
1409 | new = *old | 1; | ||
1410 | else | ||
1411 | new = *old & ~1; | ||
1412 | mv_write_cached_reg(mv_ap_base(ap) + EDMA_UNKNOWN_RSVD_OFS, old, new); | ||
1413 | } | ||
1414 | |||
1415 | /* | ||
1416 | * SOC chips have an issue whereby the HDD LEDs don't always blink | ||
1417 | * during I/O when NCQ is enabled. Enabling a special "LED blink" mode | ||
1418 | * of the SOC takes care of it, generating a steady blink rate when | ||
1419 | * any drive on the chip is active. | ||
1420 | * | ||
1421 | * Unfortunately, the blink mode is a global hardware setting for the SOC, | ||
1422 | * so we must use it whenever at least one port on the SOC has NCQ enabled. | ||
1423 | * | ||
1424 | * We turn "LED blink" off when NCQ is not in use anywhere, because the normal | ||
1425 | * LED operation works then, and provides better (more accurate) feedback. | ||
1426 | * | ||
1427 | * Note that this code assumes that an SOC never has more than one HC onboard. | ||
1428 | */ | ||
1429 | static void mv_soc_led_blink_enable(struct ata_port *ap) | ||
1430 | { | ||
1431 | struct ata_host *host = ap->host; | ||
1432 | struct mv_host_priv *hpriv = host->private_data; | ||
1433 | void __iomem *hc_mmio; | ||
1434 | u32 led_ctrl; | ||
1435 | |||
1436 | if (hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN) | ||
1437 | return; | ||
1438 | hpriv->hp_flags |= MV_HP_QUIRK_LED_BLINK_EN; | ||
1439 | hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no); | ||
1440 | led_ctrl = readl(hc_mmio + SOC_LED_CTRL_OFS); | ||
1441 | writel(led_ctrl | SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL_OFS); | ||
1442 | } | ||
1443 | |||
1444 | static void mv_soc_led_blink_disable(struct ata_port *ap) | ||
1445 | { | ||
1446 | struct ata_host *host = ap->host; | ||
1447 | struct mv_host_priv *hpriv = host->private_data; | ||
1448 | void __iomem *hc_mmio; | ||
1449 | u32 led_ctrl; | ||
1450 | unsigned int port; | ||
1451 | |||
1452 | if (!(hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN)) | ||
1453 | return; | ||
1454 | |||
1455 | /* disable led-blink only if no ports are using NCQ */ | ||
1456 | for (port = 0; port < hpriv->n_ports; port++) { | ||
1457 | struct ata_port *this_ap = host->ports[port]; | ||
1458 | struct mv_port_priv *pp = this_ap->private_data; | ||
1459 | |||
1460 | if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) | ||
1461 | return; | ||
1462 | } | ||
1463 | |||
1464 | hpriv->hp_flags &= ~MV_HP_QUIRK_LED_BLINK_EN; | ||
1465 | hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no); | ||
1466 | led_ctrl = readl(hc_mmio + SOC_LED_CTRL_OFS); | ||
1467 | writel(led_ctrl & ~SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL_OFS); | ||
1468 | } | ||
1469 | |||
1470 | static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma) | ||
1180 | { | 1471 | { |
1181 | u32 cfg; | 1472 | u32 cfg; |
1182 | struct mv_port_priv *pp = ap->private_data; | 1473 | struct mv_port_priv *pp = ap->private_data; |
@@ -1185,7 +1476,8 @@ static void mv_edma_cfg(struct ata_port *ap, int want_ncq) | |||
1185 | 1476 | ||
1186 | /* set up non-NCQ EDMA configuration */ | 1477 | /* set up non-NCQ EDMA configuration */ |
1187 | cfg = EDMA_CFG_Q_DEPTH; /* always 0x1f for *all* chips */ | 1478 | cfg = EDMA_CFG_Q_DEPTH; /* always 0x1f for *all* chips */ |
1188 | pp->pp_flags &= ~MV_PP_FLAG_FBS_EN; | 1479 | pp->pp_flags &= |
1480 | ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY); | ||
1189 | 1481 | ||
1190 | if (IS_GEN_I(hpriv)) | 1482 | if (IS_GEN_I(hpriv)) |
1191 | cfg |= (1 << 8); /* enab config burst size mask */ | 1483 | cfg |= (1 << 8); /* enab config burst size mask */ |
@@ -1206,7 +1498,7 @@ static void mv_edma_cfg(struct ata_port *ap, int want_ncq) | |||
1206 | */ | 1498 | */ |
1207 | want_fbs &= want_ncq; | 1499 | want_fbs &= want_ncq; |
1208 | 1500 | ||
1209 | mv_config_fbs(port_mmio, want_ncq, want_fbs); | 1501 | mv_config_fbs(ap, want_ncq, want_fbs); |
1210 | 1502 | ||
1211 | if (want_fbs) { | 1503 | if (want_fbs) { |
1212 | pp->pp_flags |= MV_PP_FLAG_FBS_EN; | 1504 | pp->pp_flags |= MV_PP_FLAG_FBS_EN; |
@@ -1214,18 +1506,27 @@ static void mv_edma_cfg(struct ata_port *ap, int want_ncq) | |||
1214 | } | 1506 | } |
1215 | 1507 | ||
1216 | cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */ | 1508 | cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */ |
1217 | cfg |= (1 << 22); /* enab 4-entry host queue cache */ | 1509 | if (want_edma) { |
1218 | if (!IS_SOC(hpriv)) | 1510 | cfg |= (1 << 22); /* enab 4-entry host queue cache */ |
1219 | cfg |= (1 << 18); /* enab early completion */ | 1511 | if (!IS_SOC(hpriv)) |
1512 | cfg |= (1 << 18); /* enab early completion */ | ||
1513 | } | ||
1220 | if (hpriv->hp_flags & MV_HP_CUT_THROUGH) | 1514 | if (hpriv->hp_flags & MV_HP_CUT_THROUGH) |
1221 | cfg |= (1 << 17); /* enab cut-thru (dis stor&forwrd) */ | 1515 | cfg |= (1 << 17); /* enab cut-thru (dis stor&forwrd) */ |
1516 | mv_bmdma_enable_iie(ap, !want_edma); | ||
1517 | |||
1518 | if (IS_SOC(hpriv)) { | ||
1519 | if (want_ncq) | ||
1520 | mv_soc_led_blink_enable(ap); | ||
1521 | else | ||
1522 | mv_soc_led_blink_disable(ap); | ||
1523 | } | ||
1222 | } | 1524 | } |
1223 | 1525 | ||
1224 | if (want_ncq) { | 1526 | if (want_ncq) { |
1225 | cfg |= EDMA_CFG_NCQ; | 1527 | cfg |= EDMA_CFG_NCQ; |
1226 | pp->pp_flags |= MV_PP_FLAG_NCQ_EN; | 1528 | pp->pp_flags |= MV_PP_FLAG_NCQ_EN; |
1227 | } else | 1529 | } |
1228 | pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN; | ||
1229 | 1530 | ||
1230 | writelfl(cfg, port_mmio + EDMA_CFG_OFS); | 1531 | writelfl(cfg, port_mmio + EDMA_CFG_OFS); |
1231 | } | 1532 | } |
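Note: mv_edma_cfg() is now also called with want_edma=0 (from mv_port_start(), mv_stop_edma() and mv_hardreset()) to leave the port in a known non-EDMA state: the EDMA-only optimizations above (4-entry host queue cache, early completion) stay disabled, and mv_bmdma_enable_iie() sets the GEN_IIE bit that permits basic DMA while EDMA is inactive.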
@@ -1309,6 +1610,8 @@ static int mv_port_start(struct ata_port *ap) | |||
1309 | pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0]; | 1610 | pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0]; |
1310 | } | 1611 | } |
1311 | } | 1612 | } |
1613 | mv_save_cached_regs(ap); | ||
1614 | mv_edma_cfg(ap, 0, 0); | ||
1312 | return 0; | 1615 | return 0; |
1313 | 1616 | ||
1314 | out_port_free_dma_mem: | 1617 | out_port_free_dma_mem: |
@@ -1357,12 +1660,13 @@ static void mv_fill_sg(struct ata_queued_cmd *qc) | |||
1357 | u32 offset = addr & 0xffff; | 1660 | u32 offset = addr & 0xffff; |
1358 | u32 len = sg_len; | 1661 | u32 len = sg_len; |
1359 | 1662 | ||
1360 | if ((offset + sg_len > 0x10000)) | 1663 | if (offset + len > 0x10000) |
1361 | len = 0x10000 - offset; | 1664 | len = 0x10000 - offset; |
1362 | 1665 | ||
1363 | mv_sg->addr = cpu_to_le32(addr & 0xffffffff); | 1666 | mv_sg->addr = cpu_to_le32(addr & 0xffffffff); |
1364 | mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16); | 1667 | mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16); |
1365 | mv_sg->flags_size = cpu_to_le32(len & 0xffff); | 1668 | mv_sg->flags_size = cpu_to_le32(len & 0xffff); |
1669 | mv_sg->reserved = 0; | ||
1366 | 1670 | ||
1367 | sg_len -= len; | 1671 | sg_len -= len; |
1368 | addr += len; | 1672 | addr += len; |
@@ -1374,6 +1678,7 @@ static void mv_fill_sg(struct ata_queued_cmd *qc) | |||
1374 | 1678 | ||
1375 | if (likely(last_sg)) | 1679 | if (likely(last_sg)) |
1376 | last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL); | 1680 | last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL); |
1681 | mb(); /* ensure data structure is visible to the chipset */ | ||
1377 | } | 1682 | } |
1378 | 1683 | ||
1379 | static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last) | 1684 | static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last) |
@@ -1384,6 +1689,147 @@ static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last) | |||
1384 | } | 1689 | } |
1385 | 1690 | ||
1386 | /** | 1691 | /** |
1692 | * mv_sff_irq_clear - Clear hardware interrupt after DMA. | ||
1693 | * @ap: Port associated with this ATA transaction. | ||
1694 | * | ||
1695 | * We need this only for ATAPI bmdma transactions, | ||
1696 | * as otherwise we experience spurious interrupts | ||
1697 | * after libata-sff handles the bmdma interrupts. | ||
1698 | */ | ||
1699 | static void mv_sff_irq_clear(struct ata_port *ap) | ||
1700 | { | ||
1701 | mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), ERR_IRQ); | ||
1702 | } | ||
1703 | |||
1704 | /** | ||
1705 | * mv_check_atapi_dma - Filter ATAPI cmds which are unsuitable for DMA. | ||
1706 | * @qc: queued command to check for chipset/DMA compatibility. | ||
1707 | * | ||
1708 | * The bmdma engines cannot handle speculative data sizes | ||
1709 | * (bytecount under/over flow). So only allow DMA for | ||
1710 | * data transfer commands with known data sizes. | ||
1711 | * | ||
1712 | * LOCKING: | ||
1713 | * Inherited from caller. | ||
1714 | */ | ||
1715 | static int mv_check_atapi_dma(struct ata_queued_cmd *qc) | ||
1716 | { | ||
1717 | struct scsi_cmnd *scmd = qc->scsicmd; | ||
1718 | |||
1719 | if (scmd) { | ||
1720 | switch (scmd->cmnd[0]) { | ||
1721 | case READ_6: | ||
1722 | case READ_10: | ||
1723 | case READ_12: | ||
1724 | case WRITE_6: | ||
1725 | case WRITE_10: | ||
1726 | case WRITE_12: | ||
1727 | case GPCMD_READ_CD: | ||
1728 | case GPCMD_SEND_DVD_STRUCTURE: | ||
1729 | case GPCMD_SEND_CUE_SHEET: | ||
1730 | return 0; /* DMA is safe */ | ||
1731 | } | ||
1732 | } | ||
1733 | return -EOPNOTSUPP; /* use PIO instead */ | ||
1734 | } | ||
1735 | |||
1736 | /** | ||
1737 | * mv_bmdma_setup - Set up BMDMA transaction | ||
1738 | * @qc: queued command to prepare DMA for. | ||
1739 | * | ||
1740 | * LOCKING: | ||
1741 | * Inherited from caller. | ||
1742 | */ | ||
1743 | static void mv_bmdma_setup(struct ata_queued_cmd *qc) | ||
1744 | { | ||
1745 | struct ata_port *ap = qc->ap; | ||
1746 | void __iomem *port_mmio = mv_ap_base(ap); | ||
1747 | struct mv_port_priv *pp = ap->private_data; | ||
1748 | |||
1749 | mv_fill_sg(qc); | ||
1750 | |||
1751 | /* clear all DMA cmd bits */ | ||
1752 | writel(0, port_mmio + BMDMA_CMD_OFS); | ||
1753 | |||
1754 | /* load PRD table addr. */ | ||
1755 | writel((pp->sg_tbl_dma[qc->tag] >> 16) >> 16, | ||
1756 | port_mmio + BMDMA_PRD_HIGH_OFS); | ||
1757 | writelfl(pp->sg_tbl_dma[qc->tag], | ||
1758 | port_mmio + BMDMA_PRD_LOW_OFS); | ||
1759 | |||
1760 | /* issue r/w command */ | ||
1761 | ap->ops->sff_exec_command(ap, &qc->tf); | ||
1762 | } | ||
1763 | |||
1764 | /** | ||
1765 | * mv_bmdma_start - Start a BMDMA transaction | ||
1766 | * @qc: queued command to start DMA on. | ||
1767 | * | ||
1768 | * LOCKING: | ||
1769 | * Inherited from caller. | ||
1770 | */ | ||
1771 | static void mv_bmdma_start(struct ata_queued_cmd *qc) | ||
1772 | { | ||
1773 | struct ata_port *ap = qc->ap; | ||
1774 | void __iomem *port_mmio = mv_ap_base(ap); | ||
1775 | unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE); | ||
1776 | u32 cmd = (rw ? 0 : ATA_DMA_WR) | ATA_DMA_START; | ||
1777 | |||
1778 | /* start host DMA transaction */ | ||
1779 | writelfl(cmd, port_mmio + BMDMA_CMD_OFS); | ||
1780 | } | ||
1781 | |||
1782 | /** | ||
1783 | * mv_bmdma_stop - Stop BMDMA transfer | ||
1784 | * @qc: queued command to stop DMA on. | ||
1785 | * | ||
1786 | * Clears the ATA_DMA_START flag in the bmdma control register | ||
1787 | * | ||
1788 | * LOCKING: | ||
1789 | * Inherited from caller. | ||
1790 | */ | ||
1791 | static void mv_bmdma_stop(struct ata_queued_cmd *qc) | ||
1792 | { | ||
1793 | struct ata_port *ap = qc->ap; | ||
1794 | void __iomem *port_mmio = mv_ap_base(ap); | ||
1795 | u32 cmd; | ||
1796 | |||
1797 | /* clear start/stop bit */ | ||
1798 | cmd = readl(port_mmio + BMDMA_CMD_OFS); | ||
1799 | cmd &= ~ATA_DMA_START; | ||
1800 | writelfl(cmd, port_mmio + BMDMA_CMD_OFS); | ||
1801 | |||
1802 | /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */ | ||
1803 | ata_sff_dma_pause(ap); | ||
1804 | } | ||
1805 | |||
1806 | /** | ||
1807 | * mv_bmdma_status - Read BMDMA status | ||
1808 | * @ap: port for which to retrieve DMA status. | ||
1809 | * | ||
1810 | * Read and return equivalent of the sff BMDMA status register. | ||
1811 | * | ||
1812 | * LOCKING: | ||
1813 | * Inherited from caller. | ||
1814 | */ | ||
1815 | static u8 mv_bmdma_status(struct ata_port *ap) | ||
1816 | { | ||
1817 | void __iomem *port_mmio = mv_ap_base(ap); | ||
1818 | u32 reg, status; | ||
1819 | |||
1820 | /* | ||
1821 | * Other bits are valid only if ATA_DMA_ACTIVE==0, | ||
1822 | * and the ATA_DMA_INTR bit doesn't exist. | ||
1823 | */ | ||
1824 | reg = readl(port_mmio + BMDMA_STATUS_OFS); | ||
1825 | if (reg & ATA_DMA_ACTIVE) | ||
1826 | status = ATA_DMA_ACTIVE; | ||
1827 | else | ||
1828 | status = (reg & ATA_DMA_ERR) | ATA_DMA_INTR; | ||
1829 | return status; | ||
1830 | } | ||
1831 | |||
1832 | /** | ||
1387 | * mv_qc_prep - Host specific command preparation. | 1833 | * mv_qc_prep - Host specific command preparation. |
1388 | * @qc: queued command to prepare | 1834 | * @qc: queued command to prepare |
1389 | * | 1835 | * |
@@ -1545,6 +1991,132 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc) | |||
1545 | } | 1991 | } |
1546 | 1992 | ||
1547 | /** | 1993 | /** |
1994 | * mv_sff_check_status - fetch device status, if valid | ||
1995 | * @ap: ATA port to fetch status from | ||
1996 | * | ||
1997 | * When using command issue via mv_qc_issue_fis(), | ||
1998 | * the initial ATA_BUSY state does not show up in the | ||
1999 | * ATA status (shadow) register. This can confuse libata! | ||
2000 | * | ||
2001 | * So we have a hook here to fake ATA_BUSY for that situation, | ||
2002 | * until the first time a BUSY, DRQ, or ERR bit is seen. | ||
2003 | * | ||
2004 | * The rest of the time, it simply returns the ATA status register. | ||
2005 | */ | ||
2006 | static u8 mv_sff_check_status(struct ata_port *ap) | ||
2007 | { | ||
2008 | u8 stat = ioread8(ap->ioaddr.status_addr); | ||
2009 | struct mv_port_priv *pp = ap->private_data; | ||
2010 | |||
2011 | if (pp->pp_flags & MV_PP_FLAG_FAKE_ATA_BUSY) { | ||
2012 | if (stat & (ATA_BUSY | ATA_DRQ | ATA_ERR)) | ||
2013 | pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY; | ||
2014 | else | ||
2015 | stat = ATA_BUSY; | ||
2016 | } | ||
2017 | return stat; | ||
2018 | } | ||
2019 | |||
2020 | /** | ||
2021 | * mv_send_fis - Send a FIS, using the "Vendor-Unique FIS" register | ||
2022 | * @fis: fis to be sent | ||
2023 | * @nwords: number of 32-bit words in the fis | ||
2024 | */ | ||
2025 | static unsigned int mv_send_fis(struct ata_port *ap, u32 *fis, int nwords) | ||
2026 | { | ||
2027 | void __iomem *port_mmio = mv_ap_base(ap); | ||
2028 | u32 ifctl, old_ifctl, ifstat; | ||
2029 | int i, timeout = 200, final_word = nwords - 1; | ||
2030 | |||
2031 | /* Initiate FIS transmission mode */ | ||
2032 | old_ifctl = readl(port_mmio + SATA_IFCTL_OFS); | ||
2033 | ifctl = 0x100 | (old_ifctl & 0xf); | ||
2034 | writelfl(ifctl, port_mmio + SATA_IFCTL_OFS); | ||
2035 | |||
2036 | /* Send all words of the FIS except for the final word */ | ||
2037 | for (i = 0; i < final_word; ++i) | ||
2038 | writel(fis[i], port_mmio + VENDOR_UNIQUE_FIS_OFS); | ||
2039 | |||
2040 | /* Flag end-of-transmission, and then send the final word */ | ||
2041 | writelfl(ifctl | 0x200, port_mmio + SATA_IFCTL_OFS); | ||
2042 | writelfl(fis[final_word], port_mmio + VENDOR_UNIQUE_FIS_OFS); | ||
2043 | |||
2044 | /* | ||
2045 | * Wait for FIS transmission to complete. | ||
2046 | * This typically takes just a single iteration. | ||
2047 | */ | ||
2048 | do { | ||
2049 | ifstat = readl(port_mmio + SATA_IFSTAT_OFS); | ||
2050 | } while (!(ifstat & 0x1000) && --timeout); | ||
2051 | |||
2052 | /* Restore original port configuration */ | ||
2053 | writelfl(old_ifctl, port_mmio + SATA_IFCTL_OFS); | ||
2054 | |||
2055 | /* See if it worked */ | ||
2056 | if ((ifstat & 0x3000) != 0x1000) { | ||
2057 | ata_port_printk(ap, KERN_WARNING, | ||
2058 | "%s transmission error, ifstat=%08x\n", | ||
2059 | __func__, ifstat); | ||
2060 | return AC_ERR_OTHER; | ||
2061 | } | ||
2062 | return 0; | ||
2063 | } | ||
2064 | |||
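Note: the fis[5] buffer used by mv_qc_issue_fis() below holds a standard 20-byte host-to-device "register" FIS, as built by libata's ata_tf_to_fis(). For orientation only (this is the SATA spec layout, not code from this patch), the first dword packs:

        fis[0] = 0x27 |                 /* FIS type: host-to-device register */
                 (pmp & 0xf) << 8 |     /* port-multiplier port number */
                 (1 << 15) |            /* C bit: command, not device control */
                 tf->command << 16 |
                 tf->feature << 24;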
2065 | /** | ||
2066 | * mv_qc_issue_fis - Issue a command directly as a FIS | ||
2067 | * @qc: queued command to start | ||
2068 | * | ||
2069 | * Note that the ATA shadow registers are not updated | ||
2070 | * after command issue, so the device will appear "READY" | ||
2071 | * if polled, even while it is BUSY processing the command. | ||
2072 | * | ||
2073 | * So we use a status hook to fake ATA_BUSY until the drive changes state. | ||
2074 | * | ||
2075 | * Note: we don't get updated shadow regs on *completion* | ||
2076 | * of non-data commands. So avoid sending them via this function, | ||
2077 | * as they will appear to have completed immediately. | ||
2078 | * | ||
2079 | * GEN_IIE has special registers that we could get the result tf from, | ||
2080 | * but earlier chipsets do not. For now, we ignore those registers. | ||
2081 | */ | ||
2082 | static unsigned int mv_qc_issue_fis(struct ata_queued_cmd *qc) | ||
2083 | { | ||
2084 | struct ata_port *ap = qc->ap; | ||
2085 | struct mv_port_priv *pp = ap->private_data; | ||
2086 | struct ata_link *link = qc->dev->link; | ||
2087 | u32 fis[5]; | ||
2088 | int err = 0; | ||
2089 | |||
2090 | ata_tf_to_fis(&qc->tf, link->pmp, 1, (void *)fis); | ||
2091 | err = mv_send_fis(ap, fis, sizeof(fis) / sizeof(fis[0])); | ||
2092 | if (err) | ||
2093 | return err; | ||
2094 | |||
2095 | switch (qc->tf.protocol) { | ||
2096 | case ATAPI_PROT_PIO: | ||
2097 | pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY; | ||
2098 | /* fall through */ | ||
2099 | case ATAPI_PROT_NODATA: | ||
2100 | ap->hsm_task_state = HSM_ST_FIRST; | ||
2101 | break; | ||
2102 | case ATA_PROT_PIO: | ||
2103 | pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY; | ||
2104 | if (qc->tf.flags & ATA_TFLAG_WRITE) | ||
2105 | ap->hsm_task_state = HSM_ST_FIRST; | ||
2106 | else | ||
2107 | ap->hsm_task_state = HSM_ST; | ||
2108 | break; | ||
2109 | default: | ||
2110 | ap->hsm_task_state = HSM_ST_LAST; | ||
2111 | break; | ||
2112 | } | ||
2113 | |||
2114 | if (qc->tf.flags & ATA_TFLAG_POLLING) | ||
2115 | ata_pio_queue_task(ap, qc, 0); | ||
2116 | return 0; | ||
2117 | } | ||
2118 | |||
2119 | /** | ||
1548 | * mv_qc_issue - Initiate a command to the host | 2120 | * mv_qc_issue - Initiate a command to the host |
1549 | * @qc: queued command to start | 2121 | * @qc: queued command to start |
1550 | * | 2122 | * |
@@ -1558,14 +2130,28 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc) | |||
1558 | */ | 2130 | */ |
1559 | static unsigned int mv_qc_issue(struct ata_queued_cmd *qc) | 2131 | static unsigned int mv_qc_issue(struct ata_queued_cmd *qc) |
1560 | { | 2132 | { |
2133 | static int limit_warnings = 10; | ||
1561 | struct ata_port *ap = qc->ap; | 2134 | struct ata_port *ap = qc->ap; |
1562 | void __iomem *port_mmio = mv_ap_base(ap); | 2135 | void __iomem *port_mmio = mv_ap_base(ap); |
1563 | struct mv_port_priv *pp = ap->private_data; | 2136 | struct mv_port_priv *pp = ap->private_data; |
1564 | u32 in_index; | 2137 | u32 in_index; |
2138 | unsigned int port_irqs; | ||
1565 | 2139 | ||
1566 | if ((qc->tf.protocol != ATA_PROT_DMA) && | 2140 | pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY; /* paranoia */ |
1567 | (qc->tf.protocol != ATA_PROT_NCQ)) { | 2141 | |
1568 | static int limit_warnings = 10; | 2142 | switch (qc->tf.protocol) { |
2143 | case ATA_PROT_DMA: | ||
2144 | case ATA_PROT_NCQ: | ||
2145 | mv_start_edma(ap, port_mmio, pp, qc->tf.protocol); | ||
2146 | pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK; | ||
2147 | in_index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT; | ||
2148 | |||
2149 | /* Write the request in pointer to kick the EDMA to life */ | ||
2150 | writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index, | ||
2151 | port_mmio + EDMA_REQ_Q_IN_PTR_OFS); | ||
2152 | return 0; | ||
2153 | |||
2154 | case ATA_PROT_PIO: | ||
1569 | /* | 2155 | /* |
1570 | * Errata SATA#16, SATA#24: warn if multiple DRQs expected. | 2156 | * Errata SATA#16, SATA#24: warn if multiple DRQs expected. |
1571 | * | 2157 | * |
@@ -1583,27 +2169,46 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc) | |||
1583 | ": attempting PIO w/multiple DRQ: " | 2169 | ": attempting PIO w/multiple DRQ: " |
1584 | "this may fail due to h/w errata\n"); | 2170 | "this may fail due to h/w errata\n"); |
1585 | } | 2171 | } |
1586 | /* | 2172 | /* fall through */ |
1587 | * We're about to send a non-EDMA capable command to the | 2173 | case ATA_PROT_NODATA: |
1588 | * port. Turn off EDMA so there won't be problems accessing | 2174 | case ATAPI_PROT_PIO: |
1589 | * shadow block, etc registers. | 2175 | case ATAPI_PROT_NODATA: |
1590 | */ | 2176 | if (ap->flags & ATA_FLAG_PIO_POLLING) |
1591 | mv_stop_edma(ap); | 2177 | qc->tf.flags |= ATA_TFLAG_POLLING; |
1592 | mv_enable_port_irqs(ap, ERR_IRQ); | 2178 | break; |
1593 | mv_pmp_select(ap, qc->dev->link->pmp); | ||
1594 | return ata_sff_qc_issue(qc); | ||
1595 | } | 2179 | } |
1596 | 2180 | ||
1597 | mv_start_dma(ap, port_mmio, pp, qc->tf.protocol); | 2181 | if (qc->tf.flags & ATA_TFLAG_POLLING) |
1598 | 2182 | port_irqs = ERR_IRQ; /* mask device interrupt when polling */ | |
1599 | pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK; | 2183 | else |
1600 | in_index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT; | 2184 | port_irqs = ERR_IRQ | DONE_IRQ; /* unmask all interrupts */ |
1601 | 2185 | ||
1602 | /* and write the request in pointer to kick the EDMA to life */ | 2186 | /* |
1603 | writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index, | 2187 | * We're about to send a non-EDMA capable command to the |
1604 | port_mmio + EDMA_REQ_Q_IN_PTR_OFS); | 2188 | * port. Turn off EDMA so there won't be problems accessing |
2189 | * shadow block, etc registers. | ||
2190 | */ | ||
2191 | mv_stop_edma(ap); | ||
2192 | mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), port_irqs); | ||
2193 | mv_pmp_select(ap, qc->dev->link->pmp); | ||
1605 | 2194 | ||
1606 | return 0; | 2195 | if (qc->tf.command == ATA_CMD_READ_LOG_EXT) { |
2196 | struct mv_host_priv *hpriv = ap->host->private_data; | ||
2197 | /* | ||
2198 | * Workaround for 88SX60x1 FEr SATA#25 (part 2). | ||
2199 | * | ||
2200 | * After any NCQ error, the READ_LOG_EXT command | ||
2201 | * from libata-eh *must* use mv_qc_issue_fis(). | ||
2202 | * Otherwise it might fail, due to chip errata. | ||
2203 | * | ||
2204 | * Rather than special-case it, we'll just *always* | ||
2205 | * use this method here for READ_LOG_EXT, making for | ||
2206 | * easier testing. | ||
2207 | */ | ||
2208 | if (IS_GEN_II(hpriv)) | ||
2209 | return mv_qc_issue_fis(qc); | ||
2210 | } | ||
2211 | return ata_sff_qc_issue(qc); | ||
1607 | } | 2212 | } |
1608 | 2213 | ||
1609 | static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap) | 2214 | static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap) |
@@ -1614,8 +2219,12 @@ static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap) | |||
1614 | if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) | 2219 | if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) |
1615 | return NULL; | 2220 | return NULL; |
1616 | qc = ata_qc_from_tag(ap, ap->link.active_tag); | 2221 | qc = ata_qc_from_tag(ap, ap->link.active_tag); |
1617 | if (qc && (qc->tf.flags & ATA_TFLAG_POLLING)) | 2222 | if (qc) { |
1618 | qc = NULL; | 2223 | if (qc->tf.flags & ATA_TFLAG_POLLING) |
2224 | qc = NULL; | ||
2225 | else if (!(qc->flags & ATA_QCFLAG_ACTIVE)) | ||
2226 | qc = NULL; | ||
2227 | } | ||
1619 | return qc; | 2228 | return qc; |
1620 | } | 2229 | } |
1621 | 2230 | ||
@@ -2084,6 +2693,10 @@ static int mv_host_intr(struct ata_host *host, u32 main_irq_cause) | |||
2084 | void __iomem *mmio = hpriv->base, *hc_mmio; | 2693 | void __iomem *mmio = hpriv->base, *hc_mmio; |
2085 | unsigned int handled = 0, port; | 2694 | unsigned int handled = 0, port; |
2086 | 2695 | ||
2696 | /* If asserted, clear the "all ports" IRQ coalescing bit */ | ||
2697 | if (main_irq_cause & ALL_PORTS_COAL_DONE) | ||
2698 | writel(~ALL_PORTS_COAL_IRQ, mmio + MV_IRQ_COAL_CAUSE); | ||
2699 | |||
2087 | for (port = 0; port < hpriv->n_ports; port++) { | 2700 | for (port = 0; port < hpriv->n_ports; port++) { |
2088 | struct ata_port *ap = host->ports[port]; | 2701 | struct ata_port *ap = host->ports[port]; |
2089 | unsigned int p, shift, hardport, port_cause; | 2702 | unsigned int p, shift, hardport, port_cause; |
@@ -2116,6 +2729,8 @@ static int mv_host_intr(struct ata_host *host, u32 main_irq_cause) | |||
2116 | * to ack (only) those ports via hc_irq_cause. | 2729 | * to ack (only) those ports via hc_irq_cause. |
2117 | */ | 2730 | */ |
2118 | ack_irqs = 0; | 2731 | ack_irqs = 0; |
2732 | if (hc_cause & PORTS_0_3_COAL_DONE) | ||
2733 | ack_irqs = HC_COAL_IRQ; | ||
2119 | for (p = 0; p < MV_PORTS_PER_HC; ++p) { | 2734 | for (p = 0; p < MV_PORTS_PER_HC; ++p) { |
2120 | if ((port + p) >= hpriv->n_ports) | 2735 | if ((port + p) >= hpriv->n_ports) |
2121 | break; | 2736 | break; |
@@ -2204,7 +2819,7 @@ static irqreturn_t mv_interrupt(int irq, void *dev_instance) | |||
2204 | 2819 | ||
2205 | /* for MSI: block new interrupts while in here */ | 2820 | /* for MSI: block new interrupts while in here */ |
2206 | if (using_msi) | 2821 | if (using_msi) |
2207 | writel(0, hpriv->main_irq_mask_addr); | 2822 | mv_write_main_irq_mask(0, hpriv); |
2208 | 2823 | ||
2209 | main_irq_cause = readl(hpriv->main_irq_cause_addr); | 2824 | main_irq_cause = readl(hpriv->main_irq_cause_addr); |
2210 | pending_irqs = main_irq_cause & hpriv->main_irq_mask; | 2825 | pending_irqs = main_irq_cause & hpriv->main_irq_mask; |
@@ -2221,7 +2836,7 @@ static irqreturn_t mv_interrupt(int irq, void *dev_instance) | |||
2221 | 2836 | ||
2222 | /* for MSI: unmask; interrupt cause bits will retrigger now */ | 2837 | /* for MSI: unmask; interrupt cause bits will retrigger now */ |
2223 | if (using_msi) | 2838 | if (using_msi) |
2224 | writel(hpriv->main_irq_mask, hpriv->main_irq_mask_addr); | 2839 | mv_write_main_irq_mask(hpriv->main_irq_mask, hpriv); |
2225 | 2840 | ||
2226 | spin_unlock(&host->lock); | 2841 | spin_unlock(&host->lock); |
2227 | 2842 | ||
@@ -2774,6 +3389,8 @@ static int mv_hardreset(struct ata_link *link, unsigned int *class, | |||
2774 | 3389 | ||
2775 | mv_reset_channel(hpriv, mmio, ap->port_no); | 3390 | mv_reset_channel(hpriv, mmio, ap->port_no); |
2776 | pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; | 3391 | pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; |
3392 | pp->pp_flags &= | ||
3393 | ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY); | ||
2777 | 3394 | ||
2778 | /* Workaround for errata FEr SATA#10 (part 2) */ | 3395 | /* Workaround for errata FEr SATA#10 (part 2) */ |
2779 | do { | 3396 | do { |
@@ -2793,6 +3410,8 @@ static int mv_hardreset(struct ata_link *link, unsigned int *class, | |||
2793 | extra = HZ; /* only extend it once, max */ | 3410 | extra = HZ; /* only extend it once, max */ |
2794 | } | 3411 | } |
2795 | } while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123); | 3412 | } while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123); |
3413 | mv_save_cached_regs(ap); | ||
3414 | mv_edma_cfg(ap, 0, 0); | ||
2796 | 3415 | ||
2797 | return rc; | 3416 | return rc; |
2798 | } | 3417 | } |
@@ -3126,6 +3745,8 @@ static int mv_init_host(struct ata_host *host, unsigned int board_idx) | |||
3126 | * The per-port interrupts get done later as ports are set up. | 3745 | * The per-port interrupts get done later as ports are set up. |
3127 | */ | 3746 | */ |
3128 | mv_set_main_irq_mask(host, 0, PCI_ERR); | 3747 | mv_set_main_irq_mask(host, 0, PCI_ERR); |
3748 | mv_set_irq_coalescing(host, irq_coalescing_io_count, | ||
3749 | irq_coalescing_usecs); | ||
3129 | done: | 3750 | done: |
3130 | return rc; | 3751 | return rc; |
3131 | } | 3752 | } |
@@ -3287,12 +3908,6 @@ static struct pci_driver mv_pci_driver = { | |||
3287 | .remove = ata_pci_remove_one, | 3908 | .remove = ata_pci_remove_one, |
3288 | }; | 3909 | }; |
3289 | 3910 | ||
3290 | /* | ||
3291 | * module options | ||
3292 | */ | ||
3293 | static int msi; /* Use PCI msi; either zero (off, default) or non-zero */ | ||
3294 | |||
3295 | |||
3296 | /* move to PCI layer or libata core? */ | 3911 | /* move to PCI layer or libata core? */ |
3297 | static int pci_go_64(struct pci_dev *pdev) | 3912 | static int pci_go_64(struct pci_dev *pdev) |
3298 | { | 3913 | { |
@@ -3474,10 +4089,5 @@ MODULE_DEVICE_TABLE(pci, mv_pci_tbl); | |||
3474 | MODULE_VERSION(DRV_VERSION); | 4089 | MODULE_VERSION(DRV_VERSION); |
3475 | MODULE_ALIAS("platform:" DRV_NAME); | 4090 | MODULE_ALIAS("platform:" DRV_NAME); |
3476 | 4091 | ||
3477 | #ifdef CONFIG_PCI | ||
3478 | module_param(msi, int, 0444); | ||
3479 | MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)"); | ||
3480 | #endif | ||
3481 | |||
3482 | module_init(mv_init); | 4092 | module_init(mv_init); |
3483 | module_exit(mv_exit); | 4093 | module_exit(mv_exit); |