Diffstat (limited to 'drivers/ata/sata_mv.c')
-rw-r--r-- | drivers/ata/sata_mv.c | 690 |
1 file changed, 531 insertions, 159 deletions
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 842b1a15b78c..bb73b2222627 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -65,6 +65,7 @@ | |||
65 | #include <linux/platform_device.h> | 65 | #include <linux/platform_device.h> |
66 | #include <linux/ata_platform.h> | 66 | #include <linux/ata_platform.h> |
67 | #include <linux/mbus.h> | 67 | #include <linux/mbus.h> |
68 | #include <linux/bitops.h> | ||
68 | #include <scsi/scsi_host.h> | 69 | #include <scsi/scsi_host.h> |
69 | #include <scsi/scsi_cmnd.h> | 70 | #include <scsi/scsi_cmnd.h> |
70 | #include <scsi/scsi_device.h> | 71 | #include <scsi/scsi_device.h> |
@@ -91,9 +92,9 @@ enum { | |||
91 | MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0), | 92 | MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0), |
92 | 93 | ||
93 | MV_SATAHC0_REG_BASE = 0x20000, | 94 | MV_SATAHC0_REG_BASE = 0x20000, |
94 | MV_FLASH_CTL = 0x1046c, | 95 | MV_FLASH_CTL_OFS = 0x1046c, |
95 | MV_GPIO_PORT_CTL = 0x104f0, | 96 | MV_GPIO_PORT_CTL_OFS = 0x104f0, |
96 | MV_RESET_CFG = 0x180d8, | 97 | MV_RESET_CFG_OFS = 0x180d8, |
97 | 98 | ||
98 | MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ, | 99 | MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ, |
99 | MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ, | 100 | MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ, |
@@ -147,18 +148,21 @@ enum { | |||
147 | /* PCI interface registers */ | 148 | /* PCI interface registers */ |
148 | 149 | ||
149 | PCI_COMMAND_OFS = 0xc00, | 150 | PCI_COMMAND_OFS = 0xc00, |
151 | PCI_COMMAND_MRDTRIG = (1 << 7), /* PCI Master Read Trigger */ | ||
150 | 152 | ||
151 | PCI_MAIN_CMD_STS_OFS = 0xd30, | 153 | PCI_MAIN_CMD_STS_OFS = 0xd30, |
152 | STOP_PCI_MASTER = (1 << 2), | 154 | STOP_PCI_MASTER = (1 << 2), |
153 | PCI_MASTER_EMPTY = (1 << 3), | 155 | PCI_MASTER_EMPTY = (1 << 3), |
154 | GLOB_SFT_RST = (1 << 4), | 156 | GLOB_SFT_RST = (1 << 4), |
155 | 157 | ||
156 | MV_PCI_MODE = 0xd00, | 158 | MV_PCI_MODE_OFS = 0xd00, |
159 | MV_PCI_MODE_MASK = 0x30, | ||
160 | |||
157 | MV_PCI_EXP_ROM_BAR_CTL = 0xd2c, | 161 | MV_PCI_EXP_ROM_BAR_CTL = 0xd2c, |
158 | MV_PCI_DISC_TIMER = 0xd04, | 162 | MV_PCI_DISC_TIMER = 0xd04, |
159 | MV_PCI_MSI_TRIGGER = 0xc38, | 163 | MV_PCI_MSI_TRIGGER = 0xc38, |
160 | MV_PCI_SERR_MASK = 0xc28, | 164 | MV_PCI_SERR_MASK = 0xc28, |
161 | MV_PCI_XBAR_TMOUT = 0x1d04, | 165 | MV_PCI_XBAR_TMOUT_OFS = 0x1d04, |
162 | MV_PCI_ERR_LOW_ADDRESS = 0x1d40, | 166 | MV_PCI_ERR_LOW_ADDRESS = 0x1d40, |
163 | MV_PCI_ERR_HIGH_ADDRESS = 0x1d44, | 167 | MV_PCI_ERR_HIGH_ADDRESS = 0x1d44, |
164 | MV_PCI_ERR_ATTRIBUTE = 0x1d48, | 168 | MV_PCI_ERR_ATTRIBUTE = 0x1d48, |
@@ -225,16 +229,18 @@ enum { | |||
225 | PHY_MODE4 = 0x314, | 229 | PHY_MODE4 = 0x314, |
226 | PHY_MODE2 = 0x330, | 230 | PHY_MODE2 = 0x330, |
227 | SATA_IFCTL_OFS = 0x344, | 231 | SATA_IFCTL_OFS = 0x344, |
232 | SATA_TESTCTL_OFS = 0x348, | ||
228 | SATA_IFSTAT_OFS = 0x34c, | 233 | SATA_IFSTAT_OFS = 0x34c, |
229 | VENDOR_UNIQUE_FIS_OFS = 0x35c, | 234 | VENDOR_UNIQUE_FIS_OFS = 0x35c, |
230 | 235 | ||
231 | FIS_CFG_OFS = 0x360, | 236 | FISCFG_OFS = 0x360, |
232 | FIS_CFG_SINGLE_SYNC = (1 << 16), /* SYNC on DMA activation */ | 237 | FISCFG_WAIT_DEV_ERR = (1 << 8), /* wait for host on DevErr */ |
238 | FISCFG_SINGLE_SYNC = (1 << 16), /* SYNC on DMA activation */ | ||
233 | 239 | ||
234 | MV5_PHY_MODE = 0x74, | 240 | MV5_PHY_MODE = 0x74, |
235 | MV5_LT_MODE = 0x30, | 241 | MV5_LTMODE_OFS = 0x30, |
236 | MV5_PHY_CTL = 0x0C, | 242 | MV5_PHY_CTL_OFS = 0x0C, |
237 | SATA_INTERFACE_CFG = 0x050, | 243 | SATA_INTERFACE_CFG_OFS = 0x050, |
238 | 244 | ||
239 | MV_M2_PREAMP_MASK = 0x7e0, | 245 | MV_M2_PREAMP_MASK = 0x7e0, |
240 | 246 | ||
@@ -332,10 +338,16 @@ enum { | |||
332 | EDMA_CMD_OFS = 0x28, /* EDMA command register */ | 338 | EDMA_CMD_OFS = 0x28, /* EDMA command register */ |
333 | EDMA_EN = (1 << 0), /* enable EDMA */ | 339 | EDMA_EN = (1 << 0), /* enable EDMA */ |
334 | EDMA_DS = (1 << 1), /* disable EDMA; self-negated */ | 340 | EDMA_DS = (1 << 1), /* disable EDMA; self-negated */ |
335 | ATA_RST = (1 << 2), /* reset trans/link/phy */ | 341 | EDMA_RESET = (1 << 2), /* reset eng/trans/link/phy */ |
342 | |||
343 | EDMA_STATUS_OFS = 0x30, /* EDMA engine status */ | ||
344 | EDMA_STATUS_CACHE_EMPTY = (1 << 6), /* GenIIe command cache empty */ | ||
345 | EDMA_STATUS_IDLE = (1 << 7), /* GenIIe EDMA enabled/idle */ | ||
336 | 346 | ||
337 | EDMA_IORDY_TMOUT = 0x34, | 347 | EDMA_IORDY_TMOUT_OFS = 0x34, |
338 | EDMA_ARB_CFG = 0x38, | 348 | EDMA_ARB_CFG_OFS = 0x38, |
349 | |||
350 | EDMA_HALTCOND_OFS = 0x60, /* GenIIe halt conditions */ | ||
339 | 351 | ||
340 | GEN_II_NCQ_MAX_SECTORS = 256, /* max sects/io on Gen2 w/NCQ */ | 352 | GEN_II_NCQ_MAX_SECTORS = 256, /* max sects/io on Gen2 w/NCQ */ |
341 | 353 | ||
@@ -350,15 +362,19 @@ enum { | |||
350 | MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */ | 362 | MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */ |
351 | MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */ | 363 | MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */ |
352 | MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */ | 364 | MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */ |
365 | MV_HP_CUT_THROUGH = (1 << 10), /* can use EDMA cut-through */ | ||
353 | 366 | ||
354 | /* Port private flags (pp_flags) */ | 367 | /* Port private flags (pp_flags) */ |
355 | MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */ | 368 | MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */ |
356 | MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */ | 369 | MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */ |
370 | MV_PP_FLAG_FBS_EN = (1 << 2), /* is EDMA set up for FBS? */ | ||
371 | MV_PP_FLAG_DELAYED_EH = (1 << 3), /* delayed dev err handling */ | ||
357 | }; | 372 | }; |
358 | 373 | ||
359 | #define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I) | 374 | #define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I) |
360 | #define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II) | 375 | #define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II) |
361 | #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE) | 376 | #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE) |
377 | #define IS_PCIE(hpriv) ((hpriv)->hp_flags & MV_HP_PCIE) | ||
362 | #define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC)) | 378 | #define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC)) |
363 | 379 | ||
364 | #define WINDOW_CTRL(i) (0x20030 + ((i) << 4)) | 380 | #define WINDOW_CTRL(i) (0x20030 + ((i) << 4)) |
@@ -433,6 +449,7 @@ struct mv_port_priv { | |||
433 | unsigned int resp_idx; | 449 | unsigned int resp_idx; |
434 | 450 | ||
435 | u32 pp_flags; | 451 | u32 pp_flags; |
452 | unsigned int delayed_eh_pmp_map; | ||
436 | }; | 453 | }; |
437 | 454 | ||
438 | struct mv_port_signal { | 455 | struct mv_port_signal { |
@@ -479,6 +496,7 @@ static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val); | |||
479 | static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val); | 496 | static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val); |
480 | static int mv_port_start(struct ata_port *ap); | 497 | static int mv_port_start(struct ata_port *ap); |
481 | static void mv_port_stop(struct ata_port *ap); | 498 | static void mv_port_stop(struct ata_port *ap); |
499 | static int mv_qc_defer(struct ata_queued_cmd *qc); | ||
482 | static void mv_qc_prep(struct ata_queued_cmd *qc); | 500 | static void mv_qc_prep(struct ata_queued_cmd *qc); |
483 | static void mv_qc_prep_iie(struct ata_queued_cmd *qc); | 501 | static void mv_qc_prep_iie(struct ata_queued_cmd *qc); |
484 | static unsigned int mv_qc_issue(struct ata_queued_cmd *qc); | 502 | static unsigned int mv_qc_issue(struct ata_queued_cmd *qc); |
@@ -527,6 +545,9 @@ static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class, | |||
527 | unsigned long deadline); | 545 | unsigned long deadline); |
528 | static int mv_softreset(struct ata_link *link, unsigned int *class, | 546 | static int mv_softreset(struct ata_link *link, unsigned int *class, |
529 | unsigned long deadline); | 547 | unsigned long deadline); |
548 | static void mv_pmp_error_handler(struct ata_port *ap); | ||
549 | static void mv_process_crpb_entries(struct ata_port *ap, | ||
550 | struct mv_port_priv *pp); | ||
530 | 551 | ||
531 | /* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below | 552 | /* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below |
532 | * because we have to allow room for worst case splitting of | 553 | * because we have to allow room for worst case splitting of |
@@ -548,6 +569,7 @@ static struct scsi_host_template mv6_sht = { | |||
548 | static struct ata_port_operations mv5_ops = { | 569 | static struct ata_port_operations mv5_ops = { |
549 | .inherits = &ata_sff_port_ops, | 570 | .inherits = &ata_sff_port_ops, |
550 | 571 | ||
572 | .qc_defer = mv_qc_defer, | ||
551 | .qc_prep = mv_qc_prep, | 573 | .qc_prep = mv_qc_prep, |
552 | .qc_issue = mv_qc_issue, | 574 | .qc_issue = mv_qc_issue, |
553 | 575 | ||
@@ -566,7 +588,6 @@ static struct ata_port_operations mv5_ops = { | |||
566 | 588 | ||
567 | static struct ata_port_operations mv6_ops = { | 589 | static struct ata_port_operations mv6_ops = { |
568 | .inherits = &mv5_ops, | 590 | .inherits = &mv5_ops, |
569 | .qc_defer = sata_pmp_qc_defer_cmd_switch, | ||
570 | .dev_config = mv6_dev_config, | 591 | .dev_config = mv6_dev_config, |
571 | .scr_read = mv_scr_read, | 592 | .scr_read = mv_scr_read, |
572 | .scr_write = mv_scr_write, | 593 | .scr_write = mv_scr_write, |
@@ -574,12 +595,11 @@ static struct ata_port_operations mv6_ops = { | |||
574 | .pmp_hardreset = mv_pmp_hardreset, | 595 | .pmp_hardreset = mv_pmp_hardreset, |
575 | .pmp_softreset = mv_softreset, | 596 | .pmp_softreset = mv_softreset, |
576 | .softreset = mv_softreset, | 597 | .softreset = mv_softreset, |
577 | .error_handler = sata_pmp_error_handler, | 598 | .error_handler = mv_pmp_error_handler, |
578 | }; | 599 | }; |
579 | 600 | ||
580 | static struct ata_port_operations mv_iie_ops = { | 601 | static struct ata_port_operations mv_iie_ops = { |
581 | .inherits = &mv6_ops, | 602 | .inherits = &mv6_ops, |
582 | .qc_defer = ata_std_qc_defer, /* FIS-based switching */ | ||
583 | .dev_config = ATA_OP_NULL, | 603 | .dev_config = ATA_OP_NULL, |
584 | .qc_prep = mv_qc_prep_iie, | 604 | .qc_prep = mv_qc_prep_iie, |
585 | }; | 605 | }; |
@@ -875,6 +895,29 @@ static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio, | |||
875 | } | 895 | } |
876 | } | 896 | } |
877 | 897 | ||
898 | static void mv_wait_for_edma_empty_idle(struct ata_port *ap) | ||
899 | { | ||
900 | void __iomem *port_mmio = mv_ap_base(ap); | ||
901 | const u32 empty_idle = (EDMA_STATUS_CACHE_EMPTY | EDMA_STATUS_IDLE); | ||
902 | const int per_loop = 5, timeout = (15 * 1000 / per_loop); | ||
903 | int i; | ||
904 | |||
905 | /* | ||
906 | * Wait for the EDMA engine to finish transactions in progress. | ||
907 | * No idea what a good "timeout" value might be, but measurements | ||
908 | * indicate that it often requires hundreds of microseconds | ||
909 | * with two drives in-use. So we use the 15msec value above | ||
910 | * as a rough guess at what even more drives might require. | ||
911 | */ | ||
912 | for (i = 0; i < timeout; ++i) { | ||
913 | u32 edma_stat = readl(port_mmio + EDMA_STATUS_OFS); | ||
914 | if ((edma_stat & empty_idle) == empty_idle) | ||
915 | break; | ||
916 | udelay(per_loop); | ||
917 | } | ||
918 | /* ata_port_printk(ap, KERN_INFO, "%s: %u+ usecs\n", __func__, i); */ | ||
919 | } | ||
920 | |||
878 | /** | 921 | /** |
879 | * mv_stop_edma_engine - Disable eDMA engine | 922 | * mv_stop_edma_engine - Disable eDMA engine |
880 | * @port_mmio: io base address | 923 | * @port_mmio: io base address |
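
The new mv_wait_for_edma_empty_idle() above is a classic bounded-polling loop: re-read a status register until all of the required bits are set, and give up after a fixed time budget rather than spinning forever. Below is a minimal standalone sketch of the same pattern; the register read is simulated here, and the two status bits reuse the EDMA_STATUS_* values from the enum additions above.

#include <stdint.h>
#include <stdio.h>

#define EDMA_STATUS_CACHE_EMPTY (1 << 6)        /* from the enum above */
#define EDMA_STATUS_IDLE        (1 << 7)

/* Simulated hardware register: reports empty+idle after a few polls. */
static uint32_t fake_edma_status(void)
{
        static int polls;
        return (++polls > 3) ? (EDMA_STATUS_CACHE_EMPTY | EDMA_STATUS_IDLE) : 0;
}

/* Poll until both bits are set, or until ~15 ms worth of 5 us steps. */
static int wait_empty_idle(void)
{
        const uint32_t empty_idle = EDMA_STATUS_CACHE_EMPTY | EDMA_STATUS_IDLE;
        const int per_loop = 5, timeout = 15 * 1000 / per_loop;
        int i;

        for (i = 0; i < timeout; i++) {
                if ((fake_edma_status() & empty_idle) == empty_idle)
                        return 0;       /* engine drained */
                /* the driver does udelay(per_loop) here */
        }
        return -1;                      /* gave up after ~15 ms */
}

int main(void)
{
        printf("wait_empty_idle() = %d\n", wait_empty_idle());
        return 0;
}
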
@@ -907,6 +950,7 @@ static int mv_stop_edma(struct ata_port *ap) | |||
907 | if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) | 950 | if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) |
908 | return 0; | 951 | return 0; |
909 | pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; | 952 | pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; |
953 | mv_wait_for_edma_empty_idle(ap); | ||
910 | if (mv_stop_edma_engine(port_mmio)) { | 954 | if (mv_stop_edma_engine(port_mmio)) { |
911 | ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n"); | 955 | ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n"); |
912 | return -EIO; | 956 | return -EIO; |
@@ -1057,26 +1101,95 @@ static void mv6_dev_config(struct ata_device *adev) | |||
1057 | } | 1101 | } |
1058 | } | 1102 | } |
1059 | 1103 | ||
1060 | static void mv_config_fbs(void __iomem *port_mmio, int enable_fbs) | 1104 | static int mv_qc_defer(struct ata_queued_cmd *qc) |
1061 | { | 1105 | { |
1062 | u32 old_fcfg, new_fcfg, old_ltmode, new_ltmode; | 1106 | struct ata_link *link = qc->dev->link; |
1107 | struct ata_port *ap = link->ap; | ||
1108 | struct mv_port_priv *pp = ap->private_data; | ||
1109 | |||
1110 | /* | ||
1111 | * Don't allow new commands if we're in a delayed EH state | ||
1112 | * for NCQ and/or FIS-based switching. | ||
1113 | */ | ||
1114 | if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) | ||
1115 | return ATA_DEFER_PORT; | ||
1063 | /* | 1116 | /* |
1064 | * Various bit settings required for operation | 1117 | * If the port is completely idle, then allow the new qc. |
1065 | * in FIS-based switching (fbs) mode on GenIIe: | ||
1066 | */ | 1118 | */ |
1067 | old_fcfg = readl(port_mmio + FIS_CFG_OFS); | 1119 | if (ap->nr_active_links == 0) |
1068 | old_ltmode = readl(port_mmio + LTMODE_OFS); | 1120 | return 0; |
1069 | if (enable_fbs) { | 1121 | |
1070 | new_fcfg = old_fcfg | FIS_CFG_SINGLE_SYNC; | 1122 | if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) { |
1071 | new_ltmode = old_ltmode | LTMODE_BIT8; | 1123 | /* |
1072 | } else { /* disable fbs */ | 1124 | * The port is operating in host queuing mode (EDMA). |
1073 | new_fcfg = old_fcfg & ~FIS_CFG_SINGLE_SYNC; | 1125 | * It can accommodate a new qc if the qc protocol |
1074 | new_ltmode = old_ltmode & ~LTMODE_BIT8; | 1126 | * is compatible with the current host queue mode. |
1075 | } | 1127 | */ |
1076 | if (new_fcfg != old_fcfg) | 1128 | if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) { |
1077 | writelfl(new_fcfg, port_mmio + FIS_CFG_OFS); | 1129 | /* |
1130 | * The host queue (EDMA) is in NCQ mode. | ||
1131 | * If the new qc is also an NCQ command, | ||
1132 | * then allow the new qc. | ||
1133 | */ | ||
1134 | if (qc->tf.protocol == ATA_PROT_NCQ) | ||
1135 | return 0; | ||
1136 | } else { | ||
1137 | /* | ||
1138 | * The host queue (EDMA) is in non-NCQ, DMA mode. | ||
1139 | * If the new qc is also a non-NCQ, DMA command, | ||
1140 | * then allow the new qc. | ||
1141 | */ | ||
1142 | if (qc->tf.protocol == ATA_PROT_DMA) | ||
1143 | return 0; | ||
1144 | } | ||
1145 | } | ||
1146 | return ATA_DEFER_PORT; | ||
1147 | } | ||
1148 | |||
1149 | static void mv_config_fbs(void __iomem *port_mmio, int want_ncq, int want_fbs) | ||
1150 | { | ||
1151 | u32 new_fiscfg, old_fiscfg; | ||
1152 | u32 new_ltmode, old_ltmode; | ||
1153 | u32 new_haltcond, old_haltcond; | ||
1154 | |||
1155 | old_fiscfg = readl(port_mmio + FISCFG_OFS); | ||
1156 | old_ltmode = readl(port_mmio + LTMODE_OFS); | ||
1157 | old_haltcond = readl(port_mmio + EDMA_HALTCOND_OFS); | ||
1158 | |||
1159 | new_fiscfg = old_fiscfg & ~(FISCFG_SINGLE_SYNC | FISCFG_WAIT_DEV_ERR); | ||
1160 | new_ltmode = old_ltmode & ~LTMODE_BIT8; | ||
1161 | new_haltcond = old_haltcond | EDMA_ERR_DEV; | ||
1162 | |||
1163 | if (want_fbs) { | ||
1164 | new_fiscfg = old_fiscfg | FISCFG_SINGLE_SYNC; | ||
1165 | new_ltmode = old_ltmode | LTMODE_BIT8; | ||
1166 | if (want_ncq) | ||
1167 | new_haltcond &= ~EDMA_ERR_DEV; | ||
1168 | else | ||
1169 | new_fiscfg |= FISCFG_WAIT_DEV_ERR; | ||
1170 | } | ||
1171 | |||
1172 | if (new_fiscfg != old_fiscfg) | ||
1173 | writelfl(new_fiscfg, port_mmio + FISCFG_OFS); | ||
1078 | if (new_ltmode != old_ltmode) | 1174 | if (new_ltmode != old_ltmode) |
1079 | writelfl(new_ltmode, port_mmio + LTMODE_OFS); | 1175 | writelfl(new_ltmode, port_mmio + LTMODE_OFS); |
1176 | if (new_haltcond != old_haltcond) | ||
1177 | writelfl(new_haltcond, port_mmio + EDMA_HALTCOND_OFS); | ||
1178 | } | ||
1179 | |||
1180 | static void mv_60x1_errata_sata25(struct ata_port *ap, int want_ncq) | ||
1181 | { | ||
1182 | struct mv_host_priv *hpriv = ap->host->private_data; | ||
1183 | u32 old, new; | ||
1184 | |||
1185 | /* workaround for 88SX60x1 FEr SATA#25 (part 1) */ | ||
1186 | old = readl(hpriv->base + MV_GPIO_PORT_CTL_OFS); | ||
1187 | if (want_ncq) | ||
1188 | new = old | (1 << 22); | ||
1189 | else | ||
1190 | new = old & ~(1 << 22); | ||
1191 | if (new != old) | ||
1192 | writel(new, hpriv->base + MV_GPIO_PORT_CTL_OFS); | ||
1080 | } | 1193 | } |
1081 | 1194 | ||
1082 | static void mv_edma_cfg(struct ata_port *ap, int want_ncq) | 1195 | static void mv_edma_cfg(struct ata_port *ap, int want_ncq) |
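
The decision logic in the new mv_qc_defer() above reduces to a small compatibility check: an idle port accepts anything; a port with EDMA active accepts only commands whose protocol matches the current queue mode (NCQ commands in NCQ mode, non-NCQ DMA commands otherwise); and a port in delayed-EH state defers everything. Here is a standalone sketch of that check, with illustrative stand-ins for the libata flag and protocol values (not libata's real definitions).

#include <stdio.h>

/* Illustrative stand-ins for the libata/driver definitions. */
enum proto { PROT_NCQ, PROT_DMA, PROT_PIO };
#define PP_FLAG_EDMA_EN    (1 << 0)
#define PP_FLAG_NCQ_EN     (1 << 1)
#define PP_FLAG_DELAYED_EH (1 << 3)
#define DEFER_PORT 1

static int qc_defer(unsigned pp_flags, int nr_active_links, enum proto p)
{
        if (pp_flags & PP_FLAG_DELAYED_EH)
                return DEFER_PORT;      /* draining after a device error */
        if (nr_active_links == 0)
                return 0;               /* idle port: anything goes */
        if (pp_flags & PP_FLAG_EDMA_EN) {
                if (pp_flags & PP_FLAG_NCQ_EN)
                        return (p == PROT_NCQ) ? 0 : DEFER_PORT;
                return (p == PROT_DMA) ? 0 : DEFER_PORT;
        }
        return DEFER_PORT;
}

int main(void)
{
        /* NCQ command on a busy NCQ-mode port: accepted (0). */
        printf("%d\n", qc_defer(PP_FLAG_EDMA_EN | PP_FLAG_NCQ_EN, 2, PROT_NCQ));
        /* PIO command on the same port: deferred (1). */
        printf("%d\n", qc_defer(PP_FLAG_EDMA_EN | PP_FLAG_NCQ_EN, 2, PROT_PIO));
        return 0;
}
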
@@ -1088,25 +1201,40 @@ static void mv_edma_cfg(struct ata_port *ap, int want_ncq) | |||
1088 | 1201 | ||
1089 | /* set up non-NCQ EDMA configuration */ | 1202 | /* set up non-NCQ EDMA configuration */ |
1090 | cfg = EDMA_CFG_Q_DEPTH; /* always 0x1f for *all* chips */ | 1203 | cfg = EDMA_CFG_Q_DEPTH; /* always 0x1f for *all* chips */ |
1204 | pp->pp_flags &= ~MV_PP_FLAG_FBS_EN; | ||
1091 | 1205 | ||
1092 | if (IS_GEN_I(hpriv)) | 1206 | if (IS_GEN_I(hpriv)) |
1093 | cfg |= (1 << 8); /* enab config burst size mask */ | 1207 | cfg |= (1 << 8); /* enab config burst size mask */ |
1094 | 1208 | ||
1095 | else if (IS_GEN_II(hpriv)) | 1209 | else if (IS_GEN_II(hpriv)) { |
1096 | cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN; | 1210 | cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN; |
1211 | mv_60x1_errata_sata25(ap, want_ncq); | ||
1097 | 1212 | ||
1098 | else if (IS_GEN_IIE(hpriv)) { | 1213 | } else if (IS_GEN_IIE(hpriv)) { |
1099 | cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */ | 1214 | int want_fbs = sata_pmp_attached(ap); |
1100 | cfg |= (1 << 22); /* enab 4-entry host queue cache */ | 1215 | /* |
1101 | cfg |= (1 << 18); /* enab early completion */ | 1216 | * Possible future enhancement: |
1102 | cfg |= (1 << 17); /* enab cut-through (dis stor&forwrd) */ | 1217 | * |
1218 | * The chip can use FBS with non-NCQ, if we allow it. | ||
1219 | * But first we need to have the error handling in place | ||
1220 | * for this mode (datasheet section 7.3.15.4.2.3). | ||
1221 | * So disallow non-NCQ FBS for now. | ||
1222 | */ | ||
1223 | want_fbs &= want_ncq; | ||
1224 | |||
1225 | mv_config_fbs(port_mmio, want_ncq, want_fbs); | ||
1103 | 1226 | ||
1104 | if (want_ncq && sata_pmp_attached(ap)) { | 1227 | if (want_fbs) { |
1228 | pp->pp_flags |= MV_PP_FLAG_FBS_EN; | ||
1105 | cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */ | 1229 | cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */ |
1106 | mv_config_fbs(port_mmio, 1); | ||
1107 | } else { | ||
1108 | mv_config_fbs(port_mmio, 0); | ||
1109 | } | 1230 | } |
1231 | |||
1232 | cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */ | ||
1233 | cfg |= (1 << 22); /* enab 4-entry host queue cache */ | ||
1234 | if (HAS_PCI(ap->host)) | ||
1235 | cfg |= (1 << 18); /* enab early completion */ | ||
1236 | if (hpriv->hp_flags & MV_HP_CUT_THROUGH) | ||
1237 | cfg |= (1 << 17); /* enab cut-thru (dis stor&forwrd) */ | ||
1110 | } | 1238 | } |
1111 | 1239 | ||
1112 | if (want_ncq) { | 1240 | if (want_ncq) { |
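
mv_config_fbs() and mv_60x1_errata_sata25() above both follow the same read-modify-write discipline: read the register once, derive the desired value, and issue the (flushed) write only when the value actually changed, so the common path avoids redundant MMIO writes. A compact userspace sketch of that idiom against a simulated register:

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_reg = 0x10;        /* simulated MMIO register */
static int writes;                      /* counts writes, to show the saving */

static uint32_t reg_read(void)    { return fake_reg; }
static void reg_write(uint32_t v) { fake_reg = v; writes++; }

/* Set or clear one bit, touching the register only when needed. */
static void update_bit(uint32_t bit, int enable)
{
        uint32_t old = reg_read();
        uint32_t new = enable ? (old | bit) : (old & ~bit);

        if (new != old)                 /* skip redundant MMIO writes */
                reg_write(new);
}

int main(void)
{
        update_bit(1 << 2, 1);          /* changes the register: one write */
        update_bit(1 << 2, 1);          /* already set: no write issued */
        printf("reg=0x%x writes=%d\n", fake_reg, writes);  /* reg=0x14 writes=1 */
        return 0;
}
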
@@ -1483,25 +1611,186 @@ static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap) | |||
1483 | return qc; | 1611 | return qc; |
1484 | } | 1612 | } |
1485 | 1613 | ||
1486 | static void mv_unexpected_intr(struct ata_port *ap) | 1614 | static void mv_pmp_error_handler(struct ata_port *ap) |
1487 | { | 1615 | { |
1616 | unsigned int pmp, pmp_map; | ||
1488 | struct mv_port_priv *pp = ap->private_data; | 1617 | struct mv_port_priv *pp = ap->private_data; |
1489 | struct ata_eh_info *ehi = &ap->link.eh_info; | ||
1490 | char *when = ""; | ||
1491 | 1618 | ||
1619 | if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) { | ||
1620 | /* | ||
1621 | * Perform NCQ error analysis on failed PMPs | ||
1622 | * before we freeze the port entirely. | ||
1623 | * | ||
1624 | * The failed PMPs are marked earlier by mv_pmp_eh_prep(). | ||
1625 | */ | ||
1626 | pmp_map = pp->delayed_eh_pmp_map; | ||
1627 | pp->pp_flags &= ~MV_PP_FLAG_DELAYED_EH; | ||
1628 | for (pmp = 0; pmp_map != 0; pmp++) { | ||
1629 | unsigned int this_pmp = (1 << pmp); | ||
1630 | if (pmp_map & this_pmp) { | ||
1631 | struct ata_link *link = &ap->pmp_link[pmp]; | ||
1632 | pmp_map &= ~this_pmp; | ||
1633 | ata_eh_analyze_ncq_error(link); | ||
1634 | } | ||
1635 | } | ||
1636 | ata_port_freeze(ap); | ||
1637 | } | ||
1638 | sata_pmp_error_handler(ap); | ||
1639 | } | ||
1640 | |||
1641 | static unsigned int mv_get_err_pmp_map(struct ata_port *ap) | ||
1642 | { | ||
1643 | void __iomem *port_mmio = mv_ap_base(ap); | ||
1644 | |||
1645 | return readl(port_mmio + SATA_TESTCTL_OFS) >> 16; | ||
1646 | } | ||
1647 | |||
1648 | static void mv_pmp_eh_prep(struct ata_port *ap, unsigned int pmp_map) | ||
1649 | { | ||
1650 | struct ata_eh_info *ehi; | ||
1651 | unsigned int pmp; | ||
1652 | |||
1653 | /* | ||
1654 | * Initialize EH info for PMPs which saw device errors | ||
1655 | */ | ||
1656 | ehi = &ap->link.eh_info; | ||
1657 | for (pmp = 0; pmp_map != 0; pmp++) { | ||
1658 | unsigned int this_pmp = (1 << pmp); | ||
1659 | if (pmp_map & this_pmp) { | ||
1660 | struct ata_link *link = &ap->pmp_link[pmp]; | ||
1661 | |||
1662 | pmp_map &= ~this_pmp; | ||
1663 | ehi = &link->eh_info; | ||
1664 | ata_ehi_clear_desc(ehi); | ||
1665 | ata_ehi_push_desc(ehi, "dev err"); | ||
1666 | ehi->err_mask |= AC_ERR_DEV; | ||
1667 | ehi->action |= ATA_EH_RESET; | ||
1668 | ata_link_abort(link); | ||
1669 | } | ||
1670 | } | ||
1671 | } | ||
1672 | |||
1673 | static int mv_handle_fbs_ncq_dev_err(struct ata_port *ap) | ||
1674 | { | ||
1675 | struct mv_port_priv *pp = ap->private_data; | ||
1676 | int failed_links; | ||
1677 | unsigned int old_map, new_map; | ||
1678 | |||
1679 | /* | ||
1680 | * Device error during FBS+NCQ operation: | ||
1681 | * | ||
1682 | * Set a port flag to prevent further I/O being enqueued. | ||
1683 | * Leave the EDMA running to drain outstanding commands from this port. | ||
1684 | * Perform the post-mortem/EH only when all responses are complete. | ||
1685 | * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.2). | ||
1686 | */ | ||
1687 | if (!(pp->pp_flags & MV_PP_FLAG_DELAYED_EH)) { | ||
1688 | pp->pp_flags |= MV_PP_FLAG_DELAYED_EH; | ||
1689 | pp->delayed_eh_pmp_map = 0; | ||
1690 | } | ||
1691 | old_map = pp->delayed_eh_pmp_map; | ||
1692 | new_map = old_map | mv_get_err_pmp_map(ap); | ||
1693 | |||
1694 | if (old_map != new_map) { | ||
1695 | pp->delayed_eh_pmp_map = new_map; | ||
1696 | mv_pmp_eh_prep(ap, new_map & ~old_map); | ||
1697 | } | ||
1698 | failed_links = hweight16(new_map); | ||
1699 | |||
1700 | ata_port_printk(ap, KERN_INFO, "%s: pmp_map=%04x qc_map=%04x " | ||
1701 | "failed_links=%d nr_active_links=%d\n", | ||
1702 | __func__, pp->delayed_eh_pmp_map, | ||
1703 | ap->qc_active, failed_links, | ||
1704 | ap->nr_active_links); | ||
1705 | |||
1706 | if (ap->nr_active_links <= failed_links) { | ||
1707 | mv_process_crpb_entries(ap, pp); | ||
1708 | mv_stop_edma(ap); | ||
1709 | mv_eh_freeze(ap); | ||
1710 | ata_port_printk(ap, KERN_INFO, "%s: done\n", __func__); | ||
1711 | return 1; /* handled */ | ||
1712 | } | ||
1713 | ata_port_printk(ap, KERN_INFO, "%s: waiting\n", __func__); | ||
1714 | return 1; /* handled */ | ||
1715 | } | ||
1716 | |||
1717 | static int mv_handle_fbs_non_ncq_dev_err(struct ata_port *ap) | ||
1718 | { | ||
1492 | /* | 1719 | /* |
1493 | * We got a device interrupt from something that | 1720 | * Possible future enhancement: |
1494 | * was supposed to be using EDMA or polling. | 1721 | * |
1722 | * FBS+non-NCQ operation is not yet implemented. | ||
1723 | * See related notes in mv_edma_cfg(). | ||
1724 | * | ||
1725 | * Device error during FBS+non-NCQ operation: | ||
1726 | * | ||
1727 | * We need to snapshot the shadow registers for each failed command. | ||
1728 | * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.3). | ||
1495 | */ | 1729 | */ |
1730 | return 0; /* not handled */ | ||
1731 | } | ||
1732 | |||
1733 | static int mv_handle_dev_err(struct ata_port *ap, u32 edma_err_cause) | ||
1734 | { | ||
1735 | struct mv_port_priv *pp = ap->private_data; | ||
1736 | |||
1737 | if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) | ||
1738 | return 0; /* EDMA was not active: not handled */ | ||
1739 | if (!(pp->pp_flags & MV_PP_FLAG_FBS_EN)) | ||
1740 | return 0; /* FBS was not active: not handled */ | ||
1741 | |||
1742 | if (!(edma_err_cause & EDMA_ERR_DEV)) | ||
1743 | return 0; /* non DEV error: not handled */ | ||
1744 | edma_err_cause &= ~EDMA_ERR_IRQ_TRANSIENT; | ||
1745 | if (edma_err_cause & ~(EDMA_ERR_DEV | EDMA_ERR_SELF_DIS)) | ||
1746 | return 0; /* other problems: not handled */ | ||
1747 | |||
1748 | if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) { | ||
1749 | /* | ||
1750 | * EDMA should NOT have self-disabled for this case. | ||
1751 | * If it did, then something is wrong elsewhere, | ||
1752 | * and we cannot handle it here. | ||
1753 | */ | ||
1754 | if (edma_err_cause & EDMA_ERR_SELF_DIS) { | ||
1755 | ata_port_printk(ap, KERN_WARNING, | ||
1756 | "%s: err_cause=0x%x pp_flags=0x%x\n", | ||
1757 | __func__, edma_err_cause, pp->pp_flags); | ||
1758 | return 0; /* not handled */ | ||
1759 | } | ||
1760 | return mv_handle_fbs_ncq_dev_err(ap); | ||
1761 | } else { | ||
1762 | /* | ||
1763 | * EDMA should have self-disabled for this case. | ||
1764 | * If it did not, then something is wrong elsewhere, | ||
1765 | * and we cannot handle it here. | ||
1766 | */ | ||
1767 | if (!(edma_err_cause & EDMA_ERR_SELF_DIS)) { | ||
1768 | ata_port_printk(ap, KERN_WARNING, | ||
1769 | "%s: err_cause=0x%x pp_flags=0x%x\n", | ||
1770 | __func__, edma_err_cause, pp->pp_flags); | ||
1771 | return 0; /* not handled */ | ||
1772 | } | ||
1773 | return mv_handle_fbs_non_ncq_dev_err(ap); | ||
1774 | } | ||
1775 | return 0; /* not handled */ | ||
1776 | } | ||
1777 | |||
1778 | static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled) | ||
1779 | { | ||
1780 | struct ata_eh_info *ehi = &ap->link.eh_info; | ||
1781 | char *when = "idle"; | ||
1782 | |||
1496 | ata_ehi_clear_desc(ehi); | 1783 | ata_ehi_clear_desc(ehi); |
1497 | if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) { | 1784 | if (!ap || (ap->flags & ATA_FLAG_DISABLED)) { |
1498 | when = " while EDMA enabled"; | 1785 | when = "disabled"; |
1786 | } else if (edma_was_enabled) { | ||
1787 | when = "EDMA enabled"; | ||
1499 | } else { | 1788 | } else { |
1500 | struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag); | 1789 | struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag); |
1501 | if (qc && (qc->tf.flags & ATA_TFLAG_POLLING)) | 1790 | if (qc && (qc->tf.flags & ATA_TFLAG_POLLING)) |
1502 | when = " while polling"; | 1791 | when = "polling"; |
1503 | } | 1792 | } |
1504 | ata_ehi_push_desc(ehi, "unexpected device interrupt%s", when); | 1793 | ata_ehi_push_desc(ehi, "unexpected device interrupt while %s", when); |
1505 | ehi->err_mask |= AC_ERR_OTHER; | 1794 | ehi->err_mask |= AC_ERR_OTHER; |
1506 | ehi->action |= ATA_EH_RESET; | 1795 | ehi->action |= ATA_EH_RESET; |
1507 | ata_port_freeze(ap); | 1796 | ata_port_freeze(ap); |
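
mv_pmp_error_handler() and mv_pmp_eh_prep() above share one loop shape for walking a PMP bitmap: test the current bit, clear it from the working copy, and stop as soon as the map is empty; hweight16() (the reason for the new linux/bitops.h include) counts how many links failed. A self-contained sketch of that bit walk, with a portable popcount standing in for hweight16():

#include <stdint.h>
#include <stdio.h>

/* Portable stand-in for the kernel's hweight16(): count set bits. */
static int popcount16(uint16_t x)
{
        int n = 0;

        for (; x; x >>= 1)
                n += x & 1;
        return n;
}

int main(void)
{
        uint16_t pmp_map = 0x0005;      /* PMPs 0 and 2 saw device errors */
        unsigned int pmp;

        printf("failed_links=%d\n", popcount16(pmp_map));
        for (pmp = 0; pmp_map != 0; pmp++) {
                uint16_t this_pmp = 1 << pmp;

                if (pmp_map & this_pmp) {
                        pmp_map &= ~this_pmp;
                        /* per-PMP EH prep would happen here */
                        printf("prep EH for pmp %u\n", pmp);
                }
        }
        return 0;
}
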
@@ -1519,7 +1808,7 @@ static void mv_unexpected_intr(struct ata_port *ap) | |||
1519 | * LOCKING: | 1808 | * LOCKING: |
1520 | * Inherited from caller. | 1809 | * Inherited from caller. |
1521 | */ | 1810 | */ |
1522 | static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc) | 1811 | static void mv_err_intr(struct ata_port *ap) |
1523 | { | 1812 | { |
1524 | void __iomem *port_mmio = mv_ap_base(ap); | 1813 | void __iomem *port_mmio = mv_ap_base(ap); |
1525 | u32 edma_err_cause, eh_freeze_mask, serr = 0; | 1814 | u32 edma_err_cause, eh_freeze_mask, serr = 0; |
@@ -1527,24 +1816,42 @@ static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc) | |||
1527 | struct mv_host_priv *hpriv = ap->host->private_data; | 1816 | struct mv_host_priv *hpriv = ap->host->private_data; |
1528 | unsigned int action = 0, err_mask = 0; | 1817 | unsigned int action = 0, err_mask = 0; |
1529 | struct ata_eh_info *ehi = &ap->link.eh_info; | 1818 | struct ata_eh_info *ehi = &ap->link.eh_info; |
1530 | 1819 | struct ata_queued_cmd *qc; | |
1531 | ata_ehi_clear_desc(ehi); | 1820 | int abort = 0; |
1532 | 1821 | ||
1533 | /* | 1822 | /* |
1534 | * Read and clear the err_cause bits. This won't actually | 1823 | * Read and clear the SError and err_cause bits. |
1535 | * clear for some errors (eg. SError), but we will be doing | ||
1536 | * a hard reset in those cases regardless, which *will* clear it. | ||
1537 | */ | 1824 | */ |
1825 | sata_scr_read(&ap->link, SCR_ERROR, &serr); | ||
1826 | sata_scr_write_flush(&ap->link, SCR_ERROR, serr); | ||
1827 | |||
1538 | edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); | 1828 | edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); |
1539 | writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); | 1829 | writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); |
1540 | 1830 | ||
1541 | ata_ehi_push_desc(ehi, "edma_err_cause=%08x", edma_err_cause); | 1831 | ata_port_printk(ap, KERN_INFO, "%s: err_cause=%08x pp_flags=0x%x\n", |
1832 | __func__, edma_err_cause, pp->pp_flags); | ||
1833 | |||
1834 | if (edma_err_cause & EDMA_ERR_DEV) { | ||
1835 | /* | ||
1836 | * Device errors during FIS-based switching operation | ||
1837 | * require special handling. | ||
1838 | */ | ||
1839 | if (mv_handle_dev_err(ap, edma_err_cause)) | ||
1840 | return; | ||
1841 | } | ||
1542 | 1842 | ||
1843 | qc = mv_get_active_qc(ap); | ||
1844 | ata_ehi_clear_desc(ehi); | ||
1845 | ata_ehi_push_desc(ehi, "edma_err_cause=%08x pp_flags=%08x", | ||
1846 | edma_err_cause, pp->pp_flags); | ||
1543 | /* | 1847 | /* |
1544 | * All generations share these EDMA error cause bits: | 1848 | * All generations share these EDMA error cause bits: |
1545 | */ | 1849 | */ |
1546 | if (edma_err_cause & EDMA_ERR_DEV) | 1850 | if (edma_err_cause & EDMA_ERR_DEV) { |
1547 | err_mask |= AC_ERR_DEV; | 1851 | err_mask |= AC_ERR_DEV; |
1852 | action |= ATA_EH_RESET; | ||
1853 | ata_ehi_push_desc(ehi, "dev error"); | ||
1854 | } | ||
1548 | if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR | | 1855 | if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR | |
1549 | EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR | | 1856 | EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR | |
1550 | EDMA_ERR_INTRL_PAR)) { | 1857 | EDMA_ERR_INTRL_PAR)) { |
@@ -1576,13 +1883,6 @@ static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc) | |||
1576 | ata_ehi_push_desc(ehi, "EDMA self-disable"); | 1883 | ata_ehi_push_desc(ehi, "EDMA self-disable"); |
1577 | } | 1884 | } |
1578 | if (edma_err_cause & EDMA_ERR_SERR) { | 1885 | if (edma_err_cause & EDMA_ERR_SERR) { |
1579 | /* | ||
1580 | * Ensure that we read our own SCR, not a pmp link SCR: | ||
1581 | */ | ||
1582 | ap->ops->scr_read(ap, SCR_ERROR, &serr); | ||
1583 | /* | ||
1584 | * Don't clear SError here; leave it for libata-eh: | ||
1585 | */ | ||
1586 | ata_ehi_push_desc(ehi, "SError=%08x", serr); | 1886 | ata_ehi_push_desc(ehi, "SError=%08x", serr); |
1587 | err_mask |= AC_ERR_ATA_BUS; | 1887 | err_mask |= AC_ERR_ATA_BUS; |
1588 | action |= ATA_EH_RESET; | 1888 | action |= ATA_EH_RESET; |
@@ -1602,10 +1902,29 @@ static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc) | |||
1602 | else | 1902 | else |
1603 | ehi->err_mask |= err_mask; | 1903 | ehi->err_mask |= err_mask; |
1604 | 1904 | ||
1605 | if (edma_err_cause & eh_freeze_mask) | 1905 | if (err_mask == AC_ERR_DEV) { |
1906 | /* | ||
1907 | * Cannot do ata_port_freeze() here, | ||
1908 | * because it would kill PIO access, | ||
1909 | * which is needed for further diagnosis. | ||
1910 | */ | ||
1911 | mv_eh_freeze(ap); | ||
1912 | abort = 1; | ||
1913 | } else if (edma_err_cause & eh_freeze_mask) { | ||
1914 | /* | ||
1915 | * Note to self: ata_port_freeze() calls ata_port_abort() | ||
1916 | */ | ||
1606 | ata_port_freeze(ap); | 1917 | ata_port_freeze(ap); |
1607 | else | 1918 | } else { |
1608 | ata_port_abort(ap); | 1919 | abort = 1; |
1920 | } | ||
1921 | |||
1922 | if (abort) { | ||
1923 | if (qc) | ||
1924 | ata_link_abort(qc->dev->link); | ||
1925 | else | ||
1926 | ata_port_abort(ap); | ||
1927 | } | ||
1609 | } | 1928 | } |
1610 | 1929 | ||
1611 | static void mv_process_crpb_response(struct ata_port *ap, | 1930 | static void mv_process_crpb_response(struct ata_port *ap, |
@@ -1632,8 +1951,9 @@ static void mv_process_crpb_response(struct ata_port *ap, | |||
1632 | } | 1951 | } |
1633 | } | 1952 | } |
1634 | ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT; | 1953 | ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT; |
1635 | qc->err_mask |= ac_err_mask(ata_status); | 1954 | if (!ac_err_mask(ata_status)) |
1636 | ata_qc_complete(qc); | 1955 | ata_qc_complete(qc); |
1956 | /* else: leave it for mv_err_intr() */ | ||
1637 | } else { | 1957 | } else { |
1638 | ata_port_printk(ap, KERN_ERR, "%s: no qc for tag=%d\n", | 1958 | ata_port_printk(ap, KERN_ERR, "%s: no qc for tag=%d\n", |
1639 | __func__, tag); | 1959 | __func__, tag); |
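
The small change to mv_process_crpb_response() above splits completion into two paths: responses with a clean ATA status complete immediately, while those carrying error bits are left pending for mv_err_intr() to analyze. A toy sketch of that split follows; the status test is a simplified stand-in for libata's ac_err_mask(), using the standard ATA status bit values.

#include <stdint.h>
#include <stdio.h>

#define ATA_ERR  0x01   /* error bit in the ATA status register */
#define ATA_BUSY 0x80

/* Simplified stand-in for libata's ac_err_mask(): nonzero on error. */
static unsigned int err_mask_of(uint8_t ata_status)
{
        return (ata_status & (ATA_BUSY | ATA_ERR)) ? 1 : 0;
}

static void process_response(uint8_t ata_status, int tag)
{
        if (!err_mask_of(ata_status))
                printf("tag %d: complete now\n", tag);
        else
                printf("tag %d: defer to error handler\n", tag);
}

int main(void)
{
        process_response(0x50, 3);      /* DRDY|DSC: clean, completes */
        process_response(0x51, 4);      /* DRDY|ERR: left for mv_err_intr() */
        return 0;
}
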
@@ -1677,6 +1997,44 @@ static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp | |||
1677 | port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); | 1997 | port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); |
1678 | } | 1998 | } |
1679 | 1999 | ||
2000 | static void mv_port_intr(struct ata_port *ap, u32 port_cause) | ||
2001 | { | ||
2002 | struct mv_port_priv *pp; | ||
2003 | int edma_was_enabled; | ||
2004 | |||
2005 | if (!ap || (ap->flags & ATA_FLAG_DISABLED)) { | ||
2006 | mv_unexpected_intr(ap, 0); | ||
2007 | return; | ||
2008 | } | ||
2009 | /* | ||
2010 | * Grab a snapshot of the EDMA_EN flag setting, | ||
2011 | * so that we have a consistent view for this port, | ||
2012 | * even if one of the routines we call changes it. | ||
2013 | */ | ||
2014 | pp = ap->private_data; | ||
2015 | edma_was_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN); | ||
2016 | /* | ||
2017 | * Process completed CRPB response(s) before other events. | ||
2018 | */ | ||
2019 | if (edma_was_enabled && (port_cause & DONE_IRQ)) { | ||
2020 | mv_process_crpb_entries(ap, pp); | ||
2021 | if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) | ||
2022 | mv_handle_fbs_ncq_dev_err(ap); | ||
2023 | } | ||
2024 | /* | ||
2025 | * Handle chip-reported errors, or continue on to handle PIO. | ||
2026 | */ | ||
2027 | if (unlikely(port_cause & ERR_IRQ)) { | ||
2028 | mv_err_intr(ap); | ||
2029 | } else if (!edma_was_enabled) { | ||
2030 | struct ata_queued_cmd *qc = mv_get_active_qc(ap); | ||
2031 | if (qc) | ||
2032 | ata_sff_host_intr(ap, qc); | ||
2033 | else | ||
2034 | mv_unexpected_intr(ap, edma_was_enabled); | ||
2035 | } | ||
2036 | } | ||
2037 | |||
1680 | /** | 2038 | /** |
1681 | * mv_host_intr - Handle all interrupts on the given host controller | 2039 | * mv_host_intr - Handle all interrupts on the given host controller |
1682 | * @host: host specific structure | 2040 | * @host: host specific structure |
@@ -1688,66 +2046,58 @@ static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp | |||
1688 | static int mv_host_intr(struct ata_host *host, u32 main_irq_cause) | 2046 | static int mv_host_intr(struct ata_host *host, u32 main_irq_cause) |
1689 | { | 2047 | { |
1690 | struct mv_host_priv *hpriv = host->private_data; | 2048 | struct mv_host_priv *hpriv = host->private_data; |
1691 | void __iomem *mmio = hpriv->base, *hc_mmio = NULL; | 2049 | void __iomem *mmio = hpriv->base, *hc_mmio; |
1692 | u32 hc_irq_cause = 0; | ||
1693 | unsigned int handled = 0, port; | 2050 | unsigned int handled = 0, port; |
1694 | 2051 | ||
1695 | for (port = 0; port < hpriv->n_ports; port++) { | 2052 | for (port = 0; port < hpriv->n_ports; port++) { |
1696 | struct ata_port *ap = host->ports[port]; | 2053 | struct ata_port *ap = host->ports[port]; |
1697 | struct mv_port_priv *pp; | 2054 | unsigned int p, shift, hardport, port_cause; |
1698 | unsigned int shift, hardport, port_cause; | 2055 | |
1699 | /* | ||
1700 | * When we move to the second hc, flag our cached | ||
1701 | * copies of hc_mmio (and hc_irq_cause) as invalid again. | ||
1702 | */ | ||
1703 | if (port == MV_PORTS_PER_HC) | ||
1704 | hc_mmio = NULL; | ||
1705 | /* | ||
1706 | * Do nothing if port is not interrupting or is disabled: | ||
1707 | */ | ||
1708 | MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport); | 2056 | MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport); |
1709 | port_cause = (main_irq_cause >> shift) & (DONE_IRQ | ERR_IRQ); | ||
1710 | if (!port_cause || !ap || (ap->flags & ATA_FLAG_DISABLED)) | ||
1711 | continue; | ||
1712 | /* | 2057 | /* |
1713 | * Each hc within the host has its own hc_irq_cause register. | 2058 | * Each hc within the host has its own hc_irq_cause register, |
1714 | * We defer reading it until we know we need it, right now: | 2059 | * where the interrupting ports' bits get ack'd. |
1715 | * | ||
1716 | * FIXME later: we don't really need to read this register | ||
1717 | * (some logic changes required below if we go that way), | ||
1718 | * because it doesn't tell us anything new. But we do need | ||
1719 | * to write to it, outside the top of this loop, | ||
1720 | * to reset the interrupt triggers for next time. | ||
1721 | */ | 2060 | */ |
1722 | if (!hc_mmio) { | 2061 | if (hardport == 0) { /* first port on this hc ? */ |
2062 | u32 hc_cause = (main_irq_cause >> shift) & HC0_IRQ_PEND; | ||
2063 | u32 port_mask, ack_irqs; | ||
2064 | /* | ||
2065 | * Skip this entire hc if nothing pending for any ports | ||
2066 | */ | ||
2067 | if (!hc_cause) { | ||
2068 | port += MV_PORTS_PER_HC - 1; | ||
2069 | continue; | ||
2070 | } | ||
2071 | /* | ||
2072 | * We don't need/want to read the hc_irq_cause register, | ||
2073 | * because doing so hurts performance, and | ||
2074 | * main_irq_cause already gives us everything we need. | ||
2075 | * | ||
2076 | * But we do have to *write* to the hc_irq_cause to ack | ||
2077 | * the ports that we are handling this time through. | ||
2078 | * | ||
2079 | * This requires that we create a bitmap for those | ||
2080 | * ports which interrupted us, and use that bitmap | ||
2081 | * to ack (only) those ports via hc_irq_cause. | ||
2082 | */ | ||
2083 | ack_irqs = 0; | ||
2084 | for (p = 0; p < MV_PORTS_PER_HC; ++p) { | ||
2085 | if ((port + p) >= hpriv->n_ports) | ||
2086 | break; | ||
2087 | port_mask = (DONE_IRQ | ERR_IRQ) << (p * 2); | ||
2088 | if (hc_cause & port_mask) | ||
2089 | ack_irqs |= (DMA_IRQ | DEV_IRQ) << p; | ||
2090 | } | ||
1723 | hc_mmio = mv_hc_base_from_port(mmio, port); | 2091 | hc_mmio = mv_hc_base_from_port(mmio, port); |
1724 | hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS); | 2092 | writelfl(~ack_irqs, hc_mmio + HC_IRQ_CAUSE_OFS); |
1725 | writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS); | ||
1726 | handled = 1; | 2093 | handled = 1; |
1727 | } | 2094 | } |
1728 | /* | 2095 | /* |
1729 | * Process completed CRPB response(s) before other events. | 2096 | * Handle interrupts signalled for this port: |
1730 | */ | ||
1731 | pp = ap->private_data; | ||
1732 | if (hc_irq_cause & (DMA_IRQ << hardport)) { | ||
1733 | if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) | ||
1734 | mv_process_crpb_entries(ap, pp); | ||
1735 | } | ||
1736 | /* | ||
1737 | * Handle chip-reported errors, or continue on to handle PIO. | ||
1738 | */ | 2097 | */ |
1739 | if (unlikely(port_cause & ERR_IRQ)) { | 2098 | port_cause = (main_irq_cause >> shift) & (DONE_IRQ | ERR_IRQ); |
1740 | mv_err_intr(ap, mv_get_active_qc(ap)); | 2099 | if (port_cause) |
1741 | } else if (hc_irq_cause & (DEV_IRQ << hardport)) { | 2100 | mv_port_intr(ap, port_cause); |
1742 | if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) { | ||
1743 | struct ata_queued_cmd *qc = mv_get_active_qc(ap); | ||
1744 | if (qc) { | ||
1745 | ata_sff_host_intr(ap, qc); | ||
1746 | continue; | ||
1747 | } | ||
1748 | } | ||
1749 | mv_unexpected_intr(ap); | ||
1750 | } | ||
1751 | } | 2101 | } |
1752 | return handled; | 2102 | return handled; |
1753 | } | 2103 | } |
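
The reworked mv_host_intr() above acks each host controller's hc_irq_cause register only once, translating the per-port DONE_IRQ/ERR_IRQ pairs in main_irq_cause (two bits per port) into the per-port DMA_IRQ/DEV_IRQ ack bits (each shifted by the hardport number). A standalone sketch of that bit translation; the constants here are placeholders with the same shapes the loop assumes, not the driver's actual register encodings.

#include <stdint.h>
#include <stdio.h>

#define PORTS_PER_HC 4
/* Placeholder bit layouts: two cause bits per port, two ack bits per port. */
#define DONE_IRQ (1 << 0)
#define ERR_IRQ  (1 << 1)
#define DMA_IRQ  (1 << 0)   /* shifted by hardport when acking */
#define DEV_IRQ  (1 << 8)   /* shifted by hardport when acking */

/* Build the ack mask for one hc from its slice of main_irq_cause. */
static uint32_t ack_mask(uint32_t hc_cause)
{
        uint32_t ack_irqs = 0;
        int p;

        for (p = 0; p < PORTS_PER_HC; p++) {
                uint32_t port_mask = (DONE_IRQ | ERR_IRQ) << (p * 2);

                if (hc_cause & port_mask)
                        ack_irqs |= (DMA_IRQ | DEV_IRQ) << p;
        }
        return ack_irqs;
}

int main(void)
{
        /* Ports 0 (done) and 2 (error) pending on this hc. */
        uint32_t hc_cause = DONE_IRQ | (ERR_IRQ << 4);

        /* The driver then does: writelfl(~ack_irqs, hc_mmio + HC_IRQ_CAUSE_OFS) */
        printf("ack_irqs = 0x%03x\n", ack_mask(hc_cause));
        return 0;
}
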
@@ -1894,7 +2244,7 @@ static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio) | |||
1894 | 2244 | ||
1895 | static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio) | 2245 | static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio) |
1896 | { | 2246 | { |
1897 | writel(0x0fcfffff, mmio + MV_FLASH_CTL); | 2247 | writel(0x0fcfffff, mmio + MV_FLASH_CTL_OFS); |
1898 | } | 2248 | } |
1899 | 2249 | ||
1900 | static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx, | 2250 | static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx, |
@@ -1913,7 +2263,7 @@ static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio) | |||
1913 | { | 2263 | { |
1914 | u32 tmp; | 2264 | u32 tmp; |
1915 | 2265 | ||
1916 | writel(0, mmio + MV_GPIO_PORT_CTL); | 2266 | writel(0, mmio + MV_GPIO_PORT_CTL_OFS); |
1917 | 2267 | ||
1918 | /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */ | 2268 | /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */ |
1919 | 2269 | ||
@@ -1931,14 +2281,14 @@ static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, | |||
1931 | int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0); | 2281 | int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0); |
1932 | 2282 | ||
1933 | if (fix_apm_sq) { | 2283 | if (fix_apm_sq) { |
1934 | tmp = readl(phy_mmio + MV5_LT_MODE); | 2284 | tmp = readl(phy_mmio + MV5_LTMODE_OFS); |
1935 | tmp |= (1 << 19); | 2285 | tmp |= (1 << 19); |
1936 | writel(tmp, phy_mmio + MV5_LT_MODE); | 2286 | writel(tmp, phy_mmio + MV5_LTMODE_OFS); |
1937 | 2287 | ||
1938 | tmp = readl(phy_mmio + MV5_PHY_CTL); | 2288 | tmp = readl(phy_mmio + MV5_PHY_CTL_OFS); |
1939 | tmp &= ~0x3; | 2289 | tmp &= ~0x3; |
1940 | tmp |= 0x1; | 2290 | tmp |= 0x1; |
1941 | writel(tmp, phy_mmio + MV5_PHY_CTL); | 2291 | writel(tmp, phy_mmio + MV5_PHY_CTL_OFS); |
1942 | } | 2292 | } |
1943 | 2293 | ||
1944 | tmp = readl(phy_mmio + MV5_PHY_MODE); | 2294 | tmp = readl(phy_mmio + MV5_PHY_MODE); |
@@ -1956,11 +2306,6 @@ static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio, | |||
1956 | { | 2306 | { |
1957 | void __iomem *port_mmio = mv_port_base(mmio, port); | 2307 | void __iomem *port_mmio = mv_port_base(mmio, port); |
1958 | 2308 | ||
1959 | /* | ||
1960 | * The datasheet warns against setting ATA_RST when EDMA is active | ||
1961 | * (but doesn't say what the problem might be). So we first try | ||
1962 | * to disable the EDMA engine before doing the ATA_RST operation. | ||
1963 | */ | ||
1964 | mv_reset_channel(hpriv, mmio, port); | 2309 | mv_reset_channel(hpriv, mmio, port); |
1965 | 2310 | ||
1966 | ZERO(0x028); /* command */ | 2311 | ZERO(0x028); /* command */ |
@@ -1975,7 +2320,7 @@ static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio, | |||
1975 | ZERO(0x024); /* respq outp */ | 2320 | ZERO(0x024); /* respq outp */ |
1976 | ZERO(0x020); /* respq inp */ | 2321 | ZERO(0x020); /* respq inp */ |
1977 | ZERO(0x02c); /* test control */ | 2322 | ZERO(0x02c); /* test control */ |
1978 | writel(0xbc, port_mmio + EDMA_IORDY_TMOUT); | 2323 | writel(0xbc, port_mmio + EDMA_IORDY_TMOUT_OFS); |
1979 | } | 2324 | } |
1980 | #undef ZERO | 2325 | #undef ZERO |
1981 | 2326 | ||
@@ -2021,13 +2366,13 @@ static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio) | |||
2021 | struct mv_host_priv *hpriv = host->private_data; | 2366 | struct mv_host_priv *hpriv = host->private_data; |
2022 | u32 tmp; | 2367 | u32 tmp; |
2023 | 2368 | ||
2024 | tmp = readl(mmio + MV_PCI_MODE); | 2369 | tmp = readl(mmio + MV_PCI_MODE_OFS); |
2025 | tmp &= 0xff00ffff; | 2370 | tmp &= 0xff00ffff; |
2026 | writel(tmp, mmio + MV_PCI_MODE); | 2371 | writel(tmp, mmio + MV_PCI_MODE_OFS); |
2027 | 2372 | ||
2028 | ZERO(MV_PCI_DISC_TIMER); | 2373 | ZERO(MV_PCI_DISC_TIMER); |
2029 | ZERO(MV_PCI_MSI_TRIGGER); | 2374 | ZERO(MV_PCI_MSI_TRIGGER); |
2030 | writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT); | 2375 | writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT_OFS); |
2031 | ZERO(PCI_HC_MAIN_IRQ_MASK_OFS); | 2376 | ZERO(PCI_HC_MAIN_IRQ_MASK_OFS); |
2032 | ZERO(MV_PCI_SERR_MASK); | 2377 | ZERO(MV_PCI_SERR_MASK); |
2033 | ZERO(hpriv->irq_cause_ofs); | 2378 | ZERO(hpriv->irq_cause_ofs); |
@@ -2045,10 +2390,10 @@ static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio) | |||
2045 | 2390 | ||
2046 | mv5_reset_flash(hpriv, mmio); | 2391 | mv5_reset_flash(hpriv, mmio); |
2047 | 2392 | ||
2048 | tmp = readl(mmio + MV_GPIO_PORT_CTL); | 2393 | tmp = readl(mmio + MV_GPIO_PORT_CTL_OFS); |
2049 | tmp &= 0x3; | 2394 | tmp &= 0x3; |
2050 | tmp |= (1 << 5) | (1 << 6); | 2395 | tmp |= (1 << 5) | (1 << 6); |
2051 | writel(tmp, mmio + MV_GPIO_PORT_CTL); | 2396 | writel(tmp, mmio + MV_GPIO_PORT_CTL_OFS); |
2052 | } | 2397 | } |
2053 | 2398 | ||
2054 | /** | 2399 | /** |
@@ -2121,7 +2466,7 @@ static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx, | |||
2121 | void __iomem *port_mmio; | 2466 | void __iomem *port_mmio; |
2122 | u32 tmp; | 2467 | u32 tmp; |
2123 | 2468 | ||
2124 | tmp = readl(mmio + MV_RESET_CFG); | 2469 | tmp = readl(mmio + MV_RESET_CFG_OFS); |
2125 | if ((tmp & (1 << 0)) == 0) { | 2470 | if ((tmp & (1 << 0)) == 0) { |
2126 | hpriv->signal[idx].amps = 0x7 << 8; | 2471 | hpriv->signal[idx].amps = 0x7 << 8; |
2127 | hpriv->signal[idx].pre = 0x1 << 5; | 2472 | hpriv->signal[idx].pre = 0x1 << 5; |
@@ -2137,7 +2482,7 @@ static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx, | |||
2137 | 2482 | ||
2138 | static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio) | 2483 | static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio) |
2139 | { | 2484 | { |
2140 | writel(0x00000060, mmio + MV_GPIO_PORT_CTL); | 2485 | writel(0x00000060, mmio + MV_GPIO_PORT_CTL_OFS); |
2141 | } | 2486 | } |
2142 | 2487 | ||
2143 | static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, | 2488 | static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, |
@@ -2235,11 +2580,6 @@ static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv, | |||
2235 | { | 2580 | { |
2236 | void __iomem *port_mmio = mv_port_base(mmio, port); | 2581 | void __iomem *port_mmio = mv_port_base(mmio, port); |
2237 | 2582 | ||
2238 | /* | ||
2239 | * The datasheet warns against setting ATA_RST when EDMA is active | ||
2240 | * (but doesn't say what the problem might be). So we first try | ||
2241 | * to disable the EDMA engine before doing the ATA_RST operation. | ||
2242 | */ | ||
2243 | mv_reset_channel(hpriv, mmio, port); | 2583 | mv_reset_channel(hpriv, mmio, port); |
2244 | 2584 | ||
2245 | ZERO(0x028); /* command */ | 2585 | ZERO(0x028); /* command */ |
@@ -2254,7 +2594,7 @@ static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv, | |||
2254 | ZERO(0x024); /* respq outp */ | 2594 | ZERO(0x024); /* respq outp */ |
2255 | ZERO(0x020); /* respq inp */ | 2595 | ZERO(0x020); /* respq inp */ |
2256 | ZERO(0x02c); /* test control */ | 2596 | ZERO(0x02c); /* test control */ |
2257 | writel(0xbc, port_mmio + EDMA_IORDY_TMOUT); | 2597 | writel(0xbc, port_mmio + EDMA_IORDY_TMOUT_OFS); |
2258 | } | 2598 | } |
2259 | 2599 | ||
2260 | #undef ZERO | 2600 | #undef ZERO |
@@ -2297,38 +2637,39 @@ static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio) | |||
2297 | return; | 2637 | return; |
2298 | } | 2638 | } |
2299 | 2639 | ||
2300 | static void mv_setup_ifctl(void __iomem *port_mmio, int want_gen2i) | 2640 | static void mv_setup_ifcfg(void __iomem *port_mmio, int want_gen2i) |
2301 | { | 2641 | { |
2302 | u32 ifctl = readl(port_mmio + SATA_INTERFACE_CFG); | 2642 | u32 ifcfg = readl(port_mmio + SATA_INTERFACE_CFG_OFS); |
2303 | 2643 | ||
2304 | ifctl = (ifctl & 0xf7f) | 0x9b1000; /* from chip spec */ | 2644 | ifcfg = (ifcfg & 0xf7f) | 0x9b1000; /* from chip spec */ |
2305 | if (want_gen2i) | 2645 | if (want_gen2i) |
2306 | ifctl |= (1 << 7); /* enable gen2i speed */ | 2646 | ifcfg |= (1 << 7); /* enable gen2i speed */ |
2307 | writelfl(ifctl, port_mmio + SATA_INTERFACE_CFG); | 2647 | writelfl(ifcfg, port_mmio + SATA_INTERFACE_CFG_OFS); |
2308 | } | 2648 | } |
2309 | 2649 | ||
2310 | /* | ||
2311 | * Caller must ensure that EDMA is not active, | ||
2312 | * by first doing mv_stop_edma() where needed. | ||
2313 | */ | ||
2314 | static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio, | 2650 | static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio, |
2315 | unsigned int port_no) | 2651 | unsigned int port_no) |
2316 | { | 2652 | { |
2317 | void __iomem *port_mmio = mv_port_base(mmio, port_no); | 2653 | void __iomem *port_mmio = mv_port_base(mmio, port_no); |
2318 | 2654 | ||
2655 | /* | ||
2656 | * The datasheet warns against setting EDMA_RESET when EDMA is active | ||
2657 | * (but doesn't say what the problem might be). So we first try | ||
2658 | * to disable the EDMA engine before doing the EDMA_RESET operation. | ||
2659 | */ | ||
2319 | mv_stop_edma_engine(port_mmio); | 2660 | mv_stop_edma_engine(port_mmio); |
2320 | writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS); | 2661 | writelfl(EDMA_RESET, port_mmio + EDMA_CMD_OFS); |
2321 | 2662 | ||
2322 | if (!IS_GEN_I(hpriv)) { | 2663 | if (!IS_GEN_I(hpriv)) { |
2323 | /* Enable 3.0gb/s link speed */ | 2664 | /* Enable 3.0gb/s link speed: this survives EDMA_RESET */ |
2324 | mv_setup_ifctl(port_mmio, 1); | 2665 | mv_setup_ifcfg(port_mmio, 1); |
2325 | } | 2666 | } |
2326 | /* | 2667 | /* |
2327 | * Strobing ATA_RST here causes a hard reset of the SATA transport, | 2668 | * Strobing EDMA_RESET here causes a hard reset of the SATA transport, |
2328 | * link, and physical layers. It resets all SATA interface registers | 2669 | * link, and physical layers. It resets all SATA interface registers |
2329 | * (except for SATA_INTERFACE_CFG), and issues a COMRESET to the dev. | 2670 | * (except for SATA_INTERFACE_CFG), and issues a COMRESET to the dev. |
2330 | */ | 2671 | */ |
2331 | writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS); | 2672 | writelfl(EDMA_RESET, port_mmio + EDMA_CMD_OFS); |
2332 | udelay(25); /* allow reset propagation */ | 2673 | udelay(25); /* allow reset propagation */ |
2333 | writelfl(0, port_mmio + EDMA_CMD_OFS); | 2674 | writelfl(0, port_mmio + EDMA_CMD_OFS); |
2334 | 2675 | ||
@@ -2392,7 +2733,7 @@ static int mv_hardreset(struct ata_link *link, unsigned int *class, | |||
2392 | sata_scr_read(link, SCR_STATUS, &sstatus); | 2733 | sata_scr_read(link, SCR_STATUS, &sstatus); |
2393 | if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) { | 2734 | if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) { |
2394 | /* Force 1.5gb/s link speed and try again */ | 2735 | /* Force 1.5gb/s link speed and try again */ |
2395 | mv_setup_ifctl(mv_ap_base(ap), 0); | 2736 | mv_setup_ifcfg(mv_ap_base(ap), 0); |
2396 | if (time_after(jiffies + HZ, deadline)) | 2737 | if (time_after(jiffies + HZ, deadline)) |
2397 | extra = HZ; /* only extend it once, max */ | 2738 | extra = HZ; /* only extend it once, max */ |
2398 | } | 2739 | } |
@@ -2493,6 +2834,34 @@ static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio) | |||
2493 | readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS)); | 2834 | readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS)); |
2494 | } | 2835 | } |
2495 | 2836 | ||
2837 | static unsigned int mv_in_pcix_mode(struct ata_host *host) | ||
2838 | { | ||
2839 | struct mv_host_priv *hpriv = host->private_data; | ||
2840 | void __iomem *mmio = hpriv->base; | ||
2841 | u32 reg; | ||
2842 | |||
2843 | if (!HAS_PCI(host) || !IS_PCIE(hpriv)) | ||
2844 | return 0; /* not PCI-X capable */ | ||
2845 | reg = readl(mmio + MV_PCI_MODE_OFS); | ||
2846 | if ((reg & MV_PCI_MODE_MASK) == 0) | ||
2847 | return 0; /* conventional PCI mode */ | ||
2848 | return 1; /* chip is in PCI-X mode */ | ||
2849 | } | ||
2850 | |||
2851 | static int mv_pci_cut_through_okay(struct ata_host *host) | ||
2852 | { | ||
2853 | struct mv_host_priv *hpriv = host->private_data; | ||
2854 | void __iomem *mmio = hpriv->base; | ||
2855 | u32 reg; | ||
2856 | |||
2857 | if (!mv_in_pcix_mode(host)) { | ||
2858 | reg = readl(mmio + PCI_COMMAND_OFS); | ||
2859 | if (reg & PCI_COMMAND_MRDTRIG) | ||
2860 | return 0; /* not okay */ | ||
2861 | } | ||
2862 | return 1; /* okay */ | ||
2863 | } | ||
2864 | |||
2496 | static int mv_chip_id(struct ata_host *host, unsigned int board_idx) | 2865 | static int mv_chip_id(struct ata_host *host, unsigned int board_idx) |
2497 | { | 2866 | { |
2498 | struct pci_dev *pdev = to_pci_dev(host->dev); | 2867 | struct pci_dev *pdev = to_pci_dev(host->dev); |
@@ -2560,7 +2929,7 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx) | |||
2560 | break; | 2929 | break; |
2561 | 2930 | ||
2562 | case chip_7042: | 2931 | case chip_7042: |
2563 | hp_flags |= MV_HP_PCIE; | 2932 | hp_flags |= MV_HP_PCIE | MV_HP_CUT_THROUGH; |
2564 | if (pdev->vendor == PCI_VENDOR_ID_TTI && | 2933 | if (pdev->vendor == PCI_VENDOR_ID_TTI && |
2565 | (pdev->device == 0x2300 || pdev->device == 0x2310)) | 2934 | (pdev->device == 0x2300 || pdev->device == 0x2310)) |
2566 | { | 2935 | { |
@@ -2590,9 +2959,12 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx) | |||
2590 | " and avoid the final two gigabytes on" | 2959 | " and avoid the final two gigabytes on" |
2591 | " all RocketRAID BIOS initialized drives.\n"); | 2960 | " all RocketRAID BIOS initialized drives.\n"); |
2592 | } | 2961 | } |
2962 | /* drop through */ | ||
2593 | case chip_6042: | 2963 | case chip_6042: |
2594 | hpriv->ops = &mv6xxx_ops; | 2964 | hpriv->ops = &mv6xxx_ops; |
2595 | hp_flags |= MV_HP_GEN_IIE; | 2965 | hp_flags |= MV_HP_GEN_IIE; |
2966 | if (board_idx == chip_6042 && mv_pci_cut_through_okay(host)) | ||
2967 | hp_flags |= MV_HP_CUT_THROUGH; | ||
2596 | 2968 | ||
2597 | switch (pdev->revision) { | 2969 | switch (pdev->revision) { |
2598 | case 0x0: | 2970 | case 0x0: |