author		Linus Torvalds <torvalds@linux-foundation.org>	2009-04-07 11:53:02 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-04-07 11:53:02 -0400
commit		132ea5e9aa9ce13f62ba45db8e43ec887d1106e9 (patch)
tree		417d93c83ccaa205efab507df56fc985242ba0ae
parent		0e26da0f2200a2fb51844aaa43e365ea9dd5a93d (diff)
parent		cae5a29d3c4ec7c4214966021c9ee827e66bd67b (diff)
Merge branch 'upstream-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jgarzik/libata-dev
* 'upstream-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jgarzik/libata-dev:
sata_mv: shorten register names
sata_mv: workaround errata SATA#13
sata_mv: cosmetic renames
sata_mv: workaround errata SATA#26
sata_mv: workaround errata PCI#7
sata_mv: replace 0x1f with ATA_PIO4 (v2)
sata_mv: fix irq mask races
sata_mv: revert SoC irq breakage
libata: ahci enclosure management bios workaround
ata: Add TRIM infrastructure
ata_piix: VGN-BX297XP wants the controller power up on suspend
libata: Remove some redundant casts from pata_octeon_cf.c
pata_artop: typo
-rw-r--r--	block/blk-settings.c		|   2
-rw-r--r--	drivers/ata/ahci.c		|  17
-rw-r--r--	drivers/ata/ata_piix.c		|   7
-rw-r--r--	drivers/ata/pata_octeon_cf.c	|   6
-rw-r--r--	drivers/ata/sata_mv.c		| 487
-rw-r--r--	include/linux/ata.h		|  41
6 files changed, 344 insertions, 216 deletions
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 59fd05d9f1d5..69c42adde52b 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -431,7 +431,7 @@ EXPORT_SYMBOL(blk_queue_segment_boundary);
  *
  * description:
  *    set required memory and length alignment for direct dma transactions.
- *    this is used when buiding direct io requests for the queue.
+ *    this is used when building direct io requests for the queue.
  *
  **/
 void blk_queue_dma_alignment(struct request_queue *q, int mask)
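The function documented above, blk_queue_dma_alignment(), tells the block layer what memory alignment a driver needs for direct-I/O buffers; the mask argument is the required alignment minus one. A minimal usage sketch (the 512-byte figure is an illustrative assumption, not something this patch sets):

/* Hypothetical example: require 512-byte-aligned buffers for direct I/O. */
static void example_set_alignment(struct request_queue *q)
{
	blk_queue_dma_alignment(q, 512 - 1);
}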
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index f75dac57dc2b..57be6bea48eb 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -78,6 +78,7 @@ static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
 static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
 					ssize_t size);
 #define MAX_SLOTS 8
+#define MAX_RETRY 15
 
 enum {
 	AHCI_PCI_BAR		= 5,
@@ -1115,6 +1116,8 @@ static void ahci_start_port(struct ata_port *ap)
 	struct ahci_port_priv *pp = ap->private_data;
 	struct ata_link *link;
 	struct ahci_em_priv *emp;
+	ssize_t rc;
+	int i;
 
 	/* enable FIS reception */
 	ahci_start_fis_rx(ap);
@@ -1126,7 +1129,17 @@ static void ahci_start_port(struct ata_port *ap)
 	if (ap->flags & ATA_FLAG_EM) {
 		ata_for_each_link(link, ap, EDGE) {
 			emp = &pp->em_priv[link->pmp];
-			ahci_transmit_led_message(ap, emp->led_state, 4);
+
+			/* EM Transmit bit maybe busy during init */
+			for (i = 0; i < MAX_RETRY; i++) {
+				rc = ahci_transmit_led_message(ap,
+							       emp->led_state,
+							       4);
+				if (rc == -EBUSY)
+					udelay(100);
+				else
+					break;
+			}
 		}
 	}
 
@@ -1331,7 +1344,7 @@ static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
 	em_ctl = readl(mmio + HOST_EM_CTL);
 	if (em_ctl & EM_CTL_TM) {
 		spin_unlock_irqrestore(ap->lock, flags);
-		return -EINVAL;
+		return -EBUSY;
 	}
 
 	/*
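Taken together, the two ahci.c hunks above form the BIOS workaround: ahci_transmit_led_message() now reports -EBUSY while the enclosure-management transmit bit is still set (for instance, left busy by the BIOS at boot), and ahci_start_port() polls instead of silently failing. Distilled, the caller-side contract looks like this (names from the patch; the total wait is bounded at MAX_RETRY * 100us = 1.5ms):

/* Sketch of the retry contract: -EBUSY means "try again shortly",
 * anything else (success or a hard error) ends the loop. */
for (i = 0; i < MAX_RETRY; i++) {
	rc = ahci_transmit_led_message(ap, emp->led_state, 4);
	if (rc != -EBUSY)
		break;
	udelay(100);
}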
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index e5cbe80ce172..942d14ac8792 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -1053,6 +1053,13 @@ static int piix_broken_suspend(void)
 				DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE M500"),
 			},
 		},
+		{
+			.ident = "VGN-BX297XP",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "VGN-BX297XP"),
+			},
+		},
 
 		{ }	/* terminate list */
 	};
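piix_broken_suspend() checks this table against the running machine's DMI data to decide whether the controller must stay powered up over suspend. A self-contained sketch of the matching idiom (the wrapper function here is an assumption for illustration; dmi_check_system() is the standard helper and returns the number of matching entries):

#include <linux/dmi.h>

static int example_is_quirky_machine(void)
{
	static const struct dmi_system_id sysids[] = {
		{
			.ident = "VGN-BX297XP",
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
				DMI_MATCH(DMI_PRODUCT_NAME, "VGN-BX297XP"),
			},
		},
		{ }	/* terminating entry */
	};

	return dmi_check_system(sysids) != 0;
}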
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index efe2c1985af3..8d9343accf3c 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -503,7 +503,7 @@ static void octeon_cf_dma_setup(struct ata_queued_cmd *qc)
 	struct ata_port *ap = qc->ap;
 	struct octeon_cf_port *cf_port;
 
-	cf_port = (struct octeon_cf_port *)ap->private_data;
+	cf_port = ap->private_data;
 	DPRINTK("ENTER\n");
 	/* issue r/w command */
 	qc->cursg = qc->sg;
@@ -596,7 +596,7 @@ static unsigned int octeon_cf_dma_finished(struct ata_port *ap,
 	if (ap->hsm_task_state != HSM_ST_LAST)
 		return 0;
 
-	cf_port = (struct octeon_cf_port *)ap->private_data;
+	cf_port = ap->private_data;
 
 	dma_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_DMA_CFGX(ocd->dma_engine));
 	if (dma_cfg.s.size != 0xfffff) {
@@ -657,7 +657,7 @@ static irqreturn_t octeon_cf_interrupt(int irq, void *dev_instance)
 			continue;
 
 		ocd = ap->dev->platform_data;
-		cf_port = (struct octeon_cf_port *)ap->private_data;
+		cf_port = ap->private_data;
 		dma_int.u64 =
 			cvmx_read_csr(CVMX_MIO_BOOT_DMA_INTX(ocd->dma_engine));
 		dma_cfg.u64 =
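The casts removed above were redundant rather than wrong: ap->private_data is a void *, and C converts void * to any object-pointer type implicitly on assignment, so the explicit cast only added noise. In miniature:

void *p = ap->private_data;		/* private_data is a void pointer */
struct octeon_cf_port *cf_port = p;	/* implicit conversion; no cast needed */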
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 5af3ea19d3c5..37ae5dc1070c 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -28,10 +28,6 @@
 /*
  * sata_mv TODO list:
  *
- * --> More errata workarounds for PCI-X.
- *
- * --> Complete a full errata audit for all chipsets to identify others.
- *
  * --> Develop a low-power-consumption strategy, and implement it.
  *
  * --> Add sysfs attributes for per-chip / per-HC IRQ coalescing thresholds.
@@ -44,6 +40,15 @@
  * connect two SATA ports.
  */
 
+/*
+ * 80x1-B2 errata PCI#11:
+ *
+ * Users of the 6041/6081 Rev.B2 chips (current is C0)
+ * should be careful to insert those cards only onto PCI-X bus #0,
+ * and only in device slots 0..7, not higher.  The chips may not
+ * work correctly otherwise  (note: this is a pretty rare condition).
+ */
+
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
@@ -64,7 +69,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME	"sata_mv"
-#define DRV_VERSION	"1.27"
+#define DRV_VERSION	"1.28"
 
 /*
  * module options
@@ -109,23 +114,23 @@ enum {
 	 * Coalescing defers the interrupt until either the IO_THRESHOLD
 	 * (count of completed I/Os) is met, or the TIME_THRESHOLD is met.
 	 */
-	MV_COAL_REG_BASE	= 0x18000,
-	MV_IRQ_COAL_CAUSE	= (MV_COAL_REG_BASE + 0x08),
+	COAL_REG_BASE		= 0x18000,
+	IRQ_COAL_CAUSE		= (COAL_REG_BASE + 0x08),
 	ALL_PORTS_COAL_IRQ	= (1 << 4),	/* all ports irq event */
 
-	MV_IRQ_COAL_IO_THRESHOLD	= (MV_COAL_REG_BASE + 0xcc),
-	MV_IRQ_COAL_TIME_THRESHOLD	= (MV_COAL_REG_BASE + 0xd0),
+	IRQ_COAL_IO_THRESHOLD	= (COAL_REG_BASE + 0xcc),
+	IRQ_COAL_TIME_THRESHOLD	= (COAL_REG_BASE + 0xd0),
 
 	/*
 	 * Registers for the (unused here) transaction coalescing feature:
 	 */
-	MV_TRAN_COAL_CAUSE_LO	= (MV_COAL_REG_BASE + 0x88),
-	MV_TRAN_COAL_CAUSE_HI	= (MV_COAL_REG_BASE + 0x8c),
+	TRAN_COAL_CAUSE_LO	= (COAL_REG_BASE + 0x88),
+	TRAN_COAL_CAUSE_HI	= (COAL_REG_BASE + 0x8c),
 
-	MV_SATAHC0_REG_BASE	= 0x20000,
-	MV_FLASH_CTL_OFS	= 0x1046c,
-	MV_GPIO_PORT_CTL_OFS	= 0x104f0,
-	MV_RESET_CFG_OFS	= 0x180d8,
+	SATAHC0_REG_BASE	= 0x20000,
+	FLASH_CTL		= 0x1046c,
+	GPIO_PORT_CTL		= 0x104f0,
+	RESET_CFG		= 0x180d8,
 
 	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
 	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
@@ -180,40 +185,41 @@ enum {
 
 	/* PCI interface registers */
 
-	PCI_COMMAND_OFS		= 0xc00,
-	PCI_COMMAND_MRDTRIG	= (1 << 7),	/* PCI Master Read Trigger */
+	MV_PCI_COMMAND		= 0xc00,
+	MV_PCI_COMMAND_MWRCOM	= (1 << 4),	/* PCI Master Write Combining */
+	MV_PCI_COMMAND_MRDTRIG	= (1 << 7),	/* PCI Master Read Trigger */
 
-	PCI_MAIN_CMD_STS_OFS	= 0xd30,
+	PCI_MAIN_CMD_STS	= 0xd30,
 	STOP_PCI_MASTER		= (1 << 2),
 	PCI_MASTER_EMPTY	= (1 << 3),
 	GLOB_SFT_RST		= (1 << 4),
 
-	MV_PCI_MODE_OFS		= 0xd00,
+	MV_PCI_MODE		= 0xd00,
 	MV_PCI_MODE_MASK	= 0x30,
 
 	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
 	MV_PCI_DISC_TIMER	= 0xd04,
 	MV_PCI_MSI_TRIGGER	= 0xc38,
 	MV_PCI_SERR_MASK	= 0xc28,
-	MV_PCI_XBAR_TMOUT_OFS	= 0x1d04,
+	MV_PCI_XBAR_TMOUT	= 0x1d04,
 	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
 	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
 	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
 	MV_PCI_ERR_COMMAND	= 0x1d50,
 
-	PCI_IRQ_CAUSE_OFS	= 0x1d58,
-	PCI_IRQ_MASK_OFS	= 0x1d5c,
+	PCI_IRQ_CAUSE		= 0x1d58,
+	PCI_IRQ_MASK		= 0x1d5c,
 	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */
 
-	PCIE_IRQ_CAUSE_OFS	= 0x1900,
-	PCIE_IRQ_MASK_OFS	= 0x1910,
+	PCIE_IRQ_CAUSE		= 0x1900,
+	PCIE_IRQ_MASK		= 0x1910,
 	PCIE_UNMASK_ALL_IRQS	= 0x40a,	/* assorted bits */
 
 	/* Host Controller Main Interrupt Cause/Mask registers (1 per-chip) */
-	PCI_HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
-	PCI_HC_MAIN_IRQ_MASK_OFS = 0x1d64,
-	SOC_HC_MAIN_IRQ_CAUSE_OFS = 0x20020,
-	SOC_HC_MAIN_IRQ_MASK_OFS = 0x20024,
+	PCI_HC_MAIN_IRQ_CAUSE	= 0x1d60,
+	PCI_HC_MAIN_IRQ_MASK	= 0x1d64,
+	SOC_HC_MAIN_IRQ_CAUSE	= 0x20020,
+	SOC_HC_MAIN_IRQ_MASK	= 0x20024,
 	ERR_IRQ			= (1 << 0),	/* shift by (2 * port #) */
 	DONE_IRQ		= (1 << 1),	/* shift by (2 * port #) */
 	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
@@ -234,9 +240,9 @@ enum {
 	HC_MAIN_RSVD_SOC	= (0x3fffffb << 6),	/* bits 31-9, 7-6 */
 
 	/* SATAHC registers */
-	HC_CFG_OFS		= 0,
+	HC_CFG			= 0x00,
 
-	HC_IRQ_CAUSE_OFS	= 0x14,
+	HC_IRQ_CAUSE		= 0x14,
 	DMA_IRQ			= (1 << 0),	/* shift by port # */
 	HC_COAL_IRQ		= (1 << 4),	/* IRQ coalescing */
 	DEV_IRQ			= (1 << 8),	/* shift by port # */
@@ -248,53 +254,54 @@ enum {
 	 * Coalescing defers the interrupt until either the IO_THRESHOLD
 	 * (count of completed I/Os) is met, or the TIME_THRESHOLD is met.
 	 */
-	HC_IRQ_COAL_IO_THRESHOLD_OFS	= 0x000c,
-	HC_IRQ_COAL_TIME_THRESHOLD_OFS	= 0x0010,
+	HC_IRQ_COAL_IO_THRESHOLD	= 0x000c,
+	HC_IRQ_COAL_TIME_THRESHOLD	= 0x0010,
 
-	SOC_LED_CTRL_OFS	= 0x2c,
+	SOC_LED_CTRL		= 0x2c,
 	SOC_LED_CTRL_BLINK	= (1 << 0),	/* Active LED blink */
 	SOC_LED_CTRL_ACT_PRESENCE = (1 << 2),	/* Multiplex dev presence */
 						/*  with dev activity LED */
 
 	/* Shadow block registers */
-	SHD_BLK_OFS		= 0x100,
-	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */
+	SHD_BLK			= 0x100,
+	SHD_CTL_AST		= 0x20,		/* ofs from SHD_BLK */
 
 	/* SATA registers */
-	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
-	SATA_ACTIVE_OFS		= 0x350,
-	SATA_FIS_IRQ_CAUSE_OFS	= 0x364,
-	SATA_FIS_IRQ_AN		= (1 << 9),	/* async notification */
+	SATA_STATUS		= 0x300,  /* ctrl, err regs follow status */
+	SATA_ACTIVE		= 0x350,
+	FIS_IRQ_CAUSE		= 0x364,
+	FIS_IRQ_CAUSE_AN	= (1 << 9),	/* async notification */
 
-	LTMODE_OFS		= 0x30c,
+	LTMODE			= 0x30c,	/* requires read-after-write */
 	LTMODE_BIT8		= (1 << 8),	/* unknown, but necessary */
 
+	PHY_MODE2		= 0x330,
 	PHY_MODE3		= 0x310,
-	PHY_MODE4		= 0x314,
+
+	PHY_MODE4		= 0x314,	/* requires read-after-write */
 	PHY_MODE4_CFG_MASK	= 0x00000003,	/* phy internal config field */
 	PHY_MODE4_CFG_VALUE	= 0x00000001,	/* phy internal config field */
 	PHY_MODE4_RSVD_ZEROS	= 0x5de3fffa,	/* Gen2e always write zeros */
 	PHY_MODE4_RSVD_ONES	= 0x00000005,	/* Gen2e always write ones */
 
-	PHY_MODE2		= 0x330,
-	SATA_IFCTL_OFS		= 0x344,
-	SATA_TESTCTL_OFS	= 0x348,
-	SATA_IFSTAT_OFS		= 0x34c,
-	VENDOR_UNIQUE_FIS_OFS	= 0x35c,
+	SATA_IFCTL		= 0x344,
+	SATA_TESTCTL		= 0x348,
+	SATA_IFSTAT		= 0x34c,
+	VENDOR_UNIQUE_FIS	= 0x35c,
 
-	FISCFG_OFS		= 0x360,
+	FISCFG			= 0x360,
 	FISCFG_WAIT_DEV_ERR	= (1 << 8),	/* wait for host on DevErr */
 	FISCFG_SINGLE_SYNC	= (1 << 16),	/* SYNC on DMA activation */
 
 	MV5_PHY_MODE		= 0x74,
-	MV5_LTMODE_OFS		= 0x30,
-	MV5_PHY_CTL_OFS		= 0x0C,
-	SATA_INTERFACE_CFG_OFS	= 0x050,
+	MV5_LTMODE		= 0x30,
+	MV5_PHY_CTL		= 0x0C,
+	SATA_IFCFG		= 0x050,
 
 	MV_M2_PREAMP_MASK	= 0x7e0,
 
 	/* Port registers */
-	EDMA_CFG_OFS		= 0,
+	EDMA_CFG		= 0,
 	EDMA_CFG_Q_DEPTH	= 0x1f,		/* max device queue depth */
 	EDMA_CFG_NCQ		= (1 << 5),	/* for R/W FPDMA queued */
 	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
@@ -303,8 +310,8 @@ enum {
 	EDMA_CFG_EDMA_FBS	= (1 << 16),	/* EDMA FIS-Based Switching */
 	EDMA_CFG_FBS		= (1 << 26),	/* FIS-Based Switching */
 
-	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
-	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
+	EDMA_ERR_IRQ_CAUSE	= 0x8,
+	EDMA_ERR_IRQ_MASK	= 0xc,
 	EDMA_ERR_D_PAR		= (1 << 0),	/* UDMA data parity err */
 	EDMA_ERR_PRD_PAR	= (1 << 1),	/* UDMA PRD parity err */
 	EDMA_ERR_DEV		= (1 << 2),	/* device error */
@@ -373,36 +380,36 @@ enum {
 				  EDMA_ERR_INTRL_PAR |
 				  EDMA_ERR_IORDY,
 
-	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
-	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */
+	EDMA_REQ_Q_BASE_HI	= 0x10,
+	EDMA_REQ_Q_IN_PTR	= 0x14,		/* also contains BASE_LO */
 
-	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
+	EDMA_REQ_Q_OUT_PTR	= 0x18,
 	EDMA_REQ_Q_PTR_SHIFT	= 5,
 
-	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
-	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
-	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
+	EDMA_RSP_Q_BASE_HI	= 0x1c,
+	EDMA_RSP_Q_IN_PTR	= 0x20,
+	EDMA_RSP_Q_OUT_PTR	= 0x24,		/* also contains BASE_LO */
 	EDMA_RSP_Q_PTR_SHIFT	= 3,
 
-	EDMA_CMD_OFS		= 0x28,		/* EDMA command register */
+	EDMA_CMD		= 0x28,		/* EDMA command register */
 	EDMA_EN			= (1 << 0),	/* enable EDMA */
 	EDMA_DS			= (1 << 1),	/* disable EDMA; self-negated */
 	EDMA_RESET		= (1 << 2),	/* reset eng/trans/link/phy */
 
-	EDMA_STATUS_OFS		= 0x30,		/* EDMA engine status */
+	EDMA_STATUS		= 0x30,		/* EDMA engine status */
 	EDMA_STATUS_CACHE_EMPTY	= (1 << 6),	/* GenIIe command cache empty */
 	EDMA_STATUS_IDLE	= (1 << 7),	/* GenIIe EDMA enabled/idle */
 
-	EDMA_IORDY_TMOUT_OFS	= 0x34,
-	EDMA_ARB_CFG_OFS	= 0x38,
+	EDMA_IORDY_TMOUT	= 0x34,
+	EDMA_ARB_CFG		= 0x38,
 
-	EDMA_HALTCOND_OFS	= 0x60,		/* GenIIe halt conditions */
-	EDMA_UNKNOWN_RSVD_OFS	= 0x6C,		/* GenIIe unknown/reserved */
+	EDMA_HALTCOND		= 0x60,		/* GenIIe halt conditions */
+	EDMA_UNKNOWN_RSVD	= 0x6C,		/* GenIIe unknown/reserved */
 
-	BMDMA_CMD_OFS		= 0x224,	/* bmdma command register */
-	BMDMA_STATUS_OFS	= 0x228,	/* bmdma status register */
-	BMDMA_PRD_LOW_OFS	= 0x22c,	/* bmdma PRD addr 31:0 */
-	BMDMA_PRD_HIGH_OFS	= 0x230,	/* bmdma PRD addr 63:32 */
+	BMDMA_CMD		= 0x224,	/* bmdma command register */
+	BMDMA_STATUS		= 0x228,	/* bmdma status register */
+	BMDMA_PRD_LOW		= 0x22c,	/* bmdma PRD addr 31:0 */
+	BMDMA_PRD_HIGH		= 0x230,	/* bmdma PRD addr 63:32 */
 
 	/* Host private flags (hp_flags) */
 	MV_HP_FLAG_MSI		= (1 << 0),
@@ -534,8 +541,8 @@ struct mv_host_priv {
 	void __iomem		*base;
 	void __iomem		*main_irq_cause_addr;
 	void __iomem		*main_irq_mask_addr;
-	u32			irq_cause_ofs;
-	u32			irq_mask_ofs;
+	u32			irq_cause_offset;
+	u32			irq_mask_offset;
 	u32			unmask_all_irqs;
 	/*
 	 * These consistent DMA memory pools give us guaranteed
@@ -694,49 +701,49 @@ static struct ata_port_operations mv_iie_ops = {
 static const struct ata_port_info mv_port_info[] = {
 	{  /* chip_504x */
 		.flags		= MV_GEN_I_FLAGS,
-		.pio_mask	= 0x1f,	/* pio0-4 */
+		.pio_mask	= ATA_PIO4,
 		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &mv5_ops,
 	},
 	{  /* chip_508x */
 		.flags		= MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
-		.pio_mask	= 0x1f,	/* pio0-4 */
+		.pio_mask	= ATA_PIO4,
 		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &mv5_ops,
 	},
 	{  /* chip_5080 */
 		.flags		= MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
-		.pio_mask	= 0x1f,	/* pio0-4 */
+		.pio_mask	= ATA_PIO4,
 		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &mv5_ops,
 	},
 	{  /* chip_604x */
 		.flags		= MV_GEN_II_FLAGS,
-		.pio_mask	= 0x1f,	/* pio0-4 */
+		.pio_mask	= ATA_PIO4,
 		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &mv6_ops,
 	},
 	{  /* chip_608x */
 		.flags		= MV_GEN_II_FLAGS | MV_FLAG_DUAL_HC,
-		.pio_mask	= 0x1f,	/* pio0-4 */
+		.pio_mask	= ATA_PIO4,
 		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &mv6_ops,
 	},
 	{  /* chip_6042 */
 		.flags		= MV_GEN_IIE_FLAGS,
-		.pio_mask	= 0x1f,	/* pio0-4 */
+		.pio_mask	= ATA_PIO4,
 		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &mv_iie_ops,
 	},
 	{  /* chip_7042 */
 		.flags		= MV_GEN_IIE_FLAGS,
-		.pio_mask	= 0x1f,	/* pio0-4 */
+		.pio_mask	= ATA_PIO4,
 		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &mv_iie_ops,
 	},
 	{  /* chip_soc */
 		.flags		= MV_GEN_IIE_FLAGS,
-		.pio_mask	= 0x1f,	/* pio0-4 */
+		.pio_mask	= ATA_PIO4,
 		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &mv_iie_ops,
 	},
@@ -840,7 +847,7 @@ static inline unsigned int mv_hardport_from_port(unsigned int port)
 
 static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
 {
-	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
+	return (base + SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
 }
 
 static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
@@ -895,10 +902,10 @@ static void mv_save_cached_regs(struct ata_port *ap)
 	void __iomem *port_mmio = mv_ap_base(ap);
 	struct mv_port_priv *pp = ap->private_data;
 
-	pp->cached.fiscfg = readl(port_mmio + FISCFG_OFS);
-	pp->cached.ltmode = readl(port_mmio + LTMODE_OFS);
-	pp->cached.haltcond = readl(port_mmio + EDMA_HALTCOND_OFS);
-	pp->cached.unknown_rsvd = readl(port_mmio + EDMA_UNKNOWN_RSVD_OFS);
+	pp->cached.fiscfg = readl(port_mmio + FISCFG);
+	pp->cached.ltmode = readl(port_mmio + LTMODE);
+	pp->cached.haltcond = readl(port_mmio + EDMA_HALTCOND);
+	pp->cached.unknown_rsvd = readl(port_mmio + EDMA_UNKNOWN_RSVD);
 }
 
 /**
@@ -913,8 +920,26 @@ static void mv_save_cached_regs(struct ata_port *ap)
 static inline void mv_write_cached_reg(void __iomem *addr, u32 *old, u32 new)
 {
 	if (new != *old) {
+		unsigned long laddr;
 		*old = new;
-		writel(new, addr);
+		/*
+		 * Workaround for 88SX60x1-B2 FEr SATA#13:
+		 * Read-after-write is needed to prevent generating 64-bit
+		 * write cycles on the PCI bus for SATA interface registers
+		 * at offsets ending in 0x4 or 0xc.
+		 *
+		 * Looks like a lot of fuss, but it avoids an unnecessary
+		 * +1 usec read-after-write delay for unaffected registers.
+		 */
+		laddr = (long)addr & 0xffff;
+		if (laddr >= 0x300 && laddr <= 0x33c) {
+			laddr &= 0x000f;
+			if (laddr == 0x4 || laddr == 0xc) {
+				writelfl(new, addr); /* read after write */
+				return;
+			}
+		}
+		writel(new, addr); /* unaffected by the errata */
 	}
 }
 
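The SATA#13 workaround above relies on writelfl(), this driver's write-and-flush helper: it performs the MMIO write and immediately reads the same register back, which forces the posted write out onto the bus before execution continues. A sketch of how such a helper is defined (consistent with its use throughout this patch, though the exact body is stated here as an assumption):

static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void)readl(addr);	/* flush the posted write */
}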
@@ -931,10 +956,10 @@ static void mv_set_edma_ptrs(void __iomem *port_mmio,
 	index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;
 
 	WARN_ON(pp->crqb_dma & 0x3ff);
-	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
+	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI);
 	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
-		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
-	writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
+		 port_mmio + EDMA_REQ_Q_IN_PTR);
+	writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR);
 
 	/*
 	 * initialize response queue
@@ -943,10 +968,10 @@ static void mv_set_edma_ptrs(void __iomem *port_mmio,
 	index = pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT;
 
 	WARN_ON(pp->crpb_dma & 0xff);
-	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
-	writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
+	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI);
+	writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR);
 	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
-		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
+		 port_mmio + EDMA_RSP_Q_OUT_PTR);
 }
 
 static void mv_write_main_irq_mask(u32 mask, struct mv_host_priv *hpriv)
@@ -1004,15 +1029,15 @@ static void mv_clear_and_enable_port_irqs(struct ata_port *ap,
 	u32 hc_irq_cause;
 
 	/* clear EDMA event indicators, if any */
-	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
+	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE);
 
 	/* clear pending irq events */
 	hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
-	writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
+	writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE);
 
 	/* clear FIS IRQ Cause */
 	if (IS_GEN_IIE(hpriv))
-		writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
+		writelfl(0, port_mmio + FIS_IRQ_CAUSE);
 
 	mv_enable_port_irqs(ap, port_irqs);
 }
@@ -1048,10 +1073,10 @@ static void mv_set_irq_coalescing(struct ata_host *host,
 		 * GEN_II/GEN_IIE with dual host controllers:
 		 * one set of global thresholds for the entire chip.
 		 */
-		writel(clks,  mmio + MV_IRQ_COAL_TIME_THRESHOLD);
-		writel(count, mmio + MV_IRQ_COAL_IO_THRESHOLD);
+		writel(clks,  mmio + IRQ_COAL_TIME_THRESHOLD);
+		writel(count, mmio + IRQ_COAL_IO_THRESHOLD);
 		/* clear leftover coal IRQ bit */
-		writel(~ALL_PORTS_COAL_IRQ, mmio + MV_IRQ_COAL_CAUSE);
+		writel(~ALL_PORTS_COAL_IRQ, mmio + IRQ_COAL_CAUSE);
 		if (count)
 			coal_enable = ALL_PORTS_COAL_DONE;
 		clks = count = 0; /* force clearing of regular regs below */
@@ -1061,16 +1086,16 @@ static void mv_set_irq_coalescing(struct ata_host *host,
 	 * All chips: independent thresholds for each HC on the chip.
 	 */
 	hc_mmio = mv_hc_base_from_port(mmio, 0);
-	writel(clks,  hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD_OFS);
-	writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD_OFS);
-	writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE_OFS);
+	writel(clks,  hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD);
+	writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD);
+	writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE);
 	if (count)
 		coal_enable |= PORTS_0_3_COAL_DONE;
 	if (is_dual_hc) {
 		hc_mmio = mv_hc_base_from_port(mmio, MV_PORTS_PER_HC);
-		writel(clks,  hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD_OFS);
-		writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD_OFS);
-		writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE_OFS);
+		writel(clks,  hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD);
+		writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD);
+		writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE);
 		if (count)
 			coal_enable |= PORTS_4_7_COAL_DONE;
 	}
@@ -1108,7 +1133,7 @@ static void mv_start_edma(struct ata_port *ap, void __iomem *port_mmio,
 		mv_set_edma_ptrs(port_mmio, hpriv, pp);
 		mv_clear_and_enable_port_irqs(ap, port_mmio, DONE_IRQ|ERR_IRQ);
 
-		writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
+		writelfl(EDMA_EN, port_mmio + EDMA_CMD);
 		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
 	}
 }
@@ -1128,7 +1153,7 @@ static void mv_wait_for_edma_empty_idle(struct ata_port *ap)
 	 * as a rough guess at what even more drives might require.
 	 */
 	for (i = 0; i < timeout; ++i) {
-		u32 edma_stat = readl(port_mmio + EDMA_STATUS_OFS);
+		u32 edma_stat = readl(port_mmio + EDMA_STATUS);
 		if ((edma_stat & empty_idle) == empty_idle)
 			break;
 		udelay(per_loop);
@@ -1148,11 +1173,11 @@ static int mv_stop_edma_engine(void __iomem *port_mmio)
 	int i;
 
 	/* Disable eDMA.  The disable bit auto clears. */
-	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
+	writelfl(EDMA_DS, port_mmio + EDMA_CMD);
 
 	/* Wait for the chip to confirm eDMA is off. */
 	for (i = 10000; i > 0; i--) {
-		u32 reg = readl(port_mmio + EDMA_CMD_OFS);
+		u32 reg = readl(port_mmio + EDMA_CMD);
 		if (!(reg & EDMA_EN))
 			return 0;
 		udelay(10);
@@ -1262,10 +1287,10 @@ static unsigned int mv_scr_offset(unsigned int sc_reg_in)
 	case SCR_STATUS:
 	case SCR_CONTROL:
 	case SCR_ERROR:
-		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
+		ofs = SATA_STATUS + (sc_reg_in * sizeof(u32));
 		break;
 	case SCR_ACTIVE:
-		ofs = SATA_ACTIVE_OFS;   /* active is not with the others */
+		ofs = SATA_ACTIVE;   /* active is not with the others */
 		break;
 	default:
 		ofs = 0xffffffffU;
@@ -1290,7 +1315,25 @@ static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
 	unsigned int ofs = mv_scr_offset(sc_reg_in);
 
 	if (ofs != 0xffffffffU) {
-		writelfl(val, mv_ap_base(link->ap) + ofs);
+		void __iomem *addr = mv_ap_base(link->ap) + ofs;
+		if (sc_reg_in == SCR_CONTROL) {
+			/*
+			 * Workaround for 88SX60x1 FEr SATA#26:
+			 *
+			 * COMRESETs have to take care not to accidently
+			 * put the drive to sleep when writing SCR_CONTROL.
+			 * Setting bits 12..15 prevents this problem.
+			 *
+			 * So if we see an outbound COMMRESET, set those bits.
+			 * Ditto for the followup write that clears the reset.
+			 *
+			 * The proprietary driver does this for
+			 * all chip versions, and so do we.
+			 */
+			if ((val & 0xf) == 1 || (readl(addr) & 0xf) == 1)
+				val |= 0xf000;
+		}
+		writelfl(val, addr);
 		return 0;
 	} else
 		return -EINVAL;
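For context on the SATA#26 hunk above: the "outbound COMRESET" it tests for is the DET field (bits 3:0) of SControl equal to 1. libata's hardreset path writes DET=1 to assert the reset and later DET=0 to release it, so both writes funnel through mv_scr_write() and both pick up bits 12..15. A simplified sketch of that caller-side sequence (delays and error handling omitted; an approximation of libata's logic, not code from this patch):

u32 scontrol;

sata_scr_read(link, SCR_CONTROL, &scontrol);
scontrol = (scontrol & 0x0f0) | 0x301;	/* DET = 1: assert COMRESET */
sata_scr_write_flush(link, SCR_CONTROL, scontrol);
/* ... hold the reset, then ... */
scontrol = (scontrol & 0x0f0) | 0x300;	/* DET = 0: release COMRESET */
sata_scr_write(link, SCR_CONTROL, scontrol);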
@@ -1368,9 +1411,9 @@ static void mv_config_fbs(struct ata_port *ap, int want_ncq, int want_fbs)
 	}
 
 	port_mmio = mv_ap_base(ap);
-	mv_write_cached_reg(port_mmio + FISCFG_OFS, old_fiscfg, fiscfg);
-	mv_write_cached_reg(port_mmio + LTMODE_OFS, old_ltmode, ltmode);
-	mv_write_cached_reg(port_mmio + EDMA_HALTCOND_OFS, old_haltcond, haltcond);
+	mv_write_cached_reg(port_mmio + FISCFG, old_fiscfg, fiscfg);
+	mv_write_cached_reg(port_mmio + LTMODE, old_ltmode, ltmode);
+	mv_write_cached_reg(port_mmio + EDMA_HALTCOND, old_haltcond, haltcond);
 }
 
 static void mv_60x1_errata_sata25(struct ata_port *ap, int want_ncq)
@@ -1379,13 +1422,13 @@ static void mv_60x1_errata_sata25(struct ata_port *ap, int want_ncq)
 	u32 old, new;
 
 	/* workaround for 88SX60x1 FEr SATA#25 (part 1) */
-	old = readl(hpriv->base + MV_GPIO_PORT_CTL_OFS);
+	old = readl(hpriv->base + GPIO_PORT_CTL);
 	if (want_ncq)
 		new = old | (1 << 22);
 	else
 		new = old & ~(1 << 22);
 	if (new != old)
-		writel(new, hpriv->base + MV_GPIO_PORT_CTL_OFS);
+		writel(new, hpriv->base + GPIO_PORT_CTL);
 }
 
 /**
@@ -1409,7 +1452,7 @@ static void mv_bmdma_enable_iie(struct ata_port *ap, int enable_bmdma)
 		new = *old | 1;
 	else
 		new = *old & ~1;
-	mv_write_cached_reg(mv_ap_base(ap) + EDMA_UNKNOWN_RSVD_OFS, old, new);
+	mv_write_cached_reg(mv_ap_base(ap) + EDMA_UNKNOWN_RSVD, old, new);
 }
 
 /*
@@ -1437,8 +1480,8 @@ static void mv_soc_led_blink_enable(struct ata_port *ap)
 		return;
 	hpriv->hp_flags |= MV_HP_QUIRK_LED_BLINK_EN;
 	hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no);
-	led_ctrl = readl(hc_mmio + SOC_LED_CTRL_OFS);
-	writel(led_ctrl | SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL_OFS);
+	led_ctrl = readl(hc_mmio + SOC_LED_CTRL);
+	writel(led_ctrl | SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL);
 }
 
 static void mv_soc_led_blink_disable(struct ata_port *ap)
@@ -1463,8 +1506,8 @@ static void mv_soc_led_blink_disable(struct ata_port *ap)
 
 	hpriv->hp_flags &= ~MV_HP_QUIRK_LED_BLINK_EN;
 	hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no);
-	led_ctrl = readl(hc_mmio + SOC_LED_CTRL_OFS);
-	writel(led_ctrl & ~SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL_OFS);
+	led_ctrl = readl(hc_mmio + SOC_LED_CTRL);
+	writel(led_ctrl & ~SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL);
 }
 
 static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma)
@@ -1528,7 +1571,7 @@ static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma)
 		pp->pp_flags |=  MV_PP_FLAG_NCQ_EN;
 	}
 
-	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
+	writelfl(cfg, port_mmio + EDMA_CFG);
 }
 
 static void mv_port_free_dma_mem(struct ata_port *ap)
@@ -1575,6 +1618,7 @@ static int mv_port_start(struct ata_port *ap)
 	struct device *dev = ap->host->dev;
 	struct mv_host_priv *hpriv = ap->host->private_data;
 	struct mv_port_priv *pp;
+	unsigned long flags;
 	int tag;
 
 	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
@@ -1610,8 +1654,12 @@ static int mv_port_start(struct ata_port *ap)
 			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
 		}
 	}
+
+	spin_lock_irqsave(ap->lock, flags);
 	mv_save_cached_regs(ap);
 	mv_edma_cfg(ap, 0, 0);
+	spin_unlock_irqrestore(ap->lock, flags);
+
 	return 0;
 
 out_port_free_dma_mem:
@@ -1630,8 +1678,12 @@ out_port_free_dma_mem:
  */
 static void mv_port_stop(struct ata_port *ap)
 {
+	unsigned long flags;
+
+	spin_lock_irqsave(ap->lock, flags);
 	mv_stop_edma(ap);
 	mv_enable_port_irqs(ap, 0);
+	spin_unlock_irqrestore(ap->lock, flags);
 	mv_port_free_dma_mem(ap);
 }
 
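The two locking hunks above belong to the "fix irq mask races" change: mv_port_start() and mv_port_stop() now manipulate EDMA state and the port IRQ masks under ap->lock, so they can no longer interleave with the interrupt path's own mask updates. The idiom in isolation (a generic sketch, not further code from the patch):

unsigned long flags;

spin_lock_irqsave(ap->lock, flags);	/* also disables local interrupts */
/* ... read-modify-write of state shared with the IRQ handler ... */
spin_unlock_irqrestore(ap->lock, flags);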
@@ -1749,13 +1801,13 @@ static void mv_bmdma_setup(struct ata_queued_cmd *qc) | |||
1749 | mv_fill_sg(qc); | 1801 | mv_fill_sg(qc); |
1750 | 1802 | ||
1751 | /* clear all DMA cmd bits */ | 1803 | /* clear all DMA cmd bits */ |
1752 | writel(0, port_mmio + BMDMA_CMD_OFS); | 1804 | writel(0, port_mmio + BMDMA_CMD); |
1753 | 1805 | ||
1754 | /* load PRD table addr. */ | 1806 | /* load PRD table addr. */ |
1755 | writel((pp->sg_tbl_dma[qc->tag] >> 16) >> 16, | 1807 | writel((pp->sg_tbl_dma[qc->tag] >> 16) >> 16, |
1756 | port_mmio + BMDMA_PRD_HIGH_OFS); | 1808 | port_mmio + BMDMA_PRD_HIGH); |
1757 | writelfl(pp->sg_tbl_dma[qc->tag], | 1809 | writelfl(pp->sg_tbl_dma[qc->tag], |
1758 | port_mmio + BMDMA_PRD_LOW_OFS); | 1810 | port_mmio + BMDMA_PRD_LOW); |
1759 | 1811 | ||
1760 | /* issue r/w command */ | 1812 | /* issue r/w command */ |
1761 | ap->ops->sff_exec_command(ap, &qc->tf); | 1813 | ap->ops->sff_exec_command(ap, &qc->tf); |
@@ -1776,7 +1828,7 @@ static void mv_bmdma_start(struct ata_queued_cmd *qc) | |||
1776 | u32 cmd = (rw ? 0 : ATA_DMA_WR) | ATA_DMA_START; | 1828 | u32 cmd = (rw ? 0 : ATA_DMA_WR) | ATA_DMA_START; |
1777 | 1829 | ||
1778 | /* start host DMA transaction */ | 1830 | /* start host DMA transaction */ |
1779 | writelfl(cmd, port_mmio + BMDMA_CMD_OFS); | 1831 | writelfl(cmd, port_mmio + BMDMA_CMD); |
1780 | } | 1832 | } |
1781 | 1833 | ||
1782 | /** | 1834 | /** |
@@ -1795,9 +1847,9 @@ static void mv_bmdma_stop(struct ata_queued_cmd *qc) | |||
1795 | u32 cmd; | 1847 | u32 cmd; |
1796 | 1848 | ||
1797 | /* clear start/stop bit */ | 1849 | /* clear start/stop bit */ |
1798 | cmd = readl(port_mmio + BMDMA_CMD_OFS); | 1850 | cmd = readl(port_mmio + BMDMA_CMD); |
1799 | cmd &= ~ATA_DMA_START; | 1851 | cmd &= ~ATA_DMA_START; |
1800 | writelfl(cmd, port_mmio + BMDMA_CMD_OFS); | 1852 | writelfl(cmd, port_mmio + BMDMA_CMD); |
1801 | 1853 | ||
1802 | /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */ | 1854 | /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */ |
1803 | ata_sff_dma_pause(ap); | 1855 | ata_sff_dma_pause(ap); |
@@ -1821,7 +1873,7 @@ static u8 mv_bmdma_status(struct ata_port *ap) | |||
1821 | * Other bits are valid only if ATA_DMA_ACTIVE==0, | 1873 | * Other bits are valid only if ATA_DMA_ACTIVE==0, |
1822 | * and the ATA_DMA_INTR bit doesn't exist. | 1874 | * and the ATA_DMA_INTR bit doesn't exist. |
1823 | */ | 1875 | */ |
1824 | reg = readl(port_mmio + BMDMA_STATUS_OFS); | 1876 | reg = readl(port_mmio + BMDMA_STATUS); |
1825 | if (reg & ATA_DMA_ACTIVE) | 1877 | if (reg & ATA_DMA_ACTIVE) |
1826 | status = ATA_DMA_ACTIVE; | 1878 | status = ATA_DMA_ACTIVE; |
1827 | else | 1879 | else |
@@ -2029,28 +2081,28 @@ static unsigned int mv_send_fis(struct ata_port *ap, u32 *fis, int nwords) | |||
2029 | int i, timeout = 200, final_word = nwords - 1; | 2081 | int i, timeout = 200, final_word = nwords - 1; |
2030 | 2082 | ||
2031 | /* Initiate FIS transmission mode */ | 2083 | /* Initiate FIS transmission mode */ |
2032 | old_ifctl = readl(port_mmio + SATA_IFCTL_OFS); | 2084 | old_ifctl = readl(port_mmio + SATA_IFCTL); |
2033 | ifctl = 0x100 | (old_ifctl & 0xf); | 2085 | ifctl = 0x100 | (old_ifctl & 0xf); |
2034 | writelfl(ifctl, port_mmio + SATA_IFCTL_OFS); | 2086 | writelfl(ifctl, port_mmio + SATA_IFCTL); |
2035 | 2087 | ||
2036 | /* Send all words of the FIS except for the final word */ | 2088 | /* Send all words of the FIS except for the final word */ |
2037 | for (i = 0; i < final_word; ++i) | 2089 | for (i = 0; i < final_word; ++i) |
2038 | writel(fis[i], port_mmio + VENDOR_UNIQUE_FIS_OFS); | 2090 | writel(fis[i], port_mmio + VENDOR_UNIQUE_FIS); |
2039 | 2091 | ||
2040 | /* Flag end-of-transmission, and then send the final word */ | 2092 | /* Flag end-of-transmission, and then send the final word */ |
2041 | writelfl(ifctl | 0x200, port_mmio + SATA_IFCTL_OFS); | 2093 | writelfl(ifctl | 0x200, port_mmio + SATA_IFCTL); |
2042 | writelfl(fis[final_word], port_mmio + VENDOR_UNIQUE_FIS_OFS); | 2094 | writelfl(fis[final_word], port_mmio + VENDOR_UNIQUE_FIS); |
2043 | 2095 | ||
2044 | /* | 2096 | /* |
2045 | * Wait for FIS transmission to complete. | 2097 | * Wait for FIS transmission to complete. |
2046 | * This typically takes just a single iteration. | 2098 | * This typically takes just a single iteration. |
2047 | */ | 2099 | */ |
2048 | do { | 2100 | do { |
2049 | ifstat = readl(port_mmio + SATA_IFSTAT_OFS); | 2101 | ifstat = readl(port_mmio + SATA_IFSTAT); |
2050 | } while (!(ifstat & 0x1000) && --timeout); | 2102 | } while (!(ifstat & 0x1000) && --timeout); |
2051 | 2103 | ||
2052 | /* Restore original port configuration */ | 2104 | /* Restore original port configuration */ |
2053 | writelfl(old_ifctl, port_mmio + SATA_IFCTL_OFS); | 2105 | writelfl(old_ifctl, port_mmio + SATA_IFCTL); |
2054 | 2106 | ||
2055 | /* See if it worked */ | 2107 | /* See if it worked */ |
2056 | if ((ifstat & 0x3000) != 0x1000) { | 2108 | if ((ifstat & 0x3000) != 0x1000) { |
@@ -2148,7 +2200,7 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc) | |||
2148 | 2200 | ||
2149 | /* Write the request in pointer to kick the EDMA to life */ | 2201 | /* Write the request in pointer to kick the EDMA to life */ |
2150 | writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index, | 2202 | writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index, |
2151 | port_mmio + EDMA_REQ_Q_IN_PTR_OFS); | 2203 | port_mmio + EDMA_REQ_Q_IN_PTR); |
2152 | return 0; | 2204 | return 0; |
2153 | 2205 | ||
2154 | case ATA_PROT_PIO: | 2206 | case ATA_PROT_PIO: |
@@ -2259,7 +2311,7 @@ static unsigned int mv_get_err_pmp_map(struct ata_port *ap) | |||
2259 | { | 2311 | { |
2260 | void __iomem *port_mmio = mv_ap_base(ap); | 2312 | void __iomem *port_mmio = mv_ap_base(ap); |
2261 | 2313 | ||
2262 | return readl(port_mmio + SATA_TESTCTL_OFS) >> 16; | 2314 | return readl(port_mmio + SATA_TESTCTL) >> 16; |
2263 | } | 2315 | } |
2264 | 2316 | ||
2265 | static void mv_pmp_eh_prep(struct ata_port *ap, unsigned int pmp_map) | 2317 | static void mv_pmp_eh_prep(struct ata_port *ap, unsigned int pmp_map) |
@@ -2292,9 +2344,9 @@ static int mv_req_q_empty(struct ata_port *ap) | |||
2292 | void __iomem *port_mmio = mv_ap_base(ap); | 2344 | void __iomem *port_mmio = mv_ap_base(ap); |
2293 | u32 in_ptr, out_ptr; | 2345 | u32 in_ptr, out_ptr; |
2294 | 2346 | ||
2295 | in_ptr = (readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS) | 2347 | in_ptr = (readl(port_mmio + EDMA_REQ_Q_IN_PTR) |
2296 | >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK; | 2348 | >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK; |
2297 | out_ptr = (readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS) | 2349 | out_ptr = (readl(port_mmio + EDMA_REQ_Q_OUT_PTR) |
2298 | >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK; | 2350 | >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK; |
2299 | return (in_ptr == out_ptr); /* 1 == queue_is_empty */ | 2351 | return (in_ptr == out_ptr); /* 1 == queue_is_empty */ |
2300 | } | 2352 | } |
@@ -2456,12 +2508,12 @@ static void mv_err_intr(struct ata_port *ap) | |||
2456 | sata_scr_read(&ap->link, SCR_ERROR, &serr); | 2508 | sata_scr_read(&ap->link, SCR_ERROR, &serr); |
2457 | sata_scr_write_flush(&ap->link, SCR_ERROR, serr); | 2509 | sata_scr_write_flush(&ap->link, SCR_ERROR, serr); |
2458 | 2510 | ||
2459 | edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); | 2511 | edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE); |
2460 | if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) { | 2512 | if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) { |
2461 | fis_cause = readl(port_mmio + SATA_FIS_IRQ_CAUSE_OFS); | 2513 | fis_cause = readl(port_mmio + FIS_IRQ_CAUSE); |
2462 | writelfl(~fis_cause, port_mmio + SATA_FIS_IRQ_CAUSE_OFS); | 2514 | writelfl(~fis_cause, port_mmio + FIS_IRQ_CAUSE); |
2463 | } | 2515 | } |
2464 | writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); | 2516 | writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE); |
2465 | 2517 | ||
2466 | if (edma_err_cause & EDMA_ERR_DEV) { | 2518 | if (edma_err_cause & EDMA_ERR_DEV) { |
2467 | /* | 2519 | /* |
@@ -2479,7 +2531,7 @@ static void mv_err_intr(struct ata_port *ap) | |||
2479 | 2531 | ||
2480 | if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) { | 2532 | if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) { |
2481 | ata_ehi_push_desc(ehi, "fis_cause=%08x", fis_cause); | 2533 | ata_ehi_push_desc(ehi, "fis_cause=%08x", fis_cause); |
2482 | if (fis_cause & SATA_FIS_IRQ_AN) { | 2534 | if (fis_cause & FIS_IRQ_CAUSE_AN) { |
2483 | u32 ec = edma_err_cause & | 2535 | u32 ec = edma_err_cause & |
2484 | ~(EDMA_ERR_TRANS_IRQ_7 | EDMA_ERR_IRQ_TRANSIENT); | 2536 | ~(EDMA_ERR_TRANS_IRQ_7 | EDMA_ERR_IRQ_TRANSIENT); |
2485 | sata_async_notification(ap); | 2537 | sata_async_notification(ap); |
@@ -2581,7 +2633,7 @@ static void mv_process_crpb_response(struct ata_port *ap, | |||
2581 | u16 edma_status = le16_to_cpu(response->flags); | 2633 | u16 edma_status = le16_to_cpu(response->flags); |
2582 | /* | 2634 | /* |
2583 | * edma_status from a response queue entry: | 2635 | * edma_status from a response queue entry: |
2584 | * LSB is from EDMA_ERR_IRQ_CAUSE_OFS (non-NCQ only). | 2636 | * LSB is from EDMA_ERR_IRQ_CAUSE (non-NCQ only). |
2585 | * MSB is saved ATA status from command completion. | 2637 | * MSB is saved ATA status from command completion. |
2586 | */ | 2638 | */ |
2587 | if (!ncq_enabled) { | 2639 | if (!ncq_enabled) { |
@@ -2613,7 +2665,7 @@ static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp | |||
2613 | int ncq_enabled = (pp->pp_flags & MV_PP_FLAG_NCQ_EN); | 2665 | int ncq_enabled = (pp->pp_flags & MV_PP_FLAG_NCQ_EN); |
2614 | 2666 | ||
2615 | /* Get the hardware queue position index */ | 2667 | /* Get the hardware queue position index */ |
2616 | in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS) | 2668 | in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR) |
2617 | >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK; | 2669 | >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK; |
2618 | 2670 | ||
2619 | /* Process new responses from since the last time we looked */ | 2671 | /* Process new responses from since the last time we looked */ |
@@ -2638,7 +2690,7 @@ static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp | |||
2638 | if (work_done) | 2690 | if (work_done) |
2639 | writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | | 2691 | writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | |
2640 | (pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT), | 2692 | (pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT), |
2641 | port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); | 2693 | port_mmio + EDMA_RSP_Q_OUT_PTR); |
2642 | } | 2694 | } |
2643 | 2695 | ||
2644 | static void mv_port_intr(struct ata_port *ap, u32 port_cause) | 2696 | static void mv_port_intr(struct ata_port *ap, u32 port_cause) |
@@ -2695,7 +2747,7 @@ static int mv_host_intr(struct ata_host *host, u32 main_irq_cause) | |||
2695 | 2747 | ||
2696 | /* If asserted, clear the "all ports" IRQ coalescing bit */ | 2748 | /* If asserted, clear the "all ports" IRQ coalescing bit */ |
2697 | if (main_irq_cause & ALL_PORTS_COAL_DONE) | 2749 | if (main_irq_cause & ALL_PORTS_COAL_DONE) |
2698 | writel(~ALL_PORTS_COAL_IRQ, mmio + MV_IRQ_COAL_CAUSE); | 2750 | writel(~ALL_PORTS_COAL_IRQ, mmio + IRQ_COAL_CAUSE); |
2699 | 2751 | ||
2700 | for (port = 0; port < hpriv->n_ports; port++) { | 2752 | for (port = 0; port < hpriv->n_ports; port++) { |
2701 | struct ata_port *ap = host->ports[port]; | 2753 | struct ata_port *ap = host->ports[port]; |
@@ -2739,7 +2791,7 @@ static int mv_host_intr(struct ata_host *host, u32 main_irq_cause) | |||
2739 | ack_irqs |= (DMA_IRQ | DEV_IRQ) << p; | 2791 | ack_irqs |= (DMA_IRQ | DEV_IRQ) << p; |
2740 | } | 2792 | } |
2741 | hc_mmio = mv_hc_base_from_port(mmio, port); | 2793 | hc_mmio = mv_hc_base_from_port(mmio, port); |
2742 | writelfl(~ack_irqs, hc_mmio + HC_IRQ_CAUSE_OFS); | 2794 | writelfl(~ack_irqs, hc_mmio + HC_IRQ_CAUSE); |
2743 | handled = 1; | 2795 | handled = 1; |
2744 | } | 2796 | } |
2745 | /* | 2797 | /* |
@@ -2761,7 +2813,7 @@ static int mv_pci_error(struct ata_host *host, void __iomem *mmio) | |||
2761 | unsigned int i, err_mask, printed = 0; | 2813 | unsigned int i, err_mask, printed = 0; |
2762 | u32 err_cause; | 2814 | u32 err_cause; |
2763 | 2815 | ||
2764 | err_cause = readl(mmio + hpriv->irq_cause_ofs); | 2816 | err_cause = readl(mmio + hpriv->irq_cause_offset); |
2765 | 2817 | ||
2766 | dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n", | 2818 | dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n", |
2767 | err_cause); | 2819 | err_cause); |
@@ -2769,7 +2821,7 @@ static int mv_pci_error(struct ata_host *host, void __iomem *mmio) | |||
2769 | DPRINTK("All regs @ PCI error\n"); | 2821 | DPRINTK("All regs @ PCI error\n"); |
2770 | mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev)); | 2822 | mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev)); |
2771 | 2823 | ||
2772 | writelfl(0, mmio + hpriv->irq_cause_ofs); | 2824 | writelfl(0, mmio + hpriv->irq_cause_offset); |
2773 | 2825 | ||
2774 | for (i = 0; i < host->n_ports; i++) { | 2826 | for (i = 0; i < host->n_ports; i++) { |
2775 | ap = host->ports[i]; | 2827 | ap = host->ports[i]; |
@@ -2906,7 +2958,7 @@ static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio) | |||
2906 | 2958 | ||
2907 | static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio) | 2959 | static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio) |
2908 | { | 2960 | { |
2909 | writel(0x0fcfffff, mmio + MV_FLASH_CTL_OFS); | 2961 | writel(0x0fcfffff, mmio + FLASH_CTL); |
2910 | } | 2962 | } |
2911 | 2963 | ||
2912 | static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx, | 2964 | static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx, |
@@ -2925,7 +2977,7 @@ static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio) | |||
2925 | { | 2977 | { |
2926 | u32 tmp; | 2978 | u32 tmp; |
2927 | 2979 | ||
2928 | writel(0, mmio + MV_GPIO_PORT_CTL_OFS); | 2980 | writel(0, mmio + GPIO_PORT_CTL); |
2929 | 2981 | ||
2930 | /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */ | 2982 | /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */ |
2931 | 2983 | ||
@@ -2943,14 +2995,14 @@ static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, | |||
2943 | int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0); | 2995 | int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0); |
2944 | 2996 | ||
2945 | if (fix_apm_sq) { | 2997 | if (fix_apm_sq) { |
2946 | tmp = readl(phy_mmio + MV5_LTMODE_OFS); | 2998 | tmp = readl(phy_mmio + MV5_LTMODE); |
2947 | tmp |= (1 << 19); | 2999 | tmp |= (1 << 19); |
2948 | writel(tmp, phy_mmio + MV5_LTMODE_OFS); | 3000 | writel(tmp, phy_mmio + MV5_LTMODE); |
2949 | 3001 | ||
2950 | tmp = readl(phy_mmio + MV5_PHY_CTL_OFS); | 3002 | tmp = readl(phy_mmio + MV5_PHY_CTL); |
2951 | tmp &= ~0x3; | 3003 | tmp &= ~0x3; |
2952 | tmp |= 0x1; | 3004 | tmp |= 0x1; |
2953 | writel(tmp, phy_mmio + MV5_PHY_CTL_OFS); | 3005 | writel(tmp, phy_mmio + MV5_PHY_CTL); |
2954 | } | 3006 | } |
2955 | 3007 | ||
2956 | tmp = readl(phy_mmio + MV5_PHY_MODE); | 3008 | tmp = readl(phy_mmio + MV5_PHY_MODE); |
@@ -2971,7 +3023,7 @@ static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio, | |||
2971 | mv_reset_channel(hpriv, mmio, port); | 3023 | mv_reset_channel(hpriv, mmio, port); |
2972 | 3024 | ||
2973 | ZERO(0x028); /* command */ | 3025 | ZERO(0x028); /* command */ |
2974 | writel(0x11f, port_mmio + EDMA_CFG_OFS); | 3026 | writel(0x11f, port_mmio + EDMA_CFG); |
2975 | ZERO(0x004); /* timer */ | 3027 | ZERO(0x004); /* timer */ |
2976 | ZERO(0x008); /* irq err cause */ | 3028 | ZERO(0x008); /* irq err cause */ |
2977 | ZERO(0x00c); /* irq err mask */ | 3029 | ZERO(0x00c); /* irq err mask */ |
@@ -2982,7 +3034,7 @@ static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio, | |||
2982 | ZERO(0x024); /* respq outp */ | 3034 | ZERO(0x024); /* respq outp */ |
2983 | ZERO(0x020); /* respq inp */ | 3035 | ZERO(0x020); /* respq inp */ |
2984 | ZERO(0x02c); /* test control */ | 3036 | ZERO(0x02c); /* test control */ |
2985 | writel(0xbc, port_mmio + EDMA_IORDY_TMOUT_OFS); | 3037 | writel(0xbc, port_mmio + EDMA_IORDY_TMOUT); |
2986 | } | 3038 | } |
2987 | #undef ZERO | 3039 | #undef ZERO |
2988 | 3040 | ||
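The ZERO() calls in this hunk and in the matching mv_soc_reset_hc_port() hunk below expand to plain register clears. The macro is defined immediately before each reset helper and #undef'd afterwards (as the line above shows), so the same short name can bind to a different MMIO base in each function. Roughly, for the per-port variant (example_zero_usage() is illustrative only; the real callers are the reset helpers themselves):

    #define ZERO(reg) writel(0, port_mmio + (reg))

    static void example_zero_usage(void __iomem *port_mmio)
    {
            ZERO(0x004);    /* timer */
            ZERO(0x008);    /* irq err cause */
    }

    #undef ZERO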
@@ -3028,16 +3080,16 @@ static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio) | |||
3028 | struct mv_host_priv *hpriv = host->private_data; | 3080 | struct mv_host_priv *hpriv = host->private_data; |
3029 | u32 tmp; | 3081 | u32 tmp; |
3030 | 3082 | ||
3031 | tmp = readl(mmio + MV_PCI_MODE_OFS); | 3083 | tmp = readl(mmio + MV_PCI_MODE); |
3032 | tmp &= 0xff00ffff; | 3084 | tmp &= 0xff00ffff; |
3033 | writel(tmp, mmio + MV_PCI_MODE_OFS); | 3085 | writel(tmp, mmio + MV_PCI_MODE); |
3034 | 3086 | ||
3035 | ZERO(MV_PCI_DISC_TIMER); | 3087 | ZERO(MV_PCI_DISC_TIMER); |
3036 | ZERO(MV_PCI_MSI_TRIGGER); | 3088 | ZERO(MV_PCI_MSI_TRIGGER); |
3037 | writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT_OFS); | 3089 | writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT); |
3038 | ZERO(MV_PCI_SERR_MASK); | 3090 | ZERO(MV_PCI_SERR_MASK); |
3039 | ZERO(hpriv->irq_cause_ofs); | 3091 | ZERO(hpriv->irq_cause_offset); |
3040 | ZERO(hpriv->irq_mask_ofs); | 3092 | ZERO(hpriv->irq_mask_offset); |
3041 | ZERO(MV_PCI_ERR_LOW_ADDRESS); | 3093 | ZERO(MV_PCI_ERR_LOW_ADDRESS); |
3042 | ZERO(MV_PCI_ERR_HIGH_ADDRESS); | 3094 | ZERO(MV_PCI_ERR_HIGH_ADDRESS); |
3043 | ZERO(MV_PCI_ERR_ATTRIBUTE); | 3095 | ZERO(MV_PCI_ERR_ATTRIBUTE); |
@@ -3051,10 +3103,10 @@ static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio) | |||
3051 | 3103 | ||
3052 | mv5_reset_flash(hpriv, mmio); | 3104 | mv5_reset_flash(hpriv, mmio); |
3053 | 3105 | ||
3054 | tmp = readl(mmio + MV_GPIO_PORT_CTL_OFS); | 3106 | tmp = readl(mmio + GPIO_PORT_CTL); |
3055 | tmp &= 0x3; | 3107 | tmp &= 0x3; |
3056 | tmp |= (1 << 5) | (1 << 6); | 3108 | tmp |= (1 << 5) | (1 << 6); |
3057 | writel(tmp, mmio + MV_GPIO_PORT_CTL_OFS); | 3109 | writel(tmp, mmio + GPIO_PORT_CTL); |
3058 | } | 3110 | } |
3059 | 3111 | ||
3060 | /** | 3112 | /** |
@@ -3069,7 +3121,7 @@ static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio) | |||
3069 | static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio, | 3121 | static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio, |
3070 | unsigned int n_hc) | 3122 | unsigned int n_hc) |
3071 | { | 3123 | { |
3072 | void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS; | 3124 | void __iomem *reg = mmio + PCI_MAIN_CMD_STS; |
3073 | int i, rc = 0; | 3125 | int i, rc = 0; |
3074 | u32 t; | 3126 | u32 t; |
3075 | 3127 | ||
@@ -3127,7 +3179,7 @@ static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx, | |||
3127 | void __iomem *port_mmio; | 3179 | void __iomem *port_mmio; |
3128 | u32 tmp; | 3180 | u32 tmp; |
3129 | 3181 | ||
3130 | tmp = readl(mmio + MV_RESET_CFG_OFS); | 3182 | tmp = readl(mmio + RESET_CFG); |
3131 | if ((tmp & (1 << 0)) == 0) { | 3183 | if ((tmp & (1 << 0)) == 0) { |
3132 | hpriv->signal[idx].amps = 0x7 << 8; | 3184 | hpriv->signal[idx].amps = 0x7 << 8; |
3133 | hpriv->signal[idx].pre = 0x1 << 5; | 3185 | hpriv->signal[idx].pre = 0x1 << 5; |
@@ -3143,7 +3195,7 @@ static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx, | |||
3143 | 3195 | ||
3144 | static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio) | 3196 | static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio) |
3145 | { | 3197 | { |
3146 | writel(0x00000060, mmio + MV_GPIO_PORT_CTL_OFS); | 3198 | writel(0x00000060, mmio + GPIO_PORT_CTL); |
3147 | } | 3199 | } |
3148 | 3200 | ||
3149 | static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, | 3201 | static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, |
@@ -3201,6 +3253,7 @@ static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, | |||
3201 | * Workaround for 60x1-B2 errata SATA#13: | 3253 | * Workaround for 60x1-B2 errata SATA#13: |
3202 | * Any write to PHY_MODE4 (above) may corrupt PHY_MODE3, | 3254 | * Any write to PHY_MODE4 (above) may corrupt PHY_MODE3, |
3203 | * so we must always rewrite PHY_MODE3 after PHY_MODE4. | 3255 | * so we must always rewrite PHY_MODE3 after PHY_MODE4. |
3256 | * Or ensure we use writelfl() when writing PHY_MODE4. | ||
3204 | */ | 3257 | */ |
3205 | writel(m3, port_mmio + PHY_MODE3); | 3258 | writel(m3, port_mmio + PHY_MODE3); |
3206 | 3259 | ||
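Spelled out, the SATA#13 rule is an ordering constraint: any write to PHY_MODE4 may clobber PHY_MODE3, so PHY_MODE3 must be (re)written afterwards, or the PHY_MODE4 write must be flushed with writelfl(). A sketch of the safe sequence, where m3 and m4 are the values computed earlier in mv6_phy_errata():

    /* m3/m4 computed earlier in mv6_phy_errata(), as in the hunk above */
    writel(m4, port_mmio + PHY_MODE4);
    writel(m3, port_mmio + PHY_MODE3);  /* always last, per errata SATA#13 */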
@@ -3252,7 +3305,7 @@ static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv, | |||
3252 | mv_reset_channel(hpriv, mmio, port); | 3305 | mv_reset_channel(hpriv, mmio, port); |
3253 | 3306 | ||
3254 | ZERO(0x028); /* command */ | 3307 | ZERO(0x028); /* command */ |
3255 | writel(0x101f, port_mmio + EDMA_CFG_OFS); | 3308 | writel(0x101f, port_mmio + EDMA_CFG); |
3256 | ZERO(0x004); /* timer */ | 3309 | ZERO(0x004); /* timer */ |
3257 | ZERO(0x008); /* irq err cause */ | 3310 | ZERO(0x008); /* irq err cause */ |
3258 | ZERO(0x00c); /* irq err mask */ | 3311 | ZERO(0x00c); /* irq err mask */ |
@@ -3263,7 +3316,7 @@ static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv, | |||
3263 | ZERO(0x024); /* respq outp */ | 3316 | ZERO(0x024); /* respq outp */ |
3264 | ZERO(0x020); /* respq inp */ | 3317 | ZERO(0x020); /* respq inp */ |
3265 | ZERO(0x02c); /* test control */ | 3318 | ZERO(0x02c); /* test control */ |
3266 | writel(0xbc, port_mmio + EDMA_IORDY_TMOUT_OFS); | 3319 | writel(0xbc, port_mmio + EDMA_IORDY_TMOUT); |
3267 | } | 3320 | } |
3268 | 3321 | ||
3269 | #undef ZERO | 3322 | #undef ZERO |
@@ -3308,12 +3361,12 @@ static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio) | |||
3308 | 3361 | ||
3309 | static void mv_setup_ifcfg(void __iomem *port_mmio, int want_gen2i) | 3362 | static void mv_setup_ifcfg(void __iomem *port_mmio, int want_gen2i) |
3310 | { | 3363 | { |
3311 | u32 ifcfg = readl(port_mmio + SATA_INTERFACE_CFG_OFS); | 3364 | u32 ifcfg = readl(port_mmio + SATA_IFCFG); |
3312 | 3365 | ||
3313 | ifcfg = (ifcfg & 0xf7f) | 0x9b1000; /* from chip spec */ | 3366 | ifcfg = (ifcfg & 0xf7f) | 0x9b1000; /* from chip spec */ |
3314 | if (want_gen2i) | 3367 | if (want_gen2i) |
3315 | ifcfg |= (1 << 7); /* enable gen2i speed */ | 3368 | ifcfg |= (1 << 7); /* enable gen2i speed */ |
3316 | writelfl(ifcfg, port_mmio + SATA_INTERFACE_CFG_OFS); | 3369 | writelfl(ifcfg, port_mmio + SATA_IFCFG); |
3317 | } | 3370 | } |
3318 | 3371 | ||
3319 | static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio, | 3372 | static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio, |
@@ -3327,7 +3380,7 @@ static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio, | |||
3327 | * to disable the EDMA engine before doing the EDMA_RESET operation. | 3380 | * to disable the EDMA engine before doing the EDMA_RESET operation. |
3328 | */ | 3381 | */ |
3329 | mv_stop_edma_engine(port_mmio); | 3382 | mv_stop_edma_engine(port_mmio); |
3330 | writelfl(EDMA_RESET, port_mmio + EDMA_CMD_OFS); | 3383 | writelfl(EDMA_RESET, port_mmio + EDMA_CMD); |
3331 | 3384 | ||
3332 | if (!IS_GEN_I(hpriv)) { | 3385 | if (!IS_GEN_I(hpriv)) { |
3333 | /* Enable 3.0Gb/s link speed: this survives EDMA_RESET */ | 3386 | /* Enable 3.0Gb/s link speed: this survives EDMA_RESET */ |
@@ -3336,11 +3389,11 @@ static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio, | |||
3336 | /* | 3389 | /* |
3337 | * Strobing EDMA_RESET here causes a hard reset of the SATA transport, | 3390 | * Strobing EDMA_RESET here causes a hard reset of the SATA transport, |
3338 | * link, and physical layers. It resets all SATA interface registers | 3391 | * link, and physical layers. It resets all SATA interface registers |
3339 | * (except for SATA_INTERFACE_CFG), and issues a COMRESET to the dev. | 3392 | * (except for SATA_IFCFG), and issues a COMRESET to the dev. |
3340 | */ | 3393 | */ |
3341 | writelfl(EDMA_RESET, port_mmio + EDMA_CMD_OFS); | 3394 | writelfl(EDMA_RESET, port_mmio + EDMA_CMD); |
3342 | udelay(25); /* allow reset propagation */ | 3395 | udelay(25); /* allow reset propagation */ |
3343 | writelfl(0, port_mmio + EDMA_CMD_OFS); | 3396 | writelfl(0, port_mmio + EDMA_CMD); |
3344 | 3397 | ||
3345 | hpriv->ops->phy_errata(hpriv, mmio, port_no); | 3398 | hpriv->ops->phy_errata(hpriv, mmio, port_no); |
3346 | 3399 | ||
@@ -3352,12 +3405,12 @@ static void mv_pmp_select(struct ata_port *ap, int pmp) | |||
3352 | { | 3405 | { |
3353 | if (sata_pmp_supported(ap)) { | 3406 | if (sata_pmp_supported(ap)) { |
3354 | void __iomem *port_mmio = mv_ap_base(ap); | 3407 | void __iomem *port_mmio = mv_ap_base(ap); |
3355 | u32 reg = readl(port_mmio + SATA_IFCTL_OFS); | 3408 | u32 reg = readl(port_mmio + SATA_IFCTL); |
3356 | int old = reg & 0xf; | 3409 | int old = reg & 0xf; |
3357 | 3410 | ||
3358 | if (old != pmp) { | 3411 | if (old != pmp) { |
3359 | reg = (reg & ~0xf) | pmp; | 3412 | reg = (reg & ~0xf) | pmp; |
3360 | writelfl(reg, port_mmio + SATA_IFCTL_OFS); | 3413 | writelfl(reg, port_mmio + SATA_IFCTL); |
3361 | } | 3414 | } |
3362 | } | 3415 | } |
3363 | } | 3416 | } |
@@ -3432,11 +3485,11 @@ static void mv_eh_thaw(struct ata_port *ap) | |||
3432 | u32 hc_irq_cause; | 3485 | u32 hc_irq_cause; |
3433 | 3486 | ||
3434 | /* clear EDMA errors on this port */ | 3487 | /* clear EDMA errors on this port */ |
3435 | writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); | 3488 | writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE); |
3436 | 3489 | ||
3437 | /* clear pending irq events */ | 3490 | /* clear pending irq events */ |
3438 | hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport); | 3491 | hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport); |
3439 | writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS); | 3492 | writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE); |
3440 | 3493 | ||
3441 | mv_enable_port_irqs(ap, ERR_IRQ); | 3494 | mv_enable_port_irqs(ap, ERR_IRQ); |
3442 | } | 3495 | } |
@@ -3455,8 +3508,7 @@ static void mv_eh_thaw(struct ata_port *ap) | |||
3455 | */ | 3508 | */ |
3456 | static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio) | 3509 | static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio) |
3457 | { | 3510 | { |
3458 | void __iomem *shd_base = port_mmio + SHD_BLK_OFS; | 3511 | void __iomem *serr, *shd_base = port_mmio + SHD_BLK; |
3459 | unsigned serr_ofs; | ||
3460 | 3512 | ||
3461 | /* PIO related setup | 3513 | /* PIO related setup |
3462 | */ | 3514 | */ |
@@ -3471,23 +3523,23 @@ static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio) | |||
3471 | port->status_addr = | 3523 | port->status_addr = |
3472 | port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS); | 3524 | port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS); |
3473 | /* special case: control/altstatus doesn't have ATA_REG_ address */ | 3525 | /* special case: control/altstatus doesn't have ATA_REG_ address */ |
3474 | port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS; | 3526 | port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST; |
3475 | 3527 | ||
3476 | /* unused: */ | 3528 | /* unused: */ |
3477 | port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL; | 3529 | port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL; |
3478 | 3530 | ||
3479 | /* Clear any currently outstanding port interrupt conditions */ | 3531 | /* Clear any currently outstanding port interrupt conditions */ |
3480 | serr_ofs = mv_scr_offset(SCR_ERROR); | 3532 | serr = port_mmio + mv_scr_offset(SCR_ERROR); |
3481 | writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs); | 3533 | writelfl(readl(serr), serr); |
3482 | writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); | 3534 | writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE); |
3483 | 3535 | ||
3484 | /* unmask all non-transient EDMA error interrupts */ | 3536 | /* unmask all non-transient EDMA error interrupts */ |
3485 | writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS); | 3537 | writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK); |
3486 | 3538 | ||
3487 | VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n", | 3539 | VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n", |
3488 | readl(port_mmio + EDMA_CFG_OFS), | 3540 | readl(port_mmio + EDMA_CFG), |
3489 | readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS), | 3541 | readl(port_mmio + EDMA_ERR_IRQ_CAUSE), |
3490 | readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS)); | 3542 | readl(port_mmio + EDMA_ERR_IRQ_MASK)); |
3491 | } | 3543 | } |
3492 | 3544 | ||
3493 | static unsigned int mv_in_pcix_mode(struct ata_host *host) | 3545 | static unsigned int mv_in_pcix_mode(struct ata_host *host) |
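The SError clear above works because SCR_ERROR is a write-1-to-clear register: writing back the value just read acknowledges every currently latched bit in one round trip. The same idiom, spelled out in a hypothetical helper (mv_scr_offset() and writelfl() are the sata_mv functions used in the hunk):

    static void example_clear_serr(void __iomem *port_mmio)
    {
            void __iomem *serr = port_mmio + mv_scr_offset(SCR_ERROR);
            u32 pending = readl(serr);      /* snapshot the latched error bits */

            writelfl(pending, serr);        /* writing 1s back clears those bits */
    }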
@@ -3498,7 +3550,7 @@ static unsigned int mv_in_pcix_mode(struct ata_host *host) | |||
3498 | 3550 | ||
3499 | if (IS_SOC(hpriv) || !IS_PCIE(hpriv)) | 3551 | if (IS_SOC(hpriv) || !IS_PCIE(hpriv)) |
3500 | return 0; /* not PCI-X capable */ | 3552 | return 0; /* not PCI-X capable */ |
3501 | reg = readl(mmio + MV_PCI_MODE_OFS); | 3553 | reg = readl(mmio + MV_PCI_MODE); |
3502 | if ((reg & MV_PCI_MODE_MASK) == 0) | 3554 | if ((reg & MV_PCI_MODE_MASK) == 0) |
3503 | return 0; /* conventional PCI mode */ | 3555 | return 0; /* conventional PCI mode */ |
3504 | return 1; /* chip is in PCI-X mode */ | 3556 | return 1; /* chip is in PCI-X mode */ |
@@ -3511,13 +3563,25 @@ static int mv_pci_cut_through_okay(struct ata_host *host) | |||
3511 | u32 reg; | 3563 | u32 reg; |
3512 | 3564 | ||
3513 | if (!mv_in_pcix_mode(host)) { | 3565 | if (!mv_in_pcix_mode(host)) { |
3514 | reg = readl(mmio + PCI_COMMAND_OFS); | 3566 | reg = readl(mmio + MV_PCI_COMMAND); |
3515 | if (reg & PCI_COMMAND_MRDTRIG) | 3567 | if (reg & MV_PCI_COMMAND_MRDTRIG) |
3516 | return 0; /* not okay */ | 3568 | return 0; /* not okay */ |
3517 | } | 3569 | } |
3518 | return 1; /* okay */ | 3570 | return 1; /* okay */ |
3519 | } | 3571 | } |
3520 | 3572 | ||
3573 | static void mv_60x1b2_errata_pci7(struct ata_host *host) | ||
3574 | { | ||
3575 | struct mv_host_priv *hpriv = host->private_data; | ||
3576 | void __iomem *mmio = hpriv->base; | ||
3577 | |||
3578 | /* workaround for 60x1-B2 errata PCI#7 */ | ||
3579 | if (mv_in_pcix_mode(host)) { | ||
3580 | u32 reg = readl(mmio + MV_PCI_COMMAND); | ||
3581 | writelfl(reg & ~MV_PCI_COMMAND_MWRCOM, mmio + MV_PCI_COMMAND); | ||
3582 | } | ||
3583 | } | ||
3584 | |||
3521 | static int mv_chip_id(struct ata_host *host, unsigned int board_idx) | 3585 | static int mv_chip_id(struct ata_host *host, unsigned int board_idx) |
3522 | { | 3586 | { |
3523 | struct pci_dev *pdev = to_pci_dev(host->dev); | 3587 | struct pci_dev *pdev = to_pci_dev(host->dev); |
@@ -3571,6 +3635,7 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx) | |||
3571 | 3635 | ||
3572 | switch (pdev->revision) { | 3636 | switch (pdev->revision) { |
3573 | case 0x7: | 3637 | case 0x7: |
3638 | mv_60x1b2_errata_pci7(host); | ||
3574 | hp_flags |= MV_HP_ERRATA_60X1B2; | 3639 | hp_flags |= MV_HP_ERRATA_60X1B2; |
3575 | break; | 3640 | break; |
3576 | case 0x9: | 3641 | case 0x9: |
@@ -3647,12 +3712,12 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx) | |||
3647 | 3712 | ||
3648 | hpriv->hp_flags = hp_flags; | 3713 | hpriv->hp_flags = hp_flags; |
3649 | if (hp_flags & MV_HP_PCIE) { | 3714 | if (hp_flags & MV_HP_PCIE) { |
3650 | hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS; | 3715 | hpriv->irq_cause_offset = PCIE_IRQ_CAUSE; |
3651 | hpriv->irq_mask_ofs = PCIE_IRQ_MASK_OFS; | 3716 | hpriv->irq_mask_offset = PCIE_IRQ_MASK; |
3652 | hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS; | 3717 | hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS; |
3653 | } else { | 3718 | } else { |
3654 | hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS; | 3719 | hpriv->irq_cause_offset = PCI_IRQ_CAUSE; |
3655 | hpriv->irq_mask_ofs = PCI_IRQ_MASK_OFS; | 3720 | hpriv->irq_mask_offset = PCI_IRQ_MASK; |
3656 | hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS; | 3721 | hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS; |
3657 | } | 3722 | } |
3658 | 3723 | ||
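These two fields exist so the rest of the driver never hard-codes PCI vs PCIe register addresses: mv_chip_id() picks the right offsets once, and shared paths such as mv_pci_error() and mv_init_host() (both earlier in this diff) go through hpriv. A hypothetical consumer, assuming hpriv was set up as in the hunk above:

    static void example_ack_host_irqs(struct mv_host_priv *hpriv,
                                      void __iomem *mmio)
    {
            u32 cause = readl(mmio + hpriv->irq_cause_offset);

            if (cause)      /* ack whatever is pending, flavor-agnostic */
                    writelfl(0, mmio + hpriv->irq_cause_offset);
    }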
@@ -3681,11 +3746,11 @@ static int mv_init_host(struct ata_host *host, unsigned int board_idx) | |||
3681 | goto done; | 3746 | goto done; |
3682 | 3747 | ||
3683 | if (IS_SOC(hpriv)) { | 3748 | if (IS_SOC(hpriv)) { |
3684 | hpriv->main_irq_cause_addr = mmio + SOC_HC_MAIN_IRQ_CAUSE_OFS; | 3749 | hpriv->main_irq_cause_addr = mmio + SOC_HC_MAIN_IRQ_CAUSE; |
3685 | hpriv->main_irq_mask_addr = mmio + SOC_HC_MAIN_IRQ_MASK_OFS; | 3750 | hpriv->main_irq_mask_addr = mmio + SOC_HC_MAIN_IRQ_MASK; |
3686 | } else { | 3751 | } else { |
3687 | hpriv->main_irq_cause_addr = mmio + PCI_HC_MAIN_IRQ_CAUSE_OFS; | 3752 | hpriv->main_irq_cause_addr = mmio + PCI_HC_MAIN_IRQ_CAUSE; |
3688 | hpriv->main_irq_mask_addr = mmio + PCI_HC_MAIN_IRQ_MASK_OFS; | 3753 | hpriv->main_irq_mask_addr = mmio + PCI_HC_MAIN_IRQ_MASK; |
3689 | } | 3754 | } |
3690 | 3755 | ||
3691 | /* initialize shadow irq mask with register's value */ | 3756 | /* initialize shadow irq mask with register's value */ |
@@ -3727,18 +3792,20 @@ static int mv_init_host(struct ata_host *host, unsigned int board_idx) | |||
3727 | 3792 | ||
3728 | VPRINTK("HC%i: HC config=0x%08x HC IRQ cause " | 3793 | VPRINTK("HC%i: HC config=0x%08x HC IRQ cause " |
3729 | "(before clear)=0x%08x\n", hc, | 3794 | "(before clear)=0x%08x\n", hc, |
3730 | readl(hc_mmio + HC_CFG_OFS), | 3795 | readl(hc_mmio + HC_CFG), |
3731 | readl(hc_mmio + HC_IRQ_CAUSE_OFS)); | 3796 | readl(hc_mmio + HC_IRQ_CAUSE)); |
3732 | 3797 | ||
3733 | /* Clear any currently outstanding hc interrupt conditions */ | 3798 | /* Clear any currently outstanding hc interrupt conditions */ |
3734 | writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS); | 3799 | writelfl(0, hc_mmio + HC_IRQ_CAUSE); |
3735 | } | 3800 | } |
3736 | 3801 | ||
3737 | /* Clear any currently outstanding host interrupt conditions */ | 3802 | if (!IS_SOC(hpriv)) { |
3738 | writelfl(0, mmio + hpriv->irq_cause_ofs); | 3803 | /* Clear any currently outstanding host interrupt conditions */ |
3804 | writelfl(0, mmio + hpriv->irq_cause_offset); | ||
3739 | 3805 | ||
3740 | /* and unmask interrupt generation for host regs */ | 3806 | /* and unmask interrupt generation for host regs */ |
3741 | writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs); | 3807 | writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_offset); |
3808 | } | ||
3742 | 3809 | ||
3743 | /* | 3810 | /* |
3744 | * enable only global host interrupts for now. | 3811 | * enable only global host interrupts for now. |
@@ -3844,7 +3911,7 @@ static int mv_platform_probe(struct platform_device *pdev) | |||
3844 | host->iomap = NULL; | 3911 | host->iomap = NULL; |
3845 | hpriv->base = devm_ioremap(&pdev->dev, res->start, | 3912 | hpriv->base = devm_ioremap(&pdev->dev, res->start, |
3846 | res->end - res->start + 1); | 3913 | res->end - res->start + 1); |
3847 | hpriv->base -= MV_SATAHC0_REG_BASE; | 3914 | hpriv->base -= SATAHC0_REG_BASE; |
3848 | 3915 | ||
3849 | /* | 3916 | /* |
3850 | * (Re-)program MBUS remapping windows if we are asked to. | 3917 | * (Re-)program MBUS remapping windows if we are asked to. |
diff --git a/include/linux/ata.h b/include/linux/ata.h index 6617c9f8f2ca..cb79b7a208e1 100644 --- a/include/linux/ata.h +++ b/include/linux/ata.h | |||
@@ -29,6 +29,8 @@ | |||
29 | #ifndef __LINUX_ATA_H__ | 29 | #ifndef __LINUX_ATA_H__ |
30 | #define __LINUX_ATA_H__ | 30 | #define __LINUX_ATA_H__ |
31 | 31 | ||
32 | #include <linux/kernel.h> | ||
33 | #include <linux/string.h> | ||
32 | #include <linux/types.h> | 34 | #include <linux/types.h> |
33 | #include <asm/byteorder.h> | 35 | #include <asm/byteorder.h> |
34 | 36 | ||
@@ -91,6 +93,7 @@ enum { | |||
91 | ATA_ID_CFA_POWER = 160, | 93 | ATA_ID_CFA_POWER = 160, |
92 | ATA_ID_CFA_KEY_MGMT = 162, | 94 | ATA_ID_CFA_KEY_MGMT = 162, |
93 | ATA_ID_CFA_MODES = 163, | 95 | ATA_ID_CFA_MODES = 163, |
96 | ATA_ID_DATA_SET_MGMT = 169, | ||
94 | ATA_ID_ROT_SPEED = 217, | 97 | ATA_ID_ROT_SPEED = 217, |
95 | ATA_ID_PIO4 = (1 << 1), | 98 | ATA_ID_PIO4 = (1 << 1), |
96 | 99 | ||
@@ -248,6 +251,7 @@ enum { | |||
248 | ATA_CMD_SMART = 0xB0, | 251 | ATA_CMD_SMART = 0xB0, |
249 | ATA_CMD_MEDIA_LOCK = 0xDE, | 252 | ATA_CMD_MEDIA_LOCK = 0xDE, |
250 | ATA_CMD_MEDIA_UNLOCK = 0xDF, | 253 | ATA_CMD_MEDIA_UNLOCK = 0xDF, |
254 | ATA_CMD_DSM = 0x06, | ||
251 | /* marked obsolete in the ATA/ATAPI-7 spec */ | 255 | /* marked obsolete in the ATA/ATAPI-7 spec */ |
252 | ATA_CMD_RESTORE = 0x10, | 256 | ATA_CMD_RESTORE = 0x10, |
253 | 257 | ||
@@ -321,6 +325,9 @@ enum { | |||
321 | ATA_SMART_READ_VALUES = 0xD0, | 325 | ATA_SMART_READ_VALUES = 0xD0, |
322 | ATA_SMART_READ_THRESHOLDS = 0xD1, | 326 | ATA_SMART_READ_THRESHOLDS = 0xD1, |
323 | 327 | ||
328 | /* feature values for Data Set Management */ | ||
329 | ATA_DSM_TRIM = 0x01, | ||
330 | |||
324 | /* password used in LBA Mid / LBA High for executing SMART commands */ | 331 | /* password used in LBA Mid / LBA High for executing SMART commands */ |
325 | ATA_SMART_LBAM_PASS = 0x4F, | 332 | ATA_SMART_LBAM_PASS = 0x4F, |
326 | ATA_SMART_LBAH_PASS = 0xC2, | 333 | ATA_SMART_LBAH_PASS = 0xC2, |
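ATA_CMD_DSM (0x06) and ATA_DSM_TRIM (0x01) are the command/feature pair for DATA SET MANAGEMENT with the TRIM bit set; the sector count field carries the number of 512-byte blocks of LBA range entries in the data payload. A hedged sketch of filling a taskfile for it (illustrative only; the libata plumbing that actually issues TRIM is not part of this diff):

    #include <linux/ata.h>

    /* hypothetical helper, not from this patch */
    static void example_fill_trim_tf(struct ata_taskfile *tf, u8 n_blocks)
    {
            tf->command = ATA_CMD_DSM;
            tf->feature = ATA_DSM_TRIM;
            tf->nsect   = n_blocks; /* 512-byte blocks of range entries */
    }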
@@ -723,6 +730,14 @@ static inline int ata_id_has_unload(const u16 *id) | |||
723 | return 0; | 730 | return 0; |
724 | } | 731 | } |
725 | 732 | ||
733 | static inline int ata_id_has_trim(const u16 *id) | ||
734 | { | ||
735 | if (ata_id_major_version(id) >= 7 && | ||
736 | (id[ATA_ID_DATA_SET_MGMT] & 1)) | ||
737 | return 1; | ||
738 | return 0; | ||
739 | } | ||
740 | |||
726 | static inline int ata_id_current_chs_valid(const u16 *id) | 741 | static inline int ata_id_current_chs_valid(const u16 *id) |
727 | { | 742 | { |
728 | /* For ATA-1 devices, if the INITIALIZE DEVICE PARAMETERS command | 743 | /* For ATA-1 devices, if the INITIALIZE DEVICE PARAMETERS command |
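Usage is a one-liner once an IDENTIFY DEVICE buffer is in hand; the major-version guard keeps pre-ATA7 devices from matching stale data in word 169. A hypothetical caller:

    #include <linux/ata.h>

    /* id[] is the 256-word (ATA_ID_WORDS) IDENTIFY DEVICE buffer */
    static int example_device_can_trim(const u16 *id)
    {
            /* word 169 (ATA_ID_DATA_SET_MGMT) bit 0 advertises TRIM */
            return ata_id_has_trim(id);
    }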
@@ -863,6 +878,32 @@ static inline void ata_id_to_hd_driveid(u16 *id) | |||
863 | #endif | 878 | #endif |
864 | } | 879 | } |
865 | 880 | ||
881 | /* | ||
882 | * Write up to 'max' LBA Range Entries to the buffer that will cover the | ||
883 | * extent from sector to sector + count. This is used for TRIM and for | ||
884 | * ADD LBA(S) TO NV CACHE PINNED SET. | ||
885 | */ | ||
886 | static inline unsigned ata_set_lba_range_entries(void *_buffer, unsigned max, | ||
887 | u64 sector, unsigned long count) | ||
888 | { | ||
889 | __le64 *buffer = _buffer; | ||
890 | unsigned i = 0; | ||
891 | |||
892 | while (i < max) { | ||
893 | u64 entry = sector | | ||
894 | ((u64)(count > 0xffff ? 0xffff : count) << 48); | ||
895 | buffer[i++] = __cpu_to_le64(entry); | ||
896 | if (count <= 0xffff) | ||
897 | break; | ||
898 | count -= 0xffff; | ||
899 | sector += 0xffff; | ||
900 | } | ||
901 | |||
902 | max = ALIGN(i * 8, 512); | ||
903 | memset(buffer + i, 0, max - i * 8); | ||
904 | return max; | ||
905 | } | ||
906 | |||
866 | static inline int is_multi_taskfile(struct ata_taskfile *tf) | 907 | static inline int is_multi_taskfile(struct ata_taskfile *tf) |
867 | { | 908 | { |
868 | return (tf->command == ATA_CMD_READ_MULTI) || | 909 | return (tf->command == ATA_CMD_READ_MULTI) || |
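Each entry packs the starting LBA into bits 47:0 and a sector count into bits 63:48, so one entry covers at most 0xffff (65535) sectors and longer extents are split across entries automatically; the buffer is then zero-padded to a 512-byte multiple and the padded size returned. A hedged usage sketch for a single-block TRIM payload (a 512-byte block holds 512/8 = 64 entries, i.e. up to 64 * 65535 = 4194240 sectors):

    #include <linux/ata.h>

    /* hypothetical helper: fill one 512-byte TRIM payload block */
    static unsigned example_build_trim_payload(void *buf512,
                                               u64 lba, unsigned long nsectors)
    {
            /* with max = 64 the returned size is always one block, i.e. 512 */
            return ata_set_lba_range_entries(buf512, 64, lba, nsectors);
    }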