author		Linus Torvalds <torvalds@linux-foundation.org>	2008-05-06 12:17:03 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-05-06 12:17:03 -0400
commit		31d9168d27fac127d449cb9fa252d880de872c7f (patch)
tree		45cc65fa9af9eba64d79163d542e12ad55f9e457
parent		4880d10927c93d858d40e297361fff375ee98492 (diff)
parent		05177f178efe1459d2d0ac05430027ba201889a4 (diff)
Merge branch 'upstream-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jgarzik/libata-dev
* 'upstream-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jgarzik/libata-dev: (27 commits)
pata_atiixp: Don't disable
sata_inic162x: update intro comment, up the version and drop EXPERIMENTAL
sata_inic162x: add cardbus support
sata_inic162x: kill now unused SFF related stuff
sata_inic162x: use IDMA for ATAPI commands
sata_inic162x: use IDMA for non DMA ATA commands
sata_inic162x: kill now unused bmdma related stuff
sata_inic162x: use IDMA for ATA_PROT_DMA
sata_inic162x: update TF read handling
sata_inic162x: add / update constants
sata_inic162x: misc clean ups
sata_mv use hweight16() for bit counting (V2)
sata_mv NCQ-EH for FIS-based switching
sata_mv delayed eh handling
libata: export ata_eh_analyze_ncq_error
sata_mv new mv_port_intr function
sata_mv fix mv_host_intr bug for hc_irq_cause
sata_mv NCQ and SError fixes for mv_err_intr
sata_mv rearrange mv_config_fbs
sata_mv errata workaround for sata25 part 1
...
-rw-r--r--	drivers/ata/Kconfig		|  13
-rw-r--r--	drivers/ata/Makefile		|   1
-rw-r--r--	drivers/ata/ahci.c		|   4
-rw-r--r--	drivers/ata/ata_generic.c	|   6
-rw-r--r--	drivers/ata/ata_piix.c		|  25
-rw-r--r--	drivers/ata/libata-core.c	|   1
-rw-r--r--	drivers/ata/libata-eh.c		|   2
-rw-r--r--	drivers/ata/libata-sff.c	|   6
-rw-r--r--	drivers/ata/pata_acpi.c		|   6
-rw-r--r--	drivers/ata/pata_sch.c		| 206
-rw-r--r--	drivers/ata/sata_inic162x.c	| 646
-rw-r--r--	drivers/ata/sata_mv.c		| 690
-rw-r--r--	include/linux/libata.h		|  16
13 files changed, 1223 insertions, 399 deletions
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 1c11df9a5f32..9bf2986a2788 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -205,8 +205,8 @@ config SATA_VITESSE
	  If unsure, say N.
 
 config SATA_INIC162X
-	tristate "Initio 162x SATA support (HIGHLY EXPERIMENTAL)"
-	depends on PCI && EXPERIMENTAL
+	tristate "Initio 162x SATA support"
+	depends on PCI
 	help
 	  This option enables support for Initio 162x Serial ATA.
 
@@ -697,6 +697,15 @@ config PATA_SCC
 
 	  If unsure, say N.
 
+config PATA_SCH
+	tristate "Intel SCH PATA support"
+	depends on PCI
+	help
+	  This option enables support for Intel SCH PATA on the Intel
+	  SCH (US15W, US15L, UL11L) series host controllers.
+
+	  If unsure, say N.
+
 config PATA_BF54X
 	tristate "Blackfin 54x ATAPI support"
 	depends on BF542 || BF548 || BF549
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index b693d829383a..674965fa326d 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -67,6 +67,7 @@ obj-$(CONFIG_PATA_SIS)		+= pata_sis.o
 obj-$(CONFIG_PATA_TRIFLEX)	+= pata_triflex.o
 obj-$(CONFIG_PATA_IXP4XX_CF)	+= pata_ixp4xx_cf.o
 obj-$(CONFIG_PATA_SCC)		+= pata_scc.o
+obj-$(CONFIG_PATA_SCH)		+= pata_sch.o
 obj-$(CONFIG_PATA_BF54X)	+= pata_bf54x.o
 obj-$(CONFIG_PATA_PLATFORM)	+= pata_platform.o
 obj-$(CONFIG_PATA_OF_PLATFORM)	+= pata_of_platform.o
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 8cace9aa9c03..97f83fb2ee2e 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -1267,9 +1267,7 @@ static int ahci_check_ready(struct ata_link *link)
 	void __iomem *port_mmio = ahci_port_base(link->ap);
 	u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
 
-	if (!(status & ATA_BUSY))
-		return 1;
-	return 0;
+	return ata_check_ready(status);
 }
 
 static int ahci_softreset(struct ata_link *link, unsigned int *class,
diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c
index 47aeccd52fa9..75a406f5e694 100644
--- a/drivers/ata/ata_generic.c
+++ b/drivers/ata/ata_generic.c
@@ -152,6 +152,12 @@ static int ata_generic_init_one(struct pci_dev *dev, const struct pci_device_id
 	if (dev->vendor == PCI_VENDOR_ID_AL)
 		ata_pci_bmdma_clear_simplex(dev);
 
+	if (dev->vendor == PCI_VENDOR_ID_ATI) {
+		int rc = pcim_enable_device(dev);
+		if (rc < 0)
+			return rc;
+		pcim_pin_device(dev);
+	}
 	return ata_pci_sff_init_one(dev, ppi, &generic_sht, NULL);
 }
 
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index ea2c7649d399..a9027b8fbdd5 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -1348,6 +1348,8 @@ static void __devinit piix_init_sidpr(struct ata_host *host)
 {
 	struct pci_dev *pdev = to_pci_dev(host->dev);
 	struct piix_host_priv *hpriv = host->private_data;
+	struct ata_device *dev0 = &host->ports[0]->link.device[0];
+	u32 scontrol;
 	int i;
 
 	/* check for availability */
@@ -1366,6 +1368,29 @@ static void __devinit piix_init_sidpr(struct ata_host *host)
 		return;
 
 	hpriv->sidpr = pcim_iomap_table(pdev)[PIIX_SIDPR_BAR];
+
+	/* SCR access via SIDPR doesn't work on some configurations.
+	 * Give it a test drive by inhibiting power save modes which
+	 * we'll do anyway.
+	 */
+	scontrol = piix_sidpr_read(dev0, SCR_CONTROL);
+
+	/* if IPM is already 3, SCR access is probably working.  Don't
+	 * un-inhibit power save modes as BIOS might have inhibited
+	 * them for a reason.
+	 */
+	if ((scontrol & 0xf00) != 0x300) {
+		scontrol |= 0x300;
+		piix_sidpr_write(dev0, SCR_CONTROL, scontrol);
+		scontrol = piix_sidpr_read(dev0, SCR_CONTROL);
+
+		if ((scontrol & 0xf00) != 0x300) {
+			dev_printk(KERN_INFO, host->dev, "SCR access via "
+				   "SIDPR is available but doesn't work\n");
+			return;
+		}
+	}
+
 	host->ports[0]->ops = &piix_sidpr_sata_ops;
 	host->ports[1]->ops = &piix_sidpr_sata_ops;
 }
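The test drive above hinges on the SControl IPM field: bits 11:8 of SControl select which interface power-management states the device may enter, and the value 0x3 disables transitions to both PARTIAL and SLUMBER. A sketch of the check with hypothetical macro names (the hunk itself uses the bare 0xf00/0x300 constants):

	/* sketch only -- macro names are hypothetical, not from ata_piix.c */
	#define SCONTROL_IPM_MASK	0xf00	/* SControl bits 11:8 */
	#define SCONTROL_IPM_NOPM	0x300	/* PARTIAL and SLUMBER disabled */

	static bool sidpr_scr_seems_to_work(u32 scontrol)
	{
		/* the probe writes 0x3 to IPM and trusts SIDPR only if it reads back */
		return (scontrol & SCONTROL_IPM_MASK) == SCONTROL_IPM_NOPM;
	}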
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 3bc488538204..927b692d723c 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -6292,6 +6292,7 @@ EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
 EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
 EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
 EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
+EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error);
 EXPORT_SYMBOL_GPL(ata_do_eh);
 EXPORT_SYMBOL_GPL(ata_std_error_handler);
 
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 61dcd0026c64..62e033146bed 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -1357,7 +1357,7 @@ static void ata_eh_analyze_serror(struct ata_link *link)
  * LOCKING:
  * Kernel thread context (may sleep).
  */
-static void ata_eh_analyze_ncq_error(struct ata_link *link)
+void ata_eh_analyze_ncq_error(struct ata_link *link)
 {
 	struct ata_port *ap = link->ap;
 	struct ata_eh_context *ehc = &link->eh_context;
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 2ec65a8fda79..3c2d2289f85e 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -314,11 +314,7 @@ static int ata_sff_check_ready(struct ata_link *link)
 {
 	u8 status = link->ap->ops->sff_check_status(link->ap);
 
-	if (!(status & ATA_BUSY))
-		return 1;
-	if (status == 0xff)
-		return -ENODEV;
-	return 0;
+	return ata_check_ready(status);
 }
 
 /**
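Both the ahci and libata-sff hunks above fold an open-coded BSY test into a new ata_check_ready() helper. Its definition lands in include/linux/libata.h (the 16-line change in the diffstat, not shown here); a minimal sketch of the consolidated logic, inferred from the two call sites it replaces:

	/* sketch, inferred from the converted call sites -- see
	 * include/linux/libata.h in this merge for the real definition
	 */
	static inline int ata_check_ready(u8 status)
	{
		if (!(status & ATA_BUSY))
			return 1;		/* device is ready */

		/* 0xff indicates either no device or device not ready */
		if (status == 0xff)
			return -ENODEV;

		return 0;			/* still busy, keep polling */
	}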
diff --git a/drivers/ata/pata_acpi.c b/drivers/ata/pata_acpi.c
index c5f91e629945..fbe605711554 100644
--- a/drivers/ata/pata_acpi.c
+++ b/drivers/ata/pata_acpi.c
@@ -259,6 +259,12 @@ static int pacpi_init_one (struct pci_dev *pdev, const struct pci_device_id *id)
 		.port_ops	= &pacpi_ops,
 	};
 	const struct ata_port_info *ppi[] = { &info, NULL };
+	if (pdev->vendor == PCI_VENDOR_ID_ATI) {
+		int rc = pcim_enable_device(pdev);
+		if (rc < 0)
+			return rc;
+		pcim_pin_device(pdev);
+	}
 	return ata_pci_sff_init_one(pdev, ppi, &pacpi_sht, NULL);
 }
 
diff --git a/drivers/ata/pata_sch.c b/drivers/ata/pata_sch.c
new file mode 100644
index 000000000000..c8cc027789fe
--- /dev/null
+++ b/drivers/ata/pata_sch.c
@@ -0,0 +1,206 @@
+/*
+ * pata_sch.c - Intel SCH PATA controllers
+ *
+ * Copyright (c) 2008 Alek Du <alek.du@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.  If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+/*
+ * Supports:
+ *   Intel SCH (AF82US15W, AF82US15L, AF82UL11L) chipsets -- see spec at:
+ *   http://download.intel.com/design/chipsets/embedded/datashts/319537.pdf
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+#include <linux/dmi.h>
+
+#define DRV_NAME	"pata_sch"
+#define DRV_VERSION	"0.2"
+
+/* see SCH datasheet page 351 */
+enum {
+	D0TIM	= 0x80,		/* Device 0 Timing Register */
+	D1TIM	= 0x84,		/* Device 1 Timing Register */
+	PM	= 0x07,		/* PIO Mode Bit Mask */
+	MDM	= (0x03 << 8),	/* Multi-word DMA Mode Bit Mask */
+	UDM	= (0x07 << 16),	/* Ultra DMA Mode Bit Mask */
+	PPE	= (1 << 30),	/* Prefetch/Post Enable */
+	USD	= (1 << 31),	/* Use Synchronous DMA */
+};
+
+static int sch_init_one(struct pci_dev *pdev,
+			const struct pci_device_id *ent);
+static void sch_set_piomode(struct ata_port *ap, struct ata_device *adev);
+static void sch_set_dmamode(struct ata_port *ap, struct ata_device *adev);
+
+static const struct pci_device_id sch_pci_tbl[] = {
+	/* Intel SCH PATA Controller */
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_SCH_IDE), 0 },
+	{ }	/* terminate list */
+};
+
+static struct pci_driver sch_pci_driver = {
+	.name			= DRV_NAME,
+	.id_table		= sch_pci_tbl,
+	.probe			= sch_init_one,
+	.remove			= ata_pci_remove_one,
+#ifdef CONFIG_PM
+	.suspend		= ata_pci_device_suspend,
+	.resume			= ata_pci_device_resume,
+#endif
+};
+
+static struct scsi_host_template sch_sht = {
+	ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations sch_pata_ops = {
+	.inherits		= &ata_bmdma_port_ops,
+	.cable_detect		= ata_cable_unknown,
+	.set_piomode		= sch_set_piomode,
+	.set_dmamode		= sch_set_dmamode,
+};
+
+static struct ata_port_info sch_port_info = {
+	.flags		= 0,
+	.pio_mask	= ATA_PIO4,   /* pio0-4 */
+	.mwdma_mask	= ATA_MWDMA2, /* mwdma0-2 */
+	.udma_mask	= ATA_UDMA5,  /* udma0-5 */
+	.port_ops	= &sch_pata_ops,
+};
+
+MODULE_AUTHOR("Alek Du <alek.du@intel.com>");
+MODULE_DESCRIPTION("SCSI low-level driver for Intel SCH PATA controllers");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, sch_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
+
+/**
+ *	sch_set_piomode - Initialize host controller PATA PIO timings
+ *	@ap: Port whose timings we are configuring
+ *	@adev: ATA device
+ *
+ *	Set PIO mode for device, in host controller PCI config space.
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void sch_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	unsigned int pio	= adev->pio_mode - XFER_PIO_0;
+	struct pci_dev *dev	= to_pci_dev(ap->host->dev);
+	unsigned int port	= adev->devno ? D1TIM : D0TIM;
+	unsigned int data;
+
+	pci_read_config_dword(dev, port, &data);
+	/* see SCH datasheet page 351 */
+	/* set PIO mode */
+	data &= ~(PM | PPE);
+	data |= pio;
+	/* enable PPE for block device */
+	if (adev->class == ATA_DEV_ATA)
+		data |= PPE;
+	pci_write_config_dword(dev, port, data);
+}
+
+/**
+ *	sch_set_dmamode - Initialize host controller PATA DMA timings
+ *	@ap: Port whose timings we are configuring
+ *	@adev: ATA device
+ *
+ *	Set MW/UDMA mode for device, in host controller PCI config space.
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void sch_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+	unsigned int dma_mode	= adev->dma_mode;
+	struct pci_dev *dev	= to_pci_dev(ap->host->dev);
+	unsigned int port	= adev->devno ? D1TIM : D0TIM;
+	unsigned int data;
+
+	pci_read_config_dword(dev, port, &data);
+	/* see SCH datasheet page 351 */
+	if (dma_mode >= XFER_UDMA_0) {
+		/* enable Synchronous DMA mode */
+		data |= USD;
+		data &= ~UDM;
+		data |= (dma_mode - XFER_UDMA_0) << 16;
+	} else { /* must be MWDMA mode, since we masked SWDMA already */
+		data &= ~(USD | MDM);
+		data |= (dma_mode - XFER_MW_DMA_0) << 8;
+	}
+	pci_write_config_dword(dev, port, data);
+}
+
+/**
+ *	sch_init_one - Register SCH ATA PCI device with kernel services
+ *	@pdev: PCI device to register
+ *	@ent: Entry in sch_pci_tbl matching with @pdev
+ *
+ *	LOCKING:
+ *	Inherited from PCI layer (may sleep).
+ *
+ *	RETURNS:
+ *	Zero on success, or -ERRNO value.
+ */
+
+static int __devinit sch_init_one(struct pci_dev *pdev,
+				  const struct pci_device_id *ent)
+{
+	static int printed_version;
+	const struct ata_port_info *ppi[] = { &sch_port_info, NULL };
+	struct ata_host *host;
+	int rc;
+
+	if (!printed_version++)
+		dev_printk(KERN_DEBUG, &pdev->dev,
+			   "version " DRV_VERSION "\n");
+
+	/* enable device and prepare host */
+	rc = pcim_enable_device(pdev);
+	if (rc)
+		return rc;
+	rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
+	if (rc)
+		return rc;
+	pci_set_master(pdev);
+	return ata_pci_sff_activate_host(host, ata_sff_interrupt, &sch_sht);
+}
+
+static int __init sch_init(void)
+{
+	return pci_register_driver(&sch_pci_driver);
+}
+
+static void __exit sch_exit(void)
+{
+	pci_unregister_driver(&sch_pci_driver);
+}
+
+module_init(sch_init);
+module_exit(sch_exit);
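To make the D0TIM/D1TIM layout concrete, here is an illustrative computation (not part of the driver) of the timing dword the two set_*mode hooks above would leave behind for a master ATA disk negotiated to PIO4 + UDMA5:

	/* illustrative only -- follows the sch_set_piomode()/sch_set_dmamode()
	 * logic and the mask definitions in the enum above
	 */
	static u32 sch_example_d0tim(void)
	{
		u32 data = 0;

		data |= 4;		/* PM field: PIO mode 4 */
		data |= PPE;		/* prefetch/post for ATA_DEV_ATA disks */
		data |= USD;		/* use synchronous (ultra) DMA */
		data |= 5 << 16;	/* UDM field: UDMA mode 5 */

		return data;	/* written via pci_write_config_dword(dev, D0TIM, data) */
	}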
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
index d27bb9a2568f..3ead02fe379e 100644
--- a/drivers/ata/sata_inic162x.c
+++ b/drivers/ata/sata_inic162x.c
@@ -10,13 +10,33 @@
  * right.  Documentation is available at initio's website but it only
  * documents registers (not programming model).
  *
- * - ATA disks work.
- * - Hotplug works.
- * - ATAPI read works but burning doesn't.  This thing is really
- *   peculiar about ATAPI and I couldn't figure out how ATAPI PIO and
- *   ATAPI DMA WRITE should be programmed.  If you've got a clue, be
- *   my guest.
- * - Both STR and STD work.
+ * This driver has an interesting history.  The first version was
+ * written from the documentation and a 2.4 IDE driver posted by a
+ * Taiwanese company, which didn't use any IDMA features and couldn't
+ * handle LBA48.  The resulting driver couldn't handle LBA48 devices
+ * either, making it pretty useless.
+ *
+ * After a while, initio picked the driver up, renamed it to
+ * sata_initio162x, updated it to use IDMA for ATA DMA commands and
+ * posted it on their website.  It only used ATA_PROT_DMA for IDMA and
+ * attaching both devices and issuing IDMA and !IDMA commands
+ * simultaneously broke it due to PIRQ masking interaction, but it did
+ * show how to use the IDMA (ADMA + some initio specific twists)
+ * engine.
+ *
+ * Then, I picked up their changes again and here's the usable driver
+ * which uses IDMA for everything.  Everything works now including
+ * LBA48, CD/DVD burning, suspend/resume and hotplug.  There are some
+ * issues tho.  Result TF is not reported properly, NCQ isn't
+ * supported yet and CD/DVD writing works with DMA assisted PIO
+ * protocol (which, for native SATA devices, shouldn't cause any
+ * noticeable difference).
+ *
+ * Anyways, so, here's finally a working driver for inic162x.  Enjoy!
+ *
+ * initio: If you guys wanna improve the driver regarding result TF
+ * access and other stuff, please feel free to contact me.  I'll be
+ * happy to assist.
  */
 
 #include <linux/kernel.h>
@@ -28,13 +48,19 @@
 #include <scsi/scsi_device.h>
 
 #define DRV_NAME	"sata_inic162x"
-#define DRV_VERSION	"0.3"
+#define DRV_VERSION	"0.4"
 
 enum {
-	MMIO_BAR		= 5,
+	MMIO_BAR_PCI		= 5,
+	MMIO_BAR_CARDBUS	= 1,
 
 	NR_PORTS		= 2,
 
+	IDMA_CPB_TBL_SIZE	= 4 * 32,
+
+	INIC_DMA_BOUNDARY	= 0xffffff,
+
+	HOST_ACTRL		= 0x08,
 	HOST_CTL		= 0x7c,
 	HOST_STAT		= 0x7e,
 	HOST_IRQ_STAT		= 0xbc,
@@ -43,22 +69,37 @@ enum {
 	PORT_SIZE		= 0x40,
 
 	/* registers for ATA TF operation */
-	PORT_TF			= 0x00,
-	PORT_ALT_STAT		= 0x08,
+	PORT_TF_DATA		= 0x00,
+	PORT_TF_FEATURE		= 0x01,
+	PORT_TF_NSECT		= 0x02,
+	PORT_TF_LBAL		= 0x03,
+	PORT_TF_LBAM		= 0x04,
+	PORT_TF_LBAH		= 0x05,
+	PORT_TF_DEVICE		= 0x06,
+	PORT_TF_COMMAND		= 0x07,
+	PORT_TF_ALT_STAT	= 0x08,
 	PORT_IRQ_STAT		= 0x09,
 	PORT_IRQ_MASK		= 0x0a,
 	PORT_PRD_CTL		= 0x0b,
 	PORT_PRD_ADDR		= 0x0c,
 	PORT_PRD_XFERLEN	= 0x10,
+	PORT_CPB_CPBLAR		= 0x18,
+	PORT_CPB_PTQFIFO	= 0x1c,
 
 	/* IDMA register */
 	PORT_IDMA_CTL		= 0x14,
+	PORT_IDMA_STAT		= 0x16,
+
+	PORT_RPQ_FIFO		= 0x1e,
+	PORT_RPQ_CNT		= 0x1f,
 
 	PORT_SCR		= 0x20,
 
 	/* HOST_CTL bits */
 	HCTL_IRQOFF		= (1 << 8),  /* global IRQ off */
-	HCTL_PWRDWN		= (1 << 13), /* power down PHYs */
+	HCTL_FTHD0		= (1 << 10), /* fifo threshold 0 */
+	HCTL_FTHD1		= (1 << 11), /* fifo threshold 1 */
+	HCTL_PWRDWN		= (1 << 12), /* power down PHYs */
 	HCTL_SOFTRST		= (1 << 13), /* global reset (no phy reset) */
 	HCTL_RPGSEL		= (1 << 15), /* register page select */
@@ -81,9 +122,7 @@ enum {
 	PIRQ_PENDING		= (1 << 7),  /* port IRQ pending (STAT only) */
 
 	PIRQ_ERR		= PIRQ_OFFLINE | PIRQ_ONLINE | PIRQ_FATAL,
-
-	PIRQ_MASK_DMA_READ	= PIRQ_REPLY | PIRQ_ATA,
-	PIRQ_MASK_OTHER		= PIRQ_REPLY | PIRQ_COMPLETE,
+	PIRQ_MASK_DEFAULT	= PIRQ_REPLY | PIRQ_ATA,
 	PIRQ_MASK_FREEZE	= 0xff,
 
 	/* PORT_PRD_CTL bits */
@@ -96,20 +135,104 @@ enum {
 	IDMA_CTL_RST_IDMA	= (1 << 5),  /* reset IDMA machinery */
 	IDMA_CTL_GO		= (1 << 7),  /* IDMA mode go */
 	IDMA_CTL_ATA_NIEN	= (1 << 8),  /* ATA IRQ disable */
+
+	/* PORT_IDMA_STAT bits */
+	IDMA_STAT_PERR		= (1 << 0),  /* PCI ERROR MODE */
+	IDMA_STAT_CPBERR	= (1 << 1),  /* ADMA CPB error */
+	IDMA_STAT_LGCY		= (1 << 3),  /* ADMA legacy */
+	IDMA_STAT_UIRQ		= (1 << 4),  /* ADMA unsolicited irq */
+	IDMA_STAT_STPD		= (1 << 5),  /* ADMA stopped */
+	IDMA_STAT_PSD		= (1 << 6),  /* ADMA pause */
+	IDMA_STAT_DONE		= (1 << 7),  /* ADMA done */
+
+	IDMA_STAT_ERR		= IDMA_STAT_PERR | IDMA_STAT_CPBERR,
+
+	/* CPB Control Flags */
+	CPB_CTL_VALID		= (1 << 0),  /* CPB valid */
+	CPB_CTL_QUEUED		= (1 << 1),  /* queued command */
+	CPB_CTL_DATA		= (1 << 2),  /* data, rsvd in datasheet */
+	CPB_CTL_IEN		= (1 << 3),  /* PCI interrupt enable */
+	CPB_CTL_DEVDIR		= (1 << 4),  /* device direction control */
+
+	/* CPB Response Flags */
+	CPB_RESP_DONE		= (1 << 0),  /* ATA command complete */
+	CPB_RESP_REL		= (1 << 1),  /* ATA release */
+	CPB_RESP_IGNORED	= (1 << 2),  /* CPB ignored */
+	CPB_RESP_ATA_ERR	= (1 << 3),  /* ATA command error */
+	CPB_RESP_SPURIOUS	= (1 << 4),  /* ATA spurious interrupt error */
+	CPB_RESP_UNDERFLOW	= (1 << 5),  /* APRD deficiency length error */
+	CPB_RESP_OVERFLOW	= (1 << 6),  /* APRD excess length error */
+	CPB_RESP_CPB_ERR	= (1 << 7),  /* CPB error flag */
+
+	/* PRD Control Flags */
+	PRD_DRAIN		= (1 << 1),  /* ignore data excess */
+	PRD_CDB			= (1 << 2),  /* atapi packet command pointer */
+	PRD_DIRECT_INTR		= (1 << 3),  /* direct interrupt */
+	PRD_DMA			= (1 << 4),  /* data transfer method */
+	PRD_WRITE		= (1 << 5),  /* data dir, rsvd in datasheet */
+	PRD_IOM			= (1 << 6),  /* io/memory transfer */
+	PRD_END			= (1 << 7),  /* APRD chain end */
 };
 
+/* Command Parameter Block */
+struct inic_cpb {
+	u8		resp_flags;	/* Response Flags */
+	u8		error;		/* ATA Error */
+	u8		status;		/* ATA Status */
+	u8		ctl_flags;	/* Control Flags */
+	__le32		len;		/* Total Transfer Length */
+	__le32		prd;		/* First PRD pointer */
+	u8		rsvd[4];
+	/* 16 bytes */
+	u8		feature;	/* ATA Feature */
+	u8		hob_feature;	/* ATA Ex. Feature */
+	u8		device;		/* ATA Device/Head */
+	u8		mirctl;		/* Mirror Control */
+	u8		nsect;		/* ATA Sector Count */
+	u8		hob_nsect;	/* ATA Ex. Sector Count */
+	u8		lbal;		/* ATA Sector Number */
+	u8		hob_lbal;	/* ATA Ex. Sector Number */
+	u8		lbam;		/* ATA Cylinder Low */
+	u8		hob_lbam;	/* ATA Ex. Cylinder Low */
+	u8		lbah;		/* ATA Cylinder High */
+	u8		hob_lbah;	/* ATA Ex. Cylinder High */
+	u8		command;	/* ATA Command */
+	u8		ctl;		/* ATA Control */
+	u8		slave_error;	/* Slave ATA Error */
+	u8		slave_status;	/* Slave ATA Status */
+	/* 32 bytes */
+} __packed;
+
+/* Physical Region Descriptor */
+struct inic_prd {
+	__le32		mad;		/* Physical Memory Address */
+	__le16		len;		/* Transfer Length */
+	u8		rsvd;
+	u8		flags;		/* Control Flags */
+} __packed;
+
+struct inic_pkt {
+	struct inic_cpb	cpb;
+	struct inic_prd	prd[LIBATA_MAX_PRD + 1];	/* + 1 for cdb */
+	u8		cdb[ATAPI_CDB_LEN];
+} __packed;
+
 struct inic_host_priv {
-	u16	cached_hctl;
+	void __iomem	*mmio_base;
+	u16		cached_hctl;
 };
 
 struct inic_port_priv {
-	u8	dfl_prdctl;
-	u8	cached_prdctl;
-	u8	cached_pirq_mask;
+	struct inic_pkt	*pkt;
+	dma_addr_t	pkt_dma;
+	u32		*cpb_tbl;
+	dma_addr_t	cpb_tbl_dma;
 };
 
 static struct scsi_host_template inic_sht = {
-	ATA_BMDMA_SHT(DRV_NAME),
+	ATA_BASE_SHT(DRV_NAME),
+	.sg_tablesize	= LIBATA_MAX_PRD,	/* maybe it can be larger? */
+	.dma_boundary	= INIC_DMA_BOUNDARY,
 };
 
 static const int scr_map[] = {
@@ -120,54 +243,34 @@ static const int scr_map[] = {
 
 static void __iomem *inic_port_base(struct ata_port *ap)
 {
-	return ap->host->iomap[MMIO_BAR] + ap->port_no * PORT_SIZE;
-}
-
-static void __inic_set_pirq_mask(struct ata_port *ap, u8 mask)
-{
-	void __iomem *port_base = inic_port_base(ap);
-	struct inic_port_priv *pp = ap->private_data;
+	struct inic_host_priv *hpriv = ap->host->private_data;
 
-	writeb(mask, port_base + PORT_IRQ_MASK);
-	pp->cached_pirq_mask = mask;
-}
-
-static void inic_set_pirq_mask(struct ata_port *ap, u8 mask)
-{
-	struct inic_port_priv *pp = ap->private_data;
-
-	if (pp->cached_pirq_mask != mask)
-		__inic_set_pirq_mask(ap, mask);
+	return hpriv->mmio_base + ap->port_no * PORT_SIZE;
 }
 
 static void inic_reset_port(void __iomem *port_base)
 {
 	void __iomem *idma_ctl = port_base + PORT_IDMA_CTL;
-	u16 ctl;
 
-	ctl = readw(idma_ctl);
-	ctl &= ~(IDMA_CTL_RST_IDMA | IDMA_CTL_ATA_NIEN | IDMA_CTL_GO);
+	/* stop IDMA engine */
+	readw(idma_ctl); /* flush */
+	msleep(1);
 
 	/* mask IRQ and assert reset */
-	writew(ctl | IDMA_CTL_RST_IDMA | IDMA_CTL_ATA_NIEN, idma_ctl);
+	writew(IDMA_CTL_RST_IDMA, idma_ctl);
 	readw(idma_ctl); /* flush */
-
-	/* give it some time */
 	msleep(1);
 
 	/* release reset */
-	writew(ctl | IDMA_CTL_ATA_NIEN, idma_ctl);
+	writew(0, idma_ctl);
 
 	/* clear irq */
 	writeb(0xff, port_base + PORT_IRQ_STAT);
-
-	/* reenable ATA IRQ, turn off IDMA mode */
-	writew(ctl, idma_ctl);
 }
 
 static int inic_scr_read(struct ata_port *ap, unsigned sc_reg, u32 *val)
 {
-	void __iomem *scr_addr = ap->ioaddr.scr_addr;
+	void __iomem *scr_addr = inic_port_base(ap) + PORT_SCR;
 	void __iomem *addr;
 
 	if (unlikely(sc_reg >= ARRAY_SIZE(scr_map)))
@@ -184,120 +287,126 @@ static int inic_scr_read(struct ata_port *ap, unsigned sc_reg, u32 *val)
 
 static int inic_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val)
 {
-	void __iomem *scr_addr = ap->ioaddr.scr_addr;
-	void __iomem *addr;
+	void __iomem *scr_addr = inic_port_base(ap) + PORT_SCR;
 
 	if (unlikely(sc_reg >= ARRAY_SIZE(scr_map)))
 		return -EINVAL;
 
-	addr = scr_addr + scr_map[sc_reg] * 4;
 	writel(val, scr_addr + scr_map[sc_reg] * 4);
 	return 0;
 }
 
-/*
- * In TF mode, inic162x is very similar to SFF device.  TF registers
- * function the same.  DMA engine behaves similarly using the same PRD
- * format as BMDMA but different command register, interrupt and event
- * notification methods are used.  The following inic_bmdma_*()
- * functions do the impedance matching.
- */
-static void inic_bmdma_setup(struct ata_queued_cmd *qc)
+static void inic_stop_idma(struct ata_port *ap)
 {
-	struct ata_port *ap = qc->ap;
-	struct inic_port_priv *pp = ap->private_data;
 	void __iomem *port_base = inic_port_base(ap);
-	int rw = qc->tf.flags & ATA_TFLAG_WRITE;
-
-	/* make sure device sees PRD table writes */
-	wmb();
-
-	/* load transfer length */
-	writel(qc->nbytes, port_base + PORT_PRD_XFERLEN);
-
-	/* turn on DMA and specify data direction */
-	pp->cached_prdctl = pp->dfl_prdctl | PRD_CTL_DMAEN;
-	if (!rw)
-		pp->cached_prdctl |= PRD_CTL_WR;
-	writeb(pp->cached_prdctl, port_base + PORT_PRD_CTL);
 
-	/* issue r/w command */
-	ap->ops->sff_exec_command(ap, &qc->tf);
+	readb(port_base + PORT_RPQ_FIFO);
+	readb(port_base + PORT_RPQ_CNT);
+	writew(0, port_base + PORT_IDMA_CTL);
 }
 
-static void inic_bmdma_start(struct ata_queued_cmd *qc)
+static void inic_host_err_intr(struct ata_port *ap, u8 irq_stat, u16 idma_stat)
 {
-	struct ata_port *ap = qc->ap;
+	struct ata_eh_info *ehi = &ap->link.eh_info;
 	struct inic_port_priv *pp = ap->private_data;
-	void __iomem *port_base = inic_port_base(ap);
+	struct inic_cpb *cpb = &pp->pkt->cpb;
+	bool freeze = false;
 
-	/* start host DMA transaction */
-	pp->cached_prdctl |= PRD_CTL_START;
-	writeb(pp->cached_prdctl, port_base + PORT_PRD_CTL);
-}
+	ata_ehi_clear_desc(ehi);
+	ata_ehi_push_desc(ehi, "irq_stat=0x%x idma_stat=0x%x",
+			  irq_stat, idma_stat);
 
-static void inic_bmdma_stop(struct ata_queued_cmd *qc)
-{
-	struct ata_port *ap = qc->ap;
-	struct inic_port_priv *pp = ap->private_data;
-	void __iomem *port_base = inic_port_base(ap);
+	inic_stop_idma(ap);
 
-	/* stop DMA engine */
-	writeb(pp->dfl_prdctl, port_base + PORT_PRD_CTL);
-}
+	if (irq_stat & (PIRQ_OFFLINE | PIRQ_ONLINE)) {
+		ata_ehi_push_desc(ehi, "hotplug");
+		ata_ehi_hotplugged(ehi);
+		freeze = true;
+	}
 
-static u8 inic_bmdma_status(struct ata_port *ap)
-{
-	/* event is already verified by the interrupt handler */
-	return ATA_DMA_INTR;
+	if (idma_stat & IDMA_STAT_PERR) {
+		ata_ehi_push_desc(ehi, "PCI error");
+		freeze = true;
+	}
+
+	if (idma_stat & IDMA_STAT_CPBERR) {
+		ata_ehi_push_desc(ehi, "CPB error");
+
+		if (cpb->resp_flags & CPB_RESP_IGNORED) {
+			__ata_ehi_push_desc(ehi, " ignored");
+			ehi->err_mask |= AC_ERR_INVALID;
+			freeze = true;
+		}
+
+		if (cpb->resp_flags & CPB_RESP_ATA_ERR)
+			ehi->err_mask |= AC_ERR_DEV;
+
+		if (cpb->resp_flags & CPB_RESP_SPURIOUS) {
+			__ata_ehi_push_desc(ehi, " spurious-intr");
+			ehi->err_mask |= AC_ERR_HSM;
+			freeze = true;
+		}
+
+		if (cpb->resp_flags &
+		    (CPB_RESP_UNDERFLOW | CPB_RESP_OVERFLOW)) {
+			__ata_ehi_push_desc(ehi, " data-over/underflow");
+			ehi->err_mask |= AC_ERR_HSM;
+			freeze = true;
+		}
+	}
+
+	if (freeze)
+		ata_port_freeze(ap);
+	else
+		ata_port_abort(ap);
 }
 
 static void inic_host_intr(struct ata_port *ap)
 {
 	void __iomem *port_base = inic_port_base(ap);
-	struct ata_eh_info *ehi = &ap->link.eh_info;
+	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
 	u8 irq_stat;
+	u16 idma_stat;
 
-	/* fetch and clear irq */
+	/* read and clear IRQ status */
 	irq_stat = readb(port_base + PORT_IRQ_STAT);
 	writeb(irq_stat, port_base + PORT_IRQ_STAT);
+	idma_stat = readw(port_base + PORT_IDMA_STAT);
 
-	if (likely(!(irq_stat & PIRQ_ERR))) {
-		struct ata_queued_cmd *qc =
-			ata_qc_from_tag(ap, ap->link.active_tag);
+	if (unlikely((irq_stat & PIRQ_ERR) || (idma_stat & IDMA_STAT_ERR)))
+		inic_host_err_intr(ap, irq_stat, idma_stat);
 
-		if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
-			ap->ops->sff_check_status(ap); /* clear ATA interrupt */
-			return;
-		}
+	if (unlikely(!qc))
+		goto spurious;
 
-		if (likely(ata_sff_host_intr(ap, qc)))
-			return;
+	if (likely(idma_stat & IDMA_STAT_DONE)) {
+		inic_stop_idma(ap);
 
-		ap->ops->sff_check_status(ap); /* clear ATA interrupt */
-		ata_port_printk(ap, KERN_WARNING, "unhandled "
-				"interrupt, irq_stat=%x\n", irq_stat);
+		/* Depending on circumstances, device error
+		 * isn't reported by IDMA, check it explicitly.
+		 */
+		if (unlikely(readb(port_base + PORT_TF_COMMAND) &
+			     (ATA_DF | ATA_ERR)))
+			qc->err_mask |= AC_ERR_DEV;
+
+		ata_qc_complete(qc);
 		return;
 	}
 
-	/* error */
-	ata_ehi_push_desc(ehi, "irq_stat=0x%x", irq_stat);
-
-	if (irq_stat & (PIRQ_OFFLINE | PIRQ_ONLINE)) {
-		ata_ehi_hotplugged(ehi);
-		ata_port_freeze(ap);
-	} else
-		ata_port_abort(ap);
+ spurious:
+	ata_port_printk(ap, KERN_WARNING, "unhandled interrupt: "
+			"cmd=0x%x irq_stat=0x%x idma_stat=0x%x\n",
+			qc ? qc->tf.command : 0xff, irq_stat, idma_stat);
 }
 
 static irqreturn_t inic_interrupt(int irq, void *dev_instance)
 {
 	struct ata_host *host = dev_instance;
-	void __iomem *mmio_base = host->iomap[MMIO_BAR];
+	struct inic_host_priv *hpriv = host->private_data;
 	u16 host_irq_stat;
 	int i, handled = 0;
 
-	host_irq_stat = readw(mmio_base + HOST_IRQ_STAT);
+	host_irq_stat = readw(hpriv->mmio_base + HOST_IRQ_STAT);
 
 	if (unlikely(!(host_irq_stat & HIRQ_GLOBAL)))
 		goto out;
@@ -327,60 +436,173 @@ static irqreturn_t inic_interrupt(int irq, void *dev_instance)
 	return IRQ_RETVAL(handled);
 }
 
+static int inic_check_atapi_dma(struct ata_queued_cmd *qc)
+{
+	/* For some reason ATAPI_PROT_DMA doesn't work for some
+	 * commands including writes and other misc ops.  Use PIO
+	 * protocol instead, which BTW is driven by the DMA engine
+	 * anyway, so it shouldn't make much difference for native
+	 * SATA devices.
+	 */
+	if (atapi_cmd_type(qc->cdb[0]) == READ)
+		return 0;
+	return 1;
+}
+
+static void inic_fill_sg(struct inic_prd *prd, struct ata_queued_cmd *qc)
+{
+	struct scatterlist *sg;
+	unsigned int si;
+	u8 flags = 0;
+
+	if (qc->tf.flags & ATA_TFLAG_WRITE)
+		flags |= PRD_WRITE;
+
+	if (ata_is_dma(qc->tf.protocol))
+		flags |= PRD_DMA;
+
+	for_each_sg(qc->sg, sg, qc->n_elem, si) {
+		prd->mad = cpu_to_le32(sg_dma_address(sg));
+		prd->len = cpu_to_le16(sg_dma_len(sg));
+		prd->flags = flags;
+		prd++;
+	}
+
+	WARN_ON(!si);
+	prd[-1].flags |= PRD_END;
+}
+
+static void inic_qc_prep(struct ata_queued_cmd *qc)
+{
+	struct inic_port_priv *pp = qc->ap->private_data;
+	struct inic_pkt *pkt = pp->pkt;
+	struct inic_cpb *cpb = &pkt->cpb;
+	struct inic_prd *prd = pkt->prd;
+	bool is_atapi = ata_is_atapi(qc->tf.protocol);
+	bool is_data = ata_is_data(qc->tf.protocol);
+	unsigned int cdb_len = 0;
+
+	VPRINTK("ENTER\n");
+
+	if (is_atapi)
+		cdb_len = qc->dev->cdb_len;
+
+	/* prepare packet, based on initio driver */
+	memset(pkt, 0, sizeof(struct inic_pkt));
+
+	cpb->ctl_flags = CPB_CTL_VALID | CPB_CTL_IEN;
+	if (is_atapi || is_data)
+		cpb->ctl_flags |= CPB_CTL_DATA;
+
+	cpb->len = cpu_to_le32(qc->nbytes + cdb_len);
+	cpb->prd = cpu_to_le32(pp->pkt_dma + offsetof(struct inic_pkt, prd));
+
+	cpb->device = qc->tf.device;
+	cpb->feature = qc->tf.feature;
+	cpb->nsect = qc->tf.nsect;
+	cpb->lbal = qc->tf.lbal;
+	cpb->lbam = qc->tf.lbam;
+	cpb->lbah = qc->tf.lbah;
+
+	if (qc->tf.flags & ATA_TFLAG_LBA48) {
+		cpb->hob_feature = qc->tf.hob_feature;
+		cpb->hob_nsect = qc->tf.hob_nsect;
+		cpb->hob_lbal = qc->tf.hob_lbal;
+		cpb->hob_lbam = qc->tf.hob_lbam;
+		cpb->hob_lbah = qc->tf.hob_lbah;
+	}
+
+	cpb->command = qc->tf.command;
+	/* don't load ctl - dunno why.  it's like that in the initio driver */
+
+	/* setup PRD for CDB */
+	if (is_atapi) {
+		memcpy(pkt->cdb, qc->cdb, ATAPI_CDB_LEN);
+		prd->mad = cpu_to_le32(pp->pkt_dma +
+				       offsetof(struct inic_pkt, cdb));
+		prd->len = cpu_to_le16(cdb_len);
+		prd->flags = PRD_CDB | PRD_WRITE;
+		if (!is_data)
+			prd->flags |= PRD_END;
+		prd++;
+	}
+
+	/* setup sg table */
+	if (is_data)
+		inic_fill_sg(prd, qc);
+
+	pp->cpb_tbl[0] = pp->pkt_dma;
+}
+
 static unsigned int inic_qc_issue(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
+	void __iomem *port_base = inic_port_base(ap);
 
-	/* ATA IRQ doesn't wait for DMA transfer completion and vice
-	 * versa.  Mask IRQ selectively to detect command completion.
-	 * Without it, ATA DMA read command can cause data corruption.
-	 *
-	 * Something similar might be needed for ATAPI writes.  I
-	 * tried a lot of combinations but couldn't find the solution.
-	 */
-	if (qc->tf.protocol == ATA_PROT_DMA &&
-	    !(qc->tf.flags & ATA_TFLAG_WRITE))
-		inic_set_pirq_mask(ap, PIRQ_MASK_DMA_READ);
-	else
-		inic_set_pirq_mask(ap, PIRQ_MASK_OTHER);
+	/* fire up the ADMA engine */
+	writew(HCTL_FTHD0, port_base + HOST_CTL);
+	writew(IDMA_CTL_GO, port_base + PORT_IDMA_CTL);
+	writeb(0, port_base + PORT_CPB_PTQFIFO);
+
+	return 0;
+}
+
+static void inic_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
+{
+	void __iomem *port_base = inic_port_base(ap);
+
+	tf->feature	= readb(port_base + PORT_TF_FEATURE);
+	tf->nsect	= readb(port_base + PORT_TF_NSECT);
+	tf->lbal	= readb(port_base + PORT_TF_LBAL);
+	tf->lbam	= readb(port_base + PORT_TF_LBAM);
+	tf->lbah	= readb(port_base + PORT_TF_LBAH);
+	tf->device	= readb(port_base + PORT_TF_DEVICE);
+	tf->command	= readb(port_base + PORT_TF_COMMAND);
+}
 
-	/* Issuing a command to yet uninitialized port locks up the
-	 * controller.  Most of the time, this happens for the first
-	 * command after reset which are ATA and ATAPI IDENTIFYs.
-	 * Fast fail if stat is 0x7f or 0xff for those commands.
+static bool inic_qc_fill_rtf(struct ata_queued_cmd *qc)
+{
+	struct ata_taskfile *rtf = &qc->result_tf;
+	struct ata_taskfile tf;
+
+	/* FIXME: Except for status and error, result TF access
+	 * doesn't work.  I tried reading from BAR0/2, CPB and BAR5.
+	 * None works regardless of which command interface is used.
+	 * For now return true iff status indicates device error.
+	 * This means that we're reporting bogus sector for RW
+	 * failures.  Eeekk....
 	 */
-	if (unlikely(qc->tf.command == ATA_CMD_ID_ATA ||
-		     qc->tf.command == ATA_CMD_ID_ATAPI)) {
-		u8 stat = ap->ops->sff_check_status(ap);
-		if (stat == 0x7f || stat == 0xff)
-			return AC_ERR_HSM;
-	}
+	inic_tf_read(qc->ap, &tf);
 
-	return ata_sff_qc_issue(qc);
+	if (!(tf.command & ATA_ERR))
+		return false;
+
+	rtf->command = tf.command;
+	rtf->feature = tf.feature;
+	return true;
 }
 
 static void inic_freeze(struct ata_port *ap)
 {
 	void __iomem *port_base = inic_port_base(ap);
 
-	__inic_set_pirq_mask(ap, PIRQ_MASK_FREEZE);
-
-	ap->ops->sff_check_status(ap);
+	writeb(PIRQ_MASK_FREEZE, port_base + PORT_IRQ_MASK);
 	writeb(0xff, port_base + PORT_IRQ_STAT);
-
-	readb(port_base + PORT_IRQ_STAT); /* flush */
 }
 
 static void inic_thaw(struct ata_port *ap)
 {
 	void __iomem *port_base = inic_port_base(ap);
 
-	ap->ops->sff_check_status(ap);
 	writeb(0xff, port_base + PORT_IRQ_STAT);
+	writeb(PIRQ_MASK_DEFAULT, port_base + PORT_IRQ_MASK);
+}
 
-	__inic_set_pirq_mask(ap, PIRQ_MASK_OTHER);
+static int inic_check_ready(struct ata_link *link)
+{
+	void __iomem *port_base = inic_port_base(link->ap);
 
-	readb(port_base + PORT_IRQ_STAT); /* flush */
+	return ata_check_ready(readb(port_base + PORT_TF_COMMAND));
 }
 
 /*
@@ -394,17 +616,15 @@ static int inic_hardreset(struct ata_link *link, unsigned int *class,
 	void __iomem *port_base = inic_port_base(ap);
 	void __iomem *idma_ctl = port_base + PORT_IDMA_CTL;
 	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
-	u16 val;
 	int rc;
 
 	/* hammer it into sane state */
 	inic_reset_port(port_base);
 
-	val = readw(idma_ctl);
-	writew(val | IDMA_CTL_RST_ATA, idma_ctl);
+	writew(IDMA_CTL_RST_ATA, idma_ctl);
 	readw(idma_ctl); /* flush */
 	msleep(1);
-	writew(val & ~IDMA_CTL_RST_ATA, idma_ctl);
+	writew(0, idma_ctl);
 
 	rc = sata_link_resume(link, timing, deadline);
 	if (rc) {
@@ -418,7 +638,7 @@ static int inic_hardreset(struct ata_link *link, unsigned int *class,
 		struct ata_taskfile tf;
 
 		/* wait for link to become ready */
-		rc = ata_sff_wait_after_reset(link, 1, deadline);
+		rc = ata_wait_after_reset(link, deadline, inic_check_ready);
 		/* link occupied, -ENODEV too is an error */
 		if (rc) {
 			ata_link_printk(link, KERN_WARNING, "device not ready "
@@ -426,7 +646,7 @@ static int inic_hardreset(struct ata_link *link, unsigned int *class,
 			return rc;
 		}
 
-		ata_sff_tf_read(ap, &tf);
+		inic_tf_read(ap, &tf);
 		*class = ata_dev_classify(&tf);
 	}
 
@@ -436,18 +656,8 @@ static int inic_hardreset(struct ata_link *link, unsigned int *class,
 static void inic_error_handler(struct ata_port *ap)
 {
 	void __iomem *port_base = inic_port_base(ap);
-	struct inic_port_priv *pp = ap->private_data;
-	unsigned long flags;
 
-	/* reset PIO HSM and stop DMA engine */
 	inic_reset_port(port_base);
-
-	spin_lock_irqsave(ap->lock, flags);
-	ap->hsm_task_state = HSM_ST_IDLE;
-	writeb(pp->dfl_prdctl, port_base + PORT_PRD_CTL);
-	spin_unlock_irqrestore(ap->lock, flags);
-
-	/* PIO and DMA engines have been stopped, perform recovery */
 	ata_std_error_handler(ap);
 }
 
@@ -458,26 +668,18 @@ static void inic_post_internal_cmd(struct ata_queued_cmd *qc)
 		inic_reset_port(inic_port_base(qc->ap));
 }
 
-static void inic_dev_config(struct ata_device *dev)
-{
-	/* inic can only handle up to LBA28 max sectors */
-	if (dev->max_sectors > ATA_MAX_SECTORS)
-		dev->max_sectors = ATA_MAX_SECTORS;
-
-	if (dev->n_sectors >= 1 << 28) {
-		ata_dev_printk(dev, KERN_ERR,
-	"ERROR: This driver doesn't support LBA48 yet and may cause\n"
-	"       data corruption on such devices.  Disabling.\n");
-		ata_dev_disable(dev);
-	}
-}
-
 static void init_port(struct ata_port *ap)
 {
 	void __iomem *port_base = inic_port_base(ap);
+	struct inic_port_priv *pp = ap->private_data;
 
-	/* Setup PRD address */
+	/* clear packet and CPB table */
+	memset(pp->pkt, 0, sizeof(struct inic_pkt));
+	memset(pp->cpb_tbl, 0, IDMA_CPB_TBL_SIZE);
+
+	/* setup PRD and CPB lookup table addresses */
 	writel(ap->prd_dma, port_base + PORT_PRD_ADDR);
+	writel(pp->cpb_tbl_dma, port_base + PORT_CPB_CPBLAR);
 }
 
 static int inic_port_resume(struct ata_port *ap)
@@ -488,28 +690,30 @@ static int inic_port_resume(struct ata_port *ap)
 
 static int inic_port_start(struct ata_port *ap)
 {
-	void __iomem *port_base = inic_port_base(ap);
+	struct device *dev = ap->host->dev;
 	struct inic_port_priv *pp;
-	u8 tmp;
 	int rc;
 
 	/* alloc and initialize private data */
-	pp = devm_kzalloc(ap->host->dev, sizeof(*pp), GFP_KERNEL);
+	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
 	if (!pp)
 		return -ENOMEM;
 	ap->private_data = pp;
 
-	/* default PRD_CTL value, DMAEN, WR and START off */
-	tmp = readb(port_base + PORT_PRD_CTL);
-	tmp &= ~(PRD_CTL_DMAEN | PRD_CTL_WR | PRD_CTL_START);
-	pp->dfl_prdctl = tmp;
-
 	/* Alloc resources */
 	rc = ata_port_start(ap);
-	if (rc) {
-		kfree(pp);
+	if (rc)
 		return rc;
-	}
+
+	pp->pkt = dmam_alloc_coherent(dev, sizeof(struct inic_pkt),
+				      &pp->pkt_dma, GFP_KERNEL);
+	if (!pp->pkt)
+		return -ENOMEM;
+
+	pp->cpb_tbl = dmam_alloc_coherent(dev, IDMA_CPB_TBL_SIZE,
+					  &pp->cpb_tbl_dma, GFP_KERNEL);
+	if (!pp->cpb_tbl)
+		return -ENOMEM;
 
 	init_port(ap);
 
@@ -517,21 +721,18 @@ static int inic_port_start(struct ata_port *ap)
 }
 
 static struct ata_port_operations inic_port_ops = {
-	.inherits		= &ata_sff_port_ops,
+	.inherits		= &sata_port_ops,
 
-	.bmdma_setup		= inic_bmdma_setup,
-	.bmdma_start		= inic_bmdma_start,
-	.bmdma_stop		= inic_bmdma_stop,
-	.bmdma_status		= inic_bmdma_status,
+	.check_atapi_dma	= inic_check_atapi_dma,
+	.qc_prep		= inic_qc_prep,
 	.qc_issue		= inic_qc_issue,
+	.qc_fill_rtf		= inic_qc_fill_rtf,
 
 	.freeze			= inic_freeze,
 	.thaw			= inic_thaw,
-	.softreset		= ATA_OP_NULL,	/* softreset is broken */
 	.hardreset		= inic_hardreset,
 	.error_handler		= inic_error_handler,
 	.post_internal_cmd	= inic_post_internal_cmd,
-	.dev_config		= inic_dev_config,
 
 	.scr_read		= inic_scr_read,
 	.scr_write		= inic_scr_write,
@@ -541,12 +742,6 @@ static struct ata_port_operations inic_port_ops = { | |||
541 | }; | 742 | }; |
542 | 743 | ||
543 | static struct ata_port_info inic_port_info = { | 744 | static struct ata_port_info inic_port_info = { |
544 | /* For some reason, ATAPI_PROT_PIO is broken on this | ||
545 | * controller, and no, PIO_POLLING doesn't fix it. It somehow | ||
546 | * manages to report the wrong ireason and ignoring ireason | ||
547 | * results in machine lockup. Tell libata to always prefer | ||
548 | * DMA. | ||
549 | */ | ||
550 | .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA, | 745 | .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA, |
551 | .pio_mask = 0x1f, /* pio0-4 */ | 746 | .pio_mask = 0x1f, /* pio0-4 */ |
552 | .mwdma_mask = 0x07, /* mwdma0-2 */ | 747 | .mwdma_mask = 0x07, /* mwdma0-2 */ |
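
The ops table shrinks this way because libata composes ata_port_operations by inheritance: when the host is registered, any method left unset is filled in from the template named by .inherits, and ATA_OP_NULL explicitly masks out an inherited method. Rebasing from ata_sff_port_ops onto sata_port_ops is therefore enough to shed all the bmdma_* hooks at once. A sketch of the shape (my_port_ops and my_qc_issue are hypothetical):

#include <linux/libata.h>

static unsigned int my_qc_issue(struct ata_queued_cmd *qc)
{
	return ata_sff_qc_issue(qc);	/* placeholder for driver-specific issue logic */
}

static struct ata_port_operations my_port_ops = {
	.inherits	= &sata_port_ops,	/* take the generic SATA defaults */
	.qc_issue	= my_qc_issue,		/* override only what differs */
	.softreset	= ATA_OP_NULL,		/* or mask out an inherited method */
};
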
@@ -599,7 +794,6 @@ static int inic_pci_device_resume(struct pci_dev *pdev) | |||
599 | { | 794 | { |
600 | struct ata_host *host = dev_get_drvdata(&pdev->dev); | 795 | struct ata_host *host = dev_get_drvdata(&pdev->dev); |
601 | struct inic_host_priv *hpriv = host->private_data; | 796 | struct inic_host_priv *hpriv = host->private_data; |
602 | void __iomem *mmio_base = host->iomap[MMIO_BAR]; | ||
603 | int rc; | 797 | int rc; |
604 | 798 | ||
605 | rc = ata_pci_device_do_resume(pdev); | 799 | rc = ata_pci_device_do_resume(pdev); |
@@ -607,7 +801,7 @@ static int inic_pci_device_resume(struct pci_dev *pdev) | |||
607 | return rc; | 801 | return rc; |
608 | 802 | ||
609 | if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) { | 803 | if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) { |
610 | rc = init_controller(mmio_base, hpriv->cached_hctl); | 804 | rc = init_controller(hpriv->mmio_base, hpriv->cached_hctl); |
611 | if (rc) | 805 | if (rc) |
612 | return rc; | 806 | return rc; |
613 | } | 807 | } |
@@ -625,6 +819,7 @@ static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
625 | struct ata_host *host; | 819 | struct ata_host *host; |
626 | struct inic_host_priv *hpriv; | 820 | struct inic_host_priv *hpriv; |
627 | void __iomem * const *iomap; | 821 | void __iomem * const *iomap; |
822 | int mmio_bar; | ||
628 | int i, rc; | 823 | int i, rc; |
629 | 824 | ||
630 | if (!printed_version++) | 825 | if (!printed_version++) |
@@ -638,38 +833,31 @@ static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
638 | 833 | ||
639 | host->private_data = hpriv; | 834 | host->private_data = hpriv; |
640 | 835 | ||
641 | /* acquire resources and fill host */ | 836 | /* Acquire resources and fill host. Note that PCI and cardbus |
837 | * use different BARs. | ||
838 | */ | ||
642 | rc = pcim_enable_device(pdev); | 839 | rc = pcim_enable_device(pdev); |
643 | if (rc) | 840 | if (rc) |
644 | return rc; | 841 | return rc; |
645 | 842 | ||
646 | rc = pcim_iomap_regions(pdev, 0x3f, DRV_NAME); | 843 | if (pci_resource_flags(pdev, MMIO_BAR_PCI) & IORESOURCE_MEM) |
844 | mmio_bar = MMIO_BAR_PCI; | ||
845 | else | ||
846 | mmio_bar = MMIO_BAR_CARDBUS; | ||
847 | |||
848 | rc = pcim_iomap_regions(pdev, 1 << mmio_bar, DRV_NAME); | ||
647 | if (rc) | 849 | if (rc) |
648 | return rc; | 850 | return rc; |
649 | host->iomap = iomap = pcim_iomap_table(pdev); | 851 | host->iomap = iomap = pcim_iomap_table(pdev); |
852 | hpriv->mmio_base = iomap[mmio_bar]; | ||
853 | hpriv->cached_hctl = readw(hpriv->mmio_base + HOST_CTL); | ||
650 | 854 | ||
651 | for (i = 0; i < NR_PORTS; i++) { | 855 | for (i = 0; i < NR_PORTS; i++) { |
652 | struct ata_port *ap = host->ports[i]; | 856 | struct ata_port *ap = host->ports[i]; |
653 | struct ata_ioports *port = &ap->ioaddr; | ||
654 | unsigned int offset = i * PORT_SIZE; | ||
655 | |||
656 | port->cmd_addr = iomap[2 * i]; | ||
657 | port->altstatus_addr = | ||
658 | port->ctl_addr = (void __iomem *) | ||
659 | ((unsigned long)iomap[2 * i + 1] | ATA_PCI_CTL_OFS); | ||
660 | port->scr_addr = iomap[MMIO_BAR] + offset + PORT_SCR; | ||
661 | |||
662 | ata_sff_std_ports(port); | ||
663 | |||
664 | ata_port_pbar_desc(ap, MMIO_BAR, -1, "mmio"); | ||
665 | ata_port_pbar_desc(ap, MMIO_BAR, offset, "port"); | ||
666 | ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx", | ||
667 | (unsigned long long)pci_resource_start(pdev, 2 * i), | ||
668 | (unsigned long long)pci_resource_start(pdev, (2 * i + 1)) | | ||
669 | ATA_PCI_CTL_OFS); | ||
670 | } | ||
671 | 857 | ||
672 | hpriv->cached_hctl = readw(iomap[MMIO_BAR] + HOST_CTL); | 858 | ata_port_pbar_desc(ap, mmio_bar, -1, "mmio"); |
859 | ata_port_pbar_desc(ap, mmio_bar, i * PORT_SIZE, "port"); | ||
860 | } | ||
673 | 861 | ||
674 | /* Set dma_mask. This device doesn't support 64bit addressing. */ | 862 | /* Set dma_mask. This device doesn't support 64bit addressing. */ |
675 | rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK); | 863 | rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK); |
@@ -698,7 +886,7 @@ static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
698 | return rc; | 886 | return rc; |
699 | } | 887 | } |
700 | 888 | ||
701 | rc = init_controller(iomap[MMIO_BAR], hpriv->cached_hctl); | 889 | rc = init_controller(hpriv->mmio_base, hpriv->cached_hctl); |
702 | if (rc) { | 890 | if (rc) { |
703 | dev_printk(KERN_ERR, &pdev->dev, | 891 | dev_printk(KERN_ERR, &pdev->dev, |
704 | "failed to initialize controller\n"); | 892 | "failed to initialize controller\n"); |
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c index 842b1a15b78c..bb73b2222627 100644 --- a/drivers/ata/sata_mv.c +++ b/drivers/ata/sata_mv.c | |||
@@ -65,6 +65,7 @@ | |||
65 | #include <linux/platform_device.h> | 65 | #include <linux/platform_device.h> |
66 | #include <linux/ata_platform.h> | 66 | #include <linux/ata_platform.h> |
67 | #include <linux/mbus.h> | 67 | #include <linux/mbus.h> |
68 | #include <linux/bitops.h> | ||
68 | #include <scsi/scsi_host.h> | 69 | #include <scsi/scsi_host.h> |
69 | #include <scsi/scsi_cmnd.h> | 70 | #include <scsi/scsi_cmnd.h> |
70 | #include <scsi/scsi_device.h> | 71 | #include <scsi/scsi_device.h> |
@@ -91,9 +92,9 @@ enum { | |||
91 | MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0), | 92 | MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0), |
92 | 93 | ||
93 | MV_SATAHC0_REG_BASE = 0x20000, | 94 | MV_SATAHC0_REG_BASE = 0x20000, |
94 | MV_FLASH_CTL = 0x1046c, | 95 | MV_FLASH_CTL_OFS = 0x1046c, |
95 | MV_GPIO_PORT_CTL = 0x104f0, | 96 | MV_GPIO_PORT_CTL_OFS = 0x104f0, |
96 | MV_RESET_CFG = 0x180d8, | 97 | MV_RESET_CFG_OFS = 0x180d8, |
97 | 98 | ||
98 | MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ, | 99 | MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ, |
99 | MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ, | 100 | MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ, |
@@ -147,18 +148,21 @@ enum { | |||
147 | /* PCI interface registers */ | 148 | /* PCI interface registers */ |
148 | 149 | ||
149 | PCI_COMMAND_OFS = 0xc00, | 150 | PCI_COMMAND_OFS = 0xc00, |
151 | PCI_COMMAND_MRDTRIG = (1 << 7), /* PCI Master Read Trigger */ | ||
150 | 152 | ||
151 | PCI_MAIN_CMD_STS_OFS = 0xd30, | 153 | PCI_MAIN_CMD_STS_OFS = 0xd30, |
152 | STOP_PCI_MASTER = (1 << 2), | 154 | STOP_PCI_MASTER = (1 << 2), |
153 | PCI_MASTER_EMPTY = (1 << 3), | 155 | PCI_MASTER_EMPTY = (1 << 3), |
154 | GLOB_SFT_RST = (1 << 4), | 156 | GLOB_SFT_RST = (1 << 4), |
155 | 157 | ||
156 | MV_PCI_MODE = 0xd00, | 158 | MV_PCI_MODE_OFS = 0xd00, |
159 | MV_PCI_MODE_MASK = 0x30, | ||
160 | |||
157 | MV_PCI_EXP_ROM_BAR_CTL = 0xd2c, | 161 | MV_PCI_EXP_ROM_BAR_CTL = 0xd2c, |
158 | MV_PCI_DISC_TIMER = 0xd04, | 162 | MV_PCI_DISC_TIMER = 0xd04, |
159 | MV_PCI_MSI_TRIGGER = 0xc38, | 163 | MV_PCI_MSI_TRIGGER = 0xc38, |
160 | MV_PCI_SERR_MASK = 0xc28, | 164 | MV_PCI_SERR_MASK = 0xc28, |
161 | MV_PCI_XBAR_TMOUT = 0x1d04, | 165 | MV_PCI_XBAR_TMOUT_OFS = 0x1d04, |
162 | MV_PCI_ERR_LOW_ADDRESS = 0x1d40, | 166 | MV_PCI_ERR_LOW_ADDRESS = 0x1d40, |
163 | MV_PCI_ERR_HIGH_ADDRESS = 0x1d44, | 167 | MV_PCI_ERR_HIGH_ADDRESS = 0x1d44, |
164 | MV_PCI_ERR_ATTRIBUTE = 0x1d48, | 168 | MV_PCI_ERR_ATTRIBUTE = 0x1d48, |
@@ -225,16 +229,18 @@ enum { | |||
225 | PHY_MODE4 = 0x314, | 229 | PHY_MODE4 = 0x314, |
226 | PHY_MODE2 = 0x330, | 230 | PHY_MODE2 = 0x330, |
227 | SATA_IFCTL_OFS = 0x344, | 231 | SATA_IFCTL_OFS = 0x344, |
232 | SATA_TESTCTL_OFS = 0x348, | ||
228 | SATA_IFSTAT_OFS = 0x34c, | 233 | SATA_IFSTAT_OFS = 0x34c, |
229 | VENDOR_UNIQUE_FIS_OFS = 0x35c, | 234 | VENDOR_UNIQUE_FIS_OFS = 0x35c, |
230 | 235 | ||
231 | FIS_CFG_OFS = 0x360, | 236 | FISCFG_OFS = 0x360, |
232 | FIS_CFG_SINGLE_SYNC = (1 << 16), /* SYNC on DMA activation */ | 237 | FISCFG_WAIT_DEV_ERR = (1 << 8), /* wait for host on DevErr */ |
238 | FISCFG_SINGLE_SYNC = (1 << 16), /* SYNC on DMA activation */ | ||
233 | 239 | ||
234 | MV5_PHY_MODE = 0x74, | 240 | MV5_PHY_MODE = 0x74, |
235 | MV5_LT_MODE = 0x30, | 241 | MV5_LTMODE_OFS = 0x30, |
236 | MV5_PHY_CTL = 0x0C, | 242 | MV5_PHY_CTL_OFS = 0x0C, |
237 | SATA_INTERFACE_CFG = 0x050, | 243 | SATA_INTERFACE_CFG_OFS = 0x050, |
238 | 244 | ||
239 | MV_M2_PREAMP_MASK = 0x7e0, | 245 | MV_M2_PREAMP_MASK = 0x7e0, |
240 | 246 | ||
@@ -332,10 +338,16 @@ enum { | |||
332 | EDMA_CMD_OFS = 0x28, /* EDMA command register */ | 338 | EDMA_CMD_OFS = 0x28, /* EDMA command register */ |
333 | EDMA_EN = (1 << 0), /* enable EDMA */ | 339 | EDMA_EN = (1 << 0), /* enable EDMA */ |
334 | EDMA_DS = (1 << 1), /* disable EDMA; self-negated */ | 340 | EDMA_DS = (1 << 1), /* disable EDMA; self-negated */ |
335 | ATA_RST = (1 << 2), /* reset trans/link/phy */ | 341 | EDMA_RESET = (1 << 2), /* reset eng/trans/link/phy */ |
342 | |||
343 | EDMA_STATUS_OFS = 0x30, /* EDMA engine status */ | ||
344 | EDMA_STATUS_CACHE_EMPTY = (1 << 6), /* GenIIe command cache empty */ | ||
345 | EDMA_STATUS_IDLE = (1 << 7), /* GenIIe EDMA enabled/idle */ | ||
336 | 346 | ||
337 | EDMA_IORDY_TMOUT = 0x34, | 347 | EDMA_IORDY_TMOUT_OFS = 0x34, |
338 | EDMA_ARB_CFG = 0x38, | 348 | EDMA_ARB_CFG_OFS = 0x38, |
349 | |||
350 | EDMA_HALTCOND_OFS = 0x60, /* GenIIe halt conditions */ | ||
339 | 351 | ||
340 | GEN_II_NCQ_MAX_SECTORS = 256, /* max sects/io on Gen2 w/NCQ */ | 352 | GEN_II_NCQ_MAX_SECTORS = 256, /* max sects/io on Gen2 w/NCQ */ |
341 | 353 | ||
@@ -350,15 +362,19 @@ enum { | |||
350 | MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */ | 362 | MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */ |
351 | MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */ | 363 | MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */ |
352 | MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */ | 364 | MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */ |
365 | MV_HP_CUT_THROUGH = (1 << 10), /* can use EDMA cut-through */ | ||
353 | 366 | ||
354 | /* Port private flags (pp_flags) */ | 367 | /* Port private flags (pp_flags) */ |
355 | MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */ | 368 | MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */ |
356 | MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */ | 369 | MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */ |
370 | MV_PP_FLAG_FBS_EN = (1 << 2), /* is EDMA set up for FBS? */ | ||
371 | MV_PP_FLAG_DELAYED_EH = (1 << 3), /* delayed dev err handling */ | ||
357 | }; | 372 | }; |
358 | 373 | ||
359 | #define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I) | 374 | #define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I) |
360 | #define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II) | 375 | #define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II) |
361 | #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE) | 376 | #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE) |
377 | #define IS_PCIE(hpriv) ((hpriv)->hp_flags & MV_HP_PCIE) | ||
362 | #define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC)) | 378 | #define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC)) |
363 | 379 | ||
364 | #define WINDOW_CTRL(i) (0x20030 + ((i) << 4)) | 380 | #define WINDOW_CTRL(i) (0x20030 + ((i) << 4)) |
@@ -433,6 +449,7 @@ struct mv_port_priv { | |||
433 | unsigned int resp_idx; | 449 | unsigned int resp_idx; |
434 | 450 | ||
435 | u32 pp_flags; | 451 | u32 pp_flags; |
452 | unsigned int delayed_eh_pmp_map; | ||
436 | }; | 453 | }; |
437 | 454 | ||
438 | struct mv_port_signal { | 455 | struct mv_port_signal { |
@@ -479,6 +496,7 @@ static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val); | |||
479 | static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val); | 496 | static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val); |
480 | static int mv_port_start(struct ata_port *ap); | 497 | static int mv_port_start(struct ata_port *ap); |
481 | static void mv_port_stop(struct ata_port *ap); | 498 | static void mv_port_stop(struct ata_port *ap); |
499 | static int mv_qc_defer(struct ata_queued_cmd *qc); | ||
482 | static void mv_qc_prep(struct ata_queued_cmd *qc); | 500 | static void mv_qc_prep(struct ata_queued_cmd *qc); |
483 | static void mv_qc_prep_iie(struct ata_queued_cmd *qc); | 501 | static void mv_qc_prep_iie(struct ata_queued_cmd *qc); |
484 | static unsigned int mv_qc_issue(struct ata_queued_cmd *qc); | 502 | static unsigned int mv_qc_issue(struct ata_queued_cmd *qc); |
@@ -527,6 +545,9 @@ static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class, | |||
527 | unsigned long deadline); | 545 | unsigned long deadline); |
528 | static int mv_softreset(struct ata_link *link, unsigned int *class, | 546 | static int mv_softreset(struct ata_link *link, unsigned int *class, |
529 | unsigned long deadline); | 547 | unsigned long deadline); |
548 | static void mv_pmp_error_handler(struct ata_port *ap); | ||
549 | static void mv_process_crpb_entries(struct ata_port *ap, | ||
550 | struct mv_port_priv *pp); | ||
530 | 551 | ||
531 | /* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below | 552 | /* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below |
532 | * because we have to allow room for worst case splitting of | 553 | * because we have to allow room for worst case splitting of |
@@ -548,6 +569,7 @@ static struct scsi_host_template mv6_sht = { | |||
548 | static struct ata_port_operations mv5_ops = { | 569 | static struct ata_port_operations mv5_ops = { |
549 | .inherits = &ata_sff_port_ops, | 570 | .inherits = &ata_sff_port_ops, |
550 | 571 | ||
572 | .qc_defer = mv_qc_defer, | ||
551 | .qc_prep = mv_qc_prep, | 573 | .qc_prep = mv_qc_prep, |
552 | .qc_issue = mv_qc_issue, | 574 | .qc_issue = mv_qc_issue, |
553 | 575 | ||
@@ -566,7 +588,6 @@ static struct ata_port_operations mv5_ops = { | |||
566 | 588 | ||
567 | static struct ata_port_operations mv6_ops = { | 589 | static struct ata_port_operations mv6_ops = { |
568 | .inherits = &mv5_ops, | 590 | .inherits = &mv5_ops, |
569 | .qc_defer = sata_pmp_qc_defer_cmd_switch, | ||
570 | .dev_config = mv6_dev_config, | 591 | .dev_config = mv6_dev_config, |
571 | .scr_read = mv_scr_read, | 592 | .scr_read = mv_scr_read, |
572 | .scr_write = mv_scr_write, | 593 | .scr_write = mv_scr_write, |
@@ -574,12 +595,11 @@ static struct ata_port_operations mv6_ops = { | |||
574 | .pmp_hardreset = mv_pmp_hardreset, | 595 | .pmp_hardreset = mv_pmp_hardreset, |
575 | .pmp_softreset = mv_softreset, | 596 | .pmp_softreset = mv_softreset, |
576 | .softreset = mv_softreset, | 597 | .softreset = mv_softreset, |
577 | .error_handler = sata_pmp_error_handler, | 598 | .error_handler = mv_pmp_error_handler, |
578 | }; | 599 | }; |
579 | 600 | ||
580 | static struct ata_port_operations mv_iie_ops = { | 601 | static struct ata_port_operations mv_iie_ops = { |
581 | .inherits = &mv6_ops, | 602 | .inherits = &mv6_ops, |
582 | .qc_defer = ata_std_qc_defer, /* FIS-based switching */ | ||
583 | .dev_config = ATA_OP_NULL, | 603 | .dev_config = ATA_OP_NULL, |
584 | .qc_prep = mv_qc_prep_iie, | 604 | .qc_prep = mv_qc_prep_iie, |
585 | }; | 605 | }; |
@@ -875,6 +895,29 @@ static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio, | |||
875 | } | 895 | } |
876 | } | 896 | } |
877 | 897 | ||
898 | static void mv_wait_for_edma_empty_idle(struct ata_port *ap) | ||
899 | { | ||
900 | void __iomem *port_mmio = mv_ap_base(ap); | ||
901 | const u32 empty_idle = (EDMA_STATUS_CACHE_EMPTY | EDMA_STATUS_IDLE); | ||
902 | const int per_loop = 5, timeout = (15 * 1000 / per_loop); | ||
903 | int i; | ||
904 | |||
905 | /* | ||
906 | * Wait for the EDMA engine to finish transactions in progress. | ||
907 | * No idea what a good "timeout" value might be, but measurements | ||
908 | * indicate that it often requires hundreds of microseconds | ||
909 | * with two drives in use. So we use the 15msec value above | ||
910 | * as a rough guess at what even more drives might require. | ||
911 | */ | ||
912 | for (i = 0; i < timeout; ++i) { | ||
913 | u32 edma_stat = readl(port_mmio + EDMA_STATUS_OFS); | ||
914 | if ((edma_stat & empty_idle) == empty_idle) | ||
915 | break; | ||
916 | udelay(per_loop); | ||
917 | } | ||
918 | /* ata_port_printk(ap, KERN_INFO, "%s: %u+ usecs\n", __func__, i); */ | ||
919 | } | ||
920 | |||
878 | /** | 921 | /** |
879 | * mv_stop_edma_engine - Disable eDMA engine | 922 | * mv_stop_edma_engine - Disable eDMA engine |
880 | * @port_mmio: io base address | 923 | * @port_mmio: io base address |
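
mv_wait_for_edma_empty_idle() above is the standard bounded-poll idiom for hardware status bits: busy-wait in short udelay() steps, because this path can run with interrupts off and must not sleep, until the wanted bits read back set or the time budget is spent. The same idiom in isolation (a sketch; poll_bits_set() is a hypothetical name):

#include <linux/delay.h>
#include <linux/io.h>
#include <linux/types.h>

/* Spin until all bits in @mask read back set, or @budget_us expires. */
static bool poll_bits_set(void __iomem *reg, u32 mask, unsigned int budget_us)
{
	const unsigned int step_us = 5;
	unsigned int waited;

	for (waited = 0; waited < budget_us; waited += step_us) {
		if ((readl(reg) & mask) == mask)
			return true;
		udelay(step_us);
	}
	return false;	/* timed out; the caller decides how to recover */
}
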
@@ -907,6 +950,7 @@ static int mv_stop_edma(struct ata_port *ap) | |||
907 | if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) | 950 | if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) |
908 | return 0; | 951 | return 0; |
909 | pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; | 952 | pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; |
953 | mv_wait_for_edma_empty_idle(ap); | ||
910 | if (mv_stop_edma_engine(port_mmio)) { | 954 | if (mv_stop_edma_engine(port_mmio)) { |
911 | ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n"); | 955 | ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n"); |
912 | return -EIO; | 956 | return -EIO; |
@@ -1057,26 +1101,95 @@ static void mv6_dev_config(struct ata_device *adev) | |||
1057 | } | 1101 | } |
1058 | } | 1102 | } |
1059 | 1103 | ||
1060 | static void mv_config_fbs(void __iomem *port_mmio, int enable_fbs) | 1104 | static int mv_qc_defer(struct ata_queued_cmd *qc) |
1061 | { | 1105 | { |
1062 | u32 old_fcfg, new_fcfg, old_ltmode, new_ltmode; | 1106 | struct ata_link *link = qc->dev->link; |
1107 | struct ata_port *ap = link->ap; | ||
1108 | struct mv_port_priv *pp = ap->private_data; | ||
1109 | |||
1110 | /* | ||
1111 | * Don't allow new commands if we're in a delayed EH state | ||
1112 | * for NCQ and/or FIS-based switching. | ||
1113 | */ | ||
1114 | if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) | ||
1115 | return ATA_DEFER_PORT; | ||
1063 | /* | 1116 | /* |
1064 | * Various bit settings required for operation | 1117 | * If the port is completely idle, then allow the new qc. |
1065 | * in FIS-based switching (fbs) mode on GenIIe: | ||
1066 | */ | 1118 | */ |
1067 | old_fcfg = readl(port_mmio + FIS_CFG_OFS); | 1119 | if (ap->nr_active_links == 0) |
1068 | old_ltmode = readl(port_mmio + LTMODE_OFS); | 1120 | return 0; |
1069 | if (enable_fbs) { | 1121 | |
1070 | new_fcfg = old_fcfg | FIS_CFG_SINGLE_SYNC; | 1122 | if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) { |
1071 | new_ltmode = old_ltmode | LTMODE_BIT8; | 1123 | /* |
1072 | } else { /* disable fbs */ | 1124 | * The port is operating in host queuing mode (EDMA). |
1073 | new_fcfg = old_fcfg & ~FIS_CFG_SINGLE_SYNC; | 1125 | * It can accomodate a new qc if the qc protocol |
1074 | new_ltmode = old_ltmode & ~LTMODE_BIT8; | 1126 | * is compatible with the current host queue mode. |
1075 | } | 1127 | */ |
1076 | if (new_fcfg != old_fcfg) | 1128 | if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) { |
1077 | writelfl(new_fcfg, port_mmio + FIS_CFG_OFS); | 1129 | /* |
1130 | * The host queue (EDMA) is in NCQ mode. | ||
1131 | * If the new qc is also an NCQ command, | ||
1132 | * then allow the new qc. | ||
1133 | */ | ||
1134 | if (qc->tf.protocol == ATA_PROT_NCQ) | ||
1135 | return 0; | ||
1136 | } else { | ||
1137 | /* | ||
1138 | * The host queue (EDMA) is in non-NCQ, DMA mode. | ||
1139 | * If the new qc is also a non-NCQ, DMA command, | ||
1140 | * then allow the new qc. | ||
1141 | */ | ||
1142 | if (qc->tf.protocol == ATA_PROT_DMA) | ||
1143 | return 0; | ||
1144 | } | ||
1145 | } | ||
1146 | return ATA_DEFER_PORT; | ||
1147 | } | ||
1148 | |||
1149 | static void mv_config_fbs(void __iomem *port_mmio, int want_ncq, int want_fbs) | ||
1150 | { | ||
1151 | u32 new_fiscfg, old_fiscfg; | ||
1152 | u32 new_ltmode, old_ltmode; | ||
1153 | u32 new_haltcond, old_haltcond; | ||
1154 | |||
1155 | old_fiscfg = readl(port_mmio + FISCFG_OFS); | ||
1156 | old_ltmode = readl(port_mmio + LTMODE_OFS); | ||
1157 | old_haltcond = readl(port_mmio + EDMA_HALTCOND_OFS); | ||
1158 | |||
1159 | new_fiscfg = old_fiscfg & ~(FISCFG_SINGLE_SYNC | FISCFG_WAIT_DEV_ERR); | ||
1160 | new_ltmode = old_ltmode & ~LTMODE_BIT8; | ||
1161 | new_haltcond = old_haltcond | EDMA_ERR_DEV; | ||
1162 | |||
1163 | if (want_fbs) { | ||
1164 | new_fiscfg = old_fiscfg | FISCFG_SINGLE_SYNC; | ||
1165 | new_ltmode = old_ltmode | LTMODE_BIT8; | ||
1166 | if (want_ncq) | ||
1167 | new_haltcond &= ~EDMA_ERR_DEV; | ||
1168 | else | ||
1169 | new_fiscfg |= FISCFG_WAIT_DEV_ERR; | ||
1170 | } | ||
1171 | |||
1172 | if (new_fiscfg != old_fiscfg) | ||
1173 | writelfl(new_fiscfg, port_mmio + FISCFG_OFS); | ||
1078 | if (new_ltmode != old_ltmode) | 1174 | if (new_ltmode != old_ltmode) |
1079 | writelfl(new_ltmode, port_mmio + LTMODE_OFS); | 1175 | writelfl(new_ltmode, port_mmio + LTMODE_OFS); |
1176 | if (new_haltcond != old_haltcond) | ||
1177 | writelfl(new_haltcond, port_mmio + EDMA_HALTCOND_OFS); | ||
1178 | } | ||
1179 | |||
1180 | static void mv_60x1_errata_sata25(struct ata_port *ap, int want_ncq) | ||
1181 | { | ||
1182 | struct mv_host_priv *hpriv = ap->host->private_data; | ||
1183 | u32 old, new; | ||
1184 | |||
1185 | /* workaround for 88SX60x1 FEr SATA#25 (part 1) */ | ||
1186 | old = readl(hpriv->base + MV_GPIO_PORT_CTL_OFS); | ||
1187 | if (want_ncq) | ||
1188 | new = old | (1 << 22); | ||
1189 | else | ||
1190 | new = old & ~(1 << 22); | ||
1191 | if (new != old) | ||
1192 | writel(new, hpriv->base + MV_GPIO_PORT_CTL_OFS); | ||
1080 | } | 1193 | } |
1081 | 1194 | ||
1082 | static void mv_edma_cfg(struct ata_port *ap, int want_ncq) | 1195 | static void mv_edma_cfg(struct ata_port *ap, int want_ncq) |
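
mv_config_fbs() and mv_60x1_errata_sata25() share the same read-modify-write discipline: build the complete new register image first, then write it back only if it actually differs, since even a no-op MMIO write (plus the read-back flush that the driver's writelfl() performs) has a real cost on every EDMA reconfiguration. Condensed into a hypothetical helper for illustration:

#include <linux/io.h>

/* Clear @clear_bits, set @set_bits, and write back only on change. */
static void mmio_update_bits(void __iomem *reg, u32 clear_bits, u32 set_bits)
{
	u32 old = readl(reg);
	u32 new = (old & ~clear_bits) | set_bits;

	if (new != old)
		writel(new, reg);	/* sata_mv uses writelfl() here to post the write */
}
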
@@ -1088,25 +1201,40 @@ static void mv_edma_cfg(struct ata_port *ap, int want_ncq) | |||
1088 | 1201 | ||
1089 | /* set up non-NCQ EDMA configuration */ | 1202 | /* set up non-NCQ EDMA configuration */ |
1090 | cfg = EDMA_CFG_Q_DEPTH; /* always 0x1f for *all* chips */ | 1203 | cfg = EDMA_CFG_Q_DEPTH; /* always 0x1f for *all* chips */ |
1204 | pp->pp_flags &= ~MV_PP_FLAG_FBS_EN; | ||
1091 | 1205 | ||
1092 | if (IS_GEN_I(hpriv)) | 1206 | if (IS_GEN_I(hpriv)) |
1093 | cfg |= (1 << 8); /* enab config burst size mask */ | 1207 | cfg |= (1 << 8); /* enab config burst size mask */ |
1094 | 1208 | ||
1095 | else if (IS_GEN_II(hpriv)) | 1209 | else if (IS_GEN_II(hpriv)) { |
1096 | cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN; | 1210 | cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN; |
1211 | mv_60x1_errata_sata25(ap, want_ncq); | ||
1097 | 1212 | ||
1098 | else if (IS_GEN_IIE(hpriv)) { | 1213 | } else if (IS_GEN_IIE(hpriv)) { |
1099 | cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */ | 1214 | int want_fbs = sata_pmp_attached(ap); |
1100 | cfg |= (1 << 22); /* enab 4-entry host queue cache */ | 1215 | /* |
1101 | cfg |= (1 << 18); /* enab early completion */ | 1216 | * Possible future enhancement: |
1102 | cfg |= (1 << 17); /* enab cut-through (dis stor&forwrd) */ | 1217 | * |
1218 | * The chip can use FBS with non-NCQ, if we allow it, | ||
1219 | * But first we need to have the error handling in place | ||
1220 | * for this mode (datasheet section 7.3.15.4.2.3). | ||
1221 | * So disallow non-NCQ FBS for now. | ||
1222 | */ | ||
1223 | want_fbs &= want_ncq; | ||
1224 | |||
1225 | mv_config_fbs(port_mmio, want_ncq, want_fbs); | ||
1103 | 1226 | ||
1104 | if (want_ncq && sata_pmp_attached(ap)) { | 1227 | if (want_fbs) { |
1228 | pp->pp_flags |= MV_PP_FLAG_FBS_EN; | ||
1105 | cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */ | 1229 | cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */ |
1106 | mv_config_fbs(port_mmio, 1); | ||
1107 | } else { | ||
1108 | mv_config_fbs(port_mmio, 0); | ||
1109 | } | 1230 | } |
1231 | |||
1232 | cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */ | ||
1233 | cfg |= (1 << 22); /* enab 4-entry host queue cache */ | ||
1234 | if (HAS_PCI(ap->host)) | ||
1235 | cfg |= (1 << 18); /* enab early completion */ | ||
1236 | if (hpriv->hp_flags & MV_HP_CUT_THROUGH) | ||
1237 | cfg |= (1 << 17); /* enab cut-thru (dis stor&forwrd) */ | ||
1110 | } | 1238 | } |
1111 | 1239 | ||
1112 | if (want_ncq) { | 1240 | if (want_ncq) { |
@@ -1483,25 +1611,186 @@ static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap) | |||
1483 | return qc; | 1611 | return qc; |
1484 | } | 1612 | } |
1485 | 1613 | ||
1486 | static void mv_unexpected_intr(struct ata_port *ap) | 1614 | static void mv_pmp_error_handler(struct ata_port *ap) |
1487 | { | 1615 | { |
1616 | unsigned int pmp, pmp_map; | ||
1488 | struct mv_port_priv *pp = ap->private_data; | 1617 | struct mv_port_priv *pp = ap->private_data; |
1489 | struct ata_eh_info *ehi = &ap->link.eh_info; | ||
1490 | char *when = ""; | ||
1491 | 1618 | ||
1619 | if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) { | ||
1620 | /* | ||
1621 | * Perform NCQ error analysis on failed PMPs | ||
1622 | * before we freeze the port entirely. | ||
1623 | * | ||
1624 | * The failed PMPs are marked earlier by mv_pmp_eh_prep(). | ||
1625 | */ | ||
1626 | pmp_map = pp->delayed_eh_pmp_map; | ||
1627 | pp->pp_flags &= ~MV_PP_FLAG_DELAYED_EH; | ||
1628 | for (pmp = 0; pmp_map != 0; pmp++) { | ||
1629 | unsigned int this_pmp = (1 << pmp); | ||
1630 | if (pmp_map & this_pmp) { | ||
1631 | struct ata_link *link = &ap->pmp_link[pmp]; | ||
1632 | pmp_map &= ~this_pmp; | ||
1633 | ata_eh_analyze_ncq_error(link); | ||
1634 | } | ||
1635 | } | ||
1636 | ata_port_freeze(ap); | ||
1637 | } | ||
1638 | sata_pmp_error_handler(ap); | ||
1639 | } | ||
1640 | |||
1641 | static unsigned int mv_get_err_pmp_map(struct ata_port *ap) | ||
1642 | { | ||
1643 | void __iomem *port_mmio = mv_ap_base(ap); | ||
1644 | |||
1645 | return readl(port_mmio + SATA_TESTCTL_OFS) >> 16; | ||
1646 | } | ||
1647 | |||
1648 | static void mv_pmp_eh_prep(struct ata_port *ap, unsigned int pmp_map) | ||
1649 | { | ||
1650 | struct ata_eh_info *ehi; | ||
1651 | unsigned int pmp; | ||
1652 | |||
1653 | /* | ||
1654 | * Initialize EH info for PMPs which saw device errors | ||
1655 | */ | ||
1656 | ehi = &ap->link.eh_info; | ||
1657 | for (pmp = 0; pmp_map != 0; pmp++) { | ||
1658 | unsigned int this_pmp = (1 << pmp); | ||
1659 | if (pmp_map & this_pmp) { | ||
1660 | struct ata_link *link = &ap->pmp_link[pmp]; | ||
1661 | |||
1662 | pmp_map &= ~this_pmp; | ||
1663 | ehi = &link->eh_info; | ||
1664 | ata_ehi_clear_desc(ehi); | ||
1665 | ata_ehi_push_desc(ehi, "dev err"); | ||
1666 | ehi->err_mask |= AC_ERR_DEV; | ||
1667 | ehi->action |= ATA_EH_RESET; | ||
1668 | ata_link_abort(link); | ||
1669 | } | ||
1670 | } | ||
1671 | } | ||
1672 | |||
1673 | static int mv_handle_fbs_ncq_dev_err(struct ata_port *ap) | ||
1674 | { | ||
1675 | struct mv_port_priv *pp = ap->private_data; | ||
1676 | int failed_links; | ||
1677 | unsigned int old_map, new_map; | ||
1678 | |||
1679 | /* | ||
1680 | * Device error during FBS+NCQ operation: | ||
1681 | * | ||
1682 | * Set a port flag to prevent further I/O being enqueued. | ||
1683 | * Leave the EDMA running to drain outstanding commands from this port. | ||
1684 | * Perform the post-mortem/EH only when all responses are complete. | ||
1685 | * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.2). | ||
1686 | */ | ||
1687 | if (!(pp->pp_flags & MV_PP_FLAG_DELAYED_EH)) { | ||
1688 | pp->pp_flags |= MV_PP_FLAG_DELAYED_EH; | ||
1689 | pp->delayed_eh_pmp_map = 0; | ||
1690 | } | ||
1691 | old_map = pp->delayed_eh_pmp_map; | ||
1692 | new_map = old_map | mv_get_err_pmp_map(ap); | ||
1693 | |||
1694 | if (old_map != new_map) { | ||
1695 | pp->delayed_eh_pmp_map = new_map; | ||
1696 | mv_pmp_eh_prep(ap, new_map & ~old_map); | ||
1697 | } | ||
1698 | failed_links = hweight16(new_map); | ||
1699 | |||
1700 | ata_port_printk(ap, KERN_INFO, "%s: pmp_map=%04x qc_map=%04x " | ||
1701 | "failed_links=%d nr_active_links=%d\n", | ||
1702 | __func__, pp->delayed_eh_pmp_map, | ||
1703 | ap->qc_active, failed_links, | ||
1704 | ap->nr_active_links); | ||
1705 | |||
1706 | if (ap->nr_active_links <= failed_links) { | ||
1707 | mv_process_crpb_entries(ap, pp); | ||
1708 | mv_stop_edma(ap); | ||
1709 | mv_eh_freeze(ap); | ||
1710 | ata_port_printk(ap, KERN_INFO, "%s: done\n", __func__); | ||
1711 | return 1; /* handled */ | ||
1712 | } | ||
1713 | ata_port_printk(ap, KERN_INFO, "%s: waiting\n", __func__); | ||
1714 | return 1; /* handled */ | ||
1715 | } | ||
1716 | |||
1717 | static int mv_handle_fbs_non_ncq_dev_err(struct ata_port *ap) | ||
1718 | { | ||
1492 | /* | 1719 | /* |
1493 | * We got a device interrupt from something that | 1720 | * Possible future enhancement: |
1494 | * was supposed to be using EDMA or polling. | 1721 | * |
1722 | * FBS+non-NCQ operation is not yet implemented. | ||
1723 | * See related notes in mv_edma_cfg(). | ||
1724 | * | ||
1725 | * Device error during FBS+non-NCQ operation: | ||
1726 | * | ||
1727 | * We need to snapshot the shadow registers for each failed command. | ||
1728 | * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.3). | ||
1495 | */ | 1729 | */ |
1730 | return 0; /* not handled */ | ||
1731 | } | ||
1732 | |||
1733 | static int mv_handle_dev_err(struct ata_port *ap, u32 edma_err_cause) | ||
1734 | { | ||
1735 | struct mv_port_priv *pp = ap->private_data; | ||
1736 | |||
1737 | if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) | ||
1738 | return 0; /* EDMA was not active: not handled */ | ||
1739 | if (!(pp->pp_flags & MV_PP_FLAG_FBS_EN)) | ||
1740 | return 0; /* FBS was not active: not handled */ | ||
1741 | |||
1742 | if (!(edma_err_cause & EDMA_ERR_DEV)) | ||
1743 | return 0; /* non DEV error: not handled */ | ||
1744 | edma_err_cause &= ~EDMA_ERR_IRQ_TRANSIENT; | ||
1745 | if (edma_err_cause & ~(EDMA_ERR_DEV | EDMA_ERR_SELF_DIS)) | ||
1746 | return 0; /* other problems: not handled */ | ||
1747 | |||
1748 | if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) { | ||
1749 | /* | ||
1750 | * EDMA should NOT have self-disabled for this case. | ||
1751 | * If it did, then something is wrong elsewhere, | ||
1752 | * and we cannot handle it here. | ||
1753 | */ | ||
1754 | if (edma_err_cause & EDMA_ERR_SELF_DIS) { | ||
1755 | ata_port_printk(ap, KERN_WARNING, | ||
1756 | "%s: err_cause=0x%x pp_flags=0x%x\n", | ||
1757 | __func__, edma_err_cause, pp->pp_flags); | ||
1758 | return 0; /* not handled */ | ||
1759 | } | ||
1760 | return mv_handle_fbs_ncq_dev_err(ap); | ||
1761 | } else { | ||
1762 | /* | ||
1763 | * EDMA should have self-disabled for this case. | ||
1764 | * If it did not, then something is wrong elsewhere, | ||
1765 | * and we cannot handle it here. | ||
1766 | */ | ||
1767 | if (!(edma_err_cause & EDMA_ERR_SELF_DIS)) { | ||
1768 | ata_port_printk(ap, KERN_WARNING, | ||
1769 | "%s: err_cause=0x%x pp_flags=0x%x\n", | ||
1770 | __func__, edma_err_cause, pp->pp_flags); | ||
1771 | return 0; /* not handled */ | ||
1772 | } | ||
1773 | return mv_handle_fbs_non_ncq_dev_err(ap); | ||
1774 | } | ||
1775 | return 0; /* not handled */ | ||
1776 | } | ||
1777 | |||
1778 | static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled) | ||
1779 | { | ||
1780 | struct ata_eh_info *ehi = &ap->link.eh_info; | ||
1781 | char *when = "idle"; | ||
1782 | |||
1496 | ata_ehi_clear_desc(ehi); | 1783 | ata_ehi_clear_desc(ehi); |
1497 | if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) { | 1784 | if (!ap || (ap->flags & ATA_FLAG_DISABLED)) { |
1498 | when = " while EDMA enabled"; | 1785 | when = "disabled"; |
1786 | } else if (edma_was_enabled) { | ||
1787 | when = "EDMA enabled"; | ||
1499 | } else { | 1788 | } else { |
1500 | struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag); | 1789 | struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag); |
1501 | if (qc && (qc->tf.flags & ATA_TFLAG_POLLING)) | 1790 | if (qc && (qc->tf.flags & ATA_TFLAG_POLLING)) |
1502 | when = " while polling"; | 1791 | when = "polling"; |
1503 | } | 1792 | } |
1504 | ata_ehi_push_desc(ehi, "unexpected device interrupt%s", when); | 1793 | ata_ehi_push_desc(ehi, "unexpected device interrupt while %s", when); |
1505 | ehi->err_mask |= AC_ERR_OTHER; | 1794 | ehi->err_mask |= AC_ERR_OTHER; |
1506 | ehi->action |= ATA_EH_RESET; | 1795 | ehi->action |= ATA_EH_RESET; |
1507 | ata_port_freeze(ap); | 1796 | ata_port_freeze(ap); |
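
Both new PMP routines walk the 16-bit map of failed links with the same clear-as-you-go loop, and the new linux/bitops.h include is there so hweight16() can count the set bits (the number of failed links) afterwards. The loop shape in isolation (a sketch; for_each_set_pmp() and the callback are hypothetical):

#include <linux/bitops.h>

static void for_each_set_pmp(unsigned int pmp_map, void (*fn)(unsigned int pmp))
{
	unsigned int pmp;

	for (pmp = 0; pmp_map != 0; pmp++) {
		unsigned int this_pmp = 1 << pmp;

		if (pmp_map & this_pmp) {
			pmp_map &= ~this_pmp;	/* loop ends once the map empties */
			fn(pmp);
		}
	}
}

/* hweight16(map) then yields the number of set bits, i.e. failed links. */
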
@@ -1519,7 +1808,7 @@ static void mv_unexpected_intr(struct ata_port *ap) | |||
1519 | * LOCKING: | 1808 | * LOCKING: |
1520 | * Inherited from caller. | 1809 | * Inherited from caller. |
1521 | */ | 1810 | */ |
1522 | static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc) | 1811 | static void mv_err_intr(struct ata_port *ap) |
1523 | { | 1812 | { |
1524 | void __iomem *port_mmio = mv_ap_base(ap); | 1813 | void __iomem *port_mmio = mv_ap_base(ap); |
1525 | u32 edma_err_cause, eh_freeze_mask, serr = 0; | 1814 | u32 edma_err_cause, eh_freeze_mask, serr = 0; |
@@ -1527,24 +1816,42 @@ static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc) | |||
1527 | struct mv_host_priv *hpriv = ap->host->private_data; | 1816 | struct mv_host_priv *hpriv = ap->host->private_data; |
1528 | unsigned int action = 0, err_mask = 0; | 1817 | unsigned int action = 0, err_mask = 0; |
1529 | struct ata_eh_info *ehi = &ap->link.eh_info; | 1818 | struct ata_eh_info *ehi = &ap->link.eh_info; |
1530 | 1819 | struct ata_queued_cmd *qc; | |
1531 | ata_ehi_clear_desc(ehi); | 1820 | int abort = 0; |
1532 | 1821 | ||
1533 | /* | 1822 | /* |
1534 | * Read and clear the err_cause bits. This won't actually | 1823 | * Read and clear the SError and err_cause bits. |
1535 | * clear for some errors (eg. SError), but we will be doing | ||
1536 | * a hard reset in those cases regardless, which *will* clear it. | ||
1537 | */ | 1824 | */ |
1825 | sata_scr_read(&ap->link, SCR_ERROR, &serr); | ||
1826 | sata_scr_write_flush(&ap->link, SCR_ERROR, serr); | ||
1827 | |||
1538 | edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); | 1828 | edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); |
1539 | writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); | 1829 | writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); |
1540 | 1830 | ||
1541 | ata_ehi_push_desc(ehi, "edma_err_cause=%08x", edma_err_cause); | 1831 | ata_port_printk(ap, KERN_INFO, "%s: err_cause=%08x pp_flags=0x%x\n", |
1832 | __func__, edma_err_cause, pp->pp_flags); | ||
1833 | |||
1834 | if (edma_err_cause & EDMA_ERR_DEV) { | ||
1835 | /* | ||
1836 | * Device errors during FIS-based switching operation | ||
1837 | * require special handling. | ||
1838 | */ | ||
1839 | if (mv_handle_dev_err(ap, edma_err_cause)) | ||
1840 | return; | ||
1841 | } | ||
1542 | 1842 | ||
1843 | qc = mv_get_active_qc(ap); | ||
1844 | ata_ehi_clear_desc(ehi); | ||
1845 | ata_ehi_push_desc(ehi, "edma_err_cause=%08x pp_flags=%08x", | ||
1846 | edma_err_cause, pp->pp_flags); | ||
1543 | /* | 1847 | /* |
1544 | * All generations share these EDMA error cause bits: | 1848 | * All generations share these EDMA error cause bits: |
1545 | */ | 1849 | */ |
1546 | if (edma_err_cause & EDMA_ERR_DEV) | 1850 | if (edma_err_cause & EDMA_ERR_DEV) { |
1547 | err_mask |= AC_ERR_DEV; | 1851 | err_mask |= AC_ERR_DEV; |
1852 | action |= ATA_EH_RESET; | ||
1853 | ata_ehi_push_desc(ehi, "dev error"); | ||
1854 | } | ||
1548 | if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR | | 1855 | if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR | |
1549 | EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR | | 1856 | EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR | |
1550 | EDMA_ERR_INTRL_PAR)) { | 1857 | EDMA_ERR_INTRL_PAR)) { |
@@ -1576,13 +1883,6 @@ static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc) | |||
1576 | ata_ehi_push_desc(ehi, "EDMA self-disable"); | 1883 | ata_ehi_push_desc(ehi, "EDMA self-disable"); |
1577 | } | 1884 | } |
1578 | if (edma_err_cause & EDMA_ERR_SERR) { | 1885 | if (edma_err_cause & EDMA_ERR_SERR) { |
1579 | /* | ||
1580 | * Ensure that we read our own SCR, not a pmp link SCR: | ||
1581 | */ | ||
1582 | ap->ops->scr_read(ap, SCR_ERROR, &serr); | ||
1583 | /* | ||
1584 | * Don't clear SError here; leave it for libata-eh: | ||
1585 | */ | ||
1586 | ata_ehi_push_desc(ehi, "SError=%08x", serr); | 1886 | ata_ehi_push_desc(ehi, "SError=%08x", serr); |
1587 | err_mask |= AC_ERR_ATA_BUS; | 1887 | err_mask |= AC_ERR_ATA_BUS; |
1588 | action |= ATA_EH_RESET; | 1888 | action |= ATA_EH_RESET; |
@@ -1602,10 +1902,29 @@ static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc) | |||
1602 | else | 1902 | else |
1603 | ehi->err_mask |= err_mask; | 1903 | ehi->err_mask |= err_mask; |
1604 | 1904 | ||
1605 | if (edma_err_cause & eh_freeze_mask) | 1905 | if (err_mask == AC_ERR_DEV) { |
1906 | /* | ||
1907 | * Cannot do ata_port_freeze() here, | ||
1908 | * because it would kill PIO access, | ||
1909 | * which is needed for further diagnosis. | ||
1910 | */ | ||
1911 | mv_eh_freeze(ap); | ||
1912 | abort = 1; | ||
1913 | } else if (edma_err_cause & eh_freeze_mask) { | ||
1914 | /* | ||
1915 | * Note to self: ata_port_freeze() calls ata_port_abort() | ||
1916 | */ | ||
1606 | ata_port_freeze(ap); | 1917 | ata_port_freeze(ap); |
1607 | else | 1918 | } else { |
1608 | ata_port_abort(ap); | 1919 | abort = 1; |
1920 | } | ||
1921 | |||
1922 | if (abort) { | ||
1923 | if (qc) | ||
1924 | ata_link_abort(qc->dev->link); | ||
1925 | else | ||
1926 | ata_port_abort(ap); | ||
1927 | } | ||
1609 | } | 1928 | } |
1610 | 1929 | ||
1611 | static void mv_process_crpb_response(struct ata_port *ap, | 1930 | static void mv_process_crpb_response(struct ata_port *ap, |
@@ -1632,8 +1951,9 @@ static void mv_process_crpb_response(struct ata_port *ap, | |||
1632 | } | 1951 | } |
1633 | } | 1952 | } |
1634 | ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT; | 1953 | ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT; |
1635 | qc->err_mask |= ac_err_mask(ata_status); | 1954 | if (!ac_err_mask(ata_status)) |
1636 | ata_qc_complete(qc); | 1955 | ata_qc_complete(qc); |
1956 | /* else: leave it for mv_err_intr() */ | ||
1637 | } else { | 1957 | } else { |
1638 | ata_port_printk(ap, KERN_ERR, "%s: no qc for tag=%d\n", | 1958 | ata_port_printk(ap, KERN_ERR, "%s: no qc for tag=%d\n", |
1639 | __func__, tag); | 1959 | __func__, tag); |
@@ -1677,6 +1997,44 @@ static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp | |||
1677 | port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); | 1997 | port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); |
1678 | } | 1998 | } |
1679 | 1999 | ||
2000 | static void mv_port_intr(struct ata_port *ap, u32 port_cause) | ||
2001 | { | ||
2002 | struct mv_port_priv *pp; | ||
2003 | int edma_was_enabled; | ||
2004 | |||
2005 | if (!ap || (ap->flags & ATA_FLAG_DISABLED)) { | ||
2006 | mv_unexpected_intr(ap, 0); | ||
2007 | return; | ||
2008 | } | ||
2009 | /* | ||
2010 | * Grab a snapshot of the EDMA_EN flag setting, | ||
2011 | * so that we have a consistent view for this port, | ||
2012 | * even if one of the routines we call changes it. | ||
2013 | */ | ||
2014 | pp = ap->private_data; | ||
2015 | edma_was_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN); | ||
2016 | /* | ||
2017 | * Process completed CRPB response(s) before other events. | ||
2018 | */ | ||
2019 | if (edma_was_enabled && (port_cause & DONE_IRQ)) { | ||
2020 | mv_process_crpb_entries(ap, pp); | ||
2021 | if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) | ||
2022 | mv_handle_fbs_ncq_dev_err(ap); | ||
2023 | } | ||
2024 | /* | ||
2025 | * Handle chip-reported errors, or continue on to handle PIO. | ||
2026 | */ | ||
2027 | if (unlikely(port_cause & ERR_IRQ)) { | ||
2028 | mv_err_intr(ap); | ||
2029 | } else if (!edma_was_enabled) { | ||
2030 | struct ata_queued_cmd *qc = mv_get_active_qc(ap); | ||
2031 | if (qc) | ||
2032 | ata_sff_host_intr(ap, qc); | ||
2033 | else | ||
2034 | mv_unexpected_intr(ap, edma_was_enabled); | ||
2035 | } | ||
2036 | } | ||
2037 | |||
1680 | /** | 2038 | /** |
1681 | * mv_host_intr - Handle all interrupts on the given host controller | 2039 | * mv_host_intr - Handle all interrupts on the given host controller |
1682 | * @host: host specific structure | 2040 | * @host: host specific structure |
@@ -1688,66 +2046,58 @@ static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp | |||
1688 | static int mv_host_intr(struct ata_host *host, u32 main_irq_cause) | 2046 | static int mv_host_intr(struct ata_host *host, u32 main_irq_cause) |
1689 | { | 2047 | { |
1690 | struct mv_host_priv *hpriv = host->private_data; | 2048 | struct mv_host_priv *hpriv = host->private_data; |
1691 | void __iomem *mmio = hpriv->base, *hc_mmio = NULL; | 2049 | void __iomem *mmio = hpriv->base, *hc_mmio; |
1692 | u32 hc_irq_cause = 0; | ||
1693 | unsigned int handled = 0, port; | 2050 | unsigned int handled = 0, port; |
1694 | 2051 | ||
1695 | for (port = 0; port < hpriv->n_ports; port++) { | 2052 | for (port = 0; port < hpriv->n_ports; port++) { |
1696 | struct ata_port *ap = host->ports[port]; | 2053 | struct ata_port *ap = host->ports[port]; |
1697 | struct mv_port_priv *pp; | 2054 | unsigned int p, shift, hardport, port_cause; |
1698 | unsigned int shift, hardport, port_cause; | 2055 | |
1699 | /* | ||
1700 | * When we move to the second hc, flag our cached | ||
1701 | * copies of hc_mmio (and hc_irq_cause) as invalid again. | ||
1702 | */ | ||
1703 | if (port == MV_PORTS_PER_HC) | ||
1704 | hc_mmio = NULL; | ||
1705 | /* | ||
1706 | * Do nothing if port is not interrupting or is disabled: | ||
1707 | */ | ||
1708 | MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport); | 2056 | MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport); |
1709 | port_cause = (main_irq_cause >> shift) & (DONE_IRQ | ERR_IRQ); | ||
1710 | if (!port_cause || !ap || (ap->flags & ATA_FLAG_DISABLED)) | ||
1711 | continue; | ||
1712 | /* | 2057 | /* |
1713 | * Each hc within the host has its own hc_irq_cause register. | 2058 | * Each hc within the host has its own hc_irq_cause register, |
1714 | * We defer reading it until we know we need it, right now: | 2059 | * where the interrupting ports bits get ack'd. |
1715 | * | ||
1716 | * FIXME later: we don't really need to read this register | ||
1717 | * (some logic changes required below if we go that way), | ||
1718 | * because it doesn't tell us anything new. But we do need | ||
1719 | * to write to it, outside the top of this loop, | ||
1720 | * to reset the interrupt triggers for next time. | ||
1721 | */ | 2060 | */ |
1722 | if (!hc_mmio) { | 2061 | if (hardport == 0) { /* first port on this hc? */ |
2062 | u32 hc_cause = (main_irq_cause >> shift) & HC0_IRQ_PEND; | ||
2063 | u32 port_mask, ack_irqs; | ||
2064 | /* | ||
2065 | * Skip this entire hc if nothing pending for any ports | ||
2066 | */ | ||
2067 | if (!hc_cause) { | ||
2068 | port += MV_PORTS_PER_HC - 1; | ||
2069 | continue; | ||
2070 | } | ||
2071 | /* | ||
2072 | * We don't need/want to read the hc_irq_cause register, | ||
2073 | * because doing so hurts performance, and | ||
2074 | * main_irq_cause already gives us everything we need. | ||
2075 | * | ||
2076 | * But we do have to *write* to the hc_irq_cause to ack | ||
2077 | * the ports that we are handling this time through. | ||
2078 | * | ||
2079 | * This requires that we create a bitmap for those | ||
2080 | * ports which interrupted us, and use that bitmap | ||
2081 | * to ack (only) those ports via hc_irq_cause. | ||
2082 | */ | ||
2083 | ack_irqs = 0; | ||
2084 | for (p = 0; p < MV_PORTS_PER_HC; ++p) { | ||
2085 | if ((port + p) >= hpriv->n_ports) | ||
2086 | break; | ||
2087 | port_mask = (DONE_IRQ | ERR_IRQ) << (p * 2); | ||
2088 | if (hc_cause & port_mask) | ||
2089 | ack_irqs |= (DMA_IRQ | DEV_IRQ) << p; | ||
2090 | } | ||
1723 | hc_mmio = mv_hc_base_from_port(mmio, port); | 2091 | hc_mmio = mv_hc_base_from_port(mmio, port); |
1724 | hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS); | 2092 | writelfl(~ack_irqs, hc_mmio + HC_IRQ_CAUSE_OFS); |
1725 | writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS); | ||
1726 | handled = 1; | 2093 | handled = 1; |
1727 | } | 2094 | } |
1728 | /* | 2095 | /* |
1729 | * Process completed CRPB response(s) before other events. | 2096 | * Handle interrupts signalled for this port: |
1730 | */ | ||
1731 | pp = ap->private_data; | ||
1732 | if (hc_irq_cause & (DMA_IRQ << hardport)) { | ||
1733 | if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) | ||
1734 | mv_process_crpb_entries(ap, pp); | ||
1735 | } | ||
1736 | /* | ||
1737 | * Handle chip-reported errors, or continue on to handle PIO. | ||
1738 | */ | 2097 | */ |
1739 | if (unlikely(port_cause & ERR_IRQ)) { | 2098 | port_cause = (main_irq_cause >> shift) & (DONE_IRQ | ERR_IRQ); |
1740 | mv_err_intr(ap, mv_get_active_qc(ap)); | 2099 | if (port_cause) |
1741 | } else if (hc_irq_cause & (DEV_IRQ << hardport)) { | 2100 | mv_port_intr(ap, port_cause); |
1742 | if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) { | ||
1743 | struct ata_queued_cmd *qc = mv_get_active_qc(ap); | ||
1744 | if (qc) { | ||
1745 | ata_sff_host_intr(ap, qc); | ||
1746 | continue; | ||
1747 | } | ||
1748 | } | ||
1749 | mv_unexpected_intr(ap); | ||
1750 | } | ||
1751 | } | 2101 | } |
1752 | return handled; | 2102 | return handled; |
1753 | } | 2103 | } |
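
The key observation behind the rewritten mv_host_intr() is that main_irq_cause already carries everything: each port owns a (DONE_IRQ | ERR_IRQ) bit pair in it, so the per-hc hc_irq_cause register never needs to be read, only written to ack the interrupting ports. The ack-mask derivation, pulled out as a sketch (hc_ack_mask() is a hypothetical name; the constants are the driver's existing enums):

static u32 hc_ack_mask(u32 hc_cause, unsigned int nports)
{
	u32 ack_irqs = 0;
	unsigned int p;

	for (p = 0; p < nports; p++) {
		/* two cause bits per port in the main cause register ... */
		u32 port_mask = (DONE_IRQ | ERR_IRQ) << (p * 2);

		/* ... become one DMA/DEV ack pair per hard port */
		if (hc_cause & port_mask)
			ack_irqs |= (DMA_IRQ | DEV_IRQ) << p;
	}
	return ack_irqs;	/* written as ~ack_irqs to HC_IRQ_CAUSE_OFS */
}
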
@@ -1894,7 +2244,7 @@ static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio) | |||
1894 | 2244 | ||
1895 | static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio) | 2245 | static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio) |
1896 | { | 2246 | { |
1897 | writel(0x0fcfffff, mmio + MV_FLASH_CTL); | 2247 | writel(0x0fcfffff, mmio + MV_FLASH_CTL_OFS); |
1898 | } | 2248 | } |
1899 | 2249 | ||
1900 | static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx, | 2250 | static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx, |
@@ -1913,7 +2263,7 @@ static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio) | |||
1913 | { | 2263 | { |
1914 | u32 tmp; | 2264 | u32 tmp; |
1915 | 2265 | ||
1916 | writel(0, mmio + MV_GPIO_PORT_CTL); | 2266 | writel(0, mmio + MV_GPIO_PORT_CTL_OFS); |
1917 | 2267 | ||
1918 | /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */ | 2268 | /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */ |
1919 | 2269 | ||
@@ -1931,14 +2281,14 @@ static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, | |||
1931 | int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0); | 2281 | int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0); |
1932 | 2282 | ||
1933 | if (fix_apm_sq) { | 2283 | if (fix_apm_sq) { |
1934 | tmp = readl(phy_mmio + MV5_LT_MODE); | 2284 | tmp = readl(phy_mmio + MV5_LTMODE_OFS); |
1935 | tmp |= (1 << 19); | 2285 | tmp |= (1 << 19); |
1936 | writel(tmp, phy_mmio + MV5_LT_MODE); | 2286 | writel(tmp, phy_mmio + MV5_LTMODE_OFS); |
1937 | 2287 | ||
1938 | tmp = readl(phy_mmio + MV5_PHY_CTL); | 2288 | tmp = readl(phy_mmio + MV5_PHY_CTL_OFS); |
1939 | tmp &= ~0x3; | 2289 | tmp &= ~0x3; |
1940 | tmp |= 0x1; | 2290 | tmp |= 0x1; |
1941 | writel(tmp, phy_mmio + MV5_PHY_CTL); | 2291 | writel(tmp, phy_mmio + MV5_PHY_CTL_OFS); |
1942 | } | 2292 | } |
1943 | 2293 | ||
1944 | tmp = readl(phy_mmio + MV5_PHY_MODE); | 2294 | tmp = readl(phy_mmio + MV5_PHY_MODE); |
@@ -1956,11 +2306,6 @@ static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio, | |||
1956 | { | 2306 | { |
1957 | void __iomem *port_mmio = mv_port_base(mmio, port); | 2307 | void __iomem *port_mmio = mv_port_base(mmio, port); |
1958 | 2308 | ||
1959 | /* | ||
1960 | * The datasheet warns against setting ATA_RST when EDMA is active | ||
1961 | * (but doesn't say what the problem might be). So we first try | ||
1962 | * to disable the EDMA engine before doing the ATA_RST operation. | ||
1963 | */ | ||
1964 | mv_reset_channel(hpriv, mmio, port); | 2309 | mv_reset_channel(hpriv, mmio, port); |
1965 | 2310 | ||
1966 | ZERO(0x028); /* command */ | 2311 | ZERO(0x028); /* command */ |
@@ -1975,7 +2320,7 @@ static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio, | |||
1975 | ZERO(0x024); /* respq outp */ | 2320 | ZERO(0x024); /* respq outp */ |
1976 | ZERO(0x020); /* respq inp */ | 2321 | ZERO(0x020); /* respq inp */ |
1977 | ZERO(0x02c); /* test control */ | 2322 | ZERO(0x02c); /* test control */ |
1978 | writel(0xbc, port_mmio + EDMA_IORDY_TMOUT); | 2323 | writel(0xbc, port_mmio + EDMA_IORDY_TMOUT_OFS); |
1979 | } | 2324 | } |
1980 | #undef ZERO | 2325 | #undef ZERO |
1981 | 2326 | ||
@@ -2021,13 +2366,13 @@ static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio) | |||
2021 | struct mv_host_priv *hpriv = host->private_data; | 2366 | struct mv_host_priv *hpriv = host->private_data; |
2022 | u32 tmp; | 2367 | u32 tmp; |
2023 | 2368 | ||
2024 | tmp = readl(mmio + MV_PCI_MODE); | 2369 | tmp = readl(mmio + MV_PCI_MODE_OFS); |
2025 | tmp &= 0xff00ffff; | 2370 | tmp &= 0xff00ffff; |
2026 | writel(tmp, mmio + MV_PCI_MODE); | 2371 | writel(tmp, mmio + MV_PCI_MODE_OFS); |
2027 | 2372 | ||
2028 | ZERO(MV_PCI_DISC_TIMER); | 2373 | ZERO(MV_PCI_DISC_TIMER); |
2029 | ZERO(MV_PCI_MSI_TRIGGER); | 2374 | ZERO(MV_PCI_MSI_TRIGGER); |
2030 | writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT); | 2375 | writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT_OFS); |
2031 | ZERO(PCI_HC_MAIN_IRQ_MASK_OFS); | 2376 | ZERO(PCI_HC_MAIN_IRQ_MASK_OFS); |
2032 | ZERO(MV_PCI_SERR_MASK); | 2377 | ZERO(MV_PCI_SERR_MASK); |
2033 | ZERO(hpriv->irq_cause_ofs); | 2378 | ZERO(hpriv->irq_cause_ofs); |
@@ -2045,10 +2390,10 @@ static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio) | |||
2045 | 2390 | ||
2046 | mv5_reset_flash(hpriv, mmio); | 2391 | mv5_reset_flash(hpriv, mmio); |
2047 | 2392 | ||
2048 | tmp = readl(mmio + MV_GPIO_PORT_CTL); | 2393 | tmp = readl(mmio + MV_GPIO_PORT_CTL_OFS); |
2049 | tmp &= 0x3; | 2394 | tmp &= 0x3; |
2050 | tmp |= (1 << 5) | (1 << 6); | 2395 | tmp |= (1 << 5) | (1 << 6); |
2051 | writel(tmp, mmio + MV_GPIO_PORT_CTL); | 2396 | writel(tmp, mmio + MV_GPIO_PORT_CTL_OFS); |
2052 | } | 2397 | } |
2053 | 2398 | ||
2054 | /** | 2399 | /** |
@@ -2121,7 +2466,7 @@ static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx, | |||
2121 | void __iomem *port_mmio; | 2466 | void __iomem *port_mmio; |
2122 | u32 tmp; | 2467 | u32 tmp; |
2123 | 2468 | ||
2124 | tmp = readl(mmio + MV_RESET_CFG); | 2469 | tmp = readl(mmio + MV_RESET_CFG_OFS); |
2125 | if ((tmp & (1 << 0)) == 0) { | 2470 | if ((tmp & (1 << 0)) == 0) { |
2126 | hpriv->signal[idx].amps = 0x7 << 8; | 2471 | hpriv->signal[idx].amps = 0x7 << 8; |
2127 | hpriv->signal[idx].pre = 0x1 << 5; | 2472 | hpriv->signal[idx].pre = 0x1 << 5; |
@@ -2137,7 +2482,7 @@ static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx, | |||
2137 | 2482 | ||
2138 | static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio) | 2483 | static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio) |
2139 | { | 2484 | { |
2140 | writel(0x00000060, mmio + MV_GPIO_PORT_CTL); | 2485 | writel(0x00000060, mmio + MV_GPIO_PORT_CTL_OFS); |
2141 | } | 2486 | } |
2142 | 2487 | ||
2143 | static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, | 2488 | static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, |
@@ -2235,11 +2580,6 @@ static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv, | |||
2235 | { | 2580 | { |
2236 | void __iomem *port_mmio = mv_port_base(mmio, port); | 2581 | void __iomem *port_mmio = mv_port_base(mmio, port); |
2237 | 2582 | ||
2238 | /* | ||
2239 | * The datasheet warns against setting ATA_RST when EDMA is active | ||
2240 | * (but doesn't say what the problem might be). So we first try | ||
2241 | * to disable the EDMA engine before doing the ATA_RST operation. | ||
2242 | */ | ||
2243 | mv_reset_channel(hpriv, mmio, port); | 2583 | mv_reset_channel(hpriv, mmio, port); |
2244 | 2584 | ||
2245 | ZERO(0x028); /* command */ | 2585 | ZERO(0x028); /* command */ |
@@ -2254,7 +2594,7 @@ static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv, | |||
2254 | ZERO(0x024); /* respq outp */ | 2594 | ZERO(0x024); /* respq outp */ |
2255 | ZERO(0x020); /* respq inp */ | 2595 | ZERO(0x020); /* respq inp */ |
2256 | ZERO(0x02c); /* test control */ | 2596 | ZERO(0x02c); /* test control */ |
2257 | writel(0xbc, port_mmio + EDMA_IORDY_TMOUT); | 2597 | writel(0xbc, port_mmio + EDMA_IORDY_TMOUT_OFS); |
2258 | } | 2598 | } |
2259 | 2599 | ||
2260 | #undef ZERO | 2600 | #undef ZERO |
@@ -2297,38 +2637,39 @@ static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio) | |||
2297 | return; | 2637 | return; |
2298 | } | 2638 | } |
2299 | 2639 | ||
2300 | static void mv_setup_ifctl(void __iomem *port_mmio, int want_gen2i) | 2640 | static void mv_setup_ifcfg(void __iomem *port_mmio, int want_gen2i) |
2301 | { | 2641 | { |
2302 | u32 ifctl = readl(port_mmio + SATA_INTERFACE_CFG); | 2642 | u32 ifcfg = readl(port_mmio + SATA_INTERFACE_CFG_OFS); |
2303 | 2643 | ||
2304 | ifctl = (ifctl & 0xf7f) | 0x9b1000; /* from chip spec */ | 2644 | ifcfg = (ifcfg & 0xf7f) | 0x9b1000; /* from chip spec */ |
2305 | if (want_gen2i) | 2645 | if (want_gen2i) |
2306 | ifctl |= (1 << 7); /* enable gen2i speed */ | 2646 | ifcfg |= (1 << 7); /* enable gen2i speed */ |
2307 | writelfl(ifctl, port_mmio + SATA_INTERFACE_CFG); | 2647 | writelfl(ifcfg, port_mmio + SATA_INTERFACE_CFG_OFS); |
2308 | } | 2648 | } |
2309 | 2649 | ||
2310 | /* | ||
2311 | * Caller must ensure that EDMA is not active, | ||
2312 | * by first doing mv_stop_edma() where needed. | ||
2313 | */ | ||
2314 | static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio, | 2650 | static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio, |
2315 | unsigned int port_no) | 2651 | unsigned int port_no) |
2316 | { | 2652 | { |
2317 | void __iomem *port_mmio = mv_port_base(mmio, port_no); | 2653 | void __iomem *port_mmio = mv_port_base(mmio, port_no); |
2318 | 2654 | ||
2655 | /* | ||
2656 | * The datasheet warns against setting EDMA_RESET when EDMA is active | ||
2657 | * (but doesn't say what the problem might be). So we first try | ||
2658 | * to disable the EDMA engine before doing the EDMA_RESET operation. | ||
2659 | */ | ||
2319 | mv_stop_edma_engine(port_mmio); | 2660 | mv_stop_edma_engine(port_mmio); |
2320 | writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS); | 2661 | writelfl(EDMA_RESET, port_mmio + EDMA_CMD_OFS); |
2321 | 2662 | ||
2322 | if (!IS_GEN_I(hpriv)) { | 2663 | if (!IS_GEN_I(hpriv)) { |
2323 | /* Enable 3.0gb/s link speed */ | 2664 | /* Enable 3.0gb/s link speed: this survives EDMA_RESET */ |
2324 | mv_setup_ifctl(port_mmio, 1); | 2665 | mv_setup_ifcfg(port_mmio, 1); |
2325 | } | 2666 | } |
2326 | /* | 2667 | /* |
2327 | * Strobing ATA_RST here causes a hard reset of the SATA transport, | 2668 | * Strobing EDMA_RESET here causes a hard reset of the SATA transport, |
2328 | * link, and physical layers. It resets all SATA interface registers | 2669 | * link, and physical layers. It resets all SATA interface registers |
2329 | * (except for SATA_INTERFACE_CFG), and issues a COMRESET to the dev. | 2670 | * (except for SATA_INTERFACE_CFG), and issues a COMRESET to the dev. |
2330 | */ | 2671 | */ |
2331 | writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS); | 2672 | writelfl(EDMA_RESET, port_mmio + EDMA_CMD_OFS); |
2332 | udelay(25); /* allow reset propagation */ | 2673 | udelay(25); /* allow reset propagation */ |
2333 | writelfl(0, port_mmio + EDMA_CMD_OFS); | 2674 | writelfl(0, port_mmio + EDMA_CMD_OFS); |
2334 | 2675 | ||
@@ -2392,7 +2733,7 @@ static int mv_hardreset(struct ata_link *link, unsigned int *class, | |||
2392 | sata_scr_read(link, SCR_STATUS, &sstatus); | 2733 | sata_scr_read(link, SCR_STATUS, &sstatus); |
2393 | if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) { | 2734 | if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) { |
2394 | /* Force 1.5gb/s link speed and try again */ | 2735 | /* Force 1.5gb/s link speed and try again */ |
2395 | mv_setup_ifctl(mv_ap_base(ap), 0); | 2736 | mv_setup_ifcfg(mv_ap_base(ap), 0); |
2396 | if (time_after(jiffies + HZ, deadline)) | 2737 | if (time_after(jiffies + HZ, deadline)) |
2397 | extra = HZ; /* only extend it once, max */ | 2738 | extra = HZ; /* only extend it once, max */ |
2398 | } | 2739 | } |
@@ -2493,6 +2834,34 @@ static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio) | |||
2493 | readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS)); | 2834 | readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS)); |
2494 | } | 2835 | } |
2495 | 2836 | ||
2837 | static unsigned int mv_in_pcix_mode(struct ata_host *host) | ||
2838 | { | ||
2839 | struct mv_host_priv *hpriv = host->private_data; | ||
2840 | void __iomem *mmio = hpriv->base; | ||
2841 | u32 reg; | ||
2842 | |||
2843 | if (!HAS_PCI(host) || IS_PCIE(hpriv)) | ||
2844 | return 0; /* not PCI-X capable */ | ||
2845 | reg = readl(mmio + MV_PCI_MODE_OFS); | ||
2846 | if ((reg & MV_PCI_MODE_MASK) == 0) | ||
2847 | return 0; /* conventional PCI mode */ | ||
2848 | return 1; /* chip is in PCI-X mode */ | ||
2849 | } | ||
2850 | |||
2851 | static int mv_pci_cut_through_okay(struct ata_host *host) | ||
2852 | { | ||
2853 | struct mv_host_priv *hpriv = host->private_data; | ||
2854 | void __iomem *mmio = hpriv->base; | ||
2855 | u32 reg; | ||
2856 | |||
2857 | if (!mv_in_pcix_mode(host)) { | ||
2858 | reg = readl(mmio + PCI_COMMAND_OFS); | ||
2859 | if (reg & PCI_COMMAND_MRDTRIG) | ||
2860 | return 0; /* not okay */ | ||
2861 | } | ||
2862 | return 1; /* okay */ | ||
2863 | } | ||
2864 | |||
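Taken together, the two helpers implement a conservative gate: in PCI-X mode cut-through is considered safe, while in conventional PCI mode it is vetoed whenever the Master Read Trigger bit (PCI_COMMAND_MRDTRIG) is set. Note the guard in mv_in_pcix_mode() also returns 0 for PCIe parts, since a PCIe chip cannot be in PCI-X mode. The next hunk shows mv_chip_id() consuming this; a condensed restatement (illustrative only, not the driver's literal control flow):

    /* Illustrative summary of how mv_chip_id() sets MV_HP_CUT_THROUGH:
     * chip_7042 (a PCIe part) gets the flag unconditionally; chip_6042
     * gets it only when mv_pci_cut_through_okay() approves.
     */
    static u32 example_cut_through_flag(struct ata_host *host,
    				    unsigned int board_idx)
    {
    	if (board_idx == chip_7042)
    		return MV_HP_CUT_THROUGH;
    	if (board_idx == chip_6042 && mv_pci_cut_through_okay(host))
    		return MV_HP_CUT_THROUGH;
    	return 0;
    }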
2496 | static int mv_chip_id(struct ata_host *host, unsigned int board_idx) | 2865 | static int mv_chip_id(struct ata_host *host, unsigned int board_idx) |
2497 | { | 2866 | { |
2498 | struct pci_dev *pdev = to_pci_dev(host->dev); | 2867 | struct pci_dev *pdev = to_pci_dev(host->dev); |
@@ -2560,7 +2929,7 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx) | |||
2560 | break; | 2929 | break; |
2561 | 2930 | ||
2562 | case chip_7042: | 2931 | case chip_7042: |
2563 | hp_flags |= MV_HP_PCIE; | 2932 | hp_flags |= MV_HP_PCIE | MV_HP_CUT_THROUGH; |
2564 | if (pdev->vendor == PCI_VENDOR_ID_TTI && | 2933 | if (pdev->vendor == PCI_VENDOR_ID_TTI && |
2565 | (pdev->device == 0x2300 || pdev->device == 0x2310)) | 2934 | (pdev->device == 0x2300 || pdev->device == 0x2310)) |
2566 | { | 2935 | { |
@@ -2590,9 +2959,12 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx) | |||
2590 | " and avoid the final two gigabytes on" | 2959 | " and avoid the final two gigabytes on" |
2591 | " all RocketRAID BIOS initialized drives.\n"); | 2960 | " all RocketRAID BIOS initialized drives.\n"); |
2592 | } | 2961 | } |
2962 | /* drop through */ | ||
2593 | case chip_6042: | 2963 | case chip_6042: |
2594 | hpriv->ops = &mv6xxx_ops; | 2964 | hpriv->ops = &mv6xxx_ops; |
2595 | hp_flags |= MV_HP_GEN_IIE; | 2965 | hp_flags |= MV_HP_GEN_IIE; |
2966 | if (board_idx == chip_6042 && mv_pci_cut_through_okay(host)) | ||
2967 | hp_flags |= MV_HP_CUT_THROUGH; | ||
2596 | 2968 | ||
2597 | switch (pdev->revision) { | 2969 | switch (pdev->revision) { |
2598 | case 0x0: | 2970 | case 0x0: |
diff --git a/include/linux/libata.h b/include/linux/libata.h index d1dfe872ee30..7e206da1fbfb 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h | |||
@@ -1039,6 +1039,7 @@ extern void ata_eh_thaw_port(struct ata_port *ap); | |||
1039 | 1039 | ||
1040 | extern void ata_eh_qc_complete(struct ata_queued_cmd *qc); | 1040 | extern void ata_eh_qc_complete(struct ata_queued_cmd *qc); |
1041 | extern void ata_eh_qc_retry(struct ata_queued_cmd *qc); | 1041 | extern void ata_eh_qc_retry(struct ata_queued_cmd *qc); |
1042 | extern void ata_eh_analyze_ncq_error(struct ata_link *link); | ||
1042 | 1043 | ||
1043 | extern void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset, | 1044 | extern void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset, |
1044 | ata_reset_fn_t softreset, ata_reset_fn_t hardreset, | 1045 | ata_reset_fn_t softreset, ata_reset_fn_t hardreset, |
@@ -1381,6 +1382,21 @@ static inline struct ata_port *ata_shost_to_port(struct Scsi_Host *host) | |||
1381 | return *(struct ata_port **)&host->hostdata[0]; | 1382 | return *(struct ata_port **)&host->hostdata[0]; |
1382 | } | 1383 | } |
1383 | 1384 | ||
1385 | static inline int ata_check_ready(u8 status) | ||
1386 | { | ||
1387 | /* Some controllers report 0x77 or 0x7f during intermediate | ||
1388 | * not-ready stages. | ||
1389 | */ | ||
1390 | if (status == 0x77 || status == 0x7f) | ||
1391 | return 0; | ||
1392 | |||
1393 | /* 0xff indicates either no device or device not ready */ | ||
1394 | if (status == 0xff) | ||
1395 | return -ENODEV; | ||
1396 | |||
1397 | return !(status & ATA_BUSY); | ||
1398 | } | ||
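ata_check_ready() folds three outcomes into one return value: 1 (ready), 0 (still busy, including the 0x77/0x7f intermediate encodings some controllers report), and -ENODEV (0xff: no device present or not ready). A sketch of the kind of polling loop a driver might build on it, where read_status is a hypothetical callback returning the taskfile status byte, not a libata API:

    #include <linux/delay.h>
    #include <linux/libata.h>

    /* Illustrative poll loop (not from this patch). */
    static int example_wait_ready(u8 (*read_status)(void *ctx), void *ctx,
    			      unsigned int timeout_ms)
    {
    	while (timeout_ms--) {
    		int rc = ata_check_ready(read_status(ctx));

    		if (rc)			/* 1: ready, -ENODEV: give up */
    			return rc > 0 ? 0 : rc;
    		mdelay(1);		/* BSY still set; poll again */
    	}
    	return -EBUSY;
    }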
1399 | |||
1384 | 1400 | ||
1385 | /************************************************************************** | 1401 | /************************************************************************** |
1386 | * PMP - drivers/ata/libata-pmp.c | 1402 | * PMP - drivers/ata/libata-pmp.c |