author    Jiri Kosina <jkosina@suse.cz>    2008-05-20 10:43:50 -0400
committer Jiri Kosina <jkosina@suse.cz>    2008-05-20 10:43:50 -0400
commit    2d4b3f37ded8998a362c8d0b4be02f583dd9a002 (patch)
tree      5c66ddaf0a6ab6d898931a5ed58c7aa844b94740 /drivers/ata
parent    7022b15e2a9f878fd5184586064c63352c3dd225 (diff)
parent    8033c6e9736c29cce5f0d0abbca9a44dffb20c39 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'drivers/ata')
-rw-r--r--  drivers/ata/Kconfig         |  13
-rw-r--r--  drivers/ata/Makefile        |   1
-rw-r--r--  drivers/ata/ahci.c          |   4
-rw-r--r--  drivers/ata/ata_generic.c   |   6
-rw-r--r--  drivers/ata/ata_piix.c      |  25
-rw-r--r--  drivers/ata/libata-acpi.c   |  75
-rw-r--r--  drivers/ata/libata-core.c   |  41
-rw-r--r--  drivers/ata/libata-eh.c     | 209
-rw-r--r--  drivers/ata/libata-pmp.c    |  44
-rw-r--r--  drivers/ata/libata-scsi.c   |   6
-rw-r--r--  drivers/ata/libata-sff.c    |   6
-rw-r--r--  drivers/ata/pata_acpi.c     |   6
-rw-r--r--  drivers/ata/pata_ali.c      |  10
-rw-r--r--  drivers/ata/pata_amd.c      |  14
-rw-r--r--  drivers/ata/pata_at32.c     |   2
-rw-r--r--  drivers/ata/pata_bf54x.c    |   5
-rw-r--r--  drivers/ata/pata_cypress.c  |   8
-rw-r--r--  drivers/ata/pata_legacy.c   |  50
-rw-r--r--  drivers/ata/pata_ns87410.c  |   6
-rw-r--r--  drivers/ata/pata_ns87415.c  |   4
-rw-r--r--  drivers/ata/pata_qdi.c      |  16
-rw-r--r--  drivers/ata/pata_sch.c      | 206
-rw-r--r--  drivers/ata/pata_sl82c105.c |   2
-rw-r--r--  drivers/ata/pata_via.c      |  14
-rw-r--r--  drivers/ata/pata_winbond.c  |   6
-rw-r--r--  drivers/ata/sata_inic162x.c | 646
-rw-r--r--  drivers/ata/sata_mv.c       | 845
-rw-r--r--  drivers/ata/sata_promise.c  | 148
-rw-r--r--  drivers/ata/sata_sil24.c    |  11
29 files changed, 1654 insertions(+), 775 deletions(-)
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 1c11df9a5f32..9bf2986a2788 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -205,8 +205,8 @@ config SATA_VITESSE
 	  If unsure, say N.
 
 config SATA_INIC162X
-	tristate "Initio 162x SATA support (HIGHLY EXPERIMENTAL)"
-	depends on PCI && EXPERIMENTAL
+	tristate "Initio 162x SATA support"
+	depends on PCI
 	help
 	  This option enables support for Initio 162x Serial ATA.
 
@@ -697,6 +697,15 @@ config PATA_SCC
 
 	  If unsure, say N.
 
+config PATA_SCH
+	tristate "Intel SCH PATA support"
+	depends on PCI
+	help
+	  This option enables support for Intel SCH PATA on the Intel
+	  SCH (US15W, US15L, UL11L) series host controllers.
+
+	  If unsure, say N.
+
 config PATA_BF54X
 	tristate "Blackfin 54x ATAPI support"
 	depends on BF542 || BF548 || BF549
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index b693d829383a..674965fa326d 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -67,6 +67,7 @@ obj-$(CONFIG_PATA_SIS) += pata_sis.o
 obj-$(CONFIG_PATA_TRIFLEX)	+= pata_triflex.o
 obj-$(CONFIG_PATA_IXP4XX_CF)	+= pata_ixp4xx_cf.o
 obj-$(CONFIG_PATA_SCC)		+= pata_scc.o
+obj-$(CONFIG_PATA_SCH)		+= pata_sch.o
 obj-$(CONFIG_PATA_BF54X)	+= pata_bf54x.o
 obj-$(CONFIG_PATA_PLATFORM)	+= pata_platform.o
 obj-$(CONFIG_PATA_OF_PLATFORM)	+= pata_of_platform.o
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 8cace9aa9c03..97f83fb2ee2e 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -1267,9 +1267,7 @@ static int ahci_check_ready(struct ata_link *link)
 	void __iomem *port_mmio = ahci_port_base(link->ap);
 	u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
 
-	if (!(status & ATA_BUSY))
-		return 1;
-	return 0;
+	return ata_check_ready(status);
 }
 
 static int ahci_softreset(struct ata_link *link, unsigned int *class,
diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c
index 47aeccd52fa9..75a406f5e694 100644
--- a/drivers/ata/ata_generic.c
+++ b/drivers/ata/ata_generic.c
@@ -152,6 +152,12 @@ static int ata_generic_init_one(struct pci_dev *dev, const struct pci_device_id
 	if (dev->vendor == PCI_VENDOR_ID_AL)
 		ata_pci_bmdma_clear_simplex(dev);
 
+	if (dev->vendor == PCI_VENDOR_ID_ATI) {
+		int rc = pcim_enable_device(dev);
+		if (rc < 0)
+			return rc;
+		pcim_pin_device(dev);
+	}
 	return ata_pci_sff_init_one(dev, ppi, &generic_sht, NULL);
 }
 
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index ea2c7649d399..a9027b8fbdd5 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -1348,6 +1348,8 @@ static void __devinit piix_init_sidpr(struct ata_host *host)
 {
 	struct pci_dev *pdev = to_pci_dev(host->dev);
 	struct piix_host_priv *hpriv = host->private_data;
+	struct ata_device *dev0 = &host->ports[0]->link.device[0];
+	u32 scontrol;
 	int i;
 
 	/* check for availability */
@@ -1366,6 +1368,29 @@ static void __devinit piix_init_sidpr(struct ata_host *host)
 		return;
 
 	hpriv->sidpr = pcim_iomap_table(pdev)[PIIX_SIDPR_BAR];
+
+	/* SCR access via SIDPR doesn't work on some configurations.
+	 * Give it a test drive by inhibiting power save modes which
+	 * we'll do anyway.
+	 */
+	scontrol = piix_sidpr_read(dev0, SCR_CONTROL);
+
+	/* if IPM is already 3, SCR access is probably working.  Don't
+	 * un-inhibit power save modes as BIOS might have inhibited
+	 * them for a reason.
+	 */
+	if ((scontrol & 0xf00) != 0x300) {
+		scontrol |= 0x300;
+		piix_sidpr_write(dev0, SCR_CONTROL, scontrol);
+		scontrol = piix_sidpr_read(dev0, SCR_CONTROL);
+
+		if ((scontrol & 0xf00) != 0x300) {
+			dev_printk(KERN_INFO, host->dev, "SCR access via "
+				   "SIDPR is available but doesn't work\n");
+			return;
+		}
+	}
+
 	host->ports[0]->ops = &piix_sidpr_sata_ops;
 	host->ports[1]->ops = &piix_sidpr_sata_ops;
 }
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index 70b77e0899a8..dbf6ca781f66 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -118,8 +118,8 @@ static void ata_acpi_associate_ide_port(struct ata_port *ap)
 	ap->pflags |= ATA_PFLAG_INIT_GTM_VALID;
 }
 
-static void ata_acpi_handle_hotplug(struct ata_port *ap, struct ata_device *dev,
-				    u32 event)
+static void ata_acpi_handle_hotplug(struct ata_port *ap, struct ata_device
+				    *dev, u32 event)
 {
 	char event_string[12];
 	char *envp[] = { event_string, NULL };
@@ -127,6 +127,9 @@ static void ata_acpi_handle_hotplug(struct ata_port *ap, struct ata_device *dev,
 	struct kobject *kobj = NULL;
 	int wait = 0;
 	unsigned long flags;
+	acpi_handle handle, tmphandle;
+	unsigned long sta;
+	acpi_status status;
 
 	if (!ap)
 		ap = dev->link->ap;
@@ -134,32 +137,57 @@ static void ata_acpi_handle_hotplug(struct ata_port *ap, struct ata_device *dev,
 
 	spin_lock_irqsave(ap->lock, flags);
 
+	if (dev)
+		handle = dev->acpi_handle;
+	else
+		handle = ap->acpi_handle;
+
+	status = acpi_get_handle(handle, "_EJ0", &tmphandle);
+	if (ACPI_FAILURE(status)) {
+		/* This device is not ejectable */
+		spin_unlock_irqrestore(ap->lock, flags);
+		return;
+	}
+
+	status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
+	if (ACPI_FAILURE(status)) {
+		printk ("Unable to determine bay status\n");
+		spin_unlock_irqrestore(ap->lock, flags);
+		return;
+	}
+
 	switch (event) {
 	case ACPI_NOTIFY_BUS_CHECK:
 	case ACPI_NOTIFY_DEVICE_CHECK:
 		ata_ehi_push_desc(ehi, "ACPI event");
-		ata_ehi_hotplugged(ehi);
-		ata_port_freeze(ap);
-		break;
-
-	case ACPI_NOTIFY_EJECT_REQUEST:
-		ata_ehi_push_desc(ehi, "ACPI event");
-		if (dev)
-			dev->flags |= ATA_DFLAG_DETACH;
-		else {
-			struct ata_link *tlink;
-			struct ata_device *tdev;
-
-			ata_port_for_each_link(tlink, ap)
-				ata_link_for_each_dev(tdev, tlink)
-					tdev->flags |= ATA_DFLAG_DETACH;
+		if (!sta) {
+			/* Device has been unplugged */
+			if (dev)
+				dev->flags |= ATA_DFLAG_DETACH;
+			else {
+				struct ata_link *tlink;
+				struct ata_device *tdev;
+
+				ata_port_for_each_link(tlink, ap) {
+					ata_link_for_each_dev(tdev, tlink) {
+						tdev->flags |=
+							ATA_DFLAG_DETACH;
+					}
+				}
+			}
+			ata_port_schedule_eh(ap);
+			wait = 1;
+		} else {
+			ata_ehi_hotplugged(ehi);
+			ata_port_freeze(ap);
 		}
-
-		ata_port_schedule_eh(ap);
-		wait = 1;
-		break;
 	}
 
+	spin_unlock_irqrestore(ap->lock, flags);
+
+	if (wait)
+		ata_port_wait_eh(ap);
+
 	if (dev) {
 		if (dev->sdev)
 			kobj = &dev->sdev->sdev_gendev.kobj;
@@ -170,11 +198,6 @@ static void ata_acpi_handle_hotplug(struct ata_port *ap, struct ata_device *dev,
 		sprintf(event_string, "BAY_EVENT=%d", event);
 		kobject_uevent_env(kobj, KOBJ_CHANGE, envp);
 	}
-
-	spin_unlock_irqrestore(ap->lock, flags);
-
-	if (wait)
-		ata_port_wait_eh(ap);
 }
 
 static void ata_acpi_dev_notify(acpi_handle handle, u32 event, void *data)
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 3bc488538204..3c89f205c83f 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -2126,6 +2126,13 @@ int ata_dev_configure(struct ata_device *dev)
 	dev->horkage |= ata_dev_blacklisted(dev);
 	ata_force_horkage(dev);
 
+	if (dev->horkage & ATA_HORKAGE_DISABLE) {
+		ata_dev_printk(dev, KERN_INFO,
+			       "unsupported device, disabling\n");
+		ata_dev_disable(dev);
+		return 0;
+	}
+
 	/* let ACPI work its magic */
 	rc = ata_acpi_on_devcfg(dev);
 	if (rc)
@@ -3490,22 +3497,11 @@ int sata_link_resume(struct ata_link *link, const unsigned long *params,
 	if ((rc = sata_link_debounce(link, params, deadline)))
 		return rc;
 
-	/* Clear SError.  PMP and some host PHYs require this to
-	 * operate and clearing should be done before checking PHY
-	 * online status to avoid race condition (hotplugging between
-	 * link resume and status check).
-	 */
+	/* clear SError, some PHYs require this even for SRST to work */
 	if (!(rc = sata_scr_read(link, SCR_ERROR, &serror)))
 		rc = sata_scr_write(link, SCR_ERROR, serror);
-	if (rc == 0 || rc == -EINVAL) {
-		unsigned long flags;
 
-		spin_lock_irqsave(link->ap->lock, flags);
-		link->eh_info.serror = 0;
-		spin_unlock_irqrestore(link->ap->lock, flags);
-		rc = 0;
-	}
-	return rc;
+	return rc != -EINVAL ? rc : 0;
 }
 
 /**
@@ -3653,9 +3649,13 @@ int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
 	if (check_ready)
 		rc = ata_wait_ready(link, deadline, check_ready);
  out:
-	if (rc && rc != -EAGAIN)
+	if (rc && rc != -EAGAIN) {
+		/* online is set iff link is online && reset succeeded */
+		if (online)
+			*online = false;
 		ata_link_printk(link, KERN_ERR,
 				"COMRESET failed (errno=%d)\n", rc);
+	}
 	DPRINTK("EXIT, rc=%d\n", rc);
 	return rc;
 }
@@ -3700,8 +3700,14 @@ int sata_std_hardreset(struct ata_link *link, unsigned int *class,
  */
 void ata_std_postreset(struct ata_link *link, unsigned int *classes)
 {
+	u32 serror;
+
 	DPRINTK("ENTER\n");
 
+	/* reset complete, clear SError */
+	if (!sata_scr_read(link, SCR_ERROR, &serror))
+		sata_scr_write(link, SCR_ERROR, serror);
+
 	/* print link status */
 	sata_print_link_status(link);
 
@@ -3894,8 +3900,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
 	{ "SAMSUNG CD-ROM SN-124", "N001",	ATA_HORKAGE_NODMA },
 	{ "Seagate STT20000A", NULL,		ATA_HORKAGE_NODMA },
 	/* Odd clown on sil3726/4726 PMPs */
-	{ "Config Disk",	NULL,		ATA_HORKAGE_NODMA |
-						ATA_HORKAGE_SKIP_PM },
+	{ "Config Disk",	NULL,		ATA_HORKAGE_DISABLE },
 
 	/* Weird ATAPI devices */
 	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 },
@@ -5616,7 +5621,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
 		spin_lock_irqsave(ap->lock, flags);
 
 		ehi->probe_mask |= ATA_ALL_DEVICES;
-		ehi->action |= ATA_EH_RESET;
+		ehi->action |= ATA_EH_RESET | ATA_EH_LPM;
 		ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
 
 		ap->pflags &= ~ATA_PFLAG_INITIALIZING;
@@ -5649,7 +5654,6 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
 		struct ata_port *ap = host->ports[i];
 
 		ata_scsi_scan_host(ap, 1);
-		ata_lpm_schedule(ap, ap->pm_policy);
 	}
 
 	return 0;
@@ -6292,6 +6296,7 @@ EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
 EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
 EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
 EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
+EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error);
 EXPORT_SYMBOL_GPL(ata_do_eh);
 EXPORT_SYMBOL_GPL(ata_std_error_handler);
 
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 61dcd0026c64..7894d83ea1eb 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -1308,12 +1308,7 @@ static void ata_eh_analyze_serror(struct ata_link *link)
 	unsigned int err_mask = 0, action = 0;
 	u32 hotplug_mask;
 
-	if (serror & SERR_PERSISTENT) {
-		err_mask |= AC_ERR_ATA_BUS;
-		action |= ATA_EH_RESET;
-	}
-	if (serror &
-	    (SERR_DATA_RECOVERED | SERR_COMM_RECOVERED | SERR_DATA)) {
+	if (serror & (SERR_PERSISTENT | SERR_DATA)) {
 		err_mask |= AC_ERR_ATA_BUS;
 		action |= ATA_EH_RESET;
 	}
@@ -1357,7 +1352,7 @@ static void ata_eh_analyze_serror(struct ata_link *link)
  * LOCKING:
  *	Kernel thread context (may sleep).
  */
-static void ata_eh_analyze_ncq_error(struct ata_link *link)
+void ata_eh_analyze_ncq_error(struct ata_link *link)
 {
 	struct ata_port *ap = link->ap;
 	struct ata_eh_context *ehc = &link->eh_context;
@@ -2047,19 +2042,11 @@ static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset,
 			 unsigned int *classes, unsigned long deadline)
 {
 	struct ata_device *dev;
-	int rc;
 
 	ata_link_for_each_dev(dev, link)
 		classes[dev->devno] = ATA_DEV_UNKNOWN;
 
-	rc = reset(link, classes, deadline);
-
-	/* convert all ATA_DEV_UNKNOWN to ATA_DEV_NONE */
-	ata_link_for_each_dev(dev, link)
-		if (classes[dev->devno] == ATA_DEV_UNKNOWN)
-			classes[dev->devno] = ATA_DEV_NONE;
-
-	return rc;
+	return reset(link, classes, deadline);
 }
 
 static int ata_eh_followup_srst_needed(struct ata_link *link,
@@ -2096,9 +2083,11 @@ int ata_eh_reset(struct ata_link *link, int classify,
 	ata_reset_fn_t reset;
 	unsigned long flags;
 	u32 sstatus;
-	int rc;
+	int nr_known, rc;
 
-	/* about to reset */
+	/*
+	 * Prepare to reset
+	 */
 	spin_lock_irqsave(ap->lock, flags);
 	ap->pflags |= ATA_PFLAG_RESETTING;
 	spin_unlock_irqrestore(ap->lock, flags);
@@ -2124,16 +2113,8 @@ int ata_eh_reset(struct ata_link *link, int classify,
 			ap->ops->set_piomode(ap, dev);
 	}
 
-	if (!softreset && !hardreset) {
-		if (verbose)
-			ata_link_printk(link, KERN_INFO, "no reset method "
-					"available, skipping reset\n");
-		if (!(lflags & ATA_LFLAG_ASSUME_CLASS))
-			lflags |= ATA_LFLAG_ASSUME_ATA;
-		goto done;
-	}
-
 	/* prefer hardreset */
+	reset = NULL;
 	ehc->i.action &= ~ATA_EH_RESET;
 	if (hardreset) {
 		reset = hardreset;
@@ -2141,11 +2122,6 @@ int ata_eh_reset(struct ata_link *link, int classify,
 	} else if (softreset) {
 		reset = softreset;
 		ehc->i.action = ATA_EH_SOFTRESET;
-	} else {
-		ata_link_printk(link, KERN_ERR, "BUG: no reset method, "
-				"please report to linux-ide@vger.kernel.org\n");
-		dump_stack();
-		return -EINVAL;
 	}
 
 	if (prereset) {
@@ -2165,55 +2141,71 @@ int ata_eh_reset(struct ata_link *link, int classify,
 					"prereset failed (errno=%d)\n", rc);
 			goto out;
 		}
-	}
 
-	/* prereset() might have cleared ATA_EH_RESET */
-	if (!(ehc->i.action & ATA_EH_RESET)) {
-		/* prereset told us not to reset, bang classes and return */
-		ata_link_for_each_dev(dev, link)
-			classes[dev->devno] = ATA_DEV_NONE;
-		rc = 0;
-		goto out;
+		/* prereset() might have cleared ATA_EH_RESET.  If so,
+		 * bang classes and return.
+		 */
+		if (reset && !(ehc->i.action & ATA_EH_RESET)) {
+			ata_link_for_each_dev(dev, link)
+				classes[dev->devno] = ATA_DEV_NONE;
+			rc = 0;
+			goto out;
+		}
 	}
 
  retry:
+	/*
+	 * Perform reset
+	 */
+	if (ata_is_host_link(link))
+		ata_eh_freeze_port(ap);
+
 	deadline = jiffies + ata_eh_reset_timeouts[try++];
 
-	/* shut up during boot probing */
-	if (verbose)
-		ata_link_printk(link, KERN_INFO, "%s resetting link\n",
-				reset == softreset ? "soft" : "hard");
+	if (reset) {
+		if (verbose)
+			ata_link_printk(link, KERN_INFO, "%s resetting link\n",
+					reset == softreset ? "soft" : "hard");
 
-	/* mark that this EH session started with reset */
-	if (reset == hardreset)
-		ehc->i.flags |= ATA_EHI_DID_HARDRESET;
-	else
-		ehc->i.flags |= ATA_EHI_DID_SOFTRESET;
+		/* mark that this EH session started with reset */
+		if (reset == hardreset)
+			ehc->i.flags |= ATA_EHI_DID_HARDRESET;
+		else
+			ehc->i.flags |= ATA_EHI_DID_SOFTRESET;
 
-	rc = ata_do_reset(link, reset, classes, deadline);
+		rc = ata_do_reset(link, reset, classes, deadline);
 
-	if (reset == hardreset &&
-	    ata_eh_followup_srst_needed(link, rc, classify, classes)) {
-		/* okay, let's do follow-up softreset */
-		reset = softreset;
+		if (reset == hardreset &&
+		    ata_eh_followup_srst_needed(link, rc, classify, classes)) {
+			/* okay, let's do follow-up softreset */
+			reset = softreset;
 
-		if (!reset) {
-			ata_link_printk(link, KERN_ERR,
-					"follow-up softreset required "
-					"but no softreset avaliable\n");
-			rc = -EINVAL;
-			goto fail;
+			if (!reset) {
+				ata_link_printk(link, KERN_ERR,
+						"follow-up softreset required "
+						"but no softreset avaliable\n");
+				rc = -EINVAL;
+				goto fail;
+			}
+
+			ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
+			rc = ata_do_reset(link, reset, classes, deadline);
 		}
 
-		ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
-		rc = ata_do_reset(link, reset, classes, deadline);
+		/* -EAGAIN can happen if we skipped followup SRST */
+		if (rc && rc != -EAGAIN)
+			goto fail;
+	} else {
+		if (verbose)
+			ata_link_printk(link, KERN_INFO, "no reset method "
+					"available, skipping reset\n");
+		if (!(lflags & ATA_LFLAG_ASSUME_CLASS))
+			lflags |= ATA_LFLAG_ASSUME_ATA;
 	}
 
-	/* -EAGAIN can happen if we skipped followup SRST */
-	if (rc && rc != -EAGAIN)
-		goto fail;
-
- done:
+	/*
+	 * Post-reset processing
+	 */
 	ata_link_for_each_dev(dev, link) {
 		/* After the reset, the device state is PIO 0 and the
 		 * controller state is undefined.  Reset also wakes up
@@ -2236,9 +2228,53 @@ int ata_eh_reset(struct ata_link *link, int classify,
 	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0)
 		link->sata_spd = (sstatus >> 4) & 0xf;
 
+	/* thaw the port */
+	if (ata_is_host_link(link))
+		ata_eh_thaw_port(ap);
+
+	/* postreset() should clear hardware SError.  Although SError
+	 * is cleared during link resume, clearing SError here is
+	 * necessary as some PHYs raise hotplug events after SRST.
+	 * This introduces race condition where hotplug occurs between
+	 * reset and here.  This race is mediated by cross checking
+	 * link onlineness and classification result later.
+	 */
 	if (postreset)
 		postreset(link, classes);
 
+	/* clear cached SError */
+	spin_lock_irqsave(link->ap->lock, flags);
+	link->eh_info.serror = 0;
+	spin_unlock_irqrestore(link->ap->lock, flags);
+
+	/* Make sure onlineness and classification result correspond.
+	 * Hotplug could have happened during reset and some
+	 * controllers fail to wait while a drive is spinning up after
+	 * being hotplugged causing misdetection.  By cross checking
+	 * link onlineness and classification result, those conditions
+	 * can be reliably detected and retried.
+	 */
+	nr_known = 0;
+	ata_link_for_each_dev(dev, link) {
+		/* convert all ATA_DEV_UNKNOWN to ATA_DEV_NONE */
+		if (classes[dev->devno] == ATA_DEV_UNKNOWN)
+			classes[dev->devno] = ATA_DEV_NONE;
+		else
+			nr_known++;
+	}
+
+	if (classify && !nr_known && ata_link_online(link)) {
+		if (try < max_tries) {
+			ata_link_printk(link, KERN_WARNING, "link online but "
+					"device misclassified, retrying\n");
+			rc = -EAGAIN;
+			goto fail;
+		}
+		ata_link_printk(link, KERN_WARNING,
+				"link online but device misclassified, "
+				"device detection might fail\n");
+	}
+
 	/* reset successful, schedule revalidation */
 	ata_eh_done(link, NULL, ATA_EH_RESET);
 	ehc->i.action |= ATA_EH_REVALIDATE;
@@ -2587,7 +2623,7 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
 	struct ata_link *link;
 	struct ata_device *dev;
 	int nr_failed_devs, nr_disabled_devs;
-	int reset, rc;
+	int rc;
 	unsigned long flags;
 
 	DPRINTK("ENTER\n");
@@ -2630,7 +2666,6 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
 	rc = 0;
 	nr_failed_devs = 0;
 	nr_disabled_devs = 0;
-	reset = 0;
 
 	/* if UNLOADING, finish immediately */
 	if (ap->pflags & ATA_PFLAG_UNLOADING)
@@ -2644,40 +2679,24 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
 		if (ata_eh_skip_recovery(link))
 			ehc->i.action = 0;
 
-		/* do we need to reset? */
-		if (ehc->i.action & ATA_EH_RESET)
-			reset = 1;
-
 		ata_link_for_each_dev(dev, link)
 			ehc->classes[dev->devno] = ATA_DEV_UNKNOWN;
 	}
 
 	/* reset */
-	if (reset) {
-		/* if PMP is attached, this function only deals with
-		 * downstream links, port should stay thawed.
-		 */
-		if (!sata_pmp_attached(ap))
-			ata_eh_freeze_port(ap);
-
-		ata_port_for_each_link(link, ap) {
-			struct ata_eh_context *ehc = &link->eh_context;
+	ata_port_for_each_link(link, ap) {
+		struct ata_eh_context *ehc = &link->eh_context;
 
-			if (!(ehc->i.action & ATA_EH_RESET))
-				continue;
+		if (!(ehc->i.action & ATA_EH_RESET))
+			continue;
 
-			rc = ata_eh_reset(link, ata_link_nr_vacant(link),
-					  prereset, softreset, hardreset,
-					  postreset);
-			if (rc) {
-				ata_link_printk(link, KERN_ERR,
-						"reset failed, giving up\n");
-				goto out;
-			}
+		rc = ata_eh_reset(link, ata_link_nr_vacant(link),
+				  prereset, softreset, hardreset, postreset);
+		if (rc) {
+			ata_link_printk(link, KERN_ERR,
+					"reset failed, giving up\n");
+			goto out;
 		}
-
-		if (!sata_pmp_attached(ap))
-			ata_eh_thaw_port(ap);
 	}
 
 	/* the rest */
diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
index ff1822a7da38..0f9386d4a5a0 100644
--- a/drivers/ata/libata-pmp.c
+++ b/drivers/ata/libata-pmp.c
@@ -48,7 +48,7 @@ static unsigned int sata_pmp_read(struct ata_link *link, int reg, u32 *r_val)
 	tf.device = link->pmp;
 
 	err_mask = ata_exec_internal(pmp_dev, &tf, NULL, DMA_NONE, NULL, 0,
-				     SATA_PMP_SCR_TIMEOUT);
+				     SATA_PMP_RW_TIMEOUT);
 	if (err_mask)
 		return err_mask;
 
@@ -88,7 +88,7 @@ static unsigned int sata_pmp_write(struct ata_link *link, int reg, u32 val)
 	tf.lbah = (val >> 24) & 0xff;
 
 	return ata_exec_internal(pmp_dev, &tf, NULL, DMA_NONE, NULL, 0,
-				SATA_PMP_SCR_TIMEOUT);
+				SATA_PMP_RW_TIMEOUT);
 }
 
 /**
@@ -257,19 +257,6 @@ static int sata_pmp_configure(struct ata_device *dev, int print_info)
 		goto fail;
 	}
 
-	/* turn off notification till fan-out ports are reset and configured */
-	if (gscr[SATA_PMP_GSCR_FEAT_EN] & SATA_PMP_FEAT_NOTIFY) {
-		gscr[SATA_PMP_GSCR_FEAT_EN] &= ~SATA_PMP_FEAT_NOTIFY;
-
-		err_mask = sata_pmp_write(dev->link, SATA_PMP_GSCR_FEAT_EN,
-					  gscr[SATA_PMP_GSCR_FEAT_EN]);
-		if (err_mask) {
-			rc = -EIO;
-			reason = "failed to write GSCR_FEAT_EN";
-			goto fail;
-		}
-	}
-
 	if (print_info) {
 		ata_dev_printk(dev, KERN_INFO, "Port Multiplier %s, "
 			       "0x%04x:0x%04x r%d, %d ports, feat 0x%x/0x%x\n",
@@ -700,8 +687,6 @@ static int sata_pmp_eh_recover_pmp(struct ata_port *ap,
 	if (ehc->i.action & ATA_EH_RESET) {
 		struct ata_link *tlink;
 
-		ata_eh_freeze_port(ap);
-
 		/* reset */
 		rc = ata_eh_reset(link, 0, prereset, softreset, hardreset,
 				  postreset);
@@ -711,8 +696,6 @@ static int sata_pmp_eh_recover_pmp(struct ata_port *ap,
 			goto fail;
 		}
 
-		ata_eh_thaw_port(ap);
-
 		/* PMP is reset, SErrors cannot be trusted, scan all */
 		ata_port_for_each_link(tlink, ap) {
 			struct ata_eh_context *ehc = &tlink->eh_context;
@@ -864,6 +847,7 @@ static int sata_pmp_eh_recover(struct ata_port *ap)
 	struct ata_link *pmp_link = &ap->link;
 	struct ata_device *pmp_dev = pmp_link->device;
 	struct ata_eh_context *pmp_ehc = &pmp_link->eh_context;
+	u32 *gscr = pmp_dev->gscr;
 	struct ata_link *link;
 	struct ata_device *dev;
 	unsigned int err_mask;
@@ -901,6 +885,22 @@ static int sata_pmp_eh_recover(struct ata_port *ap)
 	if (rc)
 		goto pmp_fail;
 
+	/* PHY event notification can disturb reset and other recovery
+	 * operations.  Turn it off.
+	 */
+	if (gscr[SATA_PMP_GSCR_FEAT_EN] & SATA_PMP_FEAT_NOTIFY) {
+		gscr[SATA_PMP_GSCR_FEAT_EN] &= ~SATA_PMP_FEAT_NOTIFY;
+
+		err_mask = sata_pmp_write(pmp_link, SATA_PMP_GSCR_FEAT_EN,
+					  gscr[SATA_PMP_GSCR_FEAT_EN]);
+		if (err_mask) {
+			ata_link_printk(pmp_link, KERN_WARNING,
+				"failed to disable NOTIFY (err_mask=0x%x)\n",
+				err_mask);
+			goto pmp_fail;
+		}
+	}
+
 	/* handle disabled links */
 	rc = sata_pmp_eh_handle_disabled_links(ap);
 	if (rc)
@@ -923,10 +923,10 @@ static int sata_pmp_eh_recover(struct ata_port *ap)
 
 	/* enable notification */
 	if (pmp_dev->flags & ATA_DFLAG_AN) {
-		pmp_dev->gscr[SATA_PMP_GSCR_FEAT_EN] |= SATA_PMP_FEAT_NOTIFY;
+		gscr[SATA_PMP_GSCR_FEAT_EN] |= SATA_PMP_FEAT_NOTIFY;
 
-		err_mask = sata_pmp_write(pmp_dev->link, SATA_PMP_GSCR_FEAT_EN,
-					  pmp_dev->gscr[SATA_PMP_GSCR_FEAT_EN]);
+		err_mask = sata_pmp_write(pmp_link, SATA_PMP_GSCR_FEAT_EN,
+					  gscr[SATA_PMP_GSCR_FEAT_EN]);
 		if (err_mask) {
 			ata_dev_printk(pmp_dev, KERN_ERR, "failed to write "
 				       "PMP_FEAT_EN (Emask=0x%x)\n", err_mask);
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 3ce43920e459..aeb6e01d82ce 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1082,12 +1082,6 @@ static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc)
 		if (((cdb[4] >> 4) & 0xf) != 0)
 			goto invalid_fld;	/* power conditions not supported */
 
-		if (qc->dev->horkage & ATA_HORKAGE_SKIP_PM) {
-			/* the device lacks PM support, finish without doing anything */
-			scmd->result = SAM_STAT_GOOD;
-			return 1;
-		}
-
 		if (cdb[4] & 0x1) {
 			tf->nsect = 1;	/* 1 sector, lba=0 */
 
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 2ec65a8fda79..3c2d2289f85e 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -314,11 +314,7 @@ static int ata_sff_check_ready(struct ata_link *link)
 {
 	u8 status = link->ap->ops->sff_check_status(link->ap);
 
-	if (!(status & ATA_BUSY))
-		return 1;
-	if (status == 0xff)
-		return -ENODEV;
-	return 0;
+	return ata_check_ready(status);
 }
 
 /**
diff --git a/drivers/ata/pata_acpi.c b/drivers/ata/pata_acpi.c
index c5f91e629945..fbe605711554 100644
--- a/drivers/ata/pata_acpi.c
+++ b/drivers/ata/pata_acpi.c
@@ -259,6 +259,12 @@ static int pacpi_init_one (struct pci_dev *pdev, const struct pci_device_id *id)
 		.port_ops	= &pacpi_ops,
 	};
 	const struct ata_port_info *ppi[] = { &info, NULL };
+	if (pdev->vendor == PCI_VENDOR_ID_ATI) {
+		int rc = pcim_enable_device(pdev);
+		if (rc < 0)
+			return rc;
+		pcim_pin_device(pdev);
+	}
 	return ata_pci_sff_init_one(pdev, ppi, &pacpi_sht, NULL);
 }
 
diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c
index fcabe46f262b..0f3e659db99a 100644
--- a/drivers/ata/pata_ali.c
+++ b/drivers/ata/pata_ali.c
@@ -177,11 +177,11 @@ static void ali_program_modes(struct ata_port *ap, struct ata_device *adev, stru
 	u8 udma;
 
 	if (t != NULL) {
-		t->setup = FIT(t->setup, 1, 8) & 7;
-		t->act8b = FIT(t->act8b, 1, 8) & 7;
-		t->rec8b = FIT(t->rec8b, 1, 16) & 15;
-		t->active = FIT(t->active, 1, 8) & 7;
-		t->recover = FIT(t->recover, 1, 16) & 15;
+		t->setup = clamp_val(t->setup, 1, 8) & 7;
+		t->act8b = clamp_val(t->act8b, 1, 8) & 7;
+		t->rec8b = clamp_val(t->rec8b, 1, 16) & 15;
+		t->active = clamp_val(t->active, 1, 8) & 7;
+		t->recover = clamp_val(t->recover, 1, 16) & 15;
 
 		pci_write_config_byte(pdev, cas, t->setup);
 		pci_write_config_byte(pdev, cbt, (t->act8b << 4) | t->rec8b);
diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
index 26665c396485..57dd00f463d3 100644
--- a/drivers/ata/pata_amd.c
+++ b/drivers/ata/pata_amd.c
@@ -84,32 +84,32 @@ static void timing_setup(struct ata_port *ap, struct ata_device *adev, int offse
 
 	/* Configure the address set up timing */
 	pci_read_config_byte(pdev, offset + 0x0C, &t);
-	t = (t & ~(3 << ((3 - dn) << 1))) | ((FIT(at.setup, 1, 4) - 1) << ((3 - dn) << 1));
+	t = (t & ~(3 << ((3 - dn) << 1))) | ((clamp_val(at.setup, 1, 4) - 1) << ((3 - dn) << 1));
 	pci_write_config_byte(pdev, offset + 0x0C , t);
 
 	/* Configure the 8bit I/O timing */
 	pci_write_config_byte(pdev, offset + 0x0E + (1 - (dn >> 1)),
-		((FIT(at.act8b, 1, 16) - 1) << 4) | (FIT(at.rec8b, 1, 16) - 1));
+		((clamp_val(at.act8b, 1, 16) - 1) << 4) | (clamp_val(at.rec8b, 1, 16) - 1));
 
 	/* Drive timing */
 	pci_write_config_byte(pdev, offset + 0x08 + (3 - dn),
-		((FIT(at.active, 1, 16) - 1) << 4) | (FIT(at.recover, 1, 16) - 1));
+		((clamp_val(at.active, 1, 16) - 1) << 4) | (clamp_val(at.recover, 1, 16) - 1));
 
 	switch (clock) {
 		case 1:
-		t = at.udma ? (0xc0 | (FIT(at.udma, 2, 5) - 2)) : 0x03;
+		t = at.udma ? (0xc0 | (clamp_val(at.udma, 2, 5) - 2)) : 0x03;
 		break;
 
 		case 2:
-		t = at.udma ? (0xc0 | amd_cyc2udma[FIT(at.udma, 2, 10)]) : 0x03;
+		t = at.udma ? (0xc0 | amd_cyc2udma[clamp_val(at.udma, 2, 10)]) : 0x03;
 		break;
 
 		case 3:
-		t = at.udma ? (0xc0 | amd_cyc2udma[FIT(at.udma, 1, 10)]) : 0x03;
+		t = at.udma ? (0xc0 | amd_cyc2udma[clamp_val(at.udma, 1, 10)]) : 0x03;
 		break;
 
 		case 4:
-		t = at.udma ? (0xc0 | amd_cyc2udma[FIT(at.udma, 1, 15)]) : 0x03;
+		t = at.udma ? (0xc0 | amd_cyc2udma[clamp_val(at.udma, 1, 15)]) : 0x03;
 		break;
 
 		default:
diff --git a/drivers/ata/pata_at32.c b/drivers/ata/pata_at32.c
index 5e104385d6a3..82fb6e273169 100644
--- a/drivers/ata/pata_at32.c
+++ b/drivers/ata/pata_at32.c
@@ -291,8 +291,6 @@ static int __init pata_at32_probe(struct platform_device *pdev)
 	if (!info)
 		return -ENOMEM;
 
-	memset(info, 0, sizeof(struct at32_ide_info));
-
 	info->irq = irq;
 	info->cs  = board->cs;
 
diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c
index 9ab89732cf94..55516103626a 100644
--- a/drivers/ata/pata_bf54x.c
+++ b/drivers/ata/pata_bf54x.c
@@ -911,7 +911,10 @@ static void bfin_bmdma_start(struct ata_queued_cmd *qc)
 	/* Reset all transfer count */
 	ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base) | TFRCNT_RST);
 
-	/* Set transfer length to buffer len */
+	/* Set ATAPI state machine contorl in terminate sequence */
+	ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base) | END_ON_TERM);
+
+	/* Set transfer length to buffer len */
 	for_each_sg(qc->sg, sg, qc->n_elem, si) {
 		ATAPI_SET_XFER_LEN(base, (sg_dma_len(sg) >> 1));
 	}
diff --git a/drivers/ata/pata_cypress.c b/drivers/ata/pata_cypress.c
index a9c3218e22fd..2ff62608ae37 100644
--- a/drivers/ata/pata_cypress.c
+++ b/drivers/ata/pata_cypress.c
@@ -62,14 +62,14 @@ static void cy82c693_set_piomode(struct ata_port *ap, struct ata_device *adev)
 		return;
 	}
 
-	time_16 = FIT(t.recover, 0, 15) | (FIT(t.active, 0, 15) << 4);
-	time_8 = FIT(t.act8b, 0, 15) | (FIT(t.rec8b, 0, 15) << 4);
+	time_16 = clamp_val(t.recover, 0, 15) | (clamp_val(t.active, 0, 15) << 4);
+	time_8 = clamp_val(t.act8b, 0, 15) | (clamp_val(t.rec8b, 0, 15) << 4);
 
 	if (adev->devno == 0) {
 		pci_read_config_dword(pdev, CY82_IDE_ADDRSETUP, &addr);
 
 		addr &= ~0x0F;	/* Mask bits */
-		addr |= FIT(t.setup, 0, 15);
+		addr |= clamp_val(t.setup, 0, 15);
 
 		pci_write_config_dword(pdev, CY82_IDE_ADDRSETUP, addr);
 		pci_write_config_byte(pdev, CY82_IDE_MASTER_IOR, time_16);
@@ -79,7 +79,7 @@ static void cy82c693_set_piomode(struct ata_port *ap, struct ata_device *adev)
 		pci_read_config_dword(pdev, CY82_IDE_ADDRSETUP, &addr);
 
 		addr &= ~0xF0;	/* Mask bits */
-		addr |= (FIT(t.setup, 0, 15) << 4);
+		addr |= (clamp_val(t.setup, 0, 15) << 4);
 
 		pci_write_config_dword(pdev, CY82_IDE_ADDRSETUP, addr);
 		pci_write_config_byte(pdev, CY82_IDE_SLAVE_IOR, time_16);
diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
index 7af4b29cc422..fe7cc8ed4ea4 100644
--- a/drivers/ata/pata_legacy.c
+++ b/drivers/ata/pata_legacy.c
@@ -343,8 +343,8 @@ static void ht6560a_set_piomode(struct ata_port *ap, struct ata_device *adev)
 	/* Get the timing data in cycles. For now play safe at 50Mhz */
 	ata_timing_compute(adev, adev->pio_mode, &t, 20000, 1000);
 
-	active = FIT(t.active, 2, 15);
-	recover = FIT(t.recover, 4, 15);
+	active = clamp_val(t.active, 2, 15);
+	recover = clamp_val(t.recover, 4, 15);
 
 	inb(0x3E6);
 	inb(0x3E6);
@@ -377,8 +377,8 @@ static void ht6560b_set_piomode(struct ata_port *ap, struct ata_device *adev)
 	/* Get the timing data in cycles. For now play safe at 50Mhz */
 	ata_timing_compute(adev, adev->pio_mode, &t, 20000, 1000);
 
-	active = FIT(t.active, 2, 15);
-	recover = FIT(t.recover, 2, 16);
+	active = clamp_val(t.active, 2, 15);
+	recover = clamp_val(t.recover, 2, 16);
 	recover &= 0x15;
 
 	inb(0x3E6);
@@ -462,9 +462,9 @@ static void opti82c611a_set_piomode(struct ata_port *ap,
 		ata_timing_merge(&t, &tp, &t, ATA_TIMING_SETUP);
 	}
 
-	active = FIT(t.active, 2, 17) - 2;
-	recover = FIT(t.recover, 1, 16) - 1;
-	setup = FIT(t.setup, 1, 4) - 1;
+	active = clamp_val(t.active, 2, 17) - 2;
+	recover = clamp_val(t.recover, 1, 16) - 1;
+	setup = clamp_val(t.setup, 1, 4) - 1;
 
 	/* Select the right timing bank for write timing */
 	rc = ioread8(ap->ioaddr.lbal_addr);
@@ -541,9 +541,9 @@ static void opti82c46x_set_piomode(struct ata_port *ap, struct ata_device *adev)
 		ata_timing_merge(&t, &tp, &t, ATA_TIMING_SETUP);
 	}
 
-	active = FIT(t.active, 2, 17) - 2;
-	recover = FIT(t.recover, 1, 16) - 1;
-	setup = FIT(t.setup, 1, 4) - 1;
+	active = clamp_val(t.active, 2, 17) - 2;
+	recover = clamp_val(t.recover, 1, 16) - 1;
+	setup = clamp_val(t.setup, 1, 4) - 1;
 
 	/* Select the right timing bank for write timing */
 	rc = ioread8(ap->ioaddr.lbal_addr);
@@ -624,11 +624,11 @@ static void qdi6500_set_piomode(struct ata_port *ap, struct ata_device *adev)
 	ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000);
 
 	if (ld_qdi->fast) {
-		active = 8 - FIT(t.active, 1, 8);
-		recovery = 18 - FIT(t.recover, 3, 18);
+		active = 8 - clamp_val(t.active, 1, 8);
+		recovery = 18 - clamp_val(t.recover, 3, 18);
 	} else {
-		active = 9 - FIT(t.active, 2, 9);
-		recovery = 15 - FIT(t.recover, 0, 15);
+		active = 9 - clamp_val(t.active, 2, 9);
+		recovery = 15 - clamp_val(t.recover, 0, 15);
 	}
 	timing = (recovery << 4) | active | 0x08;
 
@@ -658,11 +658,11 @@ static void qdi6580dp_set_piomode(struct ata_port *ap, struct ata_device *adev)
 	ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000);
 
 	if (ld_qdi->fast) {
-		active = 8 - FIT(t.active, 1, 8);
-		recovery = 18 - FIT(t.recover, 3, 18);
+		active = 8 - clamp_val(t.active, 1, 8);
+		recovery = 18 - clamp_val(t.recover, 3, 18);
 	} else {
-		active = 9 - FIT(t.active, 2, 9);
-		recovery = 15 - FIT(t.recover, 0, 15);
+		active = 9 - clamp_val(t.active, 2, 9);
+		recovery = 15 - clamp_val(t.recover, 0, 15);
 	}
 	timing = (recovery << 4) | active | 0x08;
 
@@ -695,11 +695,11 @@ static void qdi6580_set_piomode(struct ata_port *ap, struct ata_device *adev)
 	ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000);
 
 	if (ld_qdi->fast) {
-		active = 8 - FIT(t.active, 1, 8);
-		recovery = 18 - FIT(t.recover, 3, 18);
+		active = 8 - clamp_val(t.active, 1, 8);
+		recovery = 18 - clamp_val(t.recover, 3, 18);
 	} else {
-		active = 9 - FIT(t.active, 2, 9);
-		recovery = 15 - FIT(t.recover, 0, 15);
+		active = 9 - clamp_val(t.active, 2, 9);
+		recovery = 15 - clamp_val(t.recover, 0, 15);
 	}
 	timing = (recovery << 4) | active | 0x08;
 	ld_qdi->clock[adev->devno] = timing;
@@ -830,8 +830,8 @@ static void winbond_set_piomode(struct ata_port *ap, struct ata_device *adev)
 	else
 		ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000);
 
-	active = (FIT(t.active, 3, 17) - 1) & 0x0F;
-	recovery = (FIT(t.recover, 1, 15) + 1) & 0x0F;
+	active = (clamp_val(t.active, 3, 17) - 1) & 0x0F;
+	recovery = (clamp_val(t.recover, 1, 15) + 1) & 0x0F;
 	timing = (active << 4) | recovery;
 	winbond_writecfg(ld_winbond->timing, timing, reg);
 
@@ -842,7 +842,7 @@ static void winbond_set_piomode(struct ata_port *ap, struct ata_device *adev)
 	reg |= 0x08;	/* FIFO off */
 	if (!ata_pio_need_iordy(adev))
 		reg |= 0x02;	/* IORDY off */
-	reg |= (FIT(t.setup, 0, 3) << 6);
+	reg |= (clamp_val(t.setup, 0, 3) << 6);
 	winbond_writecfg(ld_winbond->timing, timing + 1, reg);
 }
 
diff --git a/drivers/ata/pata_ns87410.c b/drivers/ata/pata_ns87410.c
index 76d2455bc453..be756b7ef07e 100644
--- a/drivers/ata/pata_ns87410.c
+++ b/drivers/ata/pata_ns87410.c
@@ -91,9 +91,9 @@ static void ns87410_set_piomode(struct ata_port *ap, struct ata_device *adev)
 		return;
 	}
 
-	at.active = FIT(at.active, 2, 16) - 2;
-	at.setup = FIT(at.setup, 1, 4) - 1;
-	at.recover = FIT(at.recover, 1, 12) - 1;
+	at.active = clamp_val(at.active, 2, 16) - 2;
+	at.setup = clamp_val(at.setup, 1, 4) - 1;
+	at.recover = clamp_val(at.recover, 1, 12) - 1;
 
 	idetcr = (at.setup << 6) | (recoverbits[at.recover] << 3) | activebits[at.active];
 
diff --git a/drivers/ata/pata_ns87415.c b/drivers/ata/pata_ns87415.c
index ae92b0049bd5..e0aa7eaaee0a 100644
--- a/drivers/ata/pata_ns87415.c
+++ b/drivers/ata/pata_ns87415.c
@@ -66,8 +66,8 @@ static void ns87415_set_mode(struct ata_port *ap, struct ata_device *adev, u8 mo
 
 	ata_timing_compute(adev, adev->pio_mode, &t, T, 0);
 
-	clocking = 17 - FIT(t.active, 2, 17);
-	clocking |= (16 - FIT(t.recover, 1, 16)) << 4;
+	clocking = 17 - clamp_val(t.active, 2, 17);
+	clocking |= (16 - clamp_val(t.recover, 1, 16)) << 4;
 	/* Use the same timing for read and write bytes */
 	clocking |= (clocking << 8);
 	pci_write_config_word(dev, timing, clocking);
diff --git a/drivers/ata/pata_qdi.c b/drivers/ata/pata_qdi.c
index bf45cf017753..97e5b090d7c2 100644
--- a/drivers/ata/pata_qdi.c
+++ b/drivers/ata/pata_qdi.c
@@ -60,11 +60,11 @@ static void qdi6500_set_piomode(struct ata_port *ap, struct ata_device *adev)
 	ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000);
 
 	if (qdi->fast) {
-		active = 8 - FIT(t.active, 1, 8);
-		recovery = 18 - FIT(t.recover, 3, 18);
+		active = 8 - clamp_val(t.active, 1, 8);
+		recovery = 18 - clamp_val(t.recover, 3, 18);
 	} else {
-		active = 9 - FIT(t.active, 2, 9);
-		recovery = 15 - FIT(t.recover, 0, 15);
+		active = 9 - clamp_val(t.active, 2, 9);
+		recovery = 15 - clamp_val(t.recover, 0, 15);
 	}
 	timing = (recovery << 4) | active | 0x08;
 
@@ -84,11 +84,11 @@ static void qdi6580_set_piomode(struct ata_port *ap, struct ata_device *adev)
 	ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000);
 
 	if (qdi->fast) {
-		active = 8 - FIT(t.active, 1, 8);
-		recovery = 18 - FIT(t.recover, 3, 18);
+		active = 8 - clamp_val(t.active, 1, 8);
+		recovery = 18 - clamp_val(t.recover, 3, 18);
 	} else {
-		active = 9 - FIT(t.active, 2, 9);
-		recovery = 15 - FIT(t.recover, 0, 15);
+		active = 9 - clamp_val(t.active, 2, 9);
+		recovery = 15 - clamp_val(t.recover, 0, 15);
 	}
 	timing = (recovery << 4) | active | 0x08;
 
diff --git a/drivers/ata/pata_sch.c b/drivers/ata/pata_sch.c
new file mode 100644
index 000000000000..c8cc027789fe
--- /dev/null
+++ b/drivers/ata/pata_sch.c
@@ -0,0 +1,206 @@
+/*
+ * pata_sch.c - Intel SCH PATA controllers
+ *
+ * Copyright (c) 2008 Alek Du <alek.du@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.  If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+/*
+ * Supports:
+ *    Intel SCH (AF82US15W, AF82US15L, AF82UL11L) chipsets -- see spec at:
+ *    http://download.intel.com/design/chipsets/embedded/datashts/319537.pdf
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+#include <linux/dmi.h>
+
+#define DRV_NAME	"pata_sch"
+#define DRV_VERSION	"0.2"
+
+/* see SCH datasheet page 351 */
+enum {
+	D0TIM	= 0x80,		/* Device 0 Timing Register */
+	D1TIM	= 0x84,		/* Device 1 Timing Register */
+	PM	= 0x07,		/* PIO Mode Bit Mask */
+	MDM	= (0x03 << 8),	/* Multi-word DMA Mode Bit Mask */
+	UDM	= (0x07 << 16),	/* Ultra DMA Mode Bit Mask */
+	PPE	= (1 << 30),	/* Prefetch/Post Enable */
+	USD	= (1 << 31),	/* Use Synchronous DMA */
+};
+
+static int sch_init_one(struct pci_dev *pdev,
+			const struct pci_device_id *ent);
+static void sch_set_piomode(struct ata_port *ap, struct ata_device *adev);
+static void sch_set_dmamode(struct ata_port *ap, struct ata_device *adev);
+
+static const struct pci_device_id sch_pci_tbl[] = {
+	/* Intel SCH PATA Controller */
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_SCH_IDE), 0 },
+	{ }	/* terminate list */
+};
+
+static struct pci_driver sch_pci_driver = {
+	.name		= DRV_NAME,
+	.id_table	= sch_pci_tbl,
+	.probe		= sch_init_one,
+	.remove		= ata_pci_remove_one,
+#ifdef CONFIG_PM
+	.suspend	= ata_pci_device_suspend,
+	.resume		= ata_pci_device_resume,
+#endif
+};
+
+static struct scsi_host_template sch_sht = {
+	ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations sch_pata_ops = {
+	.inherits	= &ata_bmdma_port_ops,
+	.cable_detect	= ata_cable_unknown,
+	.set_piomode	= sch_set_piomode,
+	.set_dmamode	= sch_set_dmamode,
+};
+
+static struct ata_port_info sch_port_info = {
+	.flags		= 0,
+	.pio_mask	= ATA_PIO4,   /* pio0-4 */
+	.mwdma_mask	= ATA_MWDMA2, /* mwdma0-2 */
+	.udma_mask	= ATA_UDMA5,  /* udma0-5 */
+	.port_ops	= &sch_pata_ops,
+};
+
+MODULE_AUTHOR("Alek Du <alek.du@intel.com>");
+MODULE_DESCRIPTION("SCSI low-level driver for Intel SCH PATA controllers");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, sch_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
+
+/**
+ *	sch_set_piomode - Initialize host controller PATA PIO timings
+ *	@ap: Port whose timings we are configuring
+ *	@adev: ATA device
+ *
+ *	Set PIO mode for device, in host controller PCI config space.
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void sch_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	unsigned int pio	= adev->pio_mode - XFER_PIO_0;
+	struct pci_dev *dev	= to_pci_dev(ap->host->dev);
+	unsigned int port	= adev->devno ? D1TIM : D0TIM;
+	unsigned int data;
+
+	pci_read_config_dword(dev, port, &data);
+	/* see SCH datasheet page 351 */
+	/* set PIO mode */
+	data &= ~(PM | PPE);
+	data |= pio;
+	/* enable PPE for block device */
+	if (adev->class == ATA_DEV_ATA)
+		data |= PPE;
+	pci_write_config_dword(dev, port, data);
+}
+
+/**
+ *	sch_set_dmamode - Initialize host controller PATA DMA timings
+ *	@ap: Port whose timings we are configuring
+ *	@adev: ATA device
+ *
+ *	Set MW/UDMA mode for device, in host controller PCI config space.
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void sch_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+	unsigned int dma_mode	= adev->dma_mode;
+	struct pci_dev *dev	= to_pci_dev(ap->host->dev);
+	unsigned int port	= adev->devno ? D1TIM : D0TIM;
+	unsigned int data;
+
+	pci_read_config_dword(dev, port, &data);
+	/* see SCH datasheet page 351 */
+	if (dma_mode >= XFER_UDMA_0) {
+		/* enable Synchronous DMA mode */
+		data |= USD;
+		data &= ~UDM;
+		data |= (dma_mode - XFER_UDMA_0) << 16;
+	} else { /* must be MWDMA mode, since we masked SWDMA already */
+		data &= ~(USD | MDM);
+		data |= (dma_mode - XFER_MW_DMA_0) << 8;
+	}
+	pci_write_config_dword(dev, port, data);
+}
+
+/**
+ *	sch_init_one - Register SCH ATA PCI device with kernel services
+ *	@pdev: PCI device to register
+ *	@ent: Entry in sch_pci_tbl matching with @pdev
+ *
+ *	LOCKING:
+ *	Inherited from PCI layer (may sleep).
+ *
+ *	RETURNS:
+ *	Zero on success, or -ERRNO value.
+ */
+
+static int __devinit sch_init_one(struct pci_dev *pdev,
+				  const struct pci_device_id *ent)
+{
+	static int printed_version;
+	const struct ata_port_info *ppi[] = { &sch_port_info, NULL };
+	struct ata_host *host;
+	int rc;
+
+	if (!printed_version++)
+		dev_printk(KERN_DEBUG, &pdev->dev,
+			   "version " DRV_VERSION "\n");
+
+	/* enable device and prepare host */
+	rc = pcim_enable_device(pdev);
+	if (rc)
+		return rc;
+	rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
+	if (rc)
+		return rc;
+	pci_set_master(pdev);
+	return ata_pci_sff_activate_host(host, ata_sff_interrupt, &sch_sht);
+}
+
+static int __init sch_init(void)
+{
+	return pci_register_driver(&sch_pci_driver);
+}
+
+static void __exit sch_exit(void)
+{
+	pci_unregister_driver(&sch_pci_driver);
+}
+
+module_init(sch_init);
+module_exit(sch_exit);
diff --git a/drivers/ata/pata_sl82c105.c b/drivers/ata/pata_sl82c105.c
index 70d94fb28a5f..69877bd81815 100644
--- a/drivers/ata/pata_sl82c105.c
+++ b/drivers/ata/pata_sl82c105.c
@@ -216,7 +216,7 @@ static int sl82c105_qc_defer(struct ata_queued_cmd *qc)
216 struct ata_port *alt = host->ports[1 ^ qc->ap->port_no]; 216 struct ata_port *alt = host->ports[1 ^ qc->ap->port_no];
217 int rc; 217 int rc;
218 218
219 /* First apply the usual rules */ 219 /* First apply the usual rules */
220 rc = ata_std_qc_defer(qc); 220 rc = ata_std_qc_defer(qc);
221 if (rc != 0) 221 if (rc != 0)
222 return rc; 222 return rc;
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
index 2fea6cbe7755..708ed144ede9 100644
--- a/drivers/ata/pata_via.c
+++ b/drivers/ata/pata_via.c
@@ -259,15 +259,15 @@ static void via_do_set_mode(struct ata_port *ap, struct ata_device *adev, int mo
259 259
260 pci_read_config_byte(pdev, 0x4C, &setup); 260 pci_read_config_byte(pdev, 0x4C, &setup);
261 setup &= ~(3 << shift); 261 setup &= ~(3 << shift);
262 setup |= FIT(t.setup, 1, 4) << shift; /* 1,4 or 1,4 - 1 FIXME */ 262 setup |= clamp_val(t.setup, 1, 4) << shift; /* 1,4 or 1,4 - 1 FIXME */
263 pci_write_config_byte(pdev, 0x4C, setup); 263 pci_write_config_byte(pdev, 0x4C, setup);
264 } 264 }
265 265
266 /* Load the PIO mode bits */ 266 /* Load the PIO mode bits */
267 pci_write_config_byte(pdev, 0x4F - ap->port_no, 267 pci_write_config_byte(pdev, 0x4F - ap->port_no,
268 ((FIT(t.act8b, 1, 16) - 1) << 4) | (FIT(t.rec8b, 1, 16) - 1)); 268 ((clamp_val(t.act8b, 1, 16) - 1) << 4) | (clamp_val(t.rec8b, 1, 16) - 1));
269 pci_write_config_byte(pdev, 0x48 + offset, 269 pci_write_config_byte(pdev, 0x48 + offset,
270 ((FIT(t.active, 1, 16) - 1) << 4) | (FIT(t.recover, 1, 16) - 1)); 270 ((clamp_val(t.active, 1, 16) - 1) << 4) | (clamp_val(t.recover, 1, 16) - 1));
271 271
272 /* Load the UDMA bits according to type */ 272 /* Load the UDMA bits according to type */
273 switch(udma_type) { 273 switch(udma_type) {
@@ -275,16 +275,16 @@ static void via_do_set_mode(struct ata_port *ap, struct ata_device *adev, int mo
275 /* BUG() ? */ 275 /* BUG() ? */
276 /* fall through */ 276 /* fall through */
277 case 33: 277 case 33:
278 ut = t.udma ? (0xe0 | (FIT(t.udma, 2, 5) - 2)) : 0x03; 278 ut = t.udma ? (0xe0 | (clamp_val(t.udma, 2, 5) - 2)) : 0x03;
279 break; 279 break;
280 case 66: 280 case 66:
281 ut = t.udma ? (0xe8 | (FIT(t.udma, 2, 9) - 2)) : 0x0f; 281 ut = t.udma ? (0xe8 | (clamp_val(t.udma, 2, 9) - 2)) : 0x0f;
282 break; 282 break;
283 case 100: 283 case 100:
284 ut = t.udma ? (0xe0 | (FIT(t.udma, 2, 9) - 2)) : 0x07; 284 ut = t.udma ? (0xe0 | (clamp_val(t.udma, 2, 9) - 2)) : 0x07;
285 break; 285 break;
286 case 133: 286 case 133:
287 ut = t.udma ? (0xe0 | (FIT(t.udma, 2, 9) - 2)) : 0x07; 287 ut = t.udma ? (0xe0 | (clamp_val(t.udma, 2, 9) - 2)) : 0x07;
288 break; 288 break;
289 } 289 }
290 290
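This hunk, and the pata_winbond.c one below, replace the drivers' local FIT() helper with the generic clamp_val() macro from <linux/kernel.h>. A minimal open-coded equivalent showing the semantics (illustrative stand-in only; the real macro is type-generic and evaluates in the type of its first argument):

	static inline unsigned long clamp_val_sketch(unsigned long v,
						     unsigned long lo,
						     unsigned long hi)
	{
		if (v < lo)
			return lo;	/* raise to the lower bound */
		if (v > hi)
			return hi;	/* cap at the upper bound */
		return v;
	}

So clamp_val(t.act8b, 1, 16) - 1, for example, bounds the 8-bit-command active time to the 1..16 clocks the register can express before the -1 bias maps it onto the 4-bit field.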
diff --git a/drivers/ata/pata_winbond.c b/drivers/ata/pata_winbond.c
index 6e52a3573fbf..474528f8fe3d 100644
--- a/drivers/ata/pata_winbond.c
+++ b/drivers/ata/pata_winbond.c
@@ -75,8 +75,8 @@ static void winbond_set_piomode(struct ata_port *ap, struct ata_device *adev)
75 else 75 else
76 ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000); 76 ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000);
77 77
78 active = (FIT(t.active, 3, 17) - 1) & 0x0F; 78 active = (clamp_val(t.active, 3, 17) - 1) & 0x0F;
79 recovery = (FIT(t.recover, 1, 15) + 1) & 0x0F; 79 recovery = (clamp_val(t.recover, 1, 15) + 1) & 0x0F;
80 timing = (active << 4) | recovery; 80 timing = (active << 4) | recovery;
81 winbond_writecfg(winbond->config, timing, reg); 81 winbond_writecfg(winbond->config, timing, reg);
82 82
@@ -87,7 +87,7 @@ static void winbond_set_piomode(struct ata_port *ap, struct ata_device *adev)
87 reg |= 0x08; /* FIFO off */ 87 reg |= 0x08; /* FIFO off */
88 if (!ata_pio_need_iordy(adev)) 88 if (!ata_pio_need_iordy(adev))
89 reg |= 0x02; /* IORDY off */ 89 reg |= 0x02; /* IORDY off */
90 reg |= (FIT(t.setup, 0, 3) << 6); 90 reg |= (clamp_val(t.setup, 0, 3) << 6);
91 winbond_writecfg(winbond->config, timing + 1, reg); 91 winbond_writecfg(winbond->config, timing + 1, reg);
92} 92}
93 93
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
index d27bb9a2568f..3ead02fe379e 100644
--- a/drivers/ata/sata_inic162x.c
+++ b/drivers/ata/sata_inic162x.c
@@ -10,13 +10,33 @@
10 * right. Documentation is available at initio's website but it only 10 * right. Documentation is available at initio's website but it only
11 * documents registers (not programming model). 11 * documents registers (not programming model).
12 * 12 *
 13 * - ATA disks work. 13 * This driver has an interesting history. The first version was
 14 * - Hotplug works. 14 * written from the documentation and a 2.4 IDE driver posted by a
 15 * - ATAPI read works but burning doesn't. This thing is really 15 * Taiwanese company; it didn't use any IDMA features and couldn't
 16 * peculiar about ATAPI and I couldn't figure out how ATAPI PIO and 16 * handle LBA48. The resulting driver couldn't handle LBA48 devices
 17 * ATAPI DMA WRITE should be programmed. If you've got a clue, be 17 * either, making it pretty useless.
18 * my guest. 18 *
19 * - Both STR and STD work. 19 * After a while, initio picked the driver up, renamed it to
20 * sata_initio162x, updated it to use IDMA for ATA DMA commands and
 21 * posted it on their website. It only used ATA_PROT_DMA for IDMA, and
 22 * attaching both devices and issuing IDMA and !IDMA commands
 23 * simultaneously broke it due to a PIRQ masking interaction, but it did
24 * show how to use the IDMA (ADMA + some initio specific twists)
25 * engine.
26 *
27 * Then, I picked up their changes again and here's the usable driver
28 * which uses IDMA for everything. Everything works now including
29 * LBA48, CD/DVD burning, suspend/resume and hotplug. There are some
 30 * issues though. Result TF is not reported properly, NCQ isn't
 31 * supported yet, and CD/DVD writing works with DMA-assisted PIO
32 * protocol (which, for native SATA devices, shouldn't cause any
33 * noticeable difference).
34 *
 35 * Anyway, here's finally a working driver for inic162x. Enjoy!
36 *
37 * initio: If you guys wanna improve the driver regarding result TF
38 * access and other stuff, please feel free to contact me. I'll be
39 * happy to assist.
20 */ 40 */
21 41
22#include <linux/kernel.h> 42#include <linux/kernel.h>
@@ -28,13 +48,19 @@
28#include <scsi/scsi_device.h> 48#include <scsi/scsi_device.h>
29 49
30#define DRV_NAME "sata_inic162x" 50#define DRV_NAME "sata_inic162x"
31#define DRV_VERSION "0.3" 51#define DRV_VERSION "0.4"
32 52
33enum { 53enum {
34 MMIO_BAR = 5, 54 MMIO_BAR_PCI = 5,
55 MMIO_BAR_CARDBUS = 1,
35 56
36 NR_PORTS = 2, 57 NR_PORTS = 2,
37 58
59 IDMA_CPB_TBL_SIZE = 4 * 32,
60
61 INIC_DMA_BOUNDARY = 0xffffff,
62
63 HOST_ACTRL = 0x08,
38 HOST_CTL = 0x7c, 64 HOST_CTL = 0x7c,
39 HOST_STAT = 0x7e, 65 HOST_STAT = 0x7e,
40 HOST_IRQ_STAT = 0xbc, 66 HOST_IRQ_STAT = 0xbc,
@@ -43,22 +69,37 @@ enum {
43 PORT_SIZE = 0x40, 69 PORT_SIZE = 0x40,
44 70
45 /* registers for ATA TF operation */ 71 /* registers for ATA TF operation */
46 PORT_TF = 0x00, 72 PORT_TF_DATA = 0x00,
47 PORT_ALT_STAT = 0x08, 73 PORT_TF_FEATURE = 0x01,
74 PORT_TF_NSECT = 0x02,
75 PORT_TF_LBAL = 0x03,
76 PORT_TF_LBAM = 0x04,
77 PORT_TF_LBAH = 0x05,
78 PORT_TF_DEVICE = 0x06,
79 PORT_TF_COMMAND = 0x07,
80 PORT_TF_ALT_STAT = 0x08,
48 PORT_IRQ_STAT = 0x09, 81 PORT_IRQ_STAT = 0x09,
49 PORT_IRQ_MASK = 0x0a, 82 PORT_IRQ_MASK = 0x0a,
50 PORT_PRD_CTL = 0x0b, 83 PORT_PRD_CTL = 0x0b,
51 PORT_PRD_ADDR = 0x0c, 84 PORT_PRD_ADDR = 0x0c,
52 PORT_PRD_XFERLEN = 0x10, 85 PORT_PRD_XFERLEN = 0x10,
86 PORT_CPB_CPBLAR = 0x18,
87 PORT_CPB_PTQFIFO = 0x1c,
53 88
54 /* IDMA register */ 89 /* IDMA register */
55 PORT_IDMA_CTL = 0x14, 90 PORT_IDMA_CTL = 0x14,
91 PORT_IDMA_STAT = 0x16,
92
93 PORT_RPQ_FIFO = 0x1e,
94 PORT_RPQ_CNT = 0x1f,
56 95
57 PORT_SCR = 0x20, 96 PORT_SCR = 0x20,
58 97
59 /* HOST_CTL bits */ 98 /* HOST_CTL bits */
60 HCTL_IRQOFF = (1 << 8), /* global IRQ off */ 99 HCTL_IRQOFF = (1 << 8), /* global IRQ off */
61 HCTL_PWRDWN = (1 << 13), /* power down PHYs */ 100 HCTL_FTHD0 = (1 << 10), /* fifo threshold 0 */
 101 HCTL_FTHD1 = (1 << 11), /* fifo threshold 1 */
102 HCTL_PWRDWN = (1 << 12), /* power down PHYs */
62 HCTL_SOFTRST = (1 << 13), /* global reset (no phy reset) */ 103 HCTL_SOFTRST = (1 << 13), /* global reset (no phy reset) */
63 HCTL_RPGSEL = (1 << 15), /* register page select */ 104 HCTL_RPGSEL = (1 << 15), /* register page select */
64 105
@@ -81,9 +122,7 @@ enum {
81 PIRQ_PENDING = (1 << 7), /* port IRQ pending (STAT only) */ 122 PIRQ_PENDING = (1 << 7), /* port IRQ pending (STAT only) */
82 123
83 PIRQ_ERR = PIRQ_OFFLINE | PIRQ_ONLINE | PIRQ_FATAL, 124 PIRQ_ERR = PIRQ_OFFLINE | PIRQ_ONLINE | PIRQ_FATAL,
84 125 PIRQ_MASK_DEFAULT = PIRQ_REPLY | PIRQ_ATA,
85 PIRQ_MASK_DMA_READ = PIRQ_REPLY | PIRQ_ATA,
86 PIRQ_MASK_OTHER = PIRQ_REPLY | PIRQ_COMPLETE,
87 PIRQ_MASK_FREEZE = 0xff, 126 PIRQ_MASK_FREEZE = 0xff,
88 127
89 /* PORT_PRD_CTL bits */ 128 /* PORT_PRD_CTL bits */
@@ -96,20 +135,104 @@ enum {
 96 IDMA_CTL_RST_IDMA = (1 << 5), /* reset IDMA machinery */ 135 IDMA_CTL_RST_IDMA = (1 << 5), /* reset IDMA machinery */
97 IDMA_CTL_GO = (1 << 7), /* IDMA mode go */ 136 IDMA_CTL_GO = (1 << 7), /* IDMA mode go */
98 IDMA_CTL_ATA_NIEN = (1 << 8), /* ATA IRQ disable */ 137 IDMA_CTL_ATA_NIEN = (1 << 8), /* ATA IRQ disable */
138
139 /* PORT_IDMA_STAT bits */
140 IDMA_STAT_PERR = (1 << 0), /* PCI ERROR MODE */
141 IDMA_STAT_CPBERR = (1 << 1), /* ADMA CPB error */
142 IDMA_STAT_LGCY = (1 << 3), /* ADMA legacy */
143 IDMA_STAT_UIRQ = (1 << 4), /* ADMA unsolicited irq */
144 IDMA_STAT_STPD = (1 << 5), /* ADMA stopped */
145 IDMA_STAT_PSD = (1 << 6), /* ADMA pause */
146 IDMA_STAT_DONE = (1 << 7), /* ADMA done */
147
148 IDMA_STAT_ERR = IDMA_STAT_PERR | IDMA_STAT_CPBERR,
149
 150 /* CPB Control Flags */
151 CPB_CTL_VALID = (1 << 0), /* CPB valid */
152 CPB_CTL_QUEUED = (1 << 1), /* queued command */
153 CPB_CTL_DATA = (1 << 2), /* data, rsvd in datasheet */
154 CPB_CTL_IEN = (1 << 3), /* PCI interrupt enable */
155 CPB_CTL_DEVDIR = (1 << 4), /* device direction control */
156
157 /* CPB Response Flags */
158 CPB_RESP_DONE = (1 << 0), /* ATA command complete */
159 CPB_RESP_REL = (1 << 1), /* ATA release */
160 CPB_RESP_IGNORED = (1 << 2), /* CPB ignored */
161 CPB_RESP_ATA_ERR = (1 << 3), /* ATA command error */
162 CPB_RESP_SPURIOUS = (1 << 4), /* ATA spurious interrupt error */
163 CPB_RESP_UNDERFLOW = (1 << 5), /* APRD deficiency length error */
 164 CPB_RESP_OVERFLOW = (1 << 6), /* APRD excess length error */
165 CPB_RESP_CPB_ERR = (1 << 7), /* CPB error flag */
166
167 /* PRD Control Flags */
168 PRD_DRAIN = (1 << 1), /* ignore data excess */
169 PRD_CDB = (1 << 2), /* atapi packet command pointer */
170 PRD_DIRECT_INTR = (1 << 3), /* direct interrupt */
171 PRD_DMA = (1 << 4), /* data transfer method */
172 PRD_WRITE = (1 << 5), /* data dir, rsvd in datasheet */
173 PRD_IOM = (1 << 6), /* io/memory transfer */
174 PRD_END = (1 << 7), /* APRD chain end */
99}; 175};
100 176
 177/* Command Parameter Block */
178struct inic_cpb {
179 u8 resp_flags; /* Response Flags */
180 u8 error; /* ATA Error */
181 u8 status; /* ATA Status */
182 u8 ctl_flags; /* Control Flags */
183 __le32 len; /* Total Transfer Length */
184 __le32 prd; /* First PRD pointer */
185 u8 rsvd[4];
186 /* 16 bytes */
187 u8 feature; /* ATA Feature */
188 u8 hob_feature; /* ATA Ex. Feature */
189 u8 device; /* ATA Device/Head */
190 u8 mirctl; /* Mirror Control */
191 u8 nsect; /* ATA Sector Count */
192 u8 hob_nsect; /* ATA Ex. Sector Count */
193 u8 lbal; /* ATA Sector Number */
194 u8 hob_lbal; /* ATA Ex. Sector Number */
195 u8 lbam; /* ATA Cylinder Low */
196 u8 hob_lbam; /* ATA Ex. Cylinder Low */
197 u8 lbah; /* ATA Cylinder High */
198 u8 hob_lbah; /* ATA Ex. Cylinder High */
199 u8 command; /* ATA Command */
200 u8 ctl; /* ATA Control */
201 u8 slave_error; /* Slave ATA Error */
202 u8 slave_status; /* Slave ATA Status */
203 /* 32 bytes */
204} __packed;
205
206/* Physical Region Descriptor */
207struct inic_prd {
208 __le32 mad; /* Physical Memory Address */
209 __le16 len; /* Transfer Length */
210 u8 rsvd;
211 u8 flags; /* Control Flags */
212} __packed;
213
214struct inic_pkt {
215 struct inic_cpb cpb;
216 struct inic_prd prd[LIBATA_MAX_PRD + 1]; /* + 1 for cdb */
217 u8 cdb[ATAPI_CDB_LEN];
218} __packed;
219
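The offset comments in struct inic_cpb above promise a fixed 16/32-byte layout that the IDMA engine depends on. Compile-time checks along these lines would catch packing drift; a sketch, not part of the patch:

	static inline void inic_layout_checks(void)
	{
		/* DMA-visible structures must match the hardware layout */
		BUILD_BUG_ON(sizeof(struct inic_cpb) != 32);
		BUILD_BUG_ON(offsetof(struct inic_cpb, feature) != 16);
		BUILD_BUG_ON(sizeof(struct inic_prd) != 8);
	}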
101struct inic_host_priv { 220struct inic_host_priv {
102 u16 cached_hctl; 221 void __iomem *mmio_base;
222 u16 cached_hctl;
103}; 223};
104 224
105struct inic_port_priv { 225struct inic_port_priv {
106 u8 dfl_prdctl; 226 struct inic_pkt *pkt;
107 u8 cached_prdctl; 227 dma_addr_t pkt_dma;
108 u8 cached_pirq_mask; 228 u32 *cpb_tbl;
229 dma_addr_t cpb_tbl_dma;
109}; 230};
110 231
111static struct scsi_host_template inic_sht = { 232static struct scsi_host_template inic_sht = {
112 ATA_BMDMA_SHT(DRV_NAME), 233 ATA_BASE_SHT(DRV_NAME),
234 .sg_tablesize = LIBATA_MAX_PRD, /* maybe it can be larger? */
235 .dma_boundary = INIC_DMA_BOUNDARY,
113}; 236};
114 237
115static const int scr_map[] = { 238static const int scr_map[] = {
@@ -120,54 +243,34 @@ static const int scr_map[] = {
120 243
121static void __iomem *inic_port_base(struct ata_port *ap) 244static void __iomem *inic_port_base(struct ata_port *ap)
122{ 245{
123 return ap->host->iomap[MMIO_BAR] + ap->port_no * PORT_SIZE; 246 struct inic_host_priv *hpriv = ap->host->private_data;
124}
125
126static void __inic_set_pirq_mask(struct ata_port *ap, u8 mask)
127{
128 void __iomem *port_base = inic_port_base(ap);
129 struct inic_port_priv *pp = ap->private_data;
130 247
131 writeb(mask, port_base + PORT_IRQ_MASK); 248 return hpriv->mmio_base + ap->port_no * PORT_SIZE;
132 pp->cached_pirq_mask = mask;
133}
134
135static void inic_set_pirq_mask(struct ata_port *ap, u8 mask)
136{
137 struct inic_port_priv *pp = ap->private_data;
138
139 if (pp->cached_pirq_mask != mask)
140 __inic_set_pirq_mask(ap, mask);
141} 249}
142 250
143static void inic_reset_port(void __iomem *port_base) 251static void inic_reset_port(void __iomem *port_base)
144{ 252{
145 void __iomem *idma_ctl = port_base + PORT_IDMA_CTL; 253 void __iomem *idma_ctl = port_base + PORT_IDMA_CTL;
146 u16 ctl;
147 254
148 ctl = readw(idma_ctl); 255 /* stop IDMA engine */
149 ctl &= ~(IDMA_CTL_RST_IDMA | IDMA_CTL_ATA_NIEN | IDMA_CTL_GO); 256 readw(idma_ctl); /* flush */
257 msleep(1);
150 258
151 /* mask IRQ and assert reset */ 259 /* mask IRQ and assert reset */
152 writew(ctl | IDMA_CTL_RST_IDMA | IDMA_CTL_ATA_NIEN, idma_ctl); 260 writew(IDMA_CTL_RST_IDMA, idma_ctl);
153 readw(idma_ctl); /* flush */ 261 readw(idma_ctl); /* flush */
154
155 /* give it some time */
156 msleep(1); 262 msleep(1);
157 263
158 /* release reset */ 264 /* release reset */
159 writew(ctl | IDMA_CTL_ATA_NIEN, idma_ctl); 265 writew(0, idma_ctl);
160 266
161 /* clear irq */ 267 /* clear irq */
162 writeb(0xff, port_base + PORT_IRQ_STAT); 268 writeb(0xff, port_base + PORT_IRQ_STAT);
163
164 /* reenable ATA IRQ, turn off IDMA mode */
165 writew(ctl, idma_ctl);
166} 269}
167 270
168static int inic_scr_read(struct ata_port *ap, unsigned sc_reg, u32 *val) 271static int inic_scr_read(struct ata_port *ap, unsigned sc_reg, u32 *val)
169{ 272{
170 void __iomem *scr_addr = ap->ioaddr.scr_addr; 273 void __iomem *scr_addr = inic_port_base(ap) + PORT_SCR;
171 void __iomem *addr; 274 void __iomem *addr;
172 275
173 if (unlikely(sc_reg >= ARRAY_SIZE(scr_map))) 276 if (unlikely(sc_reg >= ARRAY_SIZE(scr_map)))
@@ -184,120 +287,126 @@ static int inic_scr_read(struct ata_port *ap, unsigned sc_reg, u32 *val)
184 287
185static int inic_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val) 288static int inic_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val)
186{ 289{
187 void __iomem *scr_addr = ap->ioaddr.scr_addr; 290 void __iomem *scr_addr = inic_port_base(ap) + PORT_SCR;
188 void __iomem *addr;
189 291
190 if (unlikely(sc_reg >= ARRAY_SIZE(scr_map))) 292 if (unlikely(sc_reg >= ARRAY_SIZE(scr_map)))
191 return -EINVAL; 293 return -EINVAL;
192 294
193 addr = scr_addr + scr_map[sc_reg] * 4;
194 writel(val, scr_addr + scr_map[sc_reg] * 4); 295 writel(val, scr_addr + scr_map[sc_reg] * 4);
195 return 0; 296 return 0;
196} 297}
197 298
198/* 299static void inic_stop_idma(struct ata_port *ap)
199 * In TF mode, inic162x is very similar to SFF device. TF registers
200 * function the same. DMA engine behaves similary using the same PRD
201 * format as BMDMA but different command register, interrupt and event
202 * notification methods are used. The following inic_bmdma_*()
203 * functions do the impedance matching.
204 */
205static void inic_bmdma_setup(struct ata_queued_cmd *qc)
206{ 300{
207 struct ata_port *ap = qc->ap;
208 struct inic_port_priv *pp = ap->private_data;
209 void __iomem *port_base = inic_port_base(ap); 301 void __iomem *port_base = inic_port_base(ap);
210 int rw = qc->tf.flags & ATA_TFLAG_WRITE;
211
212 /* make sure device sees PRD table writes */
213 wmb();
214
215 /* load transfer length */
216 writel(qc->nbytes, port_base + PORT_PRD_XFERLEN);
217
218 /* turn on DMA and specify data direction */
219 pp->cached_prdctl = pp->dfl_prdctl | PRD_CTL_DMAEN;
220 if (!rw)
221 pp->cached_prdctl |= PRD_CTL_WR;
222 writeb(pp->cached_prdctl, port_base + PORT_PRD_CTL);
223 302
224 /* issue r/w command */ 303 readb(port_base + PORT_RPQ_FIFO);
225 ap->ops->sff_exec_command(ap, &qc->tf); 304 readb(port_base + PORT_RPQ_CNT);
305 writew(0, port_base + PORT_IDMA_CTL);
226} 306}
227 307
228static void inic_bmdma_start(struct ata_queued_cmd *qc) 308static void inic_host_err_intr(struct ata_port *ap, u8 irq_stat, u16 idma_stat)
229{ 309{
230 struct ata_port *ap = qc->ap; 310 struct ata_eh_info *ehi = &ap->link.eh_info;
231 struct inic_port_priv *pp = ap->private_data; 311 struct inic_port_priv *pp = ap->private_data;
232 void __iomem *port_base = inic_port_base(ap); 312 struct inic_cpb *cpb = &pp->pkt->cpb;
313 bool freeze = false;
233 314
234 /* start host DMA transaction */ 315 ata_ehi_clear_desc(ehi);
235 pp->cached_prdctl |= PRD_CTL_START; 316 ata_ehi_push_desc(ehi, "irq_stat=0x%x idma_stat=0x%x",
236 writeb(pp->cached_prdctl, port_base + PORT_PRD_CTL); 317 irq_stat, idma_stat);
237}
238 318
239static void inic_bmdma_stop(struct ata_queued_cmd *qc) 319 inic_stop_idma(ap);
240{
241 struct ata_port *ap = qc->ap;
242 struct inic_port_priv *pp = ap->private_data;
243 void __iomem *port_base = inic_port_base(ap);
244 320
245 /* stop DMA engine */ 321 if (irq_stat & (PIRQ_OFFLINE | PIRQ_ONLINE)) {
246 writeb(pp->dfl_prdctl, port_base + PORT_PRD_CTL); 322 ata_ehi_push_desc(ehi, "hotplug");
247} 323 ata_ehi_hotplugged(ehi);
324 freeze = true;
325 }
248 326
249static u8 inic_bmdma_status(struct ata_port *ap) 327 if (idma_stat & IDMA_STAT_PERR) {
250{ 328 ata_ehi_push_desc(ehi, "PCI error");
251 /* event is already verified by the interrupt handler */ 329 freeze = true;
252 return ATA_DMA_INTR; 330 }
331
332 if (idma_stat & IDMA_STAT_CPBERR) {
333 ata_ehi_push_desc(ehi, "CPB error");
334
335 if (cpb->resp_flags & CPB_RESP_IGNORED) {
336 __ata_ehi_push_desc(ehi, " ignored");
337 ehi->err_mask |= AC_ERR_INVALID;
338 freeze = true;
339 }
340
341 if (cpb->resp_flags & CPB_RESP_ATA_ERR)
342 ehi->err_mask |= AC_ERR_DEV;
343
344 if (cpb->resp_flags & CPB_RESP_SPURIOUS) {
345 __ata_ehi_push_desc(ehi, " spurious-intr");
346 ehi->err_mask |= AC_ERR_HSM;
347 freeze = true;
348 }
349
350 if (cpb->resp_flags &
351 (CPB_RESP_UNDERFLOW | CPB_RESP_OVERFLOW)) {
352 __ata_ehi_push_desc(ehi, " data-over/underflow");
353 ehi->err_mask |= AC_ERR_HSM;
354 freeze = true;
355 }
356 }
357
358 if (freeze)
359 ata_port_freeze(ap);
360 else
361 ata_port_abort(ap);
253} 362}
254 363
255static void inic_host_intr(struct ata_port *ap) 364static void inic_host_intr(struct ata_port *ap)
256{ 365{
257 void __iomem *port_base = inic_port_base(ap); 366 void __iomem *port_base = inic_port_base(ap);
258 struct ata_eh_info *ehi = &ap->link.eh_info; 367 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
259 u8 irq_stat; 368 u8 irq_stat;
369 u16 idma_stat;
260 370
261 /* fetch and clear irq */ 371 /* read and clear IRQ status */
262 irq_stat = readb(port_base + PORT_IRQ_STAT); 372 irq_stat = readb(port_base + PORT_IRQ_STAT);
263 writeb(irq_stat, port_base + PORT_IRQ_STAT); 373 writeb(irq_stat, port_base + PORT_IRQ_STAT);
374 idma_stat = readw(port_base + PORT_IDMA_STAT);
264 375
265 if (likely(!(irq_stat & PIRQ_ERR))) { 376 if (unlikely((irq_stat & PIRQ_ERR) || (idma_stat & IDMA_STAT_ERR)))
266 struct ata_queued_cmd *qc = 377 inic_host_err_intr(ap, irq_stat, idma_stat);
267 ata_qc_from_tag(ap, ap->link.active_tag);
268 378
269 if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) { 379 if (unlikely(!qc))
270 ap->ops->sff_check_status(ap); /* clear ATA interrupt */ 380 goto spurious;
271 return;
272 }
273 381
274 if (likely(ata_sff_host_intr(ap, qc))) 382 if (likely(idma_stat & IDMA_STAT_DONE)) {
275 return; 383 inic_stop_idma(ap);
276 384
277 ap->ops->sff_check_status(ap); /* clear ATA interrupt */ 385 /* Depending on circumstances, device error
278 ata_port_printk(ap, KERN_WARNING, "unhandled " 386 * isn't reported by IDMA, check it explicitly.
279 "interrupt, irq_stat=%x\n", irq_stat); 387 */
388 if (unlikely(readb(port_base + PORT_TF_COMMAND) &
389 (ATA_DF | ATA_ERR)))
390 qc->err_mask |= AC_ERR_DEV;
391
392 ata_qc_complete(qc);
280 return; 393 return;
281 } 394 }
282 395
283 /* error */ 396 spurious:
284 ata_ehi_push_desc(ehi, "irq_stat=0x%x", irq_stat); 397 ata_port_printk(ap, KERN_WARNING, "unhandled interrupt: "
285 398 "cmd=0x%x irq_stat=0x%x idma_stat=0x%x\n",
286 if (irq_stat & (PIRQ_OFFLINE | PIRQ_ONLINE)) { 399 qc ? qc->tf.command : 0xff, irq_stat, idma_stat);
287 ata_ehi_hotplugged(ehi);
288 ata_port_freeze(ap);
289 } else
290 ata_port_abort(ap);
291} 400}
292 401
293static irqreturn_t inic_interrupt(int irq, void *dev_instance) 402static irqreturn_t inic_interrupt(int irq, void *dev_instance)
294{ 403{
295 struct ata_host *host = dev_instance; 404 struct ata_host *host = dev_instance;
296 void __iomem *mmio_base = host->iomap[MMIO_BAR]; 405 struct inic_host_priv *hpriv = host->private_data;
297 u16 host_irq_stat; 406 u16 host_irq_stat;
 298 int i, handled = 0; 407 int i, handled = 0;
299 408
300 host_irq_stat = readw(mmio_base + HOST_IRQ_STAT); 409 host_irq_stat = readw(hpriv->mmio_base + HOST_IRQ_STAT);
301 410
302 if (unlikely(!(host_irq_stat & HIRQ_GLOBAL))) 411 if (unlikely(!(host_irq_stat & HIRQ_GLOBAL)))
303 goto out; 412 goto out;
@@ -327,60 +436,173 @@ static irqreturn_t inic_interrupt(int irq, void *dev_instance)
327 return IRQ_RETVAL(handled); 436 return IRQ_RETVAL(handled);
328} 437}
329 438
439static int inic_check_atapi_dma(struct ata_queued_cmd *qc)
440{
441 /* For some reason ATAPI_PROT_DMA doesn't work for some
442 * commands including writes and other misc ops. Use PIO
443 * protocol instead, which BTW is driven by the DMA engine
444 * anyway, so it shouldn't make much difference for native
445 * SATA devices.
446 */
447 if (atapi_cmd_type(qc->cdb[0]) == READ)
448 return 0;
449 return 1;
450}
451
452static void inic_fill_sg(struct inic_prd *prd, struct ata_queued_cmd *qc)
453{
454 struct scatterlist *sg;
455 unsigned int si;
456 u8 flags = 0;
457
458 if (qc->tf.flags & ATA_TFLAG_WRITE)
459 flags |= PRD_WRITE;
460
461 if (ata_is_dma(qc->tf.protocol))
462 flags |= PRD_DMA;
463
464 for_each_sg(qc->sg, sg, qc->n_elem, si) {
465 prd->mad = cpu_to_le32(sg_dma_address(sg));
466 prd->len = cpu_to_le16(sg_dma_len(sg));
467 prd->flags = flags;
468 prd++;
469 }
470
471 WARN_ON(!si);
472 prd[-1].flags |= PRD_END;
473}
474
475static void inic_qc_prep(struct ata_queued_cmd *qc)
476{
477 struct inic_port_priv *pp = qc->ap->private_data;
478 struct inic_pkt *pkt = pp->pkt;
479 struct inic_cpb *cpb = &pkt->cpb;
480 struct inic_prd *prd = pkt->prd;
481 bool is_atapi = ata_is_atapi(qc->tf.protocol);
482 bool is_data = ata_is_data(qc->tf.protocol);
483 unsigned int cdb_len = 0;
484
485 VPRINTK("ENTER\n");
486
487 if (is_atapi)
488 cdb_len = qc->dev->cdb_len;
489
490 /* prepare packet, based on initio driver */
491 memset(pkt, 0, sizeof(struct inic_pkt));
492
493 cpb->ctl_flags = CPB_CTL_VALID | CPB_CTL_IEN;
494 if (is_atapi || is_data)
495 cpb->ctl_flags |= CPB_CTL_DATA;
496
497 cpb->len = cpu_to_le32(qc->nbytes + cdb_len);
498 cpb->prd = cpu_to_le32(pp->pkt_dma + offsetof(struct inic_pkt, prd));
499
500 cpb->device = qc->tf.device;
501 cpb->feature = qc->tf.feature;
502 cpb->nsect = qc->tf.nsect;
503 cpb->lbal = qc->tf.lbal;
504 cpb->lbam = qc->tf.lbam;
505 cpb->lbah = qc->tf.lbah;
506
507 if (qc->tf.flags & ATA_TFLAG_LBA48) {
508 cpb->hob_feature = qc->tf.hob_feature;
509 cpb->hob_nsect = qc->tf.hob_nsect;
510 cpb->hob_lbal = qc->tf.hob_lbal;
511 cpb->hob_lbam = qc->tf.hob_lbam;
512 cpb->hob_lbah = qc->tf.hob_lbah;
513 }
514
515 cpb->command = qc->tf.command;
516 /* don't load ctl - dunno why. it's like that in the initio driver */
517
518 /* setup PRD for CDB */
519 if (is_atapi) {
520 memcpy(pkt->cdb, qc->cdb, ATAPI_CDB_LEN);
521 prd->mad = cpu_to_le32(pp->pkt_dma +
522 offsetof(struct inic_pkt, cdb));
523 prd->len = cpu_to_le16(cdb_len);
524 prd->flags = PRD_CDB | PRD_WRITE;
525 if (!is_data)
526 prd->flags |= PRD_END;
527 prd++;
528 }
529
530 /* setup sg table */
531 if (is_data)
532 inic_fill_sg(prd, qc);
533
534 pp->cpb_tbl[0] = pp->pkt_dma;
535}
536
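How the pieces fit at issue time, as read from the code above and below (the CPB lookup table indexes queue slots; only slot 0 is ever used because NCQ is not supported yet):

	/*
	 * inic_qc_prep():  build the CPB and PRD chain in pp->pkt and
	 *                  point cpb_tbl[0] at its bus address;
	 * inic_qc_issue(): start the engine (IDMA_CTL_GO) and push
	 *                  slot number 0 into the PTQ FIFO.
	 */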
330static unsigned int inic_qc_issue(struct ata_queued_cmd *qc) 537static unsigned int inic_qc_issue(struct ata_queued_cmd *qc)
331{ 538{
332 struct ata_port *ap = qc->ap; 539 struct ata_port *ap = qc->ap;
540 void __iomem *port_base = inic_port_base(ap);
333 541
334 /* ATA IRQ doesn't wait for DMA transfer completion and vice 542 /* fire up the ADMA engine */
335 * versa. Mask IRQ selectively to detect command completion. 543 writew(HCTL_FTHD0, port_base + HOST_CTL);
336 * Without it, ATA DMA read command can cause data corruption. 544 writew(IDMA_CTL_GO, port_base + PORT_IDMA_CTL);
337 * 545 writeb(0, port_base + PORT_CPB_PTQFIFO);
338 * Something similar might be needed for ATAPI writes. I 546
339 * tried a lot of combinations but couldn't find the solution. 547 return 0;
340 */ 548}
341 if (qc->tf.protocol == ATA_PROT_DMA && 549
342 !(qc->tf.flags & ATA_TFLAG_WRITE)) 550static void inic_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
343 inic_set_pirq_mask(ap, PIRQ_MASK_DMA_READ); 551{
344 else 552 void __iomem *port_base = inic_port_base(ap);
345 inic_set_pirq_mask(ap, PIRQ_MASK_OTHER); 553
554 tf->feature = readb(port_base + PORT_TF_FEATURE);
555 tf->nsect = readb(port_base + PORT_TF_NSECT);
556 tf->lbal = readb(port_base + PORT_TF_LBAL);
557 tf->lbam = readb(port_base + PORT_TF_LBAM);
558 tf->lbah = readb(port_base + PORT_TF_LBAH);
559 tf->device = readb(port_base + PORT_TF_DEVICE);
560 tf->command = readb(port_base + PORT_TF_COMMAND);
561}
346 562
347 /* Issuing a command to yet uninitialized port locks up the 563static bool inic_qc_fill_rtf(struct ata_queued_cmd *qc)
348 * controller. Most of the time, this happens for the first 564{
349 * command after reset which are ATA and ATAPI IDENTIFYs. 565 struct ata_taskfile *rtf = &qc->result_tf;
350 * Fast fail if stat is 0x7f or 0xff for those commands. 566 struct ata_taskfile tf;
567
568 /* FIXME: Except for status and error, result TF access
569 * doesn't work. I tried reading from BAR0/2, CPB and BAR5.
570 * None works regardless of which command interface is used.
571 * For now return true iff status indicates device error.
 572 * This means that we're reporting a bogus sector for RW
573 * failures. Eeekk....
351 */ 574 */
352 if (unlikely(qc->tf.command == ATA_CMD_ID_ATA || 575 inic_tf_read(qc->ap, &tf);
353 qc->tf.command == ATA_CMD_ID_ATAPI)) {
354 u8 stat = ap->ops->sff_check_status(ap);
355 if (stat == 0x7f || stat == 0xff)
356 return AC_ERR_HSM;
357 }
358 576
359 return ata_sff_qc_issue(qc); 577 if (!(tf.command & ATA_ERR))
578 return false;
579
580 rtf->command = tf.command;
581 rtf->feature = tf.feature;
582 return true;
360} 583}
361 584
362static void inic_freeze(struct ata_port *ap) 585static void inic_freeze(struct ata_port *ap)
363{ 586{
364 void __iomem *port_base = inic_port_base(ap); 587 void __iomem *port_base = inic_port_base(ap);
365 588
366 __inic_set_pirq_mask(ap, PIRQ_MASK_FREEZE); 589 writeb(PIRQ_MASK_FREEZE, port_base + PORT_IRQ_MASK);
367
368 ap->ops->sff_check_status(ap);
369 writeb(0xff, port_base + PORT_IRQ_STAT); 590 writeb(0xff, port_base + PORT_IRQ_STAT);
370
371 readb(port_base + PORT_IRQ_STAT); /* flush */
372} 591}
373 592
374static void inic_thaw(struct ata_port *ap) 593static void inic_thaw(struct ata_port *ap)
375{ 594{
376 void __iomem *port_base = inic_port_base(ap); 595 void __iomem *port_base = inic_port_base(ap);
377 596
378 ap->ops->sff_check_status(ap);
379 writeb(0xff, port_base + PORT_IRQ_STAT); 597 writeb(0xff, port_base + PORT_IRQ_STAT);
598 writeb(PIRQ_MASK_DEFAULT, port_base + PORT_IRQ_MASK);
599}
380 600
381 __inic_set_pirq_mask(ap, PIRQ_MASK_OTHER); 601static int inic_check_ready(struct ata_link *link)
602{
603 void __iomem *port_base = inic_port_base(link->ap);
382 604
383 readb(port_base + PORT_IRQ_STAT); /* flush */ 605 return ata_check_ready(readb(port_base + PORT_TF_COMMAND));
384} 606}
385 607
386/* 608/*
@@ -394,17 +616,15 @@ static int inic_hardreset(struct ata_link *link, unsigned int *class,
394 void __iomem *port_base = inic_port_base(ap); 616 void __iomem *port_base = inic_port_base(ap);
395 void __iomem *idma_ctl = port_base + PORT_IDMA_CTL; 617 void __iomem *idma_ctl = port_base + PORT_IDMA_CTL;
396 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context); 618 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
397 u16 val;
398 int rc; 619 int rc;
399 620
400 /* hammer it into sane state */ 621 /* hammer it into sane state */
401 inic_reset_port(port_base); 622 inic_reset_port(port_base);
402 623
403 val = readw(idma_ctl); 624 writew(IDMA_CTL_RST_ATA, idma_ctl);
404 writew(val | IDMA_CTL_RST_ATA, idma_ctl);
405 readw(idma_ctl); /* flush */ 625 readw(idma_ctl); /* flush */
406 msleep(1); 626 msleep(1);
407 writew(val & ~IDMA_CTL_RST_ATA, idma_ctl); 627 writew(0, idma_ctl);
408 628
409 rc = sata_link_resume(link, timing, deadline); 629 rc = sata_link_resume(link, timing, deadline);
410 if (rc) { 630 if (rc) {
@@ -418,7 +638,7 @@ static int inic_hardreset(struct ata_link *link, unsigned int *class,
418 struct ata_taskfile tf; 638 struct ata_taskfile tf;
419 639
420 /* wait for link to become ready */ 640 /* wait for link to become ready */
421 rc = ata_sff_wait_after_reset(link, 1, deadline); 641 rc = ata_wait_after_reset(link, deadline, inic_check_ready);
422 /* link occupied, -ENODEV too is an error */ 642 /* link occupied, -ENODEV too is an error */
423 if (rc) { 643 if (rc) {
424 ata_link_printk(link, KERN_WARNING, "device not ready " 644 ata_link_printk(link, KERN_WARNING, "device not ready "
@@ -426,7 +646,7 @@ static int inic_hardreset(struct ata_link *link, unsigned int *class,
426 return rc; 646 return rc;
427 } 647 }
428 648
429 ata_sff_tf_read(ap, &tf); 649 inic_tf_read(ap, &tf);
430 *class = ata_dev_classify(&tf); 650 *class = ata_dev_classify(&tf);
431 } 651 }
432 652
@@ -436,18 +656,8 @@ static int inic_hardreset(struct ata_link *link, unsigned int *class,
436static void inic_error_handler(struct ata_port *ap) 656static void inic_error_handler(struct ata_port *ap)
437{ 657{
438 void __iomem *port_base = inic_port_base(ap); 658 void __iomem *port_base = inic_port_base(ap);
439 struct inic_port_priv *pp = ap->private_data;
440 unsigned long flags;
441 659
442 /* reset PIO HSM and stop DMA engine */
443 inic_reset_port(port_base); 660 inic_reset_port(port_base);
444
445 spin_lock_irqsave(ap->lock, flags);
446 ap->hsm_task_state = HSM_ST_IDLE;
447 writeb(pp->dfl_prdctl, port_base + PORT_PRD_CTL);
448 spin_unlock_irqrestore(ap->lock, flags);
449
450 /* PIO and DMA engines have been stopped, perform recovery */
451 ata_std_error_handler(ap); 661 ata_std_error_handler(ap);
452} 662}
453 663
@@ -458,26 +668,18 @@ static void inic_post_internal_cmd(struct ata_queued_cmd *qc)
458 inic_reset_port(inic_port_base(qc->ap)); 668 inic_reset_port(inic_port_base(qc->ap));
459} 669}
460 670
461static void inic_dev_config(struct ata_device *dev)
462{
463 /* inic can only handle upto LBA28 max sectors */
464 if (dev->max_sectors > ATA_MAX_SECTORS)
465 dev->max_sectors = ATA_MAX_SECTORS;
466
467 if (dev->n_sectors >= 1 << 28) {
468 ata_dev_printk(dev, KERN_ERR,
469 "ERROR: This driver doesn't support LBA48 yet and may cause\n"
470 " data corruption on such devices. Disabling.\n");
471 ata_dev_disable(dev);
472 }
473}
474
475static void init_port(struct ata_port *ap) 671static void init_port(struct ata_port *ap)
476{ 672{
477 void __iomem *port_base = inic_port_base(ap); 673 void __iomem *port_base = inic_port_base(ap);
674 struct inic_port_priv *pp = ap->private_data;
478 675
479 /* Setup PRD address */ 676 /* clear packet and CPB table */
677 memset(pp->pkt, 0, sizeof(struct inic_pkt));
678 memset(pp->cpb_tbl, 0, IDMA_CPB_TBL_SIZE);
679
680 /* setup PRD and CPB lookup table addresses */
480 writel(ap->prd_dma, port_base + PORT_PRD_ADDR); 681 writel(ap->prd_dma, port_base + PORT_PRD_ADDR);
682 writel(pp->cpb_tbl_dma, port_base + PORT_CPB_CPBLAR);
481} 683}
482 684
483static int inic_port_resume(struct ata_port *ap) 685static int inic_port_resume(struct ata_port *ap)
@@ -488,28 +690,30 @@ static int inic_port_resume(struct ata_port *ap)
488 690
489static int inic_port_start(struct ata_port *ap) 691static int inic_port_start(struct ata_port *ap)
490{ 692{
491 void __iomem *port_base = inic_port_base(ap); 693 struct device *dev = ap->host->dev;
492 struct inic_port_priv *pp; 694 struct inic_port_priv *pp;
493 u8 tmp;
494 int rc; 695 int rc;
495 696
496 /* alloc and initialize private data */ 697 /* alloc and initialize private data */
497 pp = devm_kzalloc(ap->host->dev, sizeof(*pp), GFP_KERNEL); 698 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
498 if (!pp) 699 if (!pp)
499 return -ENOMEM; 700 return -ENOMEM;
500 ap->private_data = pp; 701 ap->private_data = pp;
501 702
502 /* default PRD_CTL value, DMAEN, WR and START off */
503 tmp = readb(port_base + PORT_PRD_CTL);
504 tmp &= ~(PRD_CTL_DMAEN | PRD_CTL_WR | PRD_CTL_START);
505 pp->dfl_prdctl = tmp;
506
507 /* Alloc resources */ 703 /* Alloc resources */
508 rc = ata_port_start(ap); 704 rc = ata_port_start(ap);
509 if (rc) { 705 if (rc)
510 kfree(pp);
511 return rc; 706 return rc;
512 } 707
708 pp->pkt = dmam_alloc_coherent(dev, sizeof(struct inic_pkt),
709 &pp->pkt_dma, GFP_KERNEL);
710 if (!pp->pkt)
711 return -ENOMEM;
712
713 pp->cpb_tbl = dmam_alloc_coherent(dev, IDMA_CPB_TBL_SIZE,
714 &pp->cpb_tbl_dma, GFP_KERNEL);
715 if (!pp->cpb_tbl)
716 return -ENOMEM;
513 717
514 init_port(ap); 718 init_port(ap);
515 719
@@ -517,21 +721,18 @@ static int inic_port_start(struct ata_port *ap)
517} 721}
518 722
519static struct ata_port_operations inic_port_ops = { 723static struct ata_port_operations inic_port_ops = {
520 .inherits = &ata_sff_port_ops, 724 .inherits = &sata_port_ops,
521 725
522 .bmdma_setup = inic_bmdma_setup, 726 .check_atapi_dma = inic_check_atapi_dma,
523 .bmdma_start = inic_bmdma_start, 727 .qc_prep = inic_qc_prep,
524 .bmdma_stop = inic_bmdma_stop,
525 .bmdma_status = inic_bmdma_status,
526 .qc_issue = inic_qc_issue, 728 .qc_issue = inic_qc_issue,
729 .qc_fill_rtf = inic_qc_fill_rtf,
527 730
528 .freeze = inic_freeze, 731 .freeze = inic_freeze,
529 .thaw = inic_thaw, 732 .thaw = inic_thaw,
530 .softreset = ATA_OP_NULL, /* softreset is broken */
531 .hardreset = inic_hardreset, 733 .hardreset = inic_hardreset,
532 .error_handler = inic_error_handler, 734 .error_handler = inic_error_handler,
533 .post_internal_cmd = inic_post_internal_cmd, 735 .post_internal_cmd = inic_post_internal_cmd,
534 .dev_config = inic_dev_config,
535 736
536 .scr_read = inic_scr_read, 737 .scr_read = inic_scr_read,
537 .scr_write = inic_scr_write, 738 .scr_write = inic_scr_write,
@@ -541,12 +742,6 @@ static struct ata_port_operations inic_port_ops = {
541}; 742};
542 743
543static struct ata_port_info inic_port_info = { 744static struct ata_port_info inic_port_info = {
544 /* For some reason, ATAPI_PROT_PIO is broken on this
 545 * controller, and no, PIO_POLLING doesn't fix it. It somehow
546 * manages to report the wrong ireason and ignoring ireason
 547 * results in a machine lockup. Tell libata to always prefer
548 * DMA.
549 */
550 .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA, 745 .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
551 .pio_mask = 0x1f, /* pio0-4 */ 746 .pio_mask = 0x1f, /* pio0-4 */
552 .mwdma_mask = 0x07, /* mwdma0-2 */ 747 .mwdma_mask = 0x07, /* mwdma0-2 */
@@ -599,7 +794,6 @@ static int inic_pci_device_resume(struct pci_dev *pdev)
599{ 794{
600 struct ata_host *host = dev_get_drvdata(&pdev->dev); 795 struct ata_host *host = dev_get_drvdata(&pdev->dev);
601 struct inic_host_priv *hpriv = host->private_data; 796 struct inic_host_priv *hpriv = host->private_data;
602 void __iomem *mmio_base = host->iomap[MMIO_BAR];
603 int rc; 797 int rc;
604 798
605 rc = ata_pci_device_do_resume(pdev); 799 rc = ata_pci_device_do_resume(pdev);
@@ -607,7 +801,7 @@ static int inic_pci_device_resume(struct pci_dev *pdev)
607 return rc; 801 return rc;
608 802
609 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) { 803 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
610 rc = init_controller(mmio_base, hpriv->cached_hctl); 804 rc = init_controller(hpriv->mmio_base, hpriv->cached_hctl);
611 if (rc) 805 if (rc)
612 return rc; 806 return rc;
613 } 807 }
@@ -625,6 +819,7 @@ static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
625 struct ata_host *host; 819 struct ata_host *host;
626 struct inic_host_priv *hpriv; 820 struct inic_host_priv *hpriv;
627 void __iomem * const *iomap; 821 void __iomem * const *iomap;
822 int mmio_bar;
628 int i, rc; 823 int i, rc;
629 824
630 if (!printed_version++) 825 if (!printed_version++)
@@ -638,38 +833,31 @@ static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
638 833
639 host->private_data = hpriv; 834 host->private_data = hpriv;
640 835
641 /* acquire resources and fill host */ 836 /* Acquire resources and fill host. Note that PCI and cardbus
837 * use different BARs.
838 */
642 rc = pcim_enable_device(pdev); 839 rc = pcim_enable_device(pdev);
643 if (rc) 840 if (rc)
644 return rc; 841 return rc;
645 842
646 rc = pcim_iomap_regions(pdev, 0x3f, DRV_NAME); 843 if (pci_resource_flags(pdev, MMIO_BAR_PCI) & IORESOURCE_MEM)
844 mmio_bar = MMIO_BAR_PCI;
845 else
846 mmio_bar = MMIO_BAR_CARDBUS;
847
848 rc = pcim_iomap_regions(pdev, 1 << mmio_bar, DRV_NAME);
647 if (rc) 849 if (rc)
648 return rc; 850 return rc;
649 host->iomap = iomap = pcim_iomap_table(pdev); 851 host->iomap = iomap = pcim_iomap_table(pdev);
852 hpriv->mmio_base = iomap[mmio_bar];
853 hpriv->cached_hctl = readw(hpriv->mmio_base + HOST_CTL);
650 854
651 for (i = 0; i < NR_PORTS; i++) { 855 for (i = 0; i < NR_PORTS; i++) {
652 struct ata_port *ap = host->ports[i]; 856 struct ata_port *ap = host->ports[i];
653 struct ata_ioports *port = &ap->ioaddr;
654 unsigned int offset = i * PORT_SIZE;
655
656 port->cmd_addr = iomap[2 * i];
657 port->altstatus_addr =
658 port->ctl_addr = (void __iomem *)
659 ((unsigned long)iomap[2 * i + 1] | ATA_PCI_CTL_OFS);
660 port->scr_addr = iomap[MMIO_BAR] + offset + PORT_SCR;
661
662 ata_sff_std_ports(port);
663
664 ata_port_pbar_desc(ap, MMIO_BAR, -1, "mmio");
665 ata_port_pbar_desc(ap, MMIO_BAR, offset, "port");
666 ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx",
667 (unsigned long long)pci_resource_start(pdev, 2 * i),
668 (unsigned long long)pci_resource_start(pdev, (2 * i + 1)) |
669 ATA_PCI_CTL_OFS);
670 }
671 857
672 hpriv->cached_hctl = readw(iomap[MMIO_BAR] + HOST_CTL); 858 ata_port_pbar_desc(ap, mmio_bar, -1, "mmio");
859 ata_port_pbar_desc(ap, mmio_bar, i * PORT_SIZE, "port");
860 }
673 861
 674 /* Set dma_mask. This device doesn't support 64-bit addressing. */ 862
675 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK); 863 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
@@ -698,7 +886,7 @@ static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
698 return rc; 886 return rc;
699 } 887 }
700 888
701 rc = init_controller(iomap[MMIO_BAR], hpriv->cached_hctl); 889 rc = init_controller(hpriv->mmio_base, hpriv->cached_hctl);
702 if (rc) { 890 if (rc) {
703 dev_printk(KERN_ERR, &pdev->dev, 891 dev_printk(KERN_ERR, &pdev->dev,
704 "failed to initialize controller\n"); 892 "failed to initialize controller\n");
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 842b1a15b78c..fb81f0c7a8c2 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -65,13 +65,14 @@
65#include <linux/platform_device.h> 65#include <linux/platform_device.h>
66#include <linux/ata_platform.h> 66#include <linux/ata_platform.h>
67#include <linux/mbus.h> 67#include <linux/mbus.h>
68#include <linux/bitops.h>
68#include <scsi/scsi_host.h> 69#include <scsi/scsi_host.h>
69#include <scsi/scsi_cmnd.h> 70#include <scsi/scsi_cmnd.h>
70#include <scsi/scsi_device.h> 71#include <scsi/scsi_device.h>
71#include <linux/libata.h> 72#include <linux/libata.h>
72 73
73#define DRV_NAME "sata_mv" 74#define DRV_NAME "sata_mv"
74#define DRV_VERSION "1.20" 75#define DRV_VERSION "1.21"
75 76
76enum { 77enum {
77 /* BAR's are enumerated in terms of pci_resource_start() terms */ 78 /* BAR's are enumerated in terms of pci_resource_start() terms */
@@ -91,9 +92,9 @@ enum {
91 MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0), 92 MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),
92 93
93 MV_SATAHC0_REG_BASE = 0x20000, 94 MV_SATAHC0_REG_BASE = 0x20000,
94 MV_FLASH_CTL = 0x1046c, 95 MV_FLASH_CTL_OFS = 0x1046c,
95 MV_GPIO_PORT_CTL = 0x104f0, 96 MV_GPIO_PORT_CTL_OFS = 0x104f0,
96 MV_RESET_CFG = 0x180d8, 97 MV_RESET_CFG_OFS = 0x180d8,
97 98
98 MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ, 99 MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
99 MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ, 100 MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
@@ -127,8 +128,13 @@ enum {
127 MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 128 MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
128 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI | 129 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
129 ATA_FLAG_PIO_POLLING, 130 ATA_FLAG_PIO_POLLING,
131
130 MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE, 132 MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,
131 133
134 MV_GENIIE_FLAGS = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
135 ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
136 ATA_FLAG_NCQ | ATA_FLAG_AN,
137
132 CRQB_FLAG_READ = (1 << 0), 138 CRQB_FLAG_READ = (1 << 0),
133 CRQB_TAG_SHIFT = 1, 139 CRQB_TAG_SHIFT = 1,
134 CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */ 140 CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */
@@ -147,18 +153,21 @@ enum {
147 /* PCI interface registers */ 153 /* PCI interface registers */
148 154
149 PCI_COMMAND_OFS = 0xc00, 155 PCI_COMMAND_OFS = 0xc00,
156 PCI_COMMAND_MRDTRIG = (1 << 7), /* PCI Master Read Trigger */
150 157
151 PCI_MAIN_CMD_STS_OFS = 0xd30, 158 PCI_MAIN_CMD_STS_OFS = 0xd30,
152 STOP_PCI_MASTER = (1 << 2), 159 STOP_PCI_MASTER = (1 << 2),
153 PCI_MASTER_EMPTY = (1 << 3), 160 PCI_MASTER_EMPTY = (1 << 3),
154 GLOB_SFT_RST = (1 << 4), 161 GLOB_SFT_RST = (1 << 4),
155 162
156 MV_PCI_MODE = 0xd00, 163 MV_PCI_MODE_OFS = 0xd00,
164 MV_PCI_MODE_MASK = 0x30,
165
157 MV_PCI_EXP_ROM_BAR_CTL = 0xd2c, 166 MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
158 MV_PCI_DISC_TIMER = 0xd04, 167 MV_PCI_DISC_TIMER = 0xd04,
159 MV_PCI_MSI_TRIGGER = 0xc38, 168 MV_PCI_MSI_TRIGGER = 0xc38,
160 MV_PCI_SERR_MASK = 0xc28, 169 MV_PCI_SERR_MASK = 0xc28,
161 MV_PCI_XBAR_TMOUT = 0x1d04, 170 MV_PCI_XBAR_TMOUT_OFS = 0x1d04,
162 MV_PCI_ERR_LOW_ADDRESS = 0x1d40, 171 MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
163 MV_PCI_ERR_HIGH_ADDRESS = 0x1d44, 172 MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
164 MV_PCI_ERR_ATTRIBUTE = 0x1d48, 173 MV_PCI_ERR_ATTRIBUTE = 0x1d48,
@@ -193,13 +202,6 @@ enum {
193 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */ 202 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
194 HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */ 203 HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
195 HC_MAIN_RSVD_SOC = (0x3fffffb << 6), /* bits 31-9, 7-6 */ 204 HC_MAIN_RSVD_SOC = (0x3fffffb << 6), /* bits 31-9, 7-6 */
196 HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
197 PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
198 PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
199 HC_MAIN_RSVD),
200 HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
201 HC_MAIN_RSVD_5),
202 HC_MAIN_MASKED_IRQS_SOC = (PORTS_0_3_COAL_DONE | HC_MAIN_RSVD_SOC),
203 205
204 /* SATAHC registers */ 206 /* SATAHC registers */
205 HC_CFG_OFS = 0, 207 HC_CFG_OFS = 0,
@@ -217,6 +219,7 @@ enum {
217 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */ 219 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
218 SATA_ACTIVE_OFS = 0x350, 220 SATA_ACTIVE_OFS = 0x350,
219 SATA_FIS_IRQ_CAUSE_OFS = 0x364, 221 SATA_FIS_IRQ_CAUSE_OFS = 0x364,
222 SATA_FIS_IRQ_AN = (1 << 9), /* async notification */
220 223
221 LTMODE_OFS = 0x30c, 224 LTMODE_OFS = 0x30c,
222 LTMODE_BIT8 = (1 << 8), /* unknown, but necessary */ 225 LTMODE_BIT8 = (1 << 8), /* unknown, but necessary */
@@ -225,16 +228,18 @@ enum {
225 PHY_MODE4 = 0x314, 228 PHY_MODE4 = 0x314,
226 PHY_MODE2 = 0x330, 229 PHY_MODE2 = 0x330,
227 SATA_IFCTL_OFS = 0x344, 230 SATA_IFCTL_OFS = 0x344,
231 SATA_TESTCTL_OFS = 0x348,
228 SATA_IFSTAT_OFS = 0x34c, 232 SATA_IFSTAT_OFS = 0x34c,
229 VENDOR_UNIQUE_FIS_OFS = 0x35c, 233 VENDOR_UNIQUE_FIS_OFS = 0x35c,
230 234
231 FIS_CFG_OFS = 0x360, 235 FISCFG_OFS = 0x360,
232 FIS_CFG_SINGLE_SYNC = (1 << 16), /* SYNC on DMA activation */ 236 FISCFG_WAIT_DEV_ERR = (1 << 8), /* wait for host on DevErr */
237 FISCFG_SINGLE_SYNC = (1 << 16), /* SYNC on DMA activation */
233 238
234 MV5_PHY_MODE = 0x74, 239 MV5_PHY_MODE = 0x74,
235 MV5_LT_MODE = 0x30, 240 MV5_LTMODE_OFS = 0x30,
236 MV5_PHY_CTL = 0x0C, 241 MV5_PHY_CTL_OFS = 0x0C,
237 SATA_INTERFACE_CFG = 0x050, 242 SATA_INTERFACE_CFG_OFS = 0x050,
238 243
239 MV_M2_PREAMP_MASK = 0x7e0, 244 MV_M2_PREAMP_MASK = 0x7e0,
240 245
@@ -332,10 +337,16 @@ enum {
332 EDMA_CMD_OFS = 0x28, /* EDMA command register */ 337 EDMA_CMD_OFS = 0x28, /* EDMA command register */
333 EDMA_EN = (1 << 0), /* enable EDMA */ 338 EDMA_EN = (1 << 0), /* enable EDMA */
334 EDMA_DS = (1 << 1), /* disable EDMA; self-negated */ 339 EDMA_DS = (1 << 1), /* disable EDMA; self-negated */
335 ATA_RST = (1 << 2), /* reset trans/link/phy */ 340 EDMA_RESET = (1 << 2), /* reset eng/trans/link/phy */
336 341
337 EDMA_IORDY_TMOUT = 0x34, 342 EDMA_STATUS_OFS = 0x30, /* EDMA engine status */
338 EDMA_ARB_CFG = 0x38, 343 EDMA_STATUS_CACHE_EMPTY = (1 << 6), /* GenIIe command cache empty */
344 EDMA_STATUS_IDLE = (1 << 7), /* GenIIe EDMA enabled/idle */
345
346 EDMA_IORDY_TMOUT_OFS = 0x34,
347 EDMA_ARB_CFG_OFS = 0x38,
348
349 EDMA_HALTCOND_OFS = 0x60, /* GenIIe halt conditions */
339 350
340 GEN_II_NCQ_MAX_SECTORS = 256, /* max sects/io on Gen2 w/NCQ */ 351 GEN_II_NCQ_MAX_SECTORS = 256, /* max sects/io on Gen2 w/NCQ */
341 352
@@ -350,15 +361,19 @@ enum {
350 MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */ 361 MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */
351 MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */ 362 MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */
352 MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */ 363 MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */
364 MV_HP_CUT_THROUGH = (1 << 10), /* can use EDMA cut-through */
353 365
354 /* Port private flags (pp_flags) */ 366 /* Port private flags (pp_flags) */
355 MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */ 367 MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
356 MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */ 368 MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */
369 MV_PP_FLAG_FBS_EN = (1 << 2), /* is EDMA set up for FBS? */
370 MV_PP_FLAG_DELAYED_EH = (1 << 3), /* delayed dev err handling */
357}; 371};
358 372
359#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I) 373#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
360#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II) 374#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
361#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE) 375#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
376#define IS_PCIE(hpriv) ((hpriv)->hp_flags & MV_HP_PCIE)
362#define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC)) 377#define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC))
363 378
364#define WINDOW_CTRL(i) (0x20030 + ((i) << 4)) 379#define WINDOW_CTRL(i) (0x20030 + ((i) << 4))
@@ -433,6 +448,7 @@ struct mv_port_priv {
433 unsigned int resp_idx; 448 unsigned int resp_idx;
434 449
435 u32 pp_flags; 450 u32 pp_flags;
451 unsigned int delayed_eh_pmp_map;
436}; 452};
437 453
438struct mv_port_signal { 454struct mv_port_signal {
@@ -442,6 +458,7 @@ struct mv_port_signal {
442 458
443struct mv_host_priv { 459struct mv_host_priv {
444 u32 hp_flags; 460 u32 hp_flags;
461 u32 main_irq_mask;
445 struct mv_port_signal signal[8]; 462 struct mv_port_signal signal[8];
446 const struct mv_hw_ops *ops; 463 const struct mv_hw_ops *ops;
447 int n_ports; 464 int n_ports;
@@ -479,6 +496,7 @@ static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
479static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val); 496static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
480static int mv_port_start(struct ata_port *ap); 497static int mv_port_start(struct ata_port *ap);
481static void mv_port_stop(struct ata_port *ap); 498static void mv_port_stop(struct ata_port *ap);
499static int mv_qc_defer(struct ata_queued_cmd *qc);
482static void mv_qc_prep(struct ata_queued_cmd *qc); 500static void mv_qc_prep(struct ata_queued_cmd *qc);
483static void mv_qc_prep_iie(struct ata_queued_cmd *qc); 501static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
484static unsigned int mv_qc_issue(struct ata_queued_cmd *qc); 502static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
@@ -527,6 +545,9 @@ static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
527 unsigned long deadline); 545 unsigned long deadline);
528static int mv_softreset(struct ata_link *link, unsigned int *class, 546static int mv_softreset(struct ata_link *link, unsigned int *class,
529 unsigned long deadline); 547 unsigned long deadline);
548static void mv_pmp_error_handler(struct ata_port *ap);
549static void mv_process_crpb_entries(struct ata_port *ap,
550 struct mv_port_priv *pp);
530 551
531/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below 552/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
532 * because we have to allow room for worst case splitting of 553 * because we have to allow room for worst case splitting of
@@ -548,6 +569,7 @@ static struct scsi_host_template mv6_sht = {
548static struct ata_port_operations mv5_ops = { 569static struct ata_port_operations mv5_ops = {
549 .inherits = &ata_sff_port_ops, 570 .inherits = &ata_sff_port_ops,
550 571
572 .qc_defer = mv_qc_defer,
551 .qc_prep = mv_qc_prep, 573 .qc_prep = mv_qc_prep,
552 .qc_issue = mv_qc_issue, 574 .qc_issue = mv_qc_issue,
553 575
@@ -566,7 +588,6 @@ static struct ata_port_operations mv5_ops = {
566 588
567static struct ata_port_operations mv6_ops = { 589static struct ata_port_operations mv6_ops = {
568 .inherits = &mv5_ops, 590 .inherits = &mv5_ops,
569 .qc_defer = sata_pmp_qc_defer_cmd_switch,
570 .dev_config = mv6_dev_config, 591 .dev_config = mv6_dev_config,
571 .scr_read = mv_scr_read, 592 .scr_read = mv_scr_read,
572 .scr_write = mv_scr_write, 593 .scr_write = mv_scr_write,
@@ -574,12 +595,11 @@ static struct ata_port_operations mv6_ops = {
574 .pmp_hardreset = mv_pmp_hardreset, 595 .pmp_hardreset = mv_pmp_hardreset,
575 .pmp_softreset = mv_softreset, 596 .pmp_softreset = mv_softreset,
576 .softreset = mv_softreset, 597 .softreset = mv_softreset,
577 .error_handler = sata_pmp_error_handler, 598 .error_handler = mv_pmp_error_handler,
578}; 599};
579 600
580static struct ata_port_operations mv_iie_ops = { 601static struct ata_port_operations mv_iie_ops = {
581 .inherits = &mv6_ops, 602 .inherits = &mv6_ops,
582 .qc_defer = ata_std_qc_defer, /* FIS-based switching */
583 .dev_config = ATA_OP_NULL, 603 .dev_config = ATA_OP_NULL,
584 .qc_prep = mv_qc_prep_iie, 604 .qc_prep = mv_qc_prep_iie,
585}; 605};
@@ -620,25 +640,19 @@ static const struct ata_port_info mv_port_info[] = {
620 .port_ops = &mv6_ops, 640 .port_ops = &mv6_ops,
621 }, 641 },
622 { /* chip_6042 */ 642 { /* chip_6042 */
623 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS | 643 .flags = MV_GENIIE_FLAGS,
624 ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
625 ATA_FLAG_NCQ,
626 .pio_mask = 0x1f, /* pio0-4 */ 644 .pio_mask = 0x1f, /* pio0-4 */
627 .udma_mask = ATA_UDMA6, 645 .udma_mask = ATA_UDMA6,
628 .port_ops = &mv_iie_ops, 646 .port_ops = &mv_iie_ops,
629 }, 647 },
630 { /* chip_7042 */ 648 { /* chip_7042 */
631 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS | 649 .flags = MV_GENIIE_FLAGS,
632 ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
633 ATA_FLAG_NCQ,
634 .pio_mask = 0x1f, /* pio0-4 */ 650 .pio_mask = 0x1f, /* pio0-4 */
635 .udma_mask = ATA_UDMA6, 651 .udma_mask = ATA_UDMA6,
636 .port_ops = &mv_iie_ops, 652 .port_ops = &mv_iie_ops,
637 }, 653 },
638 { /* chip_soc */ 654 { /* chip_soc */
639 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS | 655 .flags = MV_GENIIE_FLAGS | MV_FLAG_SOC,
640 ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
641 ATA_FLAG_NCQ | MV_FLAG_SOC,
642 .pio_mask = 0x1f, /* pio0-4 */ 656 .pio_mask = 0x1f, /* pio0-4 */
643 .udma_mask = ATA_UDMA6, 657 .udma_mask = ATA_UDMA6,
644 .port_ops = &mv_iie_ops, 658 .port_ops = &mv_iie_ops,
@@ -824,6 +838,33 @@ static void mv_set_edma_ptrs(void __iomem *port_mmio,
824 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); 838 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
825} 839}
826 840
841static void mv_set_main_irq_mask(struct ata_host *host,
842 u32 disable_bits, u32 enable_bits)
843{
844 struct mv_host_priv *hpriv = host->private_data;
845 u32 old_mask, new_mask;
846
847 old_mask = hpriv->main_irq_mask;
848 new_mask = (old_mask & ~disable_bits) | enable_bits;
849 if (new_mask != old_mask) {
850 hpriv->main_irq_mask = new_mask;
851 writelfl(new_mask, hpriv->main_irq_mask_addr);
852 }
853}
854
855static void mv_enable_port_irqs(struct ata_port *ap,
856 unsigned int port_bits)
857{
858 unsigned int shift, hardport, port = ap->port_no;
859 u32 disable_bits, enable_bits;
860
861 MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
862
863 disable_bits = (DONE_IRQ | ERR_IRQ) << shift;
864 enable_bits = port_bits << shift;
865 mv_set_main_irq_mask(ap->host, disable_bits, enable_bits);
866}
867
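Since mv_set_main_irq_mask() caches the last mask written, re-asserting an already-current mask costs nothing; a hypothetical back-to-back sequence to illustrate:

	mv_enable_port_irqs(ap, DONE_IRQ | ERR_IRQ);	/* mask changes: one MMIO write */
	mv_enable_port_irqs(ap, DONE_IRQ | ERR_IRQ);	/* mask unchanged: write skipped */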
827/** 868/**
828 * mv_start_dma - Enable eDMA engine 869 * mv_start_dma - Enable eDMA engine
829 * @base: port base address 870 * @base: port base address
@@ -866,15 +907,40 @@ static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
866 mv_edma_cfg(ap, want_ncq); 907 mv_edma_cfg(ap, want_ncq);
867 908
868 /* clear FIS IRQ Cause */ 909 /* clear FIS IRQ Cause */
869 writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS); 910 if (IS_GEN_IIE(hpriv))
911 writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
870 912
871 mv_set_edma_ptrs(port_mmio, hpriv, pp); 913 mv_set_edma_ptrs(port_mmio, hpriv, pp);
914 mv_enable_port_irqs(ap, DONE_IRQ|ERR_IRQ);
872 915
873 writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS); 916 writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
874 pp->pp_flags |= MV_PP_FLAG_EDMA_EN; 917 pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
875 } 918 }
876} 919}
877 920
921static void mv_wait_for_edma_empty_idle(struct ata_port *ap)
922{
923 void __iomem *port_mmio = mv_ap_base(ap);
924 const u32 empty_idle = (EDMA_STATUS_CACHE_EMPTY | EDMA_STATUS_IDLE);
925 const int per_loop = 5, timeout = (15 * 1000 / per_loop);
926 int i;
927
928 /*
929 * Wait for the EDMA engine to finish transactions in progress.
930 * No idea what a good "timeout" value might be, but measurements
931 * indicate that it often requires hundreds of microseconds
 932 * with two drives in use. So we use the 15msec value above
933 * as a rough guess at what even more drives might require.
934 */
935 for (i = 0; i < timeout; ++i) {
936 u32 edma_stat = readl(port_mmio + EDMA_STATUS_OFS);
937 if ((edma_stat & empty_idle) == empty_idle)
938 break;
939 udelay(per_loop);
940 }
941 /* ata_port_printk(ap, KERN_INFO, "%s: %u+ usecs\n", __func__, i); */
942}
943
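/*
 * Sketch of the bounded-poll pattern used by mv_wait_for_edma_empty_idle()
 * above: 15 * 1000 / 5 == 3000 iterations of 5 us each, a 15 ms worst
 * case.  User-space model only; is_idle() is a hypothetical stand-in for
 * reading EDMA_STATUS_OFS and testing the CACHE_EMPTY and IDLE bits
 * together.
 */
#include <unistd.h>

static int wait_empty_idle(int (*is_idle)(void))
{
	const int per_loop = 5, timeout = 15 * 1000 / per_loop;
	int i;

	for (i = 0; i < timeout; i++) {
		if (is_idle())
			return 0;	/* engine drained within the budget */
		usleep(per_loop);	/* udelay(per_loop) in the driver */
	}
	return -1;			/* still busy after ~15 ms */
}

static int always_idle(void) { return 1; }

int main(void)
{
	return wait_empty_idle(always_idle);	/* returns 0 immediately */
}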
878/** 944/**
879 * mv_stop_edma_engine - Disable eDMA engine 945 * mv_stop_edma_engine - Disable eDMA engine
880 * @port_mmio: io base address 946 * @port_mmio: io base address
@@ -907,6 +973,7 @@ static int mv_stop_edma(struct ata_port *ap)
907 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) 973 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
908 return 0; 974 return 0;
909 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; 975 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
976 mv_wait_for_edma_empty_idle(ap);
910 if (mv_stop_edma_engine(port_mmio)) { 977 if (mv_stop_edma_engine(port_mmio)) {
911 ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n"); 978 ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
912 return -EIO; 979 return -EIO;
@@ -1057,26 +1124,95 @@ static void mv6_dev_config(struct ata_device *adev)
1057 } 1124 }
1058} 1125}
1059 1126
1060static void mv_config_fbs(void __iomem *port_mmio, int enable_fbs) 1127static int mv_qc_defer(struct ata_queued_cmd *qc)
1061{ 1128{
1062 u32 old_fcfg, new_fcfg, old_ltmode, new_ltmode; 1129 struct ata_link *link = qc->dev->link;
1130 struct ata_port *ap = link->ap;
1131 struct mv_port_priv *pp = ap->private_data;
1132
1063 /* 1133 /*
1064 * Various bit settings required for operation 1134 * Don't allow new commands if we're in a delayed EH state
1065 * in FIS-based switching (fbs) mode on GenIIe: 1135 * for NCQ and/or FIS-based switching.
1066 */ 1136 */
1067 old_fcfg = readl(port_mmio + FIS_CFG_OFS); 1137 if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
1068 old_ltmode = readl(port_mmio + LTMODE_OFS); 1138 return ATA_DEFER_PORT;
1069 if (enable_fbs) { 1139 /*
1070 new_fcfg = old_fcfg | FIS_CFG_SINGLE_SYNC; 1140 * If the port is completely idle, then allow the new qc.
1071 new_ltmode = old_ltmode | LTMODE_BIT8; 1141 */
1072 } else { /* disable fbs */ 1142 if (ap->nr_active_links == 0)
1073 new_fcfg = old_fcfg & ~FIS_CFG_SINGLE_SYNC; 1143 return 0;
1074 new_ltmode = old_ltmode & ~LTMODE_BIT8; 1144
1075 } 1145 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1076 if (new_fcfg != old_fcfg) 1146 /*
1077 writelfl(new_fcfg, port_mmio + FIS_CFG_OFS); 1148 * It can accommodate a new qc if the qc protocol
1148 * It can accomodate a new qc if the qc protocol
1149 * is compatible with the current host queue mode.
1150 */
1151 if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) {
1152 /*
1153 * The host queue (EDMA) is in NCQ mode.
1154 * If the new qc is also an NCQ command,
1155 * then allow the new qc.
1156 */
1157 if (qc->tf.protocol == ATA_PROT_NCQ)
1158 return 0;
1159 } else {
1160 /*
1161 * The host queue (EDMA) is in non-NCQ, DMA mode.
1162 * If the new qc is also a non-NCQ, DMA command,
1163 * then allow the new qc.
1164 */
1165 if (qc->tf.protocol == ATA_PROT_DMA)
1166 return 0;
1167 }
1168 }
1169 return ATA_DEFER_PORT;
1170}
1171
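/*
 * The decision structure of mv_qc_defer() above, as a pure user-space
 * function with asserts.  The flag and protocol values are local
 * stand-ins, not the libata constants; only the ordering of the checks
 * is the point.
 */
#include <assert.h>
#include <stdbool.h>

enum proto { PROTO_NCQ, PROTO_DMA, PROTO_OTHER };

static bool may_queue(bool delayed_eh, int nr_active, bool edma_en,
		      bool ncq_en, enum proto p)
{
	if (delayed_eh)
		return false;		/* port is draining for delayed EH */
	if (nr_active == 0)
		return true;		/* idle port accepts anything */
	if (edma_en)			/* must match the current queue mode */
		return ncq_en ? (p == PROTO_NCQ) : (p == PROTO_DMA);
	return false;			/* non-EDMA with commands active */
}

int main(void)
{
	assert(may_queue(false, 0, false, false, PROTO_OTHER)); /* idle */
	assert(may_queue(false, 3, true, true, PROTO_NCQ));	/* NCQ into NCQ */
	assert(!may_queue(false, 3, true, true, PROTO_DMA));	/* mode mismatch */
	assert(!may_queue(true, 0, true, true, PROTO_NCQ));	/* delayed EH */
	return 0;
}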
1172static void mv_config_fbs(void __iomem *port_mmio, int want_ncq, int want_fbs)
1173{
1174 u32 new_fiscfg, old_fiscfg;
1175 u32 new_ltmode, old_ltmode;
1176 u32 new_haltcond, old_haltcond;
1177
1178 old_fiscfg = readl(port_mmio + FISCFG_OFS);
1179 old_ltmode = readl(port_mmio + LTMODE_OFS);
1180 old_haltcond = readl(port_mmio + EDMA_HALTCOND_OFS);
1181
1182 new_fiscfg = old_fiscfg & ~(FISCFG_SINGLE_SYNC | FISCFG_WAIT_DEV_ERR);
1183 new_ltmode = old_ltmode & ~LTMODE_BIT8;
1184 new_haltcond = old_haltcond | EDMA_ERR_DEV;
1185
1186 if (want_fbs) {
1187 new_fiscfg = old_fiscfg | FISCFG_SINGLE_SYNC;
1188 new_ltmode = old_ltmode | LTMODE_BIT8;
1189 if (want_ncq)
1190 new_haltcond &= ~EDMA_ERR_DEV;
1191 else
1192 new_fiscfg |= FISCFG_WAIT_DEV_ERR;
1193 }
1194
1195 if (new_fiscfg != old_fiscfg)
1196 writelfl(new_fiscfg, port_mmio + FISCFG_OFS);
1078 if (new_ltmode != old_ltmode) 1197 if (new_ltmode != old_ltmode)
1079 writelfl(new_ltmode, port_mmio + LTMODE_OFS); 1198 writelfl(new_ltmode, port_mmio + LTMODE_OFS);
1199 if (new_haltcond != old_haltcond)
1200 writelfl(new_haltcond, port_mmio + EDMA_HALTCOND_OFS);
1201}
1202
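/*
 * Sketch of the register policy mv_config_fbs() implements, with
 * placeholder bit values (the real FISCFG/LTMODE/HALTCOND bits live in
 * the driver's private header).  Three cases: FBS off, FBS+NCQ, and
 * FBS+non-NCQ, which must additionally wait on device errors.
 */
#include <assert.h>
#include <stdint.h>

enum {	/* assumed stand-ins for the hardware bits */
	SINGLE_SYNC	= 1 << 16,
	WAIT_DEV_ERR	= 1 << 8,
	LTMODE_BIT8	= 1 << 8,
	HALT_ON_DEV_ERR	= 1 << 2,
};

struct fbs_regs { uint32_t fiscfg, ltmode, haltcond; };

static struct fbs_regs fbs_policy(int want_ncq, int want_fbs)
{
	struct fbs_regs r = { 0, 0, HALT_ON_DEV_ERR };	/* non-FBS defaults */

	if (want_fbs) {
		r.fiscfg |= SINGLE_SYNC;
		r.ltmode |= LTMODE_BIT8;
		if (want_ncq)
			r.haltcond &= ~HALT_ON_DEV_ERR; /* keep EDMA running */
		else
			r.fiscfg |= WAIT_DEV_ERR;	/* future non-NCQ mode */
	}
	return r;
}

int main(void)
{
	assert(fbs_policy(0, 0).haltcond == HALT_ON_DEV_ERR);
	assert(fbs_policy(1, 1).haltcond == 0);			/* FBS+NCQ */
	assert(fbs_policy(0, 1).fiscfg == (SINGLE_SYNC | WAIT_DEV_ERR));
	return 0;
}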
1203static void mv_60x1_errata_sata25(struct ata_port *ap, int want_ncq)
1204{
1205 struct mv_host_priv *hpriv = ap->host->private_data;
1206 u32 old, new;
1207
1208 /* workaround for 88SX60x1 FEr SATA#25 (part 1) */
1209 old = readl(hpriv->base + MV_GPIO_PORT_CTL_OFS);
1210 if (want_ncq)
1211 new = old | (1 << 22);
1212 else
1213 new = old & ~(1 << 22);
1214 if (new != old)
1215 writel(new, hpriv->base + MV_GPIO_PORT_CTL_OFS);
1080} 1216}
1081 1217
1082static void mv_edma_cfg(struct ata_port *ap, int want_ncq) 1218static void mv_edma_cfg(struct ata_port *ap, int want_ncq)
@@ -1088,25 +1224,40 @@ static void mv_edma_cfg(struct ata_port *ap, int want_ncq)
1088 1224
1089 /* set up non-NCQ EDMA configuration */ 1225 /* set up non-NCQ EDMA configuration */
1090 cfg = EDMA_CFG_Q_DEPTH; /* always 0x1f for *all* chips */ 1226 cfg = EDMA_CFG_Q_DEPTH; /* always 0x1f for *all* chips */
1227 pp->pp_flags &= ~MV_PP_FLAG_FBS_EN;
1091 1228
1092 if (IS_GEN_I(hpriv)) 1229 if (IS_GEN_I(hpriv))
1093 cfg |= (1 << 8); /* enab config burst size mask */ 1230 cfg |= (1 << 8); /* enab config burst size mask */
1094 1231
1095 else if (IS_GEN_II(hpriv)) 1232 else if (IS_GEN_II(hpriv)) {
1096 cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN; 1233 cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
1234 mv_60x1_errata_sata25(ap, want_ncq);
1097 1235
1098 else if (IS_GEN_IIE(hpriv)) { 1236 } else if (IS_GEN_IIE(hpriv)) {
1099 cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */ 1237 int want_fbs = sata_pmp_attached(ap);
1100 cfg |= (1 << 22); /* enab 4-entry host queue cache */ 1238 /*
1101 cfg |= (1 << 18); /* enab early completion */ 1239 * Possible future enhancement:
1102 cfg |= (1 << 17); /* enab cut-through (dis stor&forwrd) */ 1240 *
1241 * The chip can use FBS with non-NCQ, if we allow it,
1242 * But first we need to have the error handling in place
1243 * for this mode (datasheet section 7.3.15.4.2.3).
1244 * So disallow non-NCQ FBS for now.
1245 */
1246 want_fbs &= want_ncq;
1247
1248 mv_config_fbs(port_mmio, want_ncq, want_fbs);
1103 1249
1104 if (want_ncq && sata_pmp_attached(ap)) { 1250 if (want_fbs) {
1251 pp->pp_flags |= MV_PP_FLAG_FBS_EN;
1105 cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */ 1252 cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */
1106 mv_config_fbs(port_mmio, 1);
1107 } else {
1108 mv_config_fbs(port_mmio, 0);
1109 } 1253 }
1254
1255 cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */
1256 cfg |= (1 << 22); /* enab 4-entry host queue cache */
1257 if (HAS_PCI(ap->host))
1258 cfg |= (1 << 18); /* enab early completion */
1259 if (hpriv->hp_flags & MV_HP_CUT_THROUGH)
1260 cfg |= (1 << 17); /* enab cut-thru (dis stor&forwrd) */
1110 } 1261 }
1111 1262
1112 if (want_ncq) { 1263 if (want_ncq) {
@@ -1213,6 +1364,7 @@ out_port_free_dma_mem:
1213static void mv_port_stop(struct ata_port *ap) 1364static void mv_port_stop(struct ata_port *ap)
1214{ 1365{
1215 mv_stop_edma(ap); 1366 mv_stop_edma(ap);
1367 mv_enable_port_irqs(ap, 0);
1216 mv_port_free_dma_mem(ap); 1368 mv_port_free_dma_mem(ap);
1217} 1369}
1218 1370
@@ -1454,6 +1606,7 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
1454 * shadow block, etc registers. 1606 * shadow block, etc registers.
1455 */ 1607 */
1456 mv_stop_edma(ap); 1608 mv_stop_edma(ap);
1609 mv_enable_port_irqs(ap, ERR_IRQ);
1457 mv_pmp_select(ap, qc->dev->link->pmp); 1610 mv_pmp_select(ap, qc->dev->link->pmp);
1458 return ata_sff_qc_issue(qc); 1611 return ata_sff_qc_issue(qc);
1459 } 1612 }
@@ -1483,25 +1636,198 @@ static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap)
1483 return qc; 1636 return qc;
1484} 1637}
1485 1638
1486static void mv_unexpected_intr(struct ata_port *ap) 1639static void mv_pmp_error_handler(struct ata_port *ap)
1487{ 1640{
1641 unsigned int pmp, pmp_map;
1488 struct mv_port_priv *pp = ap->private_data; 1642 struct mv_port_priv *pp = ap->private_data;
1489 struct ata_eh_info *ehi = &ap->link.eh_info;
1490 char *when = "";
1491 1643
1644 if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) {
1645 /*
1646 * Perform NCQ error analysis on failed PMPs
1647 * before we freeze the port entirely.
1648 *
1649 * The failed PMPs are marked earlier by mv_pmp_eh_prep().
1650 */
1651 pmp_map = pp->delayed_eh_pmp_map;
1652 pp->pp_flags &= ~MV_PP_FLAG_DELAYED_EH;
1653 for (pmp = 0; pmp_map != 0; pmp++) {
1654 unsigned int this_pmp = (1 << pmp);
1655 if (pmp_map & this_pmp) {
1656 struct ata_link *link = &ap->pmp_link[pmp];
1657 pmp_map &= ~this_pmp;
1658 ata_eh_analyze_ncq_error(link);
1659 }
1660 }
1661 ata_port_freeze(ap);
1662 }
1663 sata_pmp_error_handler(ap);
1664}
1665
1666static unsigned int mv_get_err_pmp_map(struct ata_port *ap)
1667{
1668 void __iomem *port_mmio = mv_ap_base(ap);
1669
1670 return readl(port_mmio + SATA_TESTCTL_OFS) >> 16;
1671}
1672
1673static void mv_pmp_eh_prep(struct ata_port *ap, unsigned int pmp_map)
1674{
1675 struct ata_eh_info *ehi;
1676 unsigned int pmp;
1677
1678 /*
1679 * Initialize EH info for PMPs which saw device errors
1680 */
1681 ehi = &ap->link.eh_info;
1682 for (pmp = 0; pmp_map != 0; pmp++) {
1683 unsigned int this_pmp = (1 << pmp);
1684 if (pmp_map & this_pmp) {
1685 struct ata_link *link = &ap->pmp_link[pmp];
1686
1687 pmp_map &= ~this_pmp;
1688 ehi = &link->eh_info;
1689 ata_ehi_clear_desc(ehi);
1690 ata_ehi_push_desc(ehi, "dev err");
1691 ehi->err_mask |= AC_ERR_DEV;
1692 ehi->action |= ATA_EH_RESET;
1693 ata_link_abort(link);
1694 }
1695 }
1696}
1697
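/*
 * The loop shape used by mv_pmp_eh_prep() above: walk a 16-bit PMP
 * bitmap, clearing each bit as it is serviced so the loop terminates as
 * soon as the map drains.  User-space sketch with a callback standing
 * in for the per-link EH setup.
 */
#include <assert.h>

static void for_each_pmp(unsigned int pmp_map, void (*fn)(unsigned int pmp))
{
	unsigned int pmp;

	for (pmp = 0; pmp_map != 0; pmp++) {
		unsigned int this_pmp = 1u << pmp;

		if (pmp_map & this_pmp) {
			pmp_map &= ~this_pmp;	/* drains map, ends loop early */
			fn(pmp);
		}
	}
}

static unsigned int seen;
static void record(unsigned int pmp) { seen |= 1u << pmp; }

int main(void)
{
	for_each_pmp(0x0015, record);	/* PMPs 0, 2, 4 */
	assert(seen == 0x0015);
	return 0;
}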
1698static int mv_req_q_empty(struct ata_port *ap)
1699{
1700 void __iomem *port_mmio = mv_ap_base(ap);
1701 u32 in_ptr, out_ptr;
1702
1703 in_ptr = (readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS)
1704 >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1705 out_ptr = (readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
1706 >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1707 return (in_ptr == out_ptr); /* 1 == queue_is_empty */
1708}
1709
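/*
 * What mv_req_q_empty() checks, in miniature: the request ring is a
 * power-of-two circular queue, and equal masked in/out indices mean
 * "empty".  The depth of 32 (mask = depth - 1) matches the driver's
 * MV_MAX_Q_DEPTH but is restated here as an assumption.
 */
#include <assert.h>

#define Q_DEPTH		32u
#define Q_DEPTH_MASK	(Q_DEPTH - 1)

static int ring_empty(unsigned int in_ptr, unsigned int out_ptr)
{
	return (in_ptr & Q_DEPTH_MASK) == (out_ptr & Q_DEPTH_MASK);
}

int main(void)
{
	assert(ring_empty(0, 0));	/* nothing ever queued */
	assert(!ring_empty(3, 1));	/* two requests outstanding */
	assert(ring_empty(33, 1));	/* indices compare after masking */
	return 0;
}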
1710static int mv_handle_fbs_ncq_dev_err(struct ata_port *ap)
1711{
1712 struct mv_port_priv *pp = ap->private_data;
1713 int failed_links;
1714 unsigned int old_map, new_map;
1715
1716 /*
1717 * Device error during FBS+NCQ operation:
1718 *
1719 * Set a port flag to prevent further I/O being enqueued.
1720 * Leave the EDMA running to drain outstanding commands from this port.
1721 * Perform the post-mortem/EH only when all responses are complete.
1722 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.2).
1723 */
1724 if (!(pp->pp_flags & MV_PP_FLAG_DELAYED_EH)) {
1725 pp->pp_flags |= MV_PP_FLAG_DELAYED_EH;
1726 pp->delayed_eh_pmp_map = 0;
1727 }
1728 old_map = pp->delayed_eh_pmp_map;
1729 new_map = old_map | mv_get_err_pmp_map(ap);
1730
1731 if (old_map != new_map) {
1732 pp->delayed_eh_pmp_map = new_map;
1733 mv_pmp_eh_prep(ap, new_map & ~old_map);
1734 }
1735 failed_links = hweight16(new_map);
1736
1737 ata_port_printk(ap, KERN_INFO, "%s: pmp_map=%04x qc_map=%04x "
1738 "failed_links=%d nr_active_links=%d\n",
1739 __func__, pp->delayed_eh_pmp_map,
1740 ap->qc_active, failed_links,
1741 ap->nr_active_links);
1742
1743 if (ap->nr_active_links <= failed_links && mv_req_q_empty(ap)) {
1744 mv_process_crpb_entries(ap, pp);
1745 mv_stop_edma(ap);
1746 mv_eh_freeze(ap);
1747 ata_port_printk(ap, KERN_INFO, "%s: done\n", __func__);
1748 return 1; /* handled */
1749 }
1750 ata_port_printk(ap, KERN_INFO, "%s: waiting\n", __func__);
1751 return 1; /* handled */
1752}
1753
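/*
 * The termination test at the end of mv_handle_fbs_ncq_dev_err(), as a
 * pure function: once every still-active link has failed and the request
 * ring has drained, it is safe to stop EDMA and freeze the port for the
 * real post-mortem.  Sketch only; hweight16() is a portable popcount
 * stand-in for the kernel helper of the same name.
 */
#include <assert.h>

static int hweight16(unsigned int w)
{
	int n = 0;

	for (w &= 0xffff; w; w &= w - 1)
		n++;			/* clears lowest set bit per pass */
	return n;
}

static int fbs_eh_ready(unsigned int failed_pmp_map,
			int nr_active_links, int req_q_empty)
{
	return nr_active_links <= hweight16(failed_pmp_map) && req_q_empty;
}

int main(void)
{
	assert(!fbs_eh_ready(0x0003, 4, 1));	/* 2 failed, 4 active: wait */
	assert(!fbs_eh_ready(0x000f, 4, 0));	/* ring not drained: wait */
	assert(fbs_eh_ready(0x000f, 4, 1));	/* all failed and drained */
	return 0;
}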
1754static int mv_handle_fbs_non_ncq_dev_err(struct ata_port *ap)
1755{
1492 /* 1756 /*
1493 * We got a device interrupt from something that 1757 * Possible future enhancement:
1494 * was supposed to be using EDMA or polling. 1758 *
1759 * FBS+non-NCQ operation is not yet implemented.
1760 * See related notes in mv_edma_cfg().
1761 *
1762 * Device error during FBS+non-NCQ operation:
1763 *
1764 * We need to snapshot the shadow registers for each failed command.
1765 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.3).
1495 */ 1766 */
1767 return 0; /* not handled */
1768}
1769
1770static int mv_handle_dev_err(struct ata_port *ap, u32 edma_err_cause)
1771{
1772 struct mv_port_priv *pp = ap->private_data;
1773
1774 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
1775 return 0; /* EDMA was not active: not handled */
1776 if (!(pp->pp_flags & MV_PP_FLAG_FBS_EN))
1777 return 0; /* FBS was not active: not handled */
1778
1779 if (!(edma_err_cause & EDMA_ERR_DEV))
1780 return 0; /* non DEV error: not handled */
1781 edma_err_cause &= ~EDMA_ERR_IRQ_TRANSIENT;
1782 if (edma_err_cause & ~(EDMA_ERR_DEV | EDMA_ERR_SELF_DIS))
1783 return 0; /* other problems: not handled */
1784
1785 if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) {
1786 /*
1787 * EDMA should NOT have self-disabled for this case.
1788 * If it did, then something is wrong elsewhere,
1789 * and we cannot handle it here.
1790 */
1791 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
1792 ata_port_printk(ap, KERN_WARNING,
1793 "%s: err_cause=0x%x pp_flags=0x%x\n",
1794 __func__, edma_err_cause, pp->pp_flags);
1795 return 0; /* not handled */
1796 }
1797 return mv_handle_fbs_ncq_dev_err(ap);
1798 } else {
1799 /*
1800 * EDMA should have self-disabled for this case.
1801 * If it did not, then something is wrong elsewhere,
1802 * and we cannot handle it here.
1803 */
1804 if (!(edma_err_cause & EDMA_ERR_SELF_DIS)) {
1805 ata_port_printk(ap, KERN_WARNING,
1806 "%s: err_cause=0x%x pp_flags=0x%x\n",
1807 __func__, edma_err_cause, pp->pp_flags);
1808 return 0; /* not handled */
1809 }
1810 return mv_handle_fbs_non_ncq_dev_err(ap);
1811 }
1812 return 0; /* not handled */
1813}
1814
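/*
 * The gating logic of mv_handle_dev_err() as a table: the FBS
 * device-error path is taken only when EDMA and FBS are both on, the
 * error is a device error with nothing else mixed in, and the EDMA
 * self-disable state matches the queue mode (NCQ expects EDMA still
 * running, non-NCQ expects it self-disabled; the non-NCQ branch then
 * declines and falls back to normal EH).  Flags are placeholders.
 */
#include <assert.h>
#include <stdbool.h>

static bool fbs_dev_err_applies(bool edma_en, bool fbs_en, bool ncq_en,
				bool dev_err, bool other_err, bool self_dis)
{
	if (!edma_en || !fbs_en || !dev_err || other_err)
		return false;
	return ncq_en ? !self_dis : self_dis;
}

int main(void)
{
	assert(fbs_dev_err_applies(1, 1, 1, 1, 0, 0));	/* NCQ path */
	assert(fbs_dev_err_applies(1, 1, 0, 1, 0, 1));	/* non-NCQ path */
	assert(!fbs_dev_err_applies(1, 1, 1, 1, 0, 1));	/* unexpected self-dis */
	assert(!fbs_dev_err_applies(1, 1, 1, 1, 1, 0));	/* other errors mixed in */
	return 0;
}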
1815static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled)
1816{
1817 struct ata_eh_info *ehi = &ap->link.eh_info;
1818 char *when = "idle";
1819
1496 ata_ehi_clear_desc(ehi); 1820 ata_ehi_clear_desc(ehi);
1497 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) { 1821 if (!ap || (ap->flags & ATA_FLAG_DISABLED)) {
1498 when = " while EDMA enabled"; 1822 when = "disabled";
1823 } else if (edma_was_enabled) {
1824 when = "EDMA enabled";
1499 } else { 1825 } else {
1500 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag); 1826 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
1501 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING)) 1827 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
1502 when = " while polling"; 1828 when = "polling";
1503 } 1829 }
1504 ata_ehi_push_desc(ehi, "unexpected device interrupt%s", when); 1830 ata_ehi_push_desc(ehi, "unexpected device interrupt while %s", when);
1505 ehi->err_mask |= AC_ERR_OTHER; 1831 ehi->err_mask |= AC_ERR_OTHER;
1506 ehi->action |= ATA_EH_RESET; 1832 ehi->action |= ATA_EH_RESET;
1507 ata_port_freeze(ap); 1833 ata_port_freeze(ap);
@@ -1519,32 +1845,66 @@ static void mv_unexpected_intr(struct ata_port *ap)
1519 * LOCKING: 1845 * LOCKING:
1520 * Inherited from caller. 1846 * Inherited from caller.
1521 */ 1847 */
1522static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc) 1848static void mv_err_intr(struct ata_port *ap)
1523{ 1849{
1524 void __iomem *port_mmio = mv_ap_base(ap); 1850 void __iomem *port_mmio = mv_ap_base(ap);
1525 u32 edma_err_cause, eh_freeze_mask, serr = 0; 1851 u32 edma_err_cause, eh_freeze_mask, serr = 0;
1852 u32 fis_cause = 0;
1526 struct mv_port_priv *pp = ap->private_data; 1853 struct mv_port_priv *pp = ap->private_data;
1527 struct mv_host_priv *hpriv = ap->host->private_data; 1854 struct mv_host_priv *hpriv = ap->host->private_data;
1528 unsigned int action = 0, err_mask = 0; 1855 unsigned int action = 0, err_mask = 0;
1529 struct ata_eh_info *ehi = &ap->link.eh_info; 1856 struct ata_eh_info *ehi = &ap->link.eh_info;
1530 1857 struct ata_queued_cmd *qc;
1531 ata_ehi_clear_desc(ehi); 1858 int abort = 0;
1532 1859
1533 /* 1860 /*
1534 * Read and clear the err_cause bits. This won't actually 1861 * Read and clear the SError and err_cause bits.
1535 * clear for some errors (eg. SError), but we will be doing 1862 * For GenIIe, if EDMA_ERR_TRANS_IRQ_7 is set, we also must read/clear
1536 * a hard reset in those cases regardless, which *will* clear it. 1863 * the FIS_IRQ_CAUSE register before clearing edma_err_cause.
1537 */ 1864 */
1865 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1866 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
1867
1538 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); 1868 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1869 if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
1870 fis_cause = readl(port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
1871 writelfl(~fis_cause, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
1872 }
1539 writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); 1873 writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1540 1874
1541 ata_ehi_push_desc(ehi, "edma_err_cause=%08x", edma_err_cause); 1875 if (edma_err_cause & EDMA_ERR_DEV) {
1876 /*
1877 * Device errors during FIS-based switching operation
1878 * require special handling.
1879 */
1880 if (mv_handle_dev_err(ap, edma_err_cause))
1881 return;
1882 }
1542 1883
1884 qc = mv_get_active_qc(ap);
1885 ata_ehi_clear_desc(ehi);
1886 ata_ehi_push_desc(ehi, "edma_err_cause=%08x pp_flags=%08x",
1887 edma_err_cause, pp->pp_flags);
1888
1889 if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
1890 ata_ehi_push_desc(ehi, "fis_cause=%08x", fis_cause);
1891 if (fis_cause & SATA_FIS_IRQ_AN) {
1892 u32 ec = edma_err_cause &
1893 ~(EDMA_ERR_TRANS_IRQ_7 | EDMA_ERR_IRQ_TRANSIENT);
1894 sata_async_notification(ap);
1895 if (!ec)
1896 return; /* Just an AN; no need for the nukes */
1897 ata_ehi_push_desc(ehi, "SDB notify");
1898 }
1899 }
1543 /* 1900 /*
1544 * All generations share these EDMA error cause bits: 1901 * All generations share these EDMA error cause bits:
1545 */ 1902 */
1546 if (edma_err_cause & EDMA_ERR_DEV) 1903 if (edma_err_cause & EDMA_ERR_DEV) {
1547 err_mask |= AC_ERR_DEV; 1904 err_mask |= AC_ERR_DEV;
1905 action |= ATA_EH_RESET;
1906 ata_ehi_push_desc(ehi, "dev error");
1907 }
1548 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR | 1908 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
1549 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR | 1909 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
1550 EDMA_ERR_INTRL_PAR)) { 1910 EDMA_ERR_INTRL_PAR)) {
@@ -1576,13 +1936,6 @@ static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
1576 ata_ehi_push_desc(ehi, "EDMA self-disable"); 1936 ata_ehi_push_desc(ehi, "EDMA self-disable");
1577 } 1937 }
1578 if (edma_err_cause & EDMA_ERR_SERR) { 1938 if (edma_err_cause & EDMA_ERR_SERR) {
1579 /*
1580 * Ensure that we read our own SCR, not a pmp link SCR:
1581 */
1582 ap->ops->scr_read(ap, SCR_ERROR, &serr);
1583 /*
1584 * Don't clear SError here; leave it for libata-eh:
1585 */
1586 ata_ehi_push_desc(ehi, "SError=%08x", serr); 1939 ata_ehi_push_desc(ehi, "SError=%08x", serr);
1587 err_mask |= AC_ERR_ATA_BUS; 1940 err_mask |= AC_ERR_ATA_BUS;
1588 action |= ATA_EH_RESET; 1941 action |= ATA_EH_RESET;
@@ -1602,10 +1955,29 @@ static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
1602 else 1955 else
1603 ehi->err_mask |= err_mask; 1956 ehi->err_mask |= err_mask;
1604 1957
1605 if (edma_err_cause & eh_freeze_mask) 1958 if (err_mask == AC_ERR_DEV) {
1959 /*
1960 * Cannot do ata_port_freeze() here,
1961 * because it would kill PIO access,
1962 * which is needed for further diagnosis.
1963 */
1964 mv_eh_freeze(ap);
1965 abort = 1;
1966 } else if (edma_err_cause & eh_freeze_mask) {
1967 /*
1968 * Note to self: ata_port_freeze() calls ata_port_abort()
1969 */
1606 ata_port_freeze(ap); 1970 ata_port_freeze(ap);
1607 else 1971 } else {
1608 ata_port_abort(ap); 1972 abort = 1;
1973 }
1974
1975 if (abort) {
1976 if (qc)
1977 ata_link_abort(qc->dev->link);
1978 else
1979 ata_port_abort(ap);
1980 }
1609} 1981}
1610 1982
1611static void mv_process_crpb_response(struct ata_port *ap, 1983static void mv_process_crpb_response(struct ata_port *ap,
@@ -1632,8 +2004,9 @@ static void mv_process_crpb_response(struct ata_port *ap,
1632 } 2004 }
1633 } 2005 }
1634 ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT; 2006 ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT;
1635 qc->err_mask |= ac_err_mask(ata_status); 2007 if (!ac_err_mask(ata_status))
1636 ata_qc_complete(qc); 2008 ata_qc_complete(qc);
2009 /* else: leave it for mv_err_intr() */
1637 } else { 2010 } else {
1638 ata_port_printk(ap, KERN_ERR, "%s: no qc for tag=%d\n", 2011 ata_port_printk(ap, KERN_ERR, "%s: no qc for tag=%d\n",
1639 __func__, tag); 2012 __func__, tag);
@@ -1677,6 +2050,44 @@ static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp
1677 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); 2050 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
1678} 2051}
1679 2052
2053static void mv_port_intr(struct ata_port *ap, u32 port_cause)
2054{
2055 struct mv_port_priv *pp;
2056 int edma_was_enabled;
2057
2058 if (!ap || (ap->flags & ATA_FLAG_DISABLED)) {
2059 mv_unexpected_intr(ap, 0);
2060 return;
2061 }
2062 /*
2063 * Grab a snapshot of the EDMA_EN flag setting,
2064 * so that we have a consistent view for this port,
 2065 * even if one of the routines we call changes it.
2066 */
2067 pp = ap->private_data;
2068 edma_was_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
2069 /*
2070 * Process completed CRPB response(s) before other events.
2071 */
2072 if (edma_was_enabled && (port_cause & DONE_IRQ)) {
2073 mv_process_crpb_entries(ap, pp);
2074 if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
2075 mv_handle_fbs_ncq_dev_err(ap);
2076 }
2077 /*
2078 * Handle chip-reported errors, or continue on to handle PIO.
2079 */
2080 if (unlikely(port_cause & ERR_IRQ)) {
2081 mv_err_intr(ap);
2082 } else if (!edma_was_enabled) {
2083 struct ata_queued_cmd *qc = mv_get_active_qc(ap);
2084 if (qc)
2085 ata_sff_host_intr(ap, qc);
2086 else
2087 mv_unexpected_intr(ap, edma_was_enabled);
2088 }
2089}
2090
1680/** 2091/**
1681 * mv_host_intr - Handle all interrupts on the given host controller 2092 * mv_host_intr - Handle all interrupts on the given host controller
1682 * @host: host specific structure 2093 * @host: host specific structure
@@ -1688,66 +2099,58 @@ static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp
1688static int mv_host_intr(struct ata_host *host, u32 main_irq_cause) 2099static int mv_host_intr(struct ata_host *host, u32 main_irq_cause)
1689{ 2100{
1690 struct mv_host_priv *hpriv = host->private_data; 2101 struct mv_host_priv *hpriv = host->private_data;
1691 void __iomem *mmio = hpriv->base, *hc_mmio = NULL; 2102 void __iomem *mmio = hpriv->base, *hc_mmio;
1692 u32 hc_irq_cause = 0;
1693 unsigned int handled = 0, port; 2103 unsigned int handled = 0, port;
1694 2104
1695 for (port = 0; port < hpriv->n_ports; port++) { 2105 for (port = 0; port < hpriv->n_ports; port++) {
1696 struct ata_port *ap = host->ports[port]; 2106 struct ata_port *ap = host->ports[port];
1697 struct mv_port_priv *pp; 2107 unsigned int p, shift, hardport, port_cause;
1698 unsigned int shift, hardport, port_cause; 2108
1699 /*
1700 * When we move to the second hc, flag our cached
1701 * copies of hc_mmio (and hc_irq_cause) as invalid again.
1702 */
1703 if (port == MV_PORTS_PER_HC)
1704 hc_mmio = NULL;
1705 /*
1706 * Do nothing if port is not interrupting or is disabled:
1707 */
1708 MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport); 2109 MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
1709 port_cause = (main_irq_cause >> shift) & (DONE_IRQ | ERR_IRQ);
1710 if (!port_cause || !ap || (ap->flags & ATA_FLAG_DISABLED))
1711 continue;
1712 /* 2110 /*
1713 * Each hc within the host has its own hc_irq_cause register. 2111 * Each hc within the host has its own hc_irq_cause register,
1714 * We defer reading it until we know we need it, right now: 2112 * where the interrupting ports' bits get ack'd.
1715 *
1716 * FIXME later: we don't really need to read this register
1717 * (some logic changes required below if we go that way),
1718 * because it doesn't tell us anything new. But we do need
1719 * to write to it, outside the top of this loop,
1720 * to reset the interrupt triggers for next time.
1721 */ 2113 */
1722 if (!hc_mmio) { 2114 if (hardport == 0) { /* first port on this hc ? */
2115 u32 hc_cause = (main_irq_cause >> shift) & HC0_IRQ_PEND;
2116 u32 port_mask, ack_irqs;
2117 /*
2118 * Skip this entire hc if nothing pending for any ports
2119 */
2120 if (!hc_cause) {
2121 port += MV_PORTS_PER_HC - 1;
2122 continue;
2123 }
2124 /*
2125 * We don't need/want to read the hc_irq_cause register,
2126 * because doing so hurts performance, and
2127 * main_irq_cause already gives us everything we need.
2128 *
2129 * But we do have to *write* to the hc_irq_cause to ack
2130 * the ports that we are handling this time through.
2131 *
2132 * This requires that we create a bitmap for those
2133 * ports which interrupted us, and use that bitmap
2134 * to ack (only) those ports via hc_irq_cause.
2135 */
2136 ack_irqs = 0;
2137 for (p = 0; p < MV_PORTS_PER_HC; ++p) {
2138 if ((port + p) >= hpriv->n_ports)
2139 break;
2140 port_mask = (DONE_IRQ | ERR_IRQ) << (p * 2);
2141 if (hc_cause & port_mask)
2142 ack_irqs |= (DMA_IRQ | DEV_IRQ) << p;
2143 }
1723 hc_mmio = mv_hc_base_from_port(mmio, port); 2144 hc_mmio = mv_hc_base_from_port(mmio, port);
1724 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS); 2145 writelfl(~ack_irqs, hc_mmio + HC_IRQ_CAUSE_OFS);
1725 writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
1726 handled = 1; 2146 handled = 1;
1727 } 2147 }
1728 /* 2148 /*
1729 * Process completed CRPB response(s) before other events. 2149 * Handle interrupts signalled for this port:
1730 */
1731 pp = ap->private_data;
1732 if (hc_irq_cause & (DMA_IRQ << hardport)) {
1733 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN)
1734 mv_process_crpb_entries(ap, pp);
1735 }
1736 /*
1737 * Handle chip-reported errors, or continue on to handle PIO.
1738 */ 2150 */
1739 if (unlikely(port_cause & ERR_IRQ)) { 2151 port_cause = (main_irq_cause >> shift) & (DONE_IRQ | ERR_IRQ);
1740 mv_err_intr(ap, mv_get_active_qc(ap)); 2152 if (port_cause)
1741 } else if (hc_irq_cause & (DEV_IRQ << hardport)) { 2153 mv_port_intr(ap, port_cause);
1742 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
1743 struct ata_queued_cmd *qc = mv_get_active_qc(ap);
1744 if (qc) {
1745 ata_sff_host_intr(ap, qc);
1746 continue;
1747 }
1748 }
1749 mv_unexpected_intr(ap);
1750 }
1751 } 2154 }
1752 return handled; 2155 return handled;
1753} 2156}
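/*
 * The ack-bitmap translation inside mv_host_intr(), isolated: the main
 * cause register carries two bits per port (DONE, ERR) at 2*port, while
 * hc_irq_cause wants one DMA bit (low byte) and one DEV bit (high byte)
 * per hardport.  Bit positions follow the driver's layout but are
 * restated here as assumptions.
 */
#include <assert.h>
#include <stdint.h>

enum {
	DONE_IRQ = 1 << 0, ERR_IRQ = 1 << 1,	/* per port, shift by 2*port */
	DMA_IRQ  = 1 << 0, DEV_IRQ = 1 << 8,	/* per port, shift by port */
};

static uint32_t hc_ack_bits(uint32_t hc_cause, unsigned int ports_per_hc)
{
	uint32_t ack_irqs = 0;
	unsigned int p;

	for (p = 0; p < ports_per_hc; p++) {
		uint32_t port_mask = (DONE_IRQ | ERR_IRQ) << (p * 2);

		if (hc_cause & port_mask)
			ack_irqs |= (DMA_IRQ | DEV_IRQ) << p;
	}
	return ack_irqs;
}

int main(void)
{
	/* ports 0 and 2 interrupting: ack DMA bits 0,2 and DEV bits 8,10 */
	uint32_t cause = (DONE_IRQ << 0) | (ERR_IRQ << 4);

	assert(hc_ack_bits(cause, 4) == 0x00000505);
	return 0;
}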
@@ -1812,20 +2215,20 @@ static irqreturn_t mv_interrupt(int irq, void *dev_instance)
1812 struct ata_host *host = dev_instance; 2215 struct ata_host *host = dev_instance;
1813 struct mv_host_priv *hpriv = host->private_data; 2216 struct mv_host_priv *hpriv = host->private_data;
1814 unsigned int handled = 0; 2217 unsigned int handled = 0;
1815 u32 main_irq_cause, main_irq_mask; 2218 u32 main_irq_cause, pending_irqs;
1816 2219
1817 spin_lock(&host->lock); 2220 spin_lock(&host->lock);
1818 main_irq_cause = readl(hpriv->main_irq_cause_addr); 2221 main_irq_cause = readl(hpriv->main_irq_cause_addr);
1819 main_irq_mask = readl(hpriv->main_irq_mask_addr); 2222 pending_irqs = main_irq_cause & hpriv->main_irq_mask;
1820 /* 2223 /*
1821 * Deal with cases where we either have nothing pending, or have read 2224 * Deal with cases where we either have nothing pending, or have read
1822 * a bogus register value which can indicate HW removal or PCI fault. 2225 * a bogus register value which can indicate HW removal or PCI fault.
1823 */ 2226 */
1824 if ((main_irq_cause & main_irq_mask) && (main_irq_cause != 0xffffffffU)) { 2227 if (pending_irqs && main_irq_cause != 0xffffffffU) {
1825 if (unlikely((main_irq_cause & PCI_ERR) && HAS_PCI(host))) 2228 if (unlikely((pending_irqs & PCI_ERR) && HAS_PCI(host)))
1826 handled = mv_pci_error(host, hpriv->base); 2229 handled = mv_pci_error(host, hpriv->base);
1827 else 2230 else
1828 handled = mv_host_intr(host, main_irq_cause); 2231 handled = mv_host_intr(host, pending_irqs);
1829 } 2232 }
1830 spin_unlock(&host->lock); 2233 spin_unlock(&host->lock);
1831 return IRQ_RETVAL(handled); 2234 return IRQ_RETVAL(handled);
@@ -1894,7 +2297,7 @@ static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
1894 2297
1895static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio) 2298static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1896{ 2299{
1897 writel(0x0fcfffff, mmio + MV_FLASH_CTL); 2300 writel(0x0fcfffff, mmio + MV_FLASH_CTL_OFS);
1898} 2301}
1899 2302
1900static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx, 2303static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
@@ -1913,7 +2316,7 @@ static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
1913{ 2316{
1914 u32 tmp; 2317 u32 tmp;
1915 2318
1916 writel(0, mmio + MV_GPIO_PORT_CTL); 2319 writel(0, mmio + MV_GPIO_PORT_CTL_OFS);
1917 2320
1918 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */ 2321 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
1919 2322
@@ -1931,14 +2334,14 @@ static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1931 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0); 2334 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
1932 2335
1933 if (fix_apm_sq) { 2336 if (fix_apm_sq) {
1934 tmp = readl(phy_mmio + MV5_LT_MODE); 2337 tmp = readl(phy_mmio + MV5_LTMODE_OFS);
1935 tmp |= (1 << 19); 2338 tmp |= (1 << 19);
1936 writel(tmp, phy_mmio + MV5_LT_MODE); 2339 writel(tmp, phy_mmio + MV5_LTMODE_OFS);
1937 2340
1938 tmp = readl(phy_mmio + MV5_PHY_CTL); 2341 tmp = readl(phy_mmio + MV5_PHY_CTL_OFS);
1939 tmp &= ~0x3; 2342 tmp &= ~0x3;
1940 tmp |= 0x1; 2343 tmp |= 0x1;
1941 writel(tmp, phy_mmio + MV5_PHY_CTL); 2344 writel(tmp, phy_mmio + MV5_PHY_CTL_OFS);
1942 } 2345 }
1943 2346
1944 tmp = readl(phy_mmio + MV5_PHY_MODE); 2347 tmp = readl(phy_mmio + MV5_PHY_MODE);
@@ -1956,11 +2359,6 @@ static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
1956{ 2359{
1957 void __iomem *port_mmio = mv_port_base(mmio, port); 2360 void __iomem *port_mmio = mv_port_base(mmio, port);
1958 2361
1959 /*
1960 * The datasheet warns against setting ATA_RST when EDMA is active
1961 * (but doesn't say what the problem might be). So we first try
1962 * to disable the EDMA engine before doing the ATA_RST operation.
1963 */
1964 mv_reset_channel(hpriv, mmio, port); 2362 mv_reset_channel(hpriv, mmio, port);
1965 2363
1966 ZERO(0x028); /* command */ 2364 ZERO(0x028); /* command */
@@ -1975,7 +2373,7 @@ static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
1975 ZERO(0x024); /* respq outp */ 2373 ZERO(0x024); /* respq outp */
1976 ZERO(0x020); /* respq inp */ 2374 ZERO(0x020); /* respq inp */
1977 ZERO(0x02c); /* test control */ 2375 ZERO(0x02c); /* test control */
1978 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT); 2376 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT_OFS);
1979} 2377}
1980#undef ZERO 2378#undef ZERO
1981 2379
@@ -2021,14 +2419,13 @@ static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
2021 struct mv_host_priv *hpriv = host->private_data; 2419 struct mv_host_priv *hpriv = host->private_data;
2022 u32 tmp; 2420 u32 tmp;
2023 2421
2024 tmp = readl(mmio + MV_PCI_MODE); 2422 tmp = readl(mmio + MV_PCI_MODE_OFS);
2025 tmp &= 0xff00ffff; 2423 tmp &= 0xff00ffff;
2026 writel(tmp, mmio + MV_PCI_MODE); 2424 writel(tmp, mmio + MV_PCI_MODE_OFS);
2027 2425
2028 ZERO(MV_PCI_DISC_TIMER); 2426 ZERO(MV_PCI_DISC_TIMER);
2029 ZERO(MV_PCI_MSI_TRIGGER); 2427 ZERO(MV_PCI_MSI_TRIGGER);
2030 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT); 2428 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT_OFS);
2031 ZERO(PCI_HC_MAIN_IRQ_MASK_OFS);
2032 ZERO(MV_PCI_SERR_MASK); 2429 ZERO(MV_PCI_SERR_MASK);
2033 ZERO(hpriv->irq_cause_ofs); 2430 ZERO(hpriv->irq_cause_ofs);
2034 ZERO(hpriv->irq_mask_ofs); 2431 ZERO(hpriv->irq_mask_ofs);
@@ -2045,10 +2442,10 @@ static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
2045 2442
2046 mv5_reset_flash(hpriv, mmio); 2443 mv5_reset_flash(hpriv, mmio);
2047 2444
2048 tmp = readl(mmio + MV_GPIO_PORT_CTL); 2445 tmp = readl(mmio + MV_GPIO_PORT_CTL_OFS);
2049 tmp &= 0x3; 2446 tmp &= 0x3;
2050 tmp |= (1 << 5) | (1 << 6); 2447 tmp |= (1 << 5) | (1 << 6);
2051 writel(tmp, mmio + MV_GPIO_PORT_CTL); 2448 writel(tmp, mmio + MV_GPIO_PORT_CTL_OFS);
2052} 2449}
2053 2450
2054/** 2451/**
@@ -2121,7 +2518,7 @@ static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
2121 void __iomem *port_mmio; 2518 void __iomem *port_mmio;
2122 u32 tmp; 2519 u32 tmp;
2123 2520
2124 tmp = readl(mmio + MV_RESET_CFG); 2521 tmp = readl(mmio + MV_RESET_CFG_OFS);
2125 if ((tmp & (1 << 0)) == 0) { 2522 if ((tmp & (1 << 0)) == 0) {
2126 hpriv->signal[idx].amps = 0x7 << 8; 2523 hpriv->signal[idx].amps = 0x7 << 8;
2127 hpriv->signal[idx].pre = 0x1 << 5; 2524 hpriv->signal[idx].pre = 0x1 << 5;
@@ -2137,7 +2534,7 @@ static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
2137 2534
2138static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio) 2535static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
2139{ 2536{
2140 writel(0x00000060, mmio + MV_GPIO_PORT_CTL); 2537 writel(0x00000060, mmio + MV_GPIO_PORT_CTL_OFS);
2141} 2538}
2142 2539
2143static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, 2540static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
@@ -2235,11 +2632,6 @@ static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
2235{ 2632{
2236 void __iomem *port_mmio = mv_port_base(mmio, port); 2633 void __iomem *port_mmio = mv_port_base(mmio, port);
2237 2634
2238 /*
2239 * The datasheet warns against setting ATA_RST when EDMA is active
2240 * (but doesn't say what the problem might be). So we first try
2241 * to disable the EDMA engine before doing the ATA_RST operation.
2242 */
2243 mv_reset_channel(hpriv, mmio, port); 2635 mv_reset_channel(hpriv, mmio, port);
2244 2636
2245 ZERO(0x028); /* command */ 2637 ZERO(0x028); /* command */
@@ -2254,7 +2646,7 @@ static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
2254 ZERO(0x024); /* respq outp */ 2646 ZERO(0x024); /* respq outp */
2255 ZERO(0x020); /* respq inp */ 2647 ZERO(0x020); /* respq inp */
2256 ZERO(0x02c); /* test control */ 2648 ZERO(0x02c); /* test control */
2257 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT); 2649 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT_OFS);
2258} 2650}
2259 2651
2260#undef ZERO 2652#undef ZERO
@@ -2297,38 +2689,39 @@ static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
2297 return; 2689 return;
2298} 2690}
2299 2691
2300static void mv_setup_ifctl(void __iomem *port_mmio, int want_gen2i) 2692static void mv_setup_ifcfg(void __iomem *port_mmio, int want_gen2i)
2301{ 2693{
2302 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CFG); 2694 u32 ifcfg = readl(port_mmio + SATA_INTERFACE_CFG_OFS);
2303 2695
2304 ifctl = (ifctl & 0xf7f) | 0x9b1000; /* from chip spec */ 2696 ifcfg = (ifcfg & 0xf7f) | 0x9b1000; /* from chip spec */
2305 if (want_gen2i) 2697 if (want_gen2i)
2306 ifctl |= (1 << 7); /* enable gen2i speed */ 2698 ifcfg |= (1 << 7); /* enable gen2i speed */
2307 writelfl(ifctl, port_mmio + SATA_INTERFACE_CFG); 2699 writelfl(ifcfg, port_mmio + SATA_INTERFACE_CFG_OFS);
2308} 2700}
2309 2701
2310/*
2311 * Caller must ensure that EDMA is not active,
2312 * by first doing mv_stop_edma() where needed.
2313 */
2314static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio, 2702static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
2315 unsigned int port_no) 2703 unsigned int port_no)
2316{ 2704{
2317 void __iomem *port_mmio = mv_port_base(mmio, port_no); 2705 void __iomem *port_mmio = mv_port_base(mmio, port_no);
2318 2706
2707 /*
2708 * The datasheet warns against setting EDMA_RESET when EDMA is active
2709 * (but doesn't say what the problem might be). So we first try
2710 * to disable the EDMA engine before doing the EDMA_RESET operation.
2711 */
2319 mv_stop_edma_engine(port_mmio); 2712 mv_stop_edma_engine(port_mmio);
2320 writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS); 2713 writelfl(EDMA_RESET, port_mmio + EDMA_CMD_OFS);
2321 2714
2322 if (!IS_GEN_I(hpriv)) { 2715 if (!IS_GEN_I(hpriv)) {
2323 /* Enable 3.0gb/s link speed */ 2716 /* Enable 3.0gb/s link speed: this survives EDMA_RESET */
2324 mv_setup_ifctl(port_mmio, 1); 2717 mv_setup_ifcfg(port_mmio, 1);
2325 } 2718 }
2326 /* 2719 /*
2327 * Strobing ATA_RST here causes a hard reset of the SATA transport, 2720 * Strobing EDMA_RESET here causes a hard reset of the SATA transport,
2328 * link, and physical layers. It resets all SATA interface registers 2721 * link, and physical layers. It resets all SATA interface registers
2329 * (except for SATA_INTERFACE_CFG), and issues a COMRESET to the dev. 2722 * (except for SATA_INTERFACE_CFG), and issues a COMRESET to the dev.
2330 */ 2723 */
2331 writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS); 2724 writelfl(EDMA_RESET, port_mmio + EDMA_CMD_OFS);
2332 udelay(25); /* allow reset propagation */ 2725 udelay(25); /* allow reset propagation */
2333 writelfl(0, port_mmio + EDMA_CMD_OFS); 2726 writelfl(0, port_mmio + EDMA_CMD_OFS);
2334 2727
@@ -2387,12 +2780,13 @@ static int mv_hardreset(struct ata_link *link, unsigned int *class,
2387 2780
2388 rc = sata_link_hardreset(link, timing, deadline + extra, 2781 rc = sata_link_hardreset(link, timing, deadline + extra,
2389 &online, NULL); 2782 &online, NULL);
2783 rc = online ? -EAGAIN : rc;
2390 if (rc) 2784 if (rc)
2391 return rc; 2785 return rc;
2392 sata_scr_read(link, SCR_STATUS, &sstatus); 2786 sata_scr_read(link, SCR_STATUS, &sstatus);
2393 if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) { 2787 if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
2394 /* Force 1.5gb/s link speed and try again */ 2788 /* Force 1.5gb/s link speed and try again */
2395 mv_setup_ifctl(mv_ap_base(ap), 0); 2789 mv_setup_ifcfg(mv_ap_base(ap), 0);
2396 if (time_after(jiffies + HZ, deadline)) 2790 if (time_after(jiffies + HZ, deadline))
2397 extra = HZ; /* only extend it once, max */ 2791 extra = HZ; /* only extend it once, max */
2398 } 2792 }
@@ -2403,32 +2797,18 @@ static int mv_hardreset(struct ata_link *link, unsigned int *class,
2403 2797
2404static void mv_eh_freeze(struct ata_port *ap) 2798static void mv_eh_freeze(struct ata_port *ap)
2405{ 2799{
2406 struct mv_host_priv *hpriv = ap->host->private_data;
2407 unsigned int shift, hardport, port = ap->port_no;
2408 u32 main_irq_mask;
2409
2410 /* FIXME: handle coalescing completion events properly */
2411
2412 mv_stop_edma(ap); 2800 mv_stop_edma(ap);
2413 MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport); 2801 mv_enable_port_irqs(ap, 0);
2414
2415 /* disable assertion of portN err, done events */
2416 main_irq_mask = readl(hpriv->main_irq_mask_addr);
2417 main_irq_mask &= ~((DONE_IRQ | ERR_IRQ) << shift);
2418 writelfl(main_irq_mask, hpriv->main_irq_mask_addr);
2419} 2802}
2420 2803
2421static void mv_eh_thaw(struct ata_port *ap) 2804static void mv_eh_thaw(struct ata_port *ap)
2422{ 2805{
2423 struct mv_host_priv *hpriv = ap->host->private_data; 2806 struct mv_host_priv *hpriv = ap->host->private_data;
2424 unsigned int shift, hardport, port = ap->port_no; 2807 unsigned int port = ap->port_no;
2808 unsigned int hardport = mv_hardport_from_port(port);
2425 void __iomem *hc_mmio = mv_hc_base_from_port(hpriv->base, port); 2809 void __iomem *hc_mmio = mv_hc_base_from_port(hpriv->base, port);
2426 void __iomem *port_mmio = mv_ap_base(ap); 2810 void __iomem *port_mmio = mv_ap_base(ap);
2427 u32 main_irq_mask, hc_irq_cause; 2811 u32 hc_irq_cause;
2428
2429 /* FIXME: handle coalescing completion events properly */
2430
2431 MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
2432 2812
2433 /* clear EDMA errors on this port */ 2813 /* clear EDMA errors on this port */
2434 writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); 2814 writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
@@ -2438,10 +2818,7 @@ static void mv_eh_thaw(struct ata_port *ap)
2438 hc_irq_cause &= ~((DEV_IRQ | DMA_IRQ) << hardport); 2818 hc_irq_cause &= ~((DEV_IRQ | DMA_IRQ) << hardport);
2439 writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS); 2819 writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
2440 2820
2441 /* enable assertion of portN err, done events */ 2821 mv_enable_port_irqs(ap, ERR_IRQ);
2442 main_irq_mask = readl(hpriv->main_irq_mask_addr);
2443 main_irq_mask |= ((DONE_IRQ | ERR_IRQ) << shift);
2444 writelfl(main_irq_mask, hpriv->main_irq_mask_addr);
2445} 2822}
2446 2823
2447/** 2824/**
@@ -2493,6 +2870,34 @@ static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
2493 readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS)); 2870 readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
2494} 2871}
2495 2872
2873static unsigned int mv_in_pcix_mode(struct ata_host *host)
2874{
2875 struct mv_host_priv *hpriv = host->private_data;
2876 void __iomem *mmio = hpriv->base;
2877 u32 reg;
2878
2879 if (!HAS_PCI(host) || !IS_PCIE(hpriv))
2880 return 0; /* not PCI-X capable */
2881 reg = readl(mmio + MV_PCI_MODE_OFS);
2882 if ((reg & MV_PCI_MODE_MASK) == 0)
2883 return 0; /* conventional PCI mode */
2884 return 1; /* chip is in PCI-X mode */
2885}
2886
2887static int mv_pci_cut_through_okay(struct ata_host *host)
2888{
2889 struct mv_host_priv *hpriv = host->private_data;
2890 void __iomem *mmio = hpriv->base;
2891 u32 reg;
2892
2893 if (!mv_in_pcix_mode(host)) {
2894 reg = readl(mmio + PCI_COMMAND_OFS);
2895 if (reg & PCI_COMMAND_MRDTRIG)
2896 return 0; /* not okay */
2897 }
2898 return 1; /* okay */
2899}
2900
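/*
 * Decision sketch for the MV_HP_CUT_THROUGH probing above: cut-through
 * is refused only on conventional PCI with the Memory-Read-Trigger bit
 * set; PCI-X mode passes, per the two helpers.  Parameter names are
 * stand-ins for the driver's HAS_PCI/IS_PCIE/MV_PCI_MODE checks.
 */
#include <assert.h>
#include <stdbool.h>

static bool cut_through_okay(bool has_pci, bool pcix_capable,
			     bool pci_mode_bits, bool mrd_trigger)
{
	bool in_pcix = has_pci && pcix_capable && pci_mode_bits;

	if (!in_pcix && mrd_trigger)
		return false;	/* conventional PCI with MRDTRIG: unsafe */
	return true;
}

int main(void)
{
	assert(cut_through_okay(1, 1, 1, 1));	/* PCI-X mode */
	assert(!cut_through_okay(1, 0, 0, 1));	/* plain PCI + MRDTRIG */
	assert(cut_through_okay(1, 0, 0, 0));	/* plain PCI, trigger clear */
	return 0;
}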
2496static int mv_chip_id(struct ata_host *host, unsigned int board_idx) 2901static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
2497{ 2902{
2498 struct pci_dev *pdev = to_pci_dev(host->dev); 2903 struct pci_dev *pdev = to_pci_dev(host->dev);
@@ -2560,7 +2965,7 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
2560 break; 2965 break;
2561 2966
2562 case chip_7042: 2967 case chip_7042:
2563 hp_flags |= MV_HP_PCIE; 2968 hp_flags |= MV_HP_PCIE | MV_HP_CUT_THROUGH;
2564 if (pdev->vendor == PCI_VENDOR_ID_TTI && 2969 if (pdev->vendor == PCI_VENDOR_ID_TTI &&
2565 (pdev->device == 0x2300 || pdev->device == 0x2310)) 2970 (pdev->device == 0x2300 || pdev->device == 0x2310))
2566 { 2971 {
@@ -2590,9 +2995,12 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
2590 " and avoid the final two gigabytes on" 2995 " and avoid the final two gigabytes on"
2591 " all RocketRAID BIOS initialized drives.\n"); 2996 " all RocketRAID BIOS initialized drives.\n");
2592 } 2997 }
2998 /* drop through */
2593 case chip_6042: 2999 case chip_6042:
2594 hpriv->ops = &mv6xxx_ops; 3000 hpriv->ops = &mv6xxx_ops;
2595 hp_flags |= MV_HP_GEN_IIE; 3001 hp_flags |= MV_HP_GEN_IIE;
3002 if (board_idx == chip_6042 && mv_pci_cut_through_okay(host))
3003 hp_flags |= MV_HP_CUT_THROUGH;
2596 3004
2597 switch (pdev->revision) { 3005 switch (pdev->revision) {
2598 case 0x0: 3006 case 0x0:
@@ -2663,7 +3071,7 @@ static int mv_init_host(struct ata_host *host, unsigned int board_idx)
2663 } 3071 }
2664 3072
2665 /* global interrupt mask: 0 == mask everything */ 3073 /* global interrupt mask: 0 == mask everything */
2666 writel(0, hpriv->main_irq_mask_addr); 3074 mv_set_main_irq_mask(host, ~0, 0);
2667 3075
2668 n_hc = mv_get_hc_count(host->ports[0]->flags); 3076 n_hc = mv_get_hc_count(host->ports[0]->flags);
2669 3077
@@ -2711,25 +3119,12 @@ static int mv_init_host(struct ata_host *host, unsigned int board_idx)
2711 3119
2712 /* and unmask interrupt generation for host regs */ 3120 /* and unmask interrupt generation for host regs */
2713 writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs); 3121 writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
2714 if (IS_GEN_I(hpriv)) 3122
2715 writelfl(~HC_MAIN_MASKED_IRQS_5, 3123 /*
2716 hpriv->main_irq_mask_addr); 3124 * enable only global host interrupts for now.
2717 else 3125 * The per-port interrupts get done later as ports are set up.
2718 writelfl(~HC_MAIN_MASKED_IRQS, 3126 */
2719 hpriv->main_irq_mask_addr); 3127 mv_set_main_irq_mask(host, 0, PCI_ERR);
2720
2721 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
2722 "PCI int cause/mask=0x%08x/0x%08x\n",
2723 readl(hpriv->main_irq_cause_addr),
2724 readl(hpriv->main_irq_mask_addr),
2725 readl(mmio + hpriv->irq_cause_ofs),
2726 readl(mmio + hpriv->irq_mask_ofs));
2727 } else {
2728 writelfl(~HC_MAIN_MASKED_IRQS_SOC,
2729 hpriv->main_irq_mask_addr);
2730 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x\n",
2731 readl(hpriv->main_irq_cause_addr),
2732 readl(hpriv->main_irq_mask_addr));
2733 } 3128 }
2734done: 3129done:
2735 return rc; 3130 return rc;
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
index 5a10dc5048ad..030665ba76b7 100644
--- a/drivers/ata/sata_promise.c
+++ b/drivers/ata/sata_promise.c
@@ -53,7 +53,15 @@ enum {
53 PDC_MMIO_BAR = 3, 53 PDC_MMIO_BAR = 3,
54 PDC_MAX_PRD = LIBATA_MAX_PRD - 1, /* -1 for ASIC PRD bug workaround */ 54 PDC_MAX_PRD = LIBATA_MAX_PRD - 1, /* -1 for ASIC PRD bug workaround */
55 55
56 /* register offsets */ 56 /* host register offsets (from host->iomap[PDC_MMIO_BAR]) */
57 PDC_INT_SEQMASK = 0x40, /* Mask of asserted SEQ INTs */
58 PDC_FLASH_CTL = 0x44, /* Flash control register */
59 PDC_SATA_PLUG_CSR = 0x6C, /* SATA Plug control/status reg */
60 PDC2_SATA_PLUG_CSR = 0x60, /* SATAII Plug control/status reg */
61 PDC_TBG_MODE = 0x41C, /* TBG mode (not SATAII) */
62 PDC_SLEW_CTL = 0x470, /* slew rate control reg (not SATAII) */
63
64 /* per-port ATA register offsets (from ap->ioaddr.cmd_addr) */
57 PDC_FEATURE = 0x04, /* Feature/Error reg (per port) */ 65 PDC_FEATURE = 0x04, /* Feature/Error reg (per port) */
58 PDC_SECTOR_COUNT = 0x08, /* Sector count reg (per port) */ 66 PDC_SECTOR_COUNT = 0x08, /* Sector count reg (per port) */
59 PDC_SECTOR_NUMBER = 0x0C, /* Sector number reg (per port) */ 67 PDC_SECTOR_NUMBER = 0x0C, /* Sector number reg (per port) */
@@ -63,14 +71,11 @@ enum {
63 PDC_COMMAND = 0x1C, /* Command/status reg (per port) */ 71 PDC_COMMAND = 0x1C, /* Command/status reg (per port) */
64 PDC_ALTSTATUS = 0x38, /* Alternate-status/device-control reg (per port) */ 72 PDC_ALTSTATUS = 0x38, /* Alternate-status/device-control reg (per port) */
65 PDC_PKT_SUBMIT = 0x40, /* Command packet pointer addr */ 73 PDC_PKT_SUBMIT = 0x40, /* Command packet pointer addr */
66 PDC_INT_SEQMASK = 0x40, /* Mask of asserted SEQ INTs */
67 PDC_FLASH_CTL = 0x44, /* Flash control register */
68 PDC_GLOBAL_CTL = 0x48, /* Global control/status (per port) */ 74 PDC_GLOBAL_CTL = 0x48, /* Global control/status (per port) */
69 PDC_CTLSTAT = 0x60, /* IDE control and status (per port) */ 75 PDC_CTLSTAT = 0x60, /* IDE control and status (per port) */
70 PDC_SATA_PLUG_CSR = 0x6C, /* SATA Plug control/status reg */ 76
71 PDC2_SATA_PLUG_CSR = 0x60, /* SATAII Plug control/status reg */ 77 /* per-port SATA register offsets (from ap->ioaddr.scr_addr) */
72 PDC_TBG_MODE = 0x41C, /* TBG mode (not SATAII) */ 78 PDC_PHYMODE4 = 0x14,
73 PDC_SLEW_CTL = 0x470, /* slew rate control reg (not SATAII) */
74 79
75 /* PDC_GLOBAL_CTL bit definitions */ 80 /* PDC_GLOBAL_CTL bit definitions */
76 PDC_PH_ERR = (1 << 8), /* PCI error while loading packet */ 81 PDC_PH_ERR = (1 << 8), /* PCI error while loading packet */
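/*
 * Why the offset table above is split by base: PDC_INT_SEQMASK (0x40
 * from the host BAR) and PDC_PKT_SUBMIT (0x40 from a port's cmd_addr)
 * share a numeric offset but live in different windows.  A sketch of
 * the two address computations, with made-up base addresses.
 */
#include <stdint.h>
#include <stdio.h>

#define PDC_INT_SEQMASK	0x40	/* host-global, from iomap[PDC_MMIO_BAR] */
#define PDC_PKT_SUBMIT	0x40	/* per-port, from ap->ioaddr.cmd_addr */

int main(void)
{
	uintptr_t host_mmio = 0xd0000000;	/* hypothetical BAR mapping */
	uintptr_t port_cmd  = 0xd0000200;	/* hypothetical port window */

	printf("seqmask reg: %#lx\n",
	       (unsigned long)(host_mmio + PDC_INT_SEQMASK));
	printf("pkt submit:  %#lx\n",
	       (unsigned long)(port_cmd + PDC_PKT_SUBMIT));
	return 0;
}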
@@ -134,7 +139,7 @@ struct pdc_port_priv {
134 139
135static int pdc_sata_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val); 140static int pdc_sata_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val);
136static int pdc_sata_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val); 141static int pdc_sata_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val);
137static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); 142static int pdc_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
138static int pdc_common_port_start(struct ata_port *ap); 143static int pdc_common_port_start(struct ata_port *ap);
139static int pdc_sata_port_start(struct ata_port *ap); 144static int pdc_sata_port_start(struct ata_port *ap);
140static void pdc_qc_prep(struct ata_queued_cmd *qc); 145static void pdc_qc_prep(struct ata_queued_cmd *qc);
@@ -332,12 +337,12 @@ static int pdc_sata_port_start(struct ata_port *ap)
332 337
333 /* fix up PHYMODE4 align timing */ 338 /* fix up PHYMODE4 align timing */
334 if (ap->flags & PDC_FLAG_GEN_II) { 339 if (ap->flags & PDC_FLAG_GEN_II) {
335 void __iomem *mmio = ap->ioaddr.scr_addr; 340 void __iomem *sata_mmio = ap->ioaddr.scr_addr;
336 unsigned int tmp; 341 unsigned int tmp;
337 342
338 tmp = readl(mmio + 0x014); 343 tmp = readl(sata_mmio + PDC_PHYMODE4);
339 tmp = (tmp & ~3) | 1; /* set bits 1:0 = 0:1 */ 344 tmp = (tmp & ~3) | 1; /* set bits 1:0 = 0:1 */
340 writel(tmp, mmio + 0x014); 345 writel(tmp, sata_mmio + PDC_PHYMODE4);
341 } 346 }
342 347
343 return 0; 348 return 0;
@@ -345,32 +350,32 @@ static int pdc_sata_port_start(struct ata_port *ap)
345 350
346static void pdc_reset_port(struct ata_port *ap) 351static void pdc_reset_port(struct ata_port *ap)
347{ 352{
348 void __iomem *mmio = ap->ioaddr.cmd_addr + PDC_CTLSTAT; 353 void __iomem *ata_ctlstat_mmio = ap->ioaddr.cmd_addr + PDC_CTLSTAT;
349 unsigned int i; 354 unsigned int i;
350 u32 tmp; 355 u32 tmp;
351 356
352 for (i = 11; i > 0; i--) { 357 for (i = 11; i > 0; i--) {
353 tmp = readl(mmio); 358 tmp = readl(ata_ctlstat_mmio);
354 if (tmp & PDC_RESET) 359 if (tmp & PDC_RESET)
355 break; 360 break;
356 361
357 udelay(100); 362 udelay(100);
358 363
359 tmp |= PDC_RESET; 364 tmp |= PDC_RESET;
360 writel(tmp, mmio); 365 writel(tmp, ata_ctlstat_mmio);
361 } 366 }
362 367
363 tmp &= ~PDC_RESET; 368 tmp &= ~PDC_RESET;
364 writel(tmp, mmio); 369 writel(tmp, ata_ctlstat_mmio);
365 readl(mmio); /* flush */ 370 readl(ata_ctlstat_mmio); /* flush */
366} 371}
367 372
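/*
 * The shape of pdc_reset_port()'s loop, restated: strobe the RESET bit
 * until it reads back set (at most 11 attempts, 100 us apart), then
 * clear it and flush with a read.  User-space sketch; the callbacks
 * stand in for readl/writel on PDC_CTLSTAT, and the PDC_RESET bit
 * position is an assumption.
 */
#include <stdint.h>

#define PDC_RESET	(1 << 11)	/* assumed bit position */

static void reset_port(uint32_t (*reg_read)(void),
		       void (*reg_write)(uint32_t),
		       void (*delay_us)(unsigned int))
{
	uint32_t tmp = 0;
	int i;

	for (i = 11; i > 0; i--) {
		tmp = reg_read();
		if (tmp & PDC_RESET)	/* reset latched: stop strobing */
			break;
		delay_us(100);
		tmp |= PDC_RESET;
		reg_write(tmp);
	}
	tmp &= ~PDC_RESET;		/* release reset */
	reg_write(tmp);
	reg_read();			/* read back to flush the write */
}

static uint32_t fake_reg;
static uint32_t rd(void) { return fake_reg; }
static void wr(uint32_t v) { fake_reg = v; }
static void dly(unsigned int us) { (void)us; }

int main(void)
{
	reset_port(rd, wr, dly);
	return (int)(fake_reg & PDC_RESET);	/* 0: reset released */
}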
368static int pdc_pata_cable_detect(struct ata_port *ap) 373static int pdc_pata_cable_detect(struct ata_port *ap)
369{ 374{
370 u8 tmp; 375 u8 tmp;
371 void __iomem *mmio = ap->ioaddr.cmd_addr + PDC_CTLSTAT + 0x03; 376 void __iomem *ata_mmio = ap->ioaddr.cmd_addr;
372 377
373 tmp = readb(mmio); 378 tmp = readb(ata_mmio + PDC_CTLSTAT + 3);
374 if (tmp & 0x01) 379 if (tmp & 0x01)
375 return ATA_CBL_PATA40; 380 return ATA_CBL_PATA40;
376 return ATA_CBL_PATA80; 381 return ATA_CBL_PATA80;
@@ -557,31 +562,25 @@ static void pdc_qc_prep(struct ata_queued_cmd *qc)
557 switch (qc->tf.protocol) { 562 switch (qc->tf.protocol) {
558 case ATA_PROT_DMA: 563 case ATA_PROT_DMA:
559 pdc_fill_sg(qc); 564 pdc_fill_sg(qc);
560 /* fall through */ 565 /*FALLTHROUGH*/
561
562 case ATA_PROT_NODATA: 566 case ATA_PROT_NODATA:
563 i = pdc_pkt_header(&qc->tf, qc->ap->prd_dma, 567 i = pdc_pkt_header(&qc->tf, qc->ap->prd_dma,
564 qc->dev->devno, pp->pkt); 568 qc->dev->devno, pp->pkt);
565
566 if (qc->tf.flags & ATA_TFLAG_LBA48) 569 if (qc->tf.flags & ATA_TFLAG_LBA48)
567 i = pdc_prep_lba48(&qc->tf, pp->pkt, i); 570 i = pdc_prep_lba48(&qc->tf, pp->pkt, i);
568 else 571 else
569 i = pdc_prep_lba28(&qc->tf, pp->pkt, i); 572 i = pdc_prep_lba28(&qc->tf, pp->pkt, i);
570
571 pdc_pkt_footer(&qc->tf, pp->pkt, i); 573 pdc_pkt_footer(&qc->tf, pp->pkt, i);
572 break; 574 break;
573
574 case ATAPI_PROT_PIO: 575 case ATAPI_PROT_PIO:
575 pdc_fill_sg(qc); 576 pdc_fill_sg(qc);
576 break; 577 break;
577
578 case ATAPI_PROT_DMA: 578 case ATAPI_PROT_DMA:
579 pdc_fill_sg(qc); 579 pdc_fill_sg(qc);
580 /*FALLTHROUGH*/ 580 /*FALLTHROUGH*/
581 case ATAPI_PROT_NODATA: 581 case ATAPI_PROT_NODATA:
582 pdc_atapi_pkt(qc); 582 pdc_atapi_pkt(qc);
583 break; 583 break;
584
585 default: 584 default:
586 break; 585 break;
587 } 586 }
@@ -611,7 +610,7 @@ static unsigned int pdc_sata_ata_port_to_ata_no(const struct ata_port *ap)
611 unsigned int nr_ports = pdc_sata_nr_ports(ap); 610 unsigned int nr_ports = pdc_sata_nr_ports(ap);
612 unsigned int i; 611 unsigned int i;
613 612
614 for(i = 0; i < nr_ports && host->ports[i] != ap; ++i) 613 for (i = 0; i < nr_ports && host->ports[i] != ap; ++i)
615 ; 614 ;
616 BUG_ON(i >= nr_ports); 615 BUG_ON(i >= nr_ports);
617 return pdc_port_no_to_ata_no(i, pdc_is_sataii_tx4(ap->flags)); 616 return pdc_port_no_to_ata_no(i, pdc_is_sataii_tx4(ap->flags));
@@ -624,14 +623,14 @@ static unsigned int pdc_sata_hotplug_offset(const struct ata_port *ap)
624 623
625static void pdc_freeze(struct ata_port *ap) 624static void pdc_freeze(struct ata_port *ap)
626{ 625{
627 void __iomem *mmio = ap->ioaddr.cmd_addr; 626 void __iomem *ata_mmio = ap->ioaddr.cmd_addr;
628 u32 tmp; 627 u32 tmp;
629 628
630 tmp = readl(mmio + PDC_CTLSTAT); 629 tmp = readl(ata_mmio + PDC_CTLSTAT);
631 tmp |= PDC_IRQ_DISABLE; 630 tmp |= PDC_IRQ_DISABLE;
632 tmp &= ~PDC_DMA_ENABLE; 631 tmp &= ~PDC_DMA_ENABLE;
633 writel(tmp, mmio + PDC_CTLSTAT); 632 writel(tmp, ata_mmio + PDC_CTLSTAT);
634 readl(mmio + PDC_CTLSTAT); /* flush */ 633 readl(ata_mmio + PDC_CTLSTAT); /* flush */
635} 634}
636 635
637static void pdc_sata_freeze(struct ata_port *ap) 636static void pdc_sata_freeze(struct ata_port *ap)
@@ -659,17 +658,17 @@ static void pdc_sata_freeze(struct ata_port *ap)
659 658
660static void pdc_thaw(struct ata_port *ap) 659static void pdc_thaw(struct ata_port *ap)
661{ 660{
662 void __iomem *mmio = ap->ioaddr.cmd_addr; 661 void __iomem *ata_mmio = ap->ioaddr.cmd_addr;
663 u32 tmp; 662 u32 tmp;
664 663
665 /* clear IRQ */ 664 /* clear IRQ */
666 readl(mmio + PDC_INT_SEQMASK); 665 readl(ata_mmio + PDC_COMMAND);
667 666
668 /* turn IRQ back on */ 667 /* turn IRQ back on */
669 tmp = readl(mmio + PDC_CTLSTAT); 668 tmp = readl(ata_mmio + PDC_CTLSTAT);
670 tmp &= ~PDC_IRQ_DISABLE; 669 tmp &= ~PDC_IRQ_DISABLE;
671 writel(tmp, mmio + PDC_CTLSTAT); 670 writel(tmp, ata_mmio + PDC_CTLSTAT);
672 readl(mmio + PDC_CTLSTAT); /* flush */ 671 readl(ata_mmio + PDC_CTLSTAT); /* flush */
673} 672}
674 673
675static void pdc_sata_thaw(struct ata_port *ap) 674static void pdc_sata_thaw(struct ata_port *ap)
@@ -743,11 +742,11 @@ static void pdc_error_intr(struct ata_port *ap, struct ata_queued_cmd *qc,
743 ata_port_abort(ap); 742 ata_port_abort(ap);
744} 743}
745 744
746static inline unsigned int pdc_host_intr(struct ata_port *ap, 745static unsigned int pdc_host_intr(struct ata_port *ap,
747 struct ata_queued_cmd *qc) 746 struct ata_queued_cmd *qc)
748{ 747{
749 unsigned int handled = 0; 748 unsigned int handled = 0;
750 void __iomem *port_mmio = ap->ioaddr.cmd_addr; 749 void __iomem *ata_mmio = ap->ioaddr.cmd_addr;
751 u32 port_status, err_mask; 750 u32 port_status, err_mask;
752 751
753 err_mask = PDC_ERR_MASK; 752 err_mask = PDC_ERR_MASK;
@@ -755,7 +754,7 @@ static inline unsigned int pdc_host_intr(struct ata_port *ap,
755 err_mask &= ~PDC1_ERR_MASK; 754 err_mask &= ~PDC1_ERR_MASK;
756 else 755 else
757 err_mask &= ~PDC2_ERR_MASK; 756 err_mask &= ~PDC2_ERR_MASK;
758 port_status = readl(port_mmio + PDC_GLOBAL_CTL); 757 port_status = readl(ata_mmio + PDC_GLOBAL_CTL);
759 if (unlikely(port_status & err_mask)) { 758 if (unlikely(port_status & err_mask)) {
760 pdc_error_intr(ap, qc, port_status, err_mask); 759 pdc_error_intr(ap, qc, port_status, err_mask);
761 return 1; 760 return 1;
@@ -770,7 +769,6 @@ static inline unsigned int pdc_host_intr(struct ata_port *ap,
770 ata_qc_complete(qc); 769 ata_qc_complete(qc);
771 handled = 1; 770 handled = 1;
772 break; 771 break;
773
774 default: 772 default:
775 ap->stats.idle_irq++; 773 ap->stats.idle_irq++;
776 break; 774 break;
@@ -781,10 +779,9 @@ static inline unsigned int pdc_host_intr(struct ata_port *ap,
781 779
782static void pdc_irq_clear(struct ata_port *ap) 780static void pdc_irq_clear(struct ata_port *ap)
783{ 781{
784 struct ata_host *host = ap->host; 782 void __iomem *ata_mmio = ap->ioaddr.cmd_addr;
785 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
786 783
787 readl(mmio + PDC_INT_SEQMASK); 784 readl(ata_mmio + PDC_COMMAND);
788} 785}
789 786
790static irqreturn_t pdc_interrupt(int irq, void *dev_instance) 787static irqreturn_t pdc_interrupt(int irq, void *dev_instance)
@@ -794,7 +791,7 @@ static irqreturn_t pdc_interrupt(int irq, void *dev_instance)
794 u32 mask = 0; 791 u32 mask = 0;
795 unsigned int i, tmp; 792 unsigned int i, tmp;
796 unsigned int handled = 0; 793 unsigned int handled = 0;
797 void __iomem *mmio_base; 794 void __iomem *host_mmio;
798 unsigned int hotplug_offset, ata_no; 795 unsigned int hotplug_offset, ata_no;
799 u32 hotplug_status; 796 u32 hotplug_status;
800 int is_sataii_tx4; 797 int is_sataii_tx4;
@@ -806,7 +803,7 @@ static irqreturn_t pdc_interrupt(int irq, void *dev_instance)
806 return IRQ_NONE; 803 return IRQ_NONE;
807 } 804 }
808 805
809 mmio_base = host->iomap[PDC_MMIO_BAR]; 806 host_mmio = host->iomap[PDC_MMIO_BAR];
810 807
811 spin_lock(&host->lock); 808 spin_lock(&host->lock);
812 809
@@ -815,26 +812,26 @@ static irqreturn_t pdc_interrupt(int irq, void *dev_instance)
815 hotplug_offset = PDC2_SATA_PLUG_CSR; 812 hotplug_offset = PDC2_SATA_PLUG_CSR;
816 else 813 else
817 hotplug_offset = PDC_SATA_PLUG_CSR; 814 hotplug_offset = PDC_SATA_PLUG_CSR;
818 hotplug_status = readl(mmio_base + hotplug_offset); 815 hotplug_status = readl(host_mmio + hotplug_offset);
819 if (hotplug_status & 0xff) 816 if (hotplug_status & 0xff)
820 writel(hotplug_status | 0xff, mmio_base + hotplug_offset); 817 writel(hotplug_status | 0xff, host_mmio + hotplug_offset);
821 hotplug_status &= 0xff; /* clear uninteresting bits */ 818 hotplug_status &= 0xff; /* clear uninteresting bits */
822 819
823 /* reading should also clear interrupts */ 820 /* reading should also clear interrupts */
824 mask = readl(mmio_base + PDC_INT_SEQMASK); 821 mask = readl(host_mmio + PDC_INT_SEQMASK);
825 822
826 if (mask == 0xffffffff && hotplug_status == 0) { 823 if (mask == 0xffffffff && hotplug_status == 0) {
827 VPRINTK("QUICK EXIT 2\n"); 824 VPRINTK("QUICK EXIT 2\n");
828 goto done_irq; 825 goto done_irq;
829 } 826 }
830 827
831 mask &= 0xffff; /* only 16 tags possible */ 828 mask &= 0xffff; /* only 16 SEQIDs possible */
832 if (mask == 0 && hotplug_status == 0) { 829 if (mask == 0 && hotplug_status == 0) {
833 VPRINTK("QUICK EXIT 3\n"); 830 VPRINTK("QUICK EXIT 3\n");
834 goto done_irq; 831 goto done_irq;
835 } 832 }
836 833
837 writel(mask, mmio_base + PDC_INT_SEQMASK); 834 writel(mask, host_mmio + PDC_INT_SEQMASK);
838 835
839 is_sataii_tx4 = pdc_is_sataii_tx4(host->ports[0]->flags); 836 is_sataii_tx4 = pdc_is_sataii_tx4(host->ports[0]->flags);
840 837
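
In pdc_interrupt() the mask handling is unchanged apart from the rename and the corrected "SEQIDs" comment: the low 16 bits of PDC_INT_SEQMASK carry one pending bit per SEQID, and each port's SEQID is port_no + 1 (see pdc_packet_start() below). A sketch of the per-port check this mask feeds, assuming the dispatch loop that follows this hunk:

    u8 seq = (u8)(ap->port_no + 1);    /* SEQID assigned to this port */
    if (mask & (1 << seq)) {
            /* SEQ interrupt pending for this port: hand the
             * active qc to pdc_host_intr() */
    }
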
@@ -875,23 +872,24 @@ done_irq:
875 return IRQ_RETVAL(handled); 872 return IRQ_RETVAL(handled);
876} 873}
877 874
878static inline void pdc_packet_start(struct ata_queued_cmd *qc) 875static void pdc_packet_start(struct ata_queued_cmd *qc)
879{ 876{
880 struct ata_port *ap = qc->ap; 877 struct ata_port *ap = qc->ap;
881 struct pdc_port_priv *pp = ap->private_data; 878 struct pdc_port_priv *pp = ap->private_data;
882 void __iomem *mmio = ap->host->iomap[PDC_MMIO_BAR]; 879 void __iomem *host_mmio = ap->host->iomap[PDC_MMIO_BAR];
880 void __iomem *ata_mmio = ap->ioaddr.cmd_addr;
883 unsigned int port_no = ap->port_no; 881 unsigned int port_no = ap->port_no;
884 u8 seq = (u8) (port_no + 1); 882 u8 seq = (u8) (port_no + 1);
885 883
886 VPRINTK("ENTER, ap %p\n", ap); 884 VPRINTK("ENTER, ap %p\n", ap);
887 885
888 writel(0x00000001, mmio + (seq * 4)); 886 writel(0x00000001, host_mmio + (seq * 4));
889 readl(mmio + (seq * 4)); /* flush */ 887 readl(host_mmio + (seq * 4)); /* flush */
890 888
891 pp->pkt[2] = seq; 889 pp->pkt[2] = seq;
892 wmb(); /* flush PRD, pkt writes */ 890 wmb(); /* flush PRD, pkt writes */
893 writel(pp->pkt_dma, ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); 891 writel(pp->pkt_dma, ata_mmio + PDC_PKT_SUBMIT);
894 readl(ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); /* flush */ 892 readl(ata_mmio + PDC_PKT_SUBMIT); /* flush */
895} 893}
896 894
897static unsigned int pdc_qc_issue(struct ata_queued_cmd *qc) 895static unsigned int pdc_qc_issue(struct ata_queued_cmd *qc)
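
pdc_packet_start() now derives ata_mmio once instead of dereferencing ap->ioaddr.cmd_addr at each use, so both doorbell writes name their window. The ordering in the tail of the function matters; the same lines again, with the wmb() rationale from the code spelled out in comments:

    pp->pkt[2] = seq;                  /* tag the packet with this port's SEQID */
    wmb();                             /* PRD + packet must reach memory before
                                        * the device is told to fetch them      */
    writel(pp->pkt_dma, ata_mmio + PDC_PKT_SUBMIT); /* doorbell: packet bus addr */
    readl(ata_mmio + PDC_PKT_SUBMIT);  /* flush the posted write                */
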
@@ -909,11 +907,9 @@ static unsigned int pdc_qc_issue(struct ata_queued_cmd *qc)
909 case ATA_PROT_DMA: 907 case ATA_PROT_DMA:
910 pdc_packet_start(qc); 908 pdc_packet_start(qc);
911 return 0; 909 return 0;
912
913 default: 910 default:
914 break; 911 break;
915 } 912 }
916
917 return ata_sff_qc_issue(qc); 913 return ata_sff_qc_issue(qc);
918} 914}
919 915
@@ -987,7 +983,7 @@ static void pdc_ata_setup_port(struct ata_port *ap,
987 983
988static void pdc_host_init(struct ata_host *host) 984static void pdc_host_init(struct ata_host *host)
989{ 985{
990 void __iomem *mmio = host->iomap[PDC_MMIO_BAR]; 986 void __iomem *host_mmio = host->iomap[PDC_MMIO_BAR];
991 int is_gen2 = host->ports[0]->flags & PDC_FLAG_GEN_II; 987 int is_gen2 = host->ports[0]->flags & PDC_FLAG_GEN_II;
992 int hotplug_offset; 988 int hotplug_offset;
993 u32 tmp; 989 u32 tmp;
@@ -1004,38 +1000,38 @@ static void pdc_host_init(struct ata_host *host)
1004 */ 1000 */
1005 1001
1006 /* enable BMR_BURST, maybe change FIFO_SHD to 8 dwords */ 1002 /* enable BMR_BURST, maybe change FIFO_SHD to 8 dwords */
1007 tmp = readl(mmio + PDC_FLASH_CTL); 1003 tmp = readl(host_mmio + PDC_FLASH_CTL);
1008 tmp |= 0x02000; /* bit 13 (enable bmr burst) */ 1004 tmp |= 0x02000; /* bit 13 (enable bmr burst) */
1009 if (!is_gen2) 1005 if (!is_gen2)
1010 tmp |= 0x10000; /* bit 16 (fifo threshold at 8 dw) */ 1006 tmp |= 0x10000; /* bit 16 (fifo threshold at 8 dw) */
1011 writel(tmp, mmio + PDC_FLASH_CTL); 1007 writel(tmp, host_mmio + PDC_FLASH_CTL);
1012 1008
1013 /* clear plug/unplug flags for all ports */ 1009 /* clear plug/unplug flags for all ports */
1014 tmp = readl(mmio + hotplug_offset); 1010 tmp = readl(host_mmio + hotplug_offset);
1015 writel(tmp | 0xff, mmio + hotplug_offset); 1011 writel(tmp | 0xff, host_mmio + hotplug_offset);
1016 1012
1017 /* unmask plug/unplug ints */ 1013 /* unmask plug/unplug ints */
1018 tmp = readl(mmio + hotplug_offset); 1014 tmp = readl(host_mmio + hotplug_offset);
1019 writel(tmp & ~0xff0000, mmio + hotplug_offset); 1015 writel(tmp & ~0xff0000, host_mmio + hotplug_offset);
1020 1016
1021 /* don't initialise TBG or SLEW on 2nd generation chips */ 1017 /* don't initialise TBG or SLEW on 2nd generation chips */
1022 if (is_gen2) 1018 if (is_gen2)
1023 return; 1019 return;
1024 1020
1025 /* reduce TBG clock to 133 Mhz. */ 1021 /* reduce TBG clock to 133 Mhz. */
1026 tmp = readl(mmio + PDC_TBG_MODE); 1022 tmp = readl(host_mmio + PDC_TBG_MODE);
1027 tmp &= ~0x30000; /* clear bit 17, 16*/ 1023 tmp &= ~0x30000; /* clear bit 17, 16*/
1028 tmp |= 0x10000; /* set bit 17:16 = 0:1 */ 1024 tmp |= 0x10000; /* set bit 17:16 = 0:1 */
1029 writel(tmp, mmio + PDC_TBG_MODE); 1025 writel(tmp, host_mmio + PDC_TBG_MODE);
1030 1026
1031 readl(mmio + PDC_TBG_MODE); /* flush */ 1027 readl(host_mmio + PDC_TBG_MODE); /* flush */
1032 msleep(10); 1028 msleep(10);
1033 1029
1034 /* adjust slew rate control register. */ 1030 /* adjust slew rate control register. */
1035 tmp = readl(mmio + PDC_SLEW_CTL); 1031 tmp = readl(host_mmio + PDC_SLEW_CTL);
1036 tmp &= 0xFFFFF03F; /* clear bit 11 ~ 6 */ 1032 tmp &= 0xFFFFF03F; /* clear bit 11 ~ 6 */
1037 tmp |= 0x00000900; /* set bit 11-9 = 100b , bit 8-6 = 100 */ 1033 tmp |= 0x00000900; /* set bit 11-9 = 100b , bit 8-6 = 100 */
1038 writel(tmp, mmio + PDC_SLEW_CTL); 1034 writel(tmp, host_mmio + PDC_SLEW_CTL);
1039} 1035}
1040 1036
1041static int pdc_ata_init_one(struct pci_dev *pdev, 1037static int pdc_ata_init_one(struct pci_dev *pdev,
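
pdc_host_init() is a pure rename, but it repeats one read-modify-write idiom throughout. A hypothetical helper (not part of the patch) makes the pattern explicit:

    static void pdc_rmw(void __iomem *reg, u32 clear, u32 set)
    {
            u32 tmp = readl(reg);

            tmp &= ~clear;
            tmp |= set;
            writel(tmp, reg);
    }

With it, the TBG clock setup above would read pdc_rmw(host_mmio + PDC_TBG_MODE, 0x30000, 0x10000);.
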
@@ -1045,7 +1041,7 @@ static int pdc_ata_init_one(struct pci_dev *pdev,
1045 const struct ata_port_info *pi = &pdc_port_info[ent->driver_data]; 1041 const struct ata_port_info *pi = &pdc_port_info[ent->driver_data];
1046 const struct ata_port_info *ppi[PDC_MAX_PORTS]; 1042 const struct ata_port_info *ppi[PDC_MAX_PORTS];
1047 struct ata_host *host; 1043 struct ata_host *host;
1048 void __iomem *base; 1044 void __iomem *host_mmio;
1049 int n_ports, i, rc; 1045 int n_ports, i, rc;
1050 int is_sataii_tx4; 1046 int is_sataii_tx4;
1051 1047
@@ -1062,7 +1058,7 @@ static int pdc_ata_init_one(struct pci_dev *pdev,
1062 pcim_pin_device(pdev); 1058 pcim_pin_device(pdev);
1063 if (rc) 1059 if (rc)
1064 return rc; 1060 return rc;
1065 base = pcim_iomap_table(pdev)[PDC_MMIO_BAR]; 1061 host_mmio = pcim_iomap_table(pdev)[PDC_MMIO_BAR];
1066 1062
1067 /* determine port configuration and setup host */ 1063 /* determine port configuration and setup host */
1068 n_ports = 2; 1064 n_ports = 2;
@@ -1072,7 +1068,7 @@ static int pdc_ata_init_one(struct pci_dev *pdev,
1072 ppi[i] = pi; 1068 ppi[i] = pi;
1073 1069
1074 if (pi->flags & PDC_FLAG_SATA_PATA) { 1070 if (pi->flags & PDC_FLAG_SATA_PATA) {
1075 u8 tmp = readb(base + PDC_FLASH_CTL+1); 1071 u8 tmp = readb(host_mmio + PDC_FLASH_CTL + 1);
1076 if (!(tmp & 0x80)) 1072 if (!(tmp & 0x80))
1077 ppi[n_ports++] = pi + 1; 1073 ppi[n_ports++] = pi + 1;
1078 } 1074 }
@@ -1088,13 +1084,13 @@ static int pdc_ata_init_one(struct pci_dev *pdev,
1088 for (i = 0; i < host->n_ports; i++) { 1084 for (i = 0; i < host->n_ports; i++) {
1089 struct ata_port *ap = host->ports[i]; 1085 struct ata_port *ap = host->ports[i];
1090 unsigned int ata_no = pdc_port_no_to_ata_no(i, is_sataii_tx4); 1086 unsigned int ata_no = pdc_port_no_to_ata_no(i, is_sataii_tx4);
1091 unsigned int port_offset = 0x200 + ata_no * 0x80; 1087 unsigned int ata_offset = 0x200 + ata_no * 0x80;
1092 unsigned int scr_offset = 0x400 + ata_no * 0x100; 1088 unsigned int scr_offset = 0x400 + ata_no * 0x100;
1093 1089
1094 pdc_ata_setup_port(ap, base + port_offset, base + scr_offset); 1090 pdc_ata_setup_port(ap, host_mmio + ata_offset, host_mmio + scr_offset);
1095 1091
1096 ata_port_pbar_desc(ap, PDC_MMIO_BAR, -1, "mmio"); 1092 ata_port_pbar_desc(ap, PDC_MMIO_BAR, -1, "mmio");
1097 ata_port_pbar_desc(ap, PDC_MMIO_BAR, port_offset, "port"); 1093 ata_port_pbar_desc(ap, PDC_MMIO_BAR, ata_offset, "ata");
1098 } 1094 }
1099 1095
1100 /* initialize adapter */ 1096 /* initialize adapter */
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index 27a110110077..8ee6b5b4ede7 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -899,14 +899,25 @@ static bool sil24_qc_fill_rtf(struct ata_queued_cmd *qc)
899 899
900static void sil24_pmp_attach(struct ata_port *ap) 900static void sil24_pmp_attach(struct ata_port *ap)
901{ 901{
902 u32 *gscr = ap->link.device->gscr;
903
902 sil24_config_pmp(ap, 1); 904 sil24_config_pmp(ap, 1);
903 sil24_init_port(ap); 905 sil24_init_port(ap);
906
907 if (sata_pmp_gscr_vendor(gscr) == 0x11ab &&
908 sata_pmp_gscr_devid(gscr) == 0x4140) {
909 ata_port_printk(ap, KERN_INFO,
910 "disabling NCQ support due to sil24-mv4140 quirk\n");
911 ap->flags &= ~ATA_FLAG_NCQ;
912 }
904} 913}
905 914
906static void sil24_pmp_detach(struct ata_port *ap) 915static void sil24_pmp_detach(struct ata_port *ap)
907{ 916{
908 sil24_init_port(ap); 917 sil24_init_port(ap);
909 sil24_config_pmp(ap, 0); 918 sil24_config_pmp(ap, 0);
919
920 ap->flags |= ATA_FLAG_NCQ;
910} 921}
911 922
912static int sil24_pmp_hardreset(struct ata_link *link, unsigned int *class, 923static int sil24_pmp_hardreset(struct ata_link *link, unsigned int *class,
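
The sata_sil24 hunk adds a device quirk: if the attached port multiplier identifies as a Marvell 4140 (vendor 0x11ab, device 0x4140), NCQ is disabled on the port, and sil24_pmp_detach() restores ATA_FLAG_NCQ so the next attached device gets NCQ back. The accessors used for the match read the PMP's product-ID GSCR dword; a sketch of the assumed layout (vendor in bits 15:0, device ID in bits 31:16, neither spelled out in this patch):

    u32 prod_id = gscr[SATA_PMP_GSCR_PROD_ID]; /* assumed GSCR index   */
    u16 vendor  = prod_id & 0xffff;            /* 0x11ab -> Marvell    */
    u16 devid   = prod_id >> 16;               /* 0x4140 -> 4-port PMP */
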