path: root/drivers/ata
author		David S. Miller <davem@davemloft.net>	2008-06-10 05:22:26 -0400
committer	David S. Miller <davem@davemloft.net>	2008-06-10 05:22:26 -0400
commit		65b53e4cc90e59936733b3b95b9451d2ca47528d (patch)
tree		29932718192962671c48c3fd1ea017a6112459e8 /drivers/ata
parent		788c0a53164c05c5ccdb1472474372b72ba74644 (diff)
parent		2e761e0532a784816e7e822dbaaece8c5d4be14d (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
Conflicts:

	drivers/net/tg3.c
	drivers/net/wireless/rt2x00/rt2x00dev.c
	net/mac80211/ieee80211_i.h
Diffstat (limited to 'drivers/ata')
-rw-r--r--	drivers/ata/ahci.c           |    8
-rw-r--r--	drivers/ata/ata_piix.c       |    7
-rw-r--r--	drivers/ata/libata-acpi.c    |   75
-rw-r--r--	drivers/ata/libata-core.c    |   42
-rw-r--r--	drivers/ata/libata-eh.c      |  207
-rw-r--r--	drivers/ata/libata-pmp.c     |   51
-rw-r--r--	drivers/ata/libata-scsi.c    |    9
-rw-r--r--	drivers/ata/pata_ali.c       |   10
-rw-r--r--	drivers/ata/pata_amd.c       |   14
-rw-r--r--	drivers/ata/pata_at32.c      |    2
-rw-r--r--	drivers/ata/pata_bf54x.c     |    5
-rw-r--r--	drivers/ata/pata_cypress.c   |    8
-rw-r--r--	drivers/ata/pata_legacy.c    |   50
-rw-r--r--	drivers/ata/pata_ns87410.c   |    6
-rw-r--r--	drivers/ata/pata_ns87415.c   |    4
-rw-r--r--	drivers/ata/pata_qdi.c       |   16
-rw-r--r--	drivers/ata/pata_sl82c105.c  |    2
-rw-r--r--	drivers/ata/pata_via.c       |   14
-rw-r--r--	drivers/ata/pata_winbond.c   |    6
-rw-r--r--	drivers/ata/sata_fsl.c       |  224
-rw-r--r--	drivers/ata/sata_mv.c        |  240
-rw-r--r--	drivers/ata/sata_promise.c   |  148
-rw-r--r--	drivers/ata/sata_sil24.c     |   11
23 files changed, 665 insertions(+), 494 deletions(-)
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 97f83fb2ee2e..544b7d6c617c 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -502,10 +502,10 @@ static const struct pci_device_id ahci_pci_tbl[] = {
 	{ PCI_VDEVICE(NVIDIA, 0x0bcd), board_ahci },		/* MCP7B */
 	{ PCI_VDEVICE(NVIDIA, 0x0bce), board_ahci },		/* MCP7B */
 	{ PCI_VDEVICE(NVIDIA, 0x0bcf), board_ahci },		/* MCP7B */
-	{ PCI_VDEVICE(NVIDIA, 0x0bd0), board_ahci },		/* MCP7B */
-	{ PCI_VDEVICE(NVIDIA, 0x0bd1), board_ahci },		/* MCP7B */
-	{ PCI_VDEVICE(NVIDIA, 0x0bd2), board_ahci },		/* MCP7B */
-	{ PCI_VDEVICE(NVIDIA, 0x0bd3), board_ahci },		/* MCP7B */
+	{ PCI_VDEVICE(NVIDIA, 0x0bc4), board_ahci },		/* MCP7B */
+	{ PCI_VDEVICE(NVIDIA, 0x0bc5), board_ahci },		/* MCP7B */
+	{ PCI_VDEVICE(NVIDIA, 0x0bc6), board_ahci },		/* MCP7B */
+	{ PCI_VDEVICE(NVIDIA, 0x0bc7), board_ahci },		/* MCP7B */
 
 	/* SiS */
 	{ PCI_VDEVICE(SI, 0x1184), board_ahci },		/* SiS 966 */
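For context, each PCI_VDEVICE() entry in this table matches on vendor and device ID while wildcarding the subsystem IDs, and the trailing value becomes driver_data (an index into the driver's port-info array). The sketch below shows the equivalent designated-initializer form; the table name and layout here are illustrative, not code from this patch.

/* Illustrative only: roughly what PCI_VDEVICE(NVIDIA, 0x0bc4) + board_ahci
 * amounts to once the positional initializers are spelled out. */
static const struct pci_device_id example_tbl[] = {
	{ .vendor      = PCI_VENDOR_ID_NVIDIA,	/* 0x10de */
	  .device      = 0x0bc4,
	  .subvendor   = PCI_ANY_ID,		/* any subsystem vendor */
	  .subdevice   = PCI_ANY_ID,		/* any subsystem device */
	  .driver_data = board_ahci },		/* index into the port-info array */
	{ }	/* terminating all-zero entry */
};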
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index a9027b8fbdd5..3548ee7014ca 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -247,10 +247,11 @@ static const struct pci_device_id piix_pci_tbl[] = {
 	{ 0x8086, 0x2820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
 	/* SATA Controller 2 IDE (ICH8) */
 	{ 0x8086, 0x2825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
-	/* Mobile SATA Controller IDE (ICH8M) */
-	{ 0x8086, 0x2828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
 	/* Mobile SATA Controller IDE (ICH8M), Apple */
 	{ 0x8086, 0x2828, 0x106b, 0x00a0, 0, 0, ich8m_apple_sata },
+	{ 0x8086, 0x2828, 0x106b, 0x00a1, 0, 0, ich8m_apple_sata },
+	/* Mobile SATA Controller IDE (ICH8M) */
+	{ 0x8086, 0x2828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
 	/* SATA Controller IDE (ICH9) */
 	{ 0x8086, 0x2920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
 	/* SATA Controller IDE (ICH9) */
@@ -526,7 +527,7 @@ static struct ata_port_info piix_port_info[] = {
 
 	[ich8m_apple_sata] =
 	{
-		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR,
+		.flags		= PIIX_SATA_FLAGS,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.mwdma_mask	= 0x07, /* mwdma0-2 */
 		.udma_mask	= ATA_UDMA6,
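The first ata_piix hunk moves the generic ICH8M entry (with PCI_ANY_ID subsystem IDs) after the Apple-specific 0x106b entries. The id table is scanned top to bottom and the first match wins, so a wildcard entry listed earlier would shadow the more specific Apple matches and ich8m_apple_sata would never be selected. A sketch of that first-match semantics follows; the helper name is hypothetical, the real matching is done by the PCI core.

/* Sketch of first-match table lookup over struct pci_device_id entries. */
static const struct pci_device_id *first_match(const struct pci_device_id *ids,
					       struct pci_dev *pdev)
{
	for (; ids->vendor || ids->subvendor || ids->class_mask; ids++) {
		if (ids->vendor == pdev->vendor &&
		    ids->device == pdev->device &&
		    (ids->subvendor == PCI_ANY_ID ||
		     ids->subvendor == pdev->subsystem_vendor) &&
		    (ids->subdevice == PCI_ANY_ID ||
		     ids->subdevice == pdev->subsystem_device))
			return ids;	/* first hit wins, so order matters */
	}
	return NULL;
}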
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index 70b77e0899a8..dbf6ca781f66 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -118,8 +118,8 @@ static void ata_acpi_associate_ide_port(struct ata_port *ap)
 	ap->pflags |= ATA_PFLAG_INIT_GTM_VALID;
 }
 
-static void ata_acpi_handle_hotplug(struct ata_port *ap, struct ata_device *dev,
-				    u32 event)
+static void ata_acpi_handle_hotplug(struct ata_port *ap, struct ata_device
+				    *dev, u32 event)
 {
 	char event_string[12];
 	char *envp[] = { event_string, NULL };
@@ -127,6 +127,9 @@ static void ata_acpi_handle_hotplug(struct ata_port *ap, struct ata_device *dev,
 	struct kobject *kobj = NULL;
 	int wait = 0;
 	unsigned long flags;
+	acpi_handle handle, tmphandle;
+	unsigned long sta;
+	acpi_status status;
 
 	if (!ap)
 		ap = dev->link->ap;
@@ -134,32 +137,57 @@ static void ata_acpi_handle_hotplug(struct ata_port *ap, struct ata_device *dev,
 
 	spin_lock_irqsave(ap->lock, flags);
 
+	if (dev)
+		handle = dev->acpi_handle;
+	else
+		handle = ap->acpi_handle;
+
+	status = acpi_get_handle(handle, "_EJ0", &tmphandle);
+	if (ACPI_FAILURE(status)) {
+		/* This device is not ejectable */
+		spin_unlock_irqrestore(ap->lock, flags);
+		return;
+	}
+
+	status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
+	if (ACPI_FAILURE(status)) {
+		printk ("Unable to determine bay status\n");
+		spin_unlock_irqrestore(ap->lock, flags);
+		return;
+	}
+
 	switch (event) {
 	case ACPI_NOTIFY_BUS_CHECK:
 	case ACPI_NOTIFY_DEVICE_CHECK:
 		ata_ehi_push_desc(ehi, "ACPI event");
-		ata_ehi_hotplugged(ehi);
-		ata_port_freeze(ap);
-		break;
-
-	case ACPI_NOTIFY_EJECT_REQUEST:
-		ata_ehi_push_desc(ehi, "ACPI event");
-		if (dev)
-			dev->flags |= ATA_DFLAG_DETACH;
-		else {
-			struct ata_link *tlink;
-			struct ata_device *tdev;
-
-			ata_port_for_each_link(tlink, ap)
-				ata_link_for_each_dev(tdev, tlink)
-					tdev->flags |= ATA_DFLAG_DETACH;
+		if (!sta) {
+			/* Device has been unplugged */
+			if (dev)
+				dev->flags |= ATA_DFLAG_DETACH;
+			else {
+				struct ata_link *tlink;
+				struct ata_device *tdev;
+
+				ata_port_for_each_link(tlink, ap) {
+					ata_link_for_each_dev(tdev, tlink) {
+						tdev->flags |=
+							ATA_DFLAG_DETACH;
+					}
+				}
+			}
+			ata_port_schedule_eh(ap);
+			wait = 1;
+		} else {
+			ata_ehi_hotplugged(ehi);
+			ata_port_freeze(ap);
 		}
-
-		ata_port_schedule_eh(ap);
-		wait = 1;
-		break;
 	}
 
+	spin_unlock_irqrestore(ap->lock, flags);
+
+	if (wait)
+		ata_port_wait_eh(ap);
+
 	if (dev) {
 		if (dev->sdev)
 			kobj = &dev->sdev->sdev_gendev.kobj;
@@ -170,11 +198,6 @@ static void ata_acpi_handle_hotplug(struct ata_port *ap, struct ata_device *dev,
 		sprintf(event_string, "BAY_EVENT=%d", event);
 		kobject_uevent_env(kobj, KOBJ_CHANGE, envp);
 	}
-
-	spin_unlock_irqrestore(ap->lock, flags);
-
-	if (wait)
-		ata_port_wait_eh(ap);
 }
 
 static void ata_acpi_dev_notify(acpi_handle handle, u32 event, void *data)
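The reworked hotplug handler above keys its unplug/plug decision off an ACPI _STA evaluation: bit 0 of the returned value is the "device present" bit, so a zero result is treated as removal and anything else as insertion. A minimal sketch of that check, assuming a kernel/ACPI build context; the helper name is made up and is not part of the patch.

#include <linux/acpi.h>

/* Hypothetical helper mirroring the _STA interpretation used above. */
static int bay_present(acpi_handle handle)
{
	unsigned long sta;
	acpi_status status;

	status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
	if (ACPI_FAILURE(status))
		return -EIO;		/* cannot tell; caller bails out */

	return (sta & 0x01) ? 1 : 0;	/* bit 0: device present */
}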
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 927b692d723c..cc816ca623d3 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -2126,6 +2126,13 @@ int ata_dev_configure(struct ata_device *dev)
 	dev->horkage |= ata_dev_blacklisted(dev);
 	ata_force_horkage(dev);
 
+	if (dev->horkage & ATA_HORKAGE_DISABLE) {
+		ata_dev_printk(dev, KERN_INFO,
+			       "unsupported device, disabling\n");
+		ata_dev_disable(dev);
+		return 0;
+	}
+
 	/* let ACPI work its magic */
 	rc = ata_acpi_on_devcfg(dev);
 	if (rc)
@@ -3490,22 +3497,11 @@ int sata_link_resume(struct ata_link *link, const unsigned long *params,
 	if ((rc = sata_link_debounce(link, params, deadline)))
 		return rc;
 
-	/* Clear SError.  PMP and some host PHYs require this to
-	 * operate and clearing should be done before checking PHY
-	 * online status to avoid race condition (hotplugging between
-	 * link resume and status check).
-	 */
+	/* clear SError, some PHYs require this even for SRST to work */
 	if (!(rc = sata_scr_read(link, SCR_ERROR, &serror)))
 		rc = sata_scr_write(link, SCR_ERROR, serror);
-	if (rc == 0 || rc == -EINVAL) {
-		unsigned long flags;
 
-		spin_lock_irqsave(link->ap->lock, flags);
-		link->eh_info.serror = 0;
-		spin_unlock_irqrestore(link->ap->lock, flags);
-		rc = 0;
-	}
-	return rc;
+	return rc != -EINVAL ? rc : 0;
 }
 
 /**
@@ -3653,9 +3649,13 @@ int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
 	if (check_ready)
 		rc = ata_wait_ready(link, deadline, check_ready);
  out:
-	if (rc && rc != -EAGAIN)
+	if (rc && rc != -EAGAIN) {
+		/* online is set iff link is online && reset succeeded */
+		if (online)
+			*online = false;
 		ata_link_printk(link, KERN_ERR,
 				"COMRESET failed (errno=%d)\n", rc);
+	}
 	DPRINTK("EXIT, rc=%d\n", rc);
 	return rc;
 }
@@ -3700,8 +3700,14 @@ int sata_std_hardreset(struct ata_link *link, unsigned int *class,
  */
 void ata_std_postreset(struct ata_link *link, unsigned int *classes)
 {
+	u32 serror;
+
 	DPRINTK("ENTER\n");
 
+	/* reset complete, clear SError */
+	if (!sata_scr_read(link, SCR_ERROR, &serror))
+		sata_scr_write(link, SCR_ERROR, serror);
+
 	/* print link status */
 	sata_print_link_status(link);
 
@@ -3894,8 +3900,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
 	{ "SAMSUNG CD-ROM SN-124", "N001",	ATA_HORKAGE_NODMA },
 	{ "Seagate STT20000A", NULL,		ATA_HORKAGE_NODMA },
 	/* Odd clown on sil3726/4726 PMPs */
-	{ "Config Disk",	NULL,		ATA_HORKAGE_NODMA |
-						ATA_HORKAGE_SKIP_PM },
+	{ "Config Disk",	NULL,		ATA_HORKAGE_DISABLE },
 
 	/* Weird ATAPI devices */
 	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 },
@@ -5398,7 +5403,7 @@ static void ata_host_stop(struct device *gendev, void *res)
  */
 static void ata_finalize_port_ops(struct ata_port_operations *ops)
 {
-	static spinlock_t lock = SPIN_LOCK_UNLOCKED;
+	static DEFINE_SPINLOCK(lock);
 	const struct ata_port_operations *cur;
 	void **begin = (void **)ops;
 	void **end = (void **)&ops->inherits;
@@ -5616,7 +5621,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
 		spin_lock_irqsave(ap->lock, flags);
 
 		ehi->probe_mask |= ATA_ALL_DEVICES;
-		ehi->action |= ATA_EH_RESET;
+		ehi->action |= ATA_EH_RESET | ATA_EH_LPM;
 		ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
 
 		ap->pflags &= ~ATA_PFLAG_INITIALIZING;
@@ -5649,7 +5654,6 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
 		struct ata_port *ap = host->ports[i];
 
 		ata_scsi_scan_host(ap, 1);
-		ata_lpm_schedule(ap, ap->pm_policy);
 	}
 
 	return 0;
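Besides the reset and SError rework, the libata-core.c hunks above drop the long-deprecated SPIN_LOCK_UNLOCKED initializer in favour of DEFINE_SPINLOCK() and remove the redundant probe-time LPM scheduling. A minimal sketch of the DEFINE_SPINLOCK() pattern follows, assuming a kernel build context; the lock and function names are made up.

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);	/* statically initialized, unlocked */

static void demo_update(unsigned int *shared_counter)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	(*shared_counter)++;			/* protected update */
	spin_unlock_irqrestore(&demo_lock, flags);
}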
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 62e033146bed..7894d83ea1eb 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -1308,12 +1308,7 @@ static void ata_eh_analyze_serror(struct ata_link *link)
 	unsigned int err_mask = 0, action = 0;
 	u32 hotplug_mask;
 
-	if (serror & SERR_PERSISTENT) {
-		err_mask |= AC_ERR_ATA_BUS;
-		action |= ATA_EH_RESET;
-	}
-	if (serror &
-	    (SERR_DATA_RECOVERED | SERR_COMM_RECOVERED | SERR_DATA)) {
+	if (serror & (SERR_PERSISTENT | SERR_DATA)) {
 		err_mask |= AC_ERR_ATA_BUS;
 		action |= ATA_EH_RESET;
 	}
@@ -2047,19 +2042,11 @@ static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset,
 			unsigned int *classes, unsigned long deadline)
 {
 	struct ata_device *dev;
-	int rc;
 
 	ata_link_for_each_dev(dev, link)
 		classes[dev->devno] = ATA_DEV_UNKNOWN;
 
-	rc = reset(link, classes, deadline);
-
-	/* convert all ATA_DEV_UNKNOWN to ATA_DEV_NONE */
-	ata_link_for_each_dev(dev, link)
-		if (classes[dev->devno] == ATA_DEV_UNKNOWN)
-			classes[dev->devno] = ATA_DEV_NONE;
-
-	return rc;
+	return reset(link, classes, deadline);
 }
 
 static int ata_eh_followup_srst_needed(struct ata_link *link,
@@ -2096,9 +2083,11 @@ int ata_eh_reset(struct ata_link *link, int classify,
 	ata_reset_fn_t reset;
 	unsigned long flags;
 	u32 sstatus;
-	int rc;
+	int nr_known, rc;
 
-	/* about to reset */
+	/*
+	 * Prepare to reset
+	 */
 	spin_lock_irqsave(ap->lock, flags);
 	ap->pflags |= ATA_PFLAG_RESETTING;
 	spin_unlock_irqrestore(ap->lock, flags);
@@ -2124,16 +2113,8 @@ int ata_eh_reset(struct ata_link *link, int classify,
 			ap->ops->set_piomode(ap, dev);
 	}
 
-	if (!softreset && !hardreset) {
-		if (verbose)
-			ata_link_printk(link, KERN_INFO, "no reset method "
-					"available, skipping reset\n");
-		if (!(lflags & ATA_LFLAG_ASSUME_CLASS))
-			lflags |= ATA_LFLAG_ASSUME_ATA;
-		goto done;
-	}
-
 	/* prefer hardreset */
+	reset = NULL;
 	ehc->i.action &= ~ATA_EH_RESET;
 	if (hardreset) {
 		reset = hardreset;
@@ -2141,11 +2122,6 @@ int ata_eh_reset(struct ata_link *link, int classify,
 	} else if (softreset) {
 		reset = softreset;
 		ehc->i.action = ATA_EH_SOFTRESET;
-	} else {
-		ata_link_printk(link, KERN_ERR, "BUG: no reset method, "
-				"please report to linux-ide@vger.kernel.org\n");
-		dump_stack();
-		return -EINVAL;
 	}
 
 	if (prereset) {
@@ -2165,55 +2141,71 @@ int ata_eh_reset(struct ata_link *link, int classify,
 					"prereset failed (errno=%d)\n", rc);
 			goto out;
 		}
-	}
 
-	/* prereset() might have cleared ATA_EH_RESET */
-	if (!(ehc->i.action & ATA_EH_RESET)) {
-		/* prereset told us not to reset, bang classes and return */
-		ata_link_for_each_dev(dev, link)
-			classes[dev->devno] = ATA_DEV_NONE;
-		rc = 0;
-		goto out;
+		/* prereset() might have cleared ATA_EH_RESET.  If so,
+		 * bang classes and return.
+		 */
+		if (reset && !(ehc->i.action & ATA_EH_RESET)) {
+			ata_link_for_each_dev(dev, link)
+				classes[dev->devno] = ATA_DEV_NONE;
+			rc = 0;
+			goto out;
+		}
 	}
 
  retry:
+	/*
+	 * Perform reset
+	 */
+	if (ata_is_host_link(link))
+		ata_eh_freeze_port(ap);
+
 	deadline = jiffies + ata_eh_reset_timeouts[try++];
 
-	/* shut up during boot probing */
+	if (reset) {
 		if (verbose)
 			ata_link_printk(link, KERN_INFO, "%s resetting link\n",
 					reset == softreset ? "soft" : "hard");
 
 		/* mark that this EH session started with reset */
 		if (reset == hardreset)
			ehc->i.flags |= ATA_EHI_DID_HARDRESET;
 		else
			ehc->i.flags |= ATA_EHI_DID_SOFTRESET;
 
 		rc = ata_do_reset(link, reset, classes, deadline);
 
 		if (reset == hardreset &&
		    ata_eh_followup_srst_needed(link, rc, classify, classes)) {
 			/* okay, let's do follow-up softreset */
 			reset = softreset;
 
 			if (!reset) {
 				ata_link_printk(link, KERN_ERR,
 						"follow-up softreset required "
 						"but no softreset avaliable\n");
 				rc = -EINVAL;
 				goto fail;
+			}
+
+			ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
+			rc = ata_do_reset(link, reset, classes, deadline);
 		}
 
-	ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
-	rc = ata_do_reset(link, reset, classes, deadline);
+		/* -EAGAIN can happen if we skipped followup SRST */
+		if (rc && rc != -EAGAIN)
+			goto fail;
+	} else {
+		if (verbose)
+			ata_link_printk(link, KERN_INFO, "no reset method "
+					"available, skipping reset\n");
+		if (!(lflags & ATA_LFLAG_ASSUME_CLASS))
+			lflags |= ATA_LFLAG_ASSUME_ATA;
 	}
 
-	/* -EAGAIN can happen if we skipped followup SRST */
-	if (rc && rc != -EAGAIN)
-		goto fail;
-
- done:
+	/*
+	 * Post-reset processing
+	 */
 	ata_link_for_each_dev(dev, link) {
 		/* After the reset, the device state is PIO 0 and the
 		 * controller state is undefined.  Reset also wakes up
@@ -2236,9 +2228,53 @@ int ata_eh_reset(struct ata_link *link, int classify,
 	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0)
 		link->sata_spd = (sstatus >> 4) & 0xf;
 
+	/* thaw the port */
+	if (ata_is_host_link(link))
+		ata_eh_thaw_port(ap);
+
+	/* postreset() should clear hardware SError.  Although SError
+	 * is cleared during link resume, clearing SError here is
+	 * necessary as some PHYs raise hotplug events after SRST.
+	 * This introduces race condition where hotplug occurs between
+	 * reset and here.  This race is mediated by cross checking
+	 * link onlineness and classification result later.
+	 */
 	if (postreset)
 		postreset(link, classes);
 
+	/* clear cached SError */
+	spin_lock_irqsave(link->ap->lock, flags);
+	link->eh_info.serror = 0;
+	spin_unlock_irqrestore(link->ap->lock, flags);
+
+	/* Make sure onlineness and classification result correspond.
+	 * Hotplug could have happened during reset and some
+	 * controllers fail to wait while a drive is spinning up after
+	 * being hotplugged causing misdetection.  By cross checking
+	 * link onlineness and classification result, those conditions
+	 * can be reliably detected and retried.
+	 */
+	nr_known = 0;
+	ata_link_for_each_dev(dev, link) {
+		/* convert all ATA_DEV_UNKNOWN to ATA_DEV_NONE */
+		if (classes[dev->devno] == ATA_DEV_UNKNOWN)
+			classes[dev->devno] = ATA_DEV_NONE;
+		else
+			nr_known++;
+	}
+
+	if (classify && !nr_known && ata_link_online(link)) {
+		if (try < max_tries) {
+			ata_link_printk(link, KERN_WARNING, "link online but "
+					"device misclassified, retrying\n");
+			rc = -EAGAIN;
+			goto fail;
+		}
+		ata_link_printk(link, KERN_WARNING,
+				"link online but device misclassified, "
+				"device detection might fail\n");
+	}
+
 	/* reset successful, schedule revalidation */
 	ata_eh_done(link, NULL, ATA_EH_RESET);
 	ehc->i.action |= ATA_EH_REVALIDATE;
@@ -2587,7 +2623,7 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
 	struct ata_link *link;
 	struct ata_device *dev;
 	int nr_failed_devs, nr_disabled_devs;
-	int reset, rc;
+	int rc;
 	unsigned long flags;
 
 	DPRINTK("ENTER\n");
@@ -2630,7 +2666,6 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
 	rc = 0;
 	nr_failed_devs = 0;
 	nr_disabled_devs = 0;
-	reset = 0;
 
 	/* if UNLOADING, finish immediately */
 	if (ap->pflags & ATA_PFLAG_UNLOADING)
@@ -2644,40 +2679,24 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
 		if (ata_eh_skip_recovery(link))
 			ehc->i.action = 0;
 
-		/* do we need to reset? */
-		if (ehc->i.action & ATA_EH_RESET)
-			reset = 1;
-
 		ata_link_for_each_dev(dev, link)
 			ehc->classes[dev->devno] = ATA_DEV_UNKNOWN;
 	}
 
 	/* reset */
-	if (reset) {
-		/* if PMP is attached, this function only deals with
-		 * downstream links, port should stay thawed.
-		 */
-		if (!sata_pmp_attached(ap))
-			ata_eh_freeze_port(ap);
-
-		ata_port_for_each_link(link, ap) {
-			struct ata_eh_context *ehc = &link->eh_context;
+	ata_port_for_each_link(link, ap) {
+		struct ata_eh_context *ehc = &link->eh_context;
 
 		if (!(ehc->i.action & ATA_EH_RESET))
 			continue;
 
 		rc = ata_eh_reset(link, ata_link_nr_vacant(link),
-				  prereset, softreset, hardreset,
-				  postreset);
-			if (rc) {
-				ata_link_printk(link, KERN_ERR,
-						"reset failed, giving up\n");
-				goto out;
-			}
+				  prereset, softreset, hardreset, postreset);
+		if (rc) {
+			ata_link_printk(link, KERN_ERR,
+					"reset failed, giving up\n");
+			goto out;
 		}
-
-		if (!sata_pmp_attached(ap))
-			ata_eh_thaw_port(ap);
 	}
 
 	/* the rest */
diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
index ff1822a7da38..7daf4c0f6216 100644
--- a/drivers/ata/libata-pmp.c
+++ b/drivers/ata/libata-pmp.c
@@ -48,7 +48,7 @@ static unsigned int sata_pmp_read(struct ata_link *link, int reg, u32 *r_val)
 	tf.device = link->pmp;
 
 	err_mask = ata_exec_internal(pmp_dev, &tf, NULL, DMA_NONE, NULL, 0,
-				     SATA_PMP_SCR_TIMEOUT);
+				     SATA_PMP_RW_TIMEOUT);
 	if (err_mask)
 		return err_mask;
 
@@ -88,7 +88,7 @@ static unsigned int sata_pmp_write(struct ata_link *link, int reg, u32 val)
 	tf.lbah = (val >> 24) & 0xff;
 
 	return ata_exec_internal(pmp_dev, &tf, NULL, DMA_NONE, NULL, 0,
-				 SATA_PMP_SCR_TIMEOUT);
+				 SATA_PMP_RW_TIMEOUT);
 }
 
 /**
@@ -257,19 +257,6 @@ static int sata_pmp_configure(struct ata_device *dev, int print_info)
 		goto fail;
 	}
 
-	/* turn off notification till fan-out ports are reset and configured */
-	if (gscr[SATA_PMP_GSCR_FEAT_EN] & SATA_PMP_FEAT_NOTIFY) {
-		gscr[SATA_PMP_GSCR_FEAT_EN] &= ~SATA_PMP_FEAT_NOTIFY;
-
-		err_mask = sata_pmp_write(dev->link, SATA_PMP_GSCR_FEAT_EN,
-					  gscr[SATA_PMP_GSCR_FEAT_EN]);
-		if (err_mask) {
-			rc = -EIO;
-			reason = "failed to write GSCR_FEAT_EN";
-			goto fail;
-		}
-	}
-
 	if (print_info) {
 		ata_dev_printk(dev, KERN_INFO, "Port Multiplier %s, "
 			       "0x%04x:0x%04x r%d, %d ports, feat 0x%x/0x%x\n",
@@ -335,9 +322,12 @@ static void sata_pmp_quirks(struct ata_port *ap)
 	if (vendor == 0x1095 && devid == 0x3726) {
 		/* sil3726 quirks */
 		ata_port_for_each_link(link, ap) {
-			/* class code report is unreliable */
+			/* Class code report is unreliable and SRST
+			 * times out under certain configurations.
+			 */
 			if (link->pmp < 5)
-				link->flags |= ATA_LFLAG_ASSUME_ATA;
+				link->flags |= ATA_LFLAG_NO_SRST |
+					       ATA_LFLAG_ASSUME_ATA;
 
 			/* port 5 is for SEMB device and it doesn't like SRST */
 			if (link->pmp == 5)
@@ -700,8 +690,6 @@ static int sata_pmp_eh_recover_pmp(struct ata_port *ap,
 	if (ehc->i.action & ATA_EH_RESET) {
 		struct ata_link *tlink;
 
-		ata_eh_freeze_port(ap);
-
 		/* reset */
 		rc = ata_eh_reset(link, 0, prereset, softreset, hardreset,
 				  postreset);
@@ -711,8 +699,6 @@ static int sata_pmp_eh_recover_pmp(struct ata_port *ap,
 			goto fail;
 		}
 
-		ata_eh_thaw_port(ap);
-
 		/* PMP is reset, SErrors cannot be trusted, scan all */
 		ata_port_for_each_link(tlink, ap) {
 			struct ata_eh_context *ehc = &tlink->eh_context;
@@ -864,6 +850,7 @@ static int sata_pmp_eh_recover(struct ata_port *ap)
 	struct ata_link *pmp_link = &ap->link;
 	struct ata_device *pmp_dev = pmp_link->device;
 	struct ata_eh_context *pmp_ehc = &pmp_link->eh_context;
+	u32 *gscr = pmp_dev->gscr;
 	struct ata_link *link;
 	struct ata_device *dev;
 	unsigned int err_mask;
@@ -901,6 +888,22 @@ static int sata_pmp_eh_recover(struct ata_port *ap)
 	if (rc)
 		goto pmp_fail;
 
+	/* PHY event notification can disturb reset and other recovery
+	 * operations.  Turn it off.
+	 */
+	if (gscr[SATA_PMP_GSCR_FEAT_EN] & SATA_PMP_FEAT_NOTIFY) {
+		gscr[SATA_PMP_GSCR_FEAT_EN] &= ~SATA_PMP_FEAT_NOTIFY;
+
+		err_mask = sata_pmp_write(pmp_link, SATA_PMP_GSCR_FEAT_EN,
+					  gscr[SATA_PMP_GSCR_FEAT_EN]);
+		if (err_mask) {
+			ata_link_printk(pmp_link, KERN_WARNING,
+				"failed to disable NOTIFY (err_mask=0x%x)\n",
+				err_mask);
+			goto pmp_fail;
+		}
+	}
+
 	/* handle disabled links */
 	rc = sata_pmp_eh_handle_disabled_links(ap);
 	if (rc)
@@ -923,10 +926,10 @@ static int sata_pmp_eh_recover(struct ata_port *ap)
 
 	/* enable notification */
 	if (pmp_dev->flags & ATA_DFLAG_AN) {
-		pmp_dev->gscr[SATA_PMP_GSCR_FEAT_EN] |= SATA_PMP_FEAT_NOTIFY;
+		gscr[SATA_PMP_GSCR_FEAT_EN] |= SATA_PMP_FEAT_NOTIFY;
 
-		err_mask = sata_pmp_write(pmp_dev->link, SATA_PMP_GSCR_FEAT_EN,
-					  pmp_dev->gscr[SATA_PMP_GSCR_FEAT_EN]);
+		err_mask = sata_pmp_write(pmp_link, SATA_PMP_GSCR_FEAT_EN,
+					  gscr[SATA_PMP_GSCR_FEAT_EN]);
 		if (err_mask) {
 			ata_dev_printk(pmp_dev, KERN_ERR, "failed to write "
 				       "PMP_FEAT_EN (Emask=0x%x)\n", err_mask);
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 3ce43920e459..2e6e1622dc6d 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1082,12 +1082,6 @@ static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc)
 		if (((cdb[4] >> 4) & 0xf) != 0)
 			goto invalid_fld;       /* power conditions not supported */
 
-		if (qc->dev->horkage & ATA_HORKAGE_SKIP_PM) {
-			/* the device lacks PM support, finish without doing anything */
-			scmd->result = SAM_STAT_GOOD;
-			return 1;
-		}
-
 		if (cdb[4] & 0x1) {
 			tf->nsect = 1;	/* 1 sector, lba=0 */
 
@@ -1643,6 +1637,7 @@ defer:
 
 /**
  *	ata_scsi_rbuf_get - Map response buffer.
+ *	@cmd: SCSI command containing buffer to be mapped.
  *	@flags: unsigned long variable to store irq enable status
  *	@copy_in: copy in from user buffer
  *
@@ -1960,7 +1955,7 @@ static unsigned int ata_msense_ctl_mode(u8 *buf)
 
 /**
  *	ata_msense_rw_recovery - Simulate MODE SENSE r/w error recovery page
- *	@bufp: output buffer
+ *	@buf: output buffer
 *
 *	Generate a generic MODE SENSE r/w error recovery page.
 *
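Both libata-scsi.c documentation hunks above bring the kernel-doc comments back in line with the function signatures (adding the missing @cmd line, renaming @bufp to @buf). A minimal sketch of the expected kernel-doc layout, using an invented example function:

#include <linux/types.h>
#include <linux/string.h>

/**
 *	example_fill_buf - Fill a response buffer (illustrative only)
 *	@buf: output buffer
 *	@len: size of @buf in bytes
 *
 *	Parameter names in the comment must match the function signature,
 *	otherwise the kernel-doc tooling warns about missing/unknown params.
 */
static unsigned int example_fill_buf(u8 *buf, size_t len)
{
	memset(buf, 0, len);
	return 0;
}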
diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c
index fcabe46f262b..0f3e659db99a 100644
--- a/drivers/ata/pata_ali.c
+++ b/drivers/ata/pata_ali.c
@@ -177,11 +177,11 @@ static void ali_program_modes(struct ata_port *ap, struct ata_device *adev, stru
 	u8 udma;
 
 	if (t != NULL) {
-		t->setup = FIT(t->setup, 1, 8) & 7;
-		t->act8b = FIT(t->act8b, 1, 8) & 7;
-		t->rec8b = FIT(t->rec8b, 1, 16) & 15;
-		t->active = FIT(t->active, 1, 8) & 7;
-		t->recover = FIT(t->recover, 1, 16) & 15;
+		t->setup = clamp_val(t->setup, 1, 8) & 7;
+		t->act8b = clamp_val(t->act8b, 1, 8) & 7;
+		t->rec8b = clamp_val(t->rec8b, 1, 16) & 15;
+		t->active = clamp_val(t->active, 1, 8) & 7;
+		t->recover = clamp_val(t->recover, 1, 16) & 15;
 
 		pci_write_config_byte(pdev, cas, t->setup);
 		pci_write_config_byte(pdev, cbt, (t->act8b << 4) | t->rec8b);
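This FIT() to clamp_val() conversion repeats mechanically through the PATA timing helpers below; both clamp a computed timing value into the range a register field can hold. A small standalone sketch of the clamping behaviour (the macro is re-implemented here for illustration; the kernel's clamp_val() in <linux/kernel.h> is typeof-based):

#include <stdio.h>

/* Illustration only: same observable behaviour as the kernel's clamp_val(). */
#define clamp_val(val, lo, hi) \
	((val) < (lo) ? (lo) : ((val) > (hi) ? (hi) : (val)))

int main(void)
{
	/* Mirrors pata_ali.c: clamp a timing value to 1..8, then mask to 3 bits. */
	int setup = 12;

	printf("%d -> %d\n", setup, clamp_val(setup, 1, 8) & 7);	/* prints 12 -> 0 */
	return 0;
}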
diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
index 26665c396485..57dd00f463d3 100644
--- a/drivers/ata/pata_amd.c
+++ b/drivers/ata/pata_amd.c
@@ -84,32 +84,32 @@ static void timing_setup(struct ata_port *ap, struct ata_device *adev, int offse
 
 	/* Configure the address set up timing */
 	pci_read_config_byte(pdev, offset + 0x0C, &t);
-	t = (t & ~(3 << ((3 - dn) << 1))) | ((FIT(at.setup, 1, 4) - 1) << ((3 - dn) << 1));
+	t = (t & ~(3 << ((3 - dn) << 1))) | ((clamp_val(at.setup, 1, 4) - 1) << ((3 - dn) << 1));
 	pci_write_config_byte(pdev, offset + 0x0C , t);
 
 	/* Configure the 8bit I/O timing */
 	pci_write_config_byte(pdev, offset + 0x0E + (1 - (dn >> 1)),
-		((FIT(at.act8b, 1, 16) - 1) << 4) | (FIT(at.rec8b, 1, 16) - 1));
+		((clamp_val(at.act8b, 1, 16) - 1) << 4) | (clamp_val(at.rec8b, 1, 16) - 1));
 
 	/* Drive timing */
 	pci_write_config_byte(pdev, offset + 0x08 + (3 - dn),
-		((FIT(at.active, 1, 16) - 1) << 4) | (FIT(at.recover, 1, 16) - 1));
+		((clamp_val(at.active, 1, 16) - 1) << 4) | (clamp_val(at.recover, 1, 16) - 1));
 
 	switch (clock) {
 	case 1:
-		t = at.udma ? (0xc0 | (FIT(at.udma, 2, 5) - 2)) : 0x03;
+		t = at.udma ? (0xc0 | (clamp_val(at.udma, 2, 5) - 2)) : 0x03;
 		break;
 
 	case 2:
-		t = at.udma ? (0xc0 | amd_cyc2udma[FIT(at.udma, 2, 10)]) : 0x03;
+		t = at.udma ? (0xc0 | amd_cyc2udma[clamp_val(at.udma, 2, 10)]) : 0x03;
 		break;
 
 	case 3:
-		t = at.udma ? (0xc0 | amd_cyc2udma[FIT(at.udma, 1, 10)]) : 0x03;
+		t = at.udma ? (0xc0 | amd_cyc2udma[clamp_val(at.udma, 1, 10)]) : 0x03;
 		break;
 
 	case 4:
-		t = at.udma ? (0xc0 | amd_cyc2udma[FIT(at.udma, 1, 15)]) : 0x03;
+		t = at.udma ? (0xc0 | amd_cyc2udma[clamp_val(at.udma, 1, 15)]) : 0x03;
 		break;
 
 	default:
diff --git a/drivers/ata/pata_at32.c b/drivers/ata/pata_at32.c
index 5e104385d6a3..82fb6e273169 100644
--- a/drivers/ata/pata_at32.c
+++ b/drivers/ata/pata_at32.c
@@ -291,8 +291,6 @@ static int __init pata_at32_probe(struct platform_device *pdev)
 	if (!info)
 		return -ENOMEM;
 
-	memset(info, 0, sizeof(struct at32_ide_info));
-
 	info->irq = irq;
 	info->cs  = board->cs;
 
diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c
index 9ab89732cf94..55516103626a 100644
--- a/drivers/ata/pata_bf54x.c
+++ b/drivers/ata/pata_bf54x.c
@@ -911,7 +911,10 @@ static void bfin_bmdma_start(struct ata_queued_cmd *qc)
 	/* Reset all transfer count */
 	ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base) | TFRCNT_RST);
 
-	/* Set transfer length to buffer len */
+	/* Set ATAPI state machine contorl in terminate sequence */
+	ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base) | END_ON_TERM);
+
+	/* Set transfer length to buffer len */
 	for_each_sg(qc->sg, sg, qc->n_elem, si) {
 		ATAPI_SET_XFER_LEN(base, (sg_dma_len(sg) >> 1));
 	}
diff --git a/drivers/ata/pata_cypress.c b/drivers/ata/pata_cypress.c
index a9c3218e22fd..2ff62608ae37 100644
--- a/drivers/ata/pata_cypress.c
+++ b/drivers/ata/pata_cypress.c
@@ -62,14 +62,14 @@ static void cy82c693_set_piomode(struct ata_port *ap, struct ata_device *adev)
 		return;
 	}
 
-	time_16 = FIT(t.recover, 0, 15) | (FIT(t.active, 0, 15) << 4);
-	time_8 = FIT(t.act8b, 0, 15) | (FIT(t.rec8b, 0, 15) << 4);
+	time_16 = clamp_val(t.recover, 0, 15) | (clamp_val(t.active, 0, 15) << 4);
+	time_8 = clamp_val(t.act8b, 0, 15) | (clamp_val(t.rec8b, 0, 15) << 4);
 
 	if (adev->devno == 0) {
 		pci_read_config_dword(pdev, CY82_IDE_ADDRSETUP, &addr);
 
 		addr &= ~0x0F;	/* Mask bits */
-		addr |= FIT(t.setup, 0, 15);
+		addr |= clamp_val(t.setup, 0, 15);
 
 		pci_write_config_dword(pdev, CY82_IDE_ADDRSETUP, addr);
 		pci_write_config_byte(pdev, CY82_IDE_MASTER_IOR, time_16);
@@ -79,7 +79,7 @@ static void cy82c693_set_piomode(struct ata_port *ap, struct ata_device *adev)
 		pci_read_config_dword(pdev, CY82_IDE_ADDRSETUP, &addr);
 
 		addr &= ~0xF0;	/* Mask bits */
-		addr |= (FIT(t.setup, 0, 15) << 4);
+		addr |= (clamp_val(t.setup, 0, 15) << 4);
 
 		pci_write_config_dword(pdev, CY82_IDE_ADDRSETUP, addr);
 		pci_write_config_byte(pdev, CY82_IDE_SLAVE_IOR, time_16);
diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
index 7af4b29cc422..fe7cc8ed4ea4 100644
--- a/drivers/ata/pata_legacy.c
+++ b/drivers/ata/pata_legacy.c
@@ -343,8 +343,8 @@ static void ht6560a_set_piomode(struct ata_port *ap, struct ata_device *adev)
 	/* Get the timing data in cycles. For now play safe at 50Mhz */
 	ata_timing_compute(adev, adev->pio_mode, &t, 20000, 1000);
 
-	active = FIT(t.active, 2, 15);
-	recover = FIT(t.recover, 4, 15);
+	active = clamp_val(t.active, 2, 15);
+	recover = clamp_val(t.recover, 4, 15);
 
 	inb(0x3E6);
 	inb(0x3E6);
@@ -377,8 +377,8 @@ static void ht6560b_set_piomode(struct ata_port *ap, struct ata_device *adev)
 	/* Get the timing data in cycles. For now play safe at 50Mhz */
 	ata_timing_compute(adev, adev->pio_mode, &t, 20000, 1000);
 
-	active = FIT(t.active, 2, 15);
-	recover = FIT(t.recover, 2, 16);
+	active = clamp_val(t.active, 2, 15);
+	recover = clamp_val(t.recover, 2, 16);
 	recover &= 0x15;
 
 	inb(0x3E6);
@@ -462,9 +462,9 @@ static void opti82c611a_set_piomode(struct ata_port *ap,
 		ata_timing_merge(&t, &tp, &t, ATA_TIMING_SETUP);
 	}
 
-	active = FIT(t.active, 2, 17) - 2;
-	recover = FIT(t.recover, 1, 16) - 1;
-	setup = FIT(t.setup, 1, 4) - 1;
+	active = clamp_val(t.active, 2, 17) - 2;
+	recover = clamp_val(t.recover, 1, 16) - 1;
+	setup = clamp_val(t.setup, 1, 4) - 1;
 
 	/* Select the right timing bank for write timing */
 	rc = ioread8(ap->ioaddr.lbal_addr);
@@ -541,9 +541,9 @@ static void opti82c46x_set_piomode(struct ata_port *ap, struct ata_device *adev)
 		ata_timing_merge(&t, &tp, &t, ATA_TIMING_SETUP);
 	}
 
-	active = FIT(t.active, 2, 17) - 2;
-	recover = FIT(t.recover, 1, 16) - 1;
-	setup = FIT(t.setup, 1, 4) - 1;
+	active = clamp_val(t.active, 2, 17) - 2;
+	recover = clamp_val(t.recover, 1, 16) - 1;
+	setup = clamp_val(t.setup, 1, 4) - 1;
 
 	/* Select the right timing bank for write timing */
 	rc = ioread8(ap->ioaddr.lbal_addr);
@@ -624,11 +624,11 @@ static void qdi6500_set_piomode(struct ata_port *ap, struct ata_device *adev)
 	ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000);
 
 	if (ld_qdi->fast) {
-		active = 8 - FIT(t.active, 1, 8);
-		recovery = 18 - FIT(t.recover, 3, 18);
+		active = 8 - clamp_val(t.active, 1, 8);
+		recovery = 18 - clamp_val(t.recover, 3, 18);
 	} else {
-		active = 9 - FIT(t.active, 2, 9);
-		recovery = 15 - FIT(t.recover, 0, 15);
+		active = 9 - clamp_val(t.active, 2, 9);
+		recovery = 15 - clamp_val(t.recover, 0, 15);
 	}
 	timing = (recovery << 4) | active | 0x08;
 
@@ -658,11 +658,11 @@ static void qdi6580dp_set_piomode(struct ata_port *ap, struct ata_device *adev)
 	ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000);
 
 	if (ld_qdi->fast) {
-		active = 8 - FIT(t.active, 1, 8);
-		recovery = 18 - FIT(t.recover, 3, 18);
+		active = 8 - clamp_val(t.active, 1, 8);
+		recovery = 18 - clamp_val(t.recover, 3, 18);
 	} else {
-		active = 9 - FIT(t.active, 2, 9);
-		recovery = 15 - FIT(t.recover, 0, 15);
+		active = 9 - clamp_val(t.active, 2, 9);
+		recovery = 15 - clamp_val(t.recover, 0, 15);
 	}
 	timing = (recovery << 4) | active | 0x08;
 
@@ -695,11 +695,11 @@ static void qdi6580_set_piomode(struct ata_port *ap, struct ata_device *adev)
 	ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000);
 
 	if (ld_qdi->fast) {
-		active = 8 - FIT(t.active, 1, 8);
-		recovery = 18 - FIT(t.recover, 3, 18);
+		active = 8 - clamp_val(t.active, 1, 8);
+		recovery = 18 - clamp_val(t.recover, 3, 18);
 	} else {
-		active = 9 - FIT(t.active, 2, 9);
-		recovery = 15 - FIT(t.recover, 0, 15);
+		active = 9 - clamp_val(t.active, 2, 9);
+		recovery = 15 - clamp_val(t.recover, 0, 15);
 	}
 	timing = (recovery << 4) | active | 0x08;
 	ld_qdi->clock[adev->devno] = timing;
@@ -830,8 +830,8 @@ static void winbond_set_piomode(struct ata_port *ap, struct ata_device *adev)
 	else
 		ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000);
 
-	active = (FIT(t.active, 3, 17) - 1) & 0x0F;
-	recovery = (FIT(t.recover, 1, 15) + 1) & 0x0F;
+	active = (clamp_val(t.active, 3, 17) - 1) & 0x0F;
+	recovery = (clamp_val(t.recover, 1, 15) + 1) & 0x0F;
 	timing = (active << 4) | recovery;
 	winbond_writecfg(ld_winbond->timing, timing, reg);
 
@@ -842,7 +842,7 @@ static void winbond_set_piomode(struct ata_port *ap, struct ata_device *adev)
 	reg |= 0x08;	/* FIFO off */
 	if (!ata_pio_need_iordy(adev))
 		reg |= 0x02;	/* IORDY off */
-	reg |= (FIT(t.setup, 0, 3) << 6);
+	reg |= (clamp_val(t.setup, 0, 3) << 6);
 	winbond_writecfg(ld_winbond->timing, timing + 1, reg);
 }
 
diff --git a/drivers/ata/pata_ns87410.c b/drivers/ata/pata_ns87410.c
index 76d2455bc453..be756b7ef07e 100644
--- a/drivers/ata/pata_ns87410.c
+++ b/drivers/ata/pata_ns87410.c
@@ -91,9 +91,9 @@ static void ns87410_set_piomode(struct ata_port *ap, struct ata_device *adev)
 		return;
 	}
 
-	at.active = FIT(at.active, 2, 16) - 2;
-	at.setup = FIT(at.setup, 1, 4) - 1;
-	at.recover = FIT(at.recover, 1, 12) - 1;
+	at.active = clamp_val(at.active, 2, 16) - 2;
+	at.setup = clamp_val(at.setup, 1, 4) - 1;
+	at.recover = clamp_val(at.recover, 1, 12) - 1;
 
 	idetcr = (at.setup << 6) | (recoverbits[at.recover] << 3) | activebits[at.active];
 
diff --git a/drivers/ata/pata_ns87415.c b/drivers/ata/pata_ns87415.c
index ae92b0049bd5..e0aa7eaaee0a 100644
--- a/drivers/ata/pata_ns87415.c
+++ b/drivers/ata/pata_ns87415.c
@@ -66,8 +66,8 @@ static void ns87415_set_mode(struct ata_port *ap, struct ata_device *adev, u8 mo
 
 	ata_timing_compute(adev, adev->pio_mode, &t, T, 0);
 
-	clocking = 17 - FIT(t.active, 2, 17);
-	clocking |= (16 - FIT(t.recover, 1, 16)) << 4;
+	clocking = 17 - clamp_val(t.active, 2, 17);
+	clocking |= (16 - clamp_val(t.recover, 1, 16)) << 4;
 	/* Use the same timing for read and write bytes */
 	clocking |= (clocking << 8);
 	pci_write_config_word(dev, timing, clocking);
diff --git a/drivers/ata/pata_qdi.c b/drivers/ata/pata_qdi.c
index bf45cf017753..97e5b090d7c2 100644
--- a/drivers/ata/pata_qdi.c
+++ b/drivers/ata/pata_qdi.c
@@ -60,11 +60,11 @@ static void qdi6500_set_piomode(struct ata_port *ap, struct ata_device *adev)
 	ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000);
 
 	if (qdi->fast) {
-		active = 8 - FIT(t.active, 1, 8);
-		recovery = 18 - FIT(t.recover, 3, 18);
+		active = 8 - clamp_val(t.active, 1, 8);
+		recovery = 18 - clamp_val(t.recover, 3, 18);
 	} else {
-		active = 9 - FIT(t.active, 2, 9);
-		recovery = 15 - FIT(t.recover, 0, 15);
+		active = 9 - clamp_val(t.active, 2, 9);
+		recovery = 15 - clamp_val(t.recover, 0, 15);
 	}
 	timing = (recovery << 4) | active | 0x08;
 
@@ -84,11 +84,11 @@ static void qdi6580_set_piomode(struct ata_port *ap, struct ata_device *adev)
 	ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000);
 
 	if (qdi->fast) {
-		active = 8 - FIT(t.active, 1, 8);
-		recovery = 18 - FIT(t.recover, 3, 18);
+		active = 8 - clamp_val(t.active, 1, 8);
+		recovery = 18 - clamp_val(t.recover, 3, 18);
 	} else {
-		active = 9 - FIT(t.active, 2, 9);
-		recovery = 15 - FIT(t.recover, 0, 15);
+		active = 9 - clamp_val(t.active, 2, 9);
+		recovery = 15 - clamp_val(t.recover, 0, 15);
 	}
 	timing = (recovery << 4) | active | 0x08;
 
diff --git a/drivers/ata/pata_sl82c105.c b/drivers/ata/pata_sl82c105.c
index 70d94fb28a5f..69877bd81815 100644
--- a/drivers/ata/pata_sl82c105.c
+++ b/drivers/ata/pata_sl82c105.c
@@ -216,7 +216,7 @@ static int sl82c105_qc_defer(struct ata_queued_cmd *qc)
 	struct ata_port *alt = host->ports[1 ^ qc->ap->port_no];
 	int rc;
 
-	/* First apply the usual rules */
+	/* First apply the usual rules */
 	rc = ata_std_qc_defer(qc);
 	if (rc != 0)
 		return rc;
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
index 2fea6cbe7755..708ed144ede9 100644
--- a/drivers/ata/pata_via.c
+++ b/drivers/ata/pata_via.c
@@ -259,15 +259,15 @@ static void via_do_set_mode(struct ata_port *ap, struct ata_device *adev, int mo
 
 		pci_read_config_byte(pdev, 0x4C, &setup);
 		setup &= ~(3 << shift);
-		setup |= FIT(t.setup, 1, 4) << shift;	/* 1,4 or 1,4 - 1  FIXME */
+		setup |= clamp_val(t.setup, 1, 4) << shift;	/* 1,4 or 1,4 - 1  FIXME */
 		pci_write_config_byte(pdev, 0x4C, setup);
 	}
 
 	/* Load the PIO mode bits */
 	pci_write_config_byte(pdev, 0x4F - ap->port_no,
-		((FIT(t.act8b, 1, 16) - 1) << 4) | (FIT(t.rec8b, 1, 16) - 1));
+		((clamp_val(t.act8b, 1, 16) - 1) << 4) | (clamp_val(t.rec8b, 1, 16) - 1));
 	pci_write_config_byte(pdev, 0x48 + offset,
-		((FIT(t.active, 1, 16) - 1) << 4) | (FIT(t.recover, 1, 16) - 1));
+		((clamp_val(t.active, 1, 16) - 1) << 4) | (clamp_val(t.recover, 1, 16) - 1));
 
 	/* Load the UDMA bits according to type */
 	switch(udma_type) {
@@ -275,16 +275,16 @@ static void via_do_set_mode(struct ata_port *ap, struct ata_device *adev, int mo
 		/* BUG() ? */
 		/* fall through */
 	case 33:
-		ut = t.udma ? (0xe0 | (FIT(t.udma, 2, 5) - 2)) : 0x03;
+		ut = t.udma ? (0xe0 | (clamp_val(t.udma, 2, 5) - 2)) : 0x03;
 		break;
 	case 66:
-		ut = t.udma ? (0xe8 | (FIT(t.udma, 2, 9) - 2)) : 0x0f;
+		ut = t.udma ? (0xe8 | (clamp_val(t.udma, 2, 9) - 2)) : 0x0f;
 		break;
 	case 100:
-		ut = t.udma ? (0xe0 | (FIT(t.udma, 2, 9) - 2)) : 0x07;
+		ut = t.udma ? (0xe0 | (clamp_val(t.udma, 2, 9) - 2)) : 0x07;
 		break;
 	case 133:
-		ut = t.udma ? (0xe0 | (FIT(t.udma, 2, 9) - 2)) : 0x07;
+		ut = t.udma ? (0xe0 | (clamp_val(t.udma, 2, 9) - 2)) : 0x07;
 		break;
 	}
 
diff --git a/drivers/ata/pata_winbond.c b/drivers/ata/pata_winbond.c
index 6e52a3573fbf..474528f8fe3d 100644
--- a/drivers/ata/pata_winbond.c
+++ b/drivers/ata/pata_winbond.c
@@ -75,8 +75,8 @@ static void winbond_set_piomode(struct ata_port *ap, struct ata_device *adev)
 	else
 		ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000);
 
-	active = (FIT(t.active, 3, 17) - 1) & 0x0F;
-	recovery = (FIT(t.recover, 1, 15) + 1) & 0x0F;
+	active = (clamp_val(t.active, 3, 17) - 1) & 0x0F;
+	recovery = (clamp_val(t.recover, 1, 15) + 1) & 0x0F;
 	timing = (active << 4) | recovery;
 	winbond_writecfg(winbond->config, timing, reg);
 
@@ -87,7 +87,7 @@ static void winbond_set_piomode(struct ata_port *ap, struct ata_device *adev)
 	reg |= 0x08;	/* FIFO off */
 	if (!ata_pio_need_iordy(adev))
 		reg |= 0x02;	/* IORDY off */
-	reg |= (FIT(t.setup, 0, 3) << 6);
+	reg |= (clamp_val(t.setup, 0, 3) << 6);
 	winbond_writecfg(winbond->config, timing + 1, reg);
 }
 
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index 853559e32315..3924e7209a44 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -34,7 +34,7 @@ enum {
34 34
35 SATA_FSL_HOST_FLAGS = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 35 SATA_FSL_HOST_FLAGS = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
36 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA | 36 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
37 ATA_FLAG_NCQ), 37 ATA_FLAG_PMP | ATA_FLAG_NCQ),
38 38
39 SATA_FSL_MAX_CMDS = SATA_FSL_QUEUE_DEPTH, 39 SATA_FSL_MAX_CMDS = SATA_FSL_QUEUE_DEPTH,
40 SATA_FSL_CMD_HDR_SIZE = 16, /* 4 DWORDS */ 40 SATA_FSL_CMD_HDR_SIZE = 16, /* 4 DWORDS */
@@ -395,7 +395,7 @@ static void sata_fsl_qc_prep(struct ata_queued_cmd *qc)
395 cd = (struct command_desc *)pp->cmdentry + tag; 395 cd = (struct command_desc *)pp->cmdentry + tag;
396 cd_paddr = pp->cmdentry_paddr + tag * SATA_FSL_CMD_DESC_SIZE; 396 cd_paddr = pp->cmdentry_paddr + tag * SATA_FSL_CMD_DESC_SIZE;
397 397
398 ata_tf_to_fis(&qc->tf, 0, 1, (u8 *) &cd->cfis); 398 ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, (u8 *) &cd->cfis);
399 399
400 VPRINTK("Dumping cfis : 0x%x, 0x%x, 0x%x\n", 400 VPRINTK("Dumping cfis : 0x%x, 0x%x, 0x%x\n",
401 cd->cfis[0], cd->cfis[1], cd->cfis[2]); 401 cd->cfis[0], cd->cfis[1], cd->cfis[2]);
@@ -438,6 +438,8 @@ static unsigned int sata_fsl_qc_issue(struct ata_queued_cmd *qc)
438 ioread32(CA + hcr_base), 438 ioread32(CA + hcr_base),
439 ioread32(CE + hcr_base), ioread32(CC + hcr_base)); 439 ioread32(CE + hcr_base), ioread32(CC + hcr_base));
440 440
441 iowrite32(qc->dev->link->pmp, CQPMP + hcr_base);
442
441 /* Simply queue command to the controller/device */ 443 /* Simply queue command to the controller/device */
442 iowrite32(1 << tag, CQ + hcr_base); 444 iowrite32(1 << tag, CQ + hcr_base);
443 445
@@ -558,11 +560,36 @@ static void sata_fsl_thaw(struct ata_port *ap)
558 ioread32(hcr_base + HCONTROL), ioread32(hcr_base + HSTATUS)); 560 ioread32(hcr_base + HCONTROL), ioread32(hcr_base + HSTATUS));
559} 561}
560 562
563static void sata_fsl_pmp_attach(struct ata_port *ap)
564{
565 struct sata_fsl_host_priv *host_priv = ap->host->private_data;
566 void __iomem *hcr_base = host_priv->hcr_base;
567 u32 temp;
568
569 temp = ioread32(hcr_base + HCONTROL);
570 iowrite32((temp | HCONTROL_PMP_ATTACHED), hcr_base + HCONTROL);
571}
572
573static void sata_fsl_pmp_detach(struct ata_port *ap)
574{
575 struct sata_fsl_host_priv *host_priv = ap->host->private_data;
576 void __iomem *hcr_base = host_priv->hcr_base;
577 u32 temp;
578
579 temp = ioread32(hcr_base + HCONTROL);
580 temp &= ~HCONTROL_PMP_ATTACHED;
581 iowrite32(temp, hcr_base + HCONTROL);
582
583 /* enable interrupts on the controller/port */
584 temp = ioread32(hcr_base + HCONTROL);
585 iowrite32((temp | DEFAULT_PORT_IRQ_ENABLE_MASK), hcr_base + HCONTROL);
586
587}
588
561static int sata_fsl_port_start(struct ata_port *ap) 589static int sata_fsl_port_start(struct ata_port *ap)
562{ 590{
563 struct device *dev = ap->host->dev; 591 struct device *dev = ap->host->dev;
564 struct sata_fsl_port_priv *pp; 592 struct sata_fsl_port_priv *pp;
565 int retval;
566 void *mem; 593 void *mem;
567 dma_addr_t mem_dma; 594 dma_addr_t mem_dma;
568 struct sata_fsl_host_priv *host_priv = ap->host->private_data; 595 struct sata_fsl_host_priv *host_priv = ap->host->private_data;
@@ -688,12 +715,13 @@ static int sata_fsl_prereset(struct ata_link *link, unsigned long deadline)
688} 715}
689 716
690static int sata_fsl_softreset(struct ata_link *link, unsigned int *class, 717static int sata_fsl_softreset(struct ata_link *link, unsigned int *class,
691 unsigned long deadline) 718 unsigned long deadline)
692{ 719{
693 struct ata_port *ap = link->ap; 720 struct ata_port *ap = link->ap;
694 struct sata_fsl_port_priv *pp = ap->private_data; 721 struct sata_fsl_port_priv *pp = ap->private_data;
695 struct sata_fsl_host_priv *host_priv = ap->host->private_data; 722 struct sata_fsl_host_priv *host_priv = ap->host->private_data;
696 void __iomem *hcr_base = host_priv->hcr_base; 723 void __iomem *hcr_base = host_priv->hcr_base;
724 int pmp = sata_srst_pmp(link);
697 u32 temp; 725 u32 temp;
698 struct ata_taskfile tf; 726 struct ata_taskfile tf;
699 u8 *cfis; 727 u8 *cfis;
@@ -703,6 +731,9 @@ static int sata_fsl_softreset(struct ata_link *link, unsigned int *class,
703 731
704 DPRINTK("in xx_softreset\n"); 732 DPRINTK("in xx_softreset\n");
705 733
734 if (pmp != SATA_PMP_CTRL_PORT)
735 goto issue_srst;
736
706try_offline_again: 737try_offline_again:
707 /* 738 /*
708 * Force host controller to go off-line, aborting current operations 739 * Force host controller to go off-line, aborting current operations
@@ -746,6 +777,7 @@ try_offline_again:
746 777
747 temp = ioread32(hcr_base + HCONTROL); 778 temp = ioread32(hcr_base + HCONTROL);
748 temp |= (HCONTROL_ONLINE_PHY_RST | HCONTROL_SNOOP_ENABLE); 779 temp |= (HCONTROL_ONLINE_PHY_RST | HCONTROL_SNOOP_ENABLE);
780 temp |= HCONTROL_PMP_ATTACHED;
749 iowrite32(temp, hcr_base + HCONTROL); 781 iowrite32(temp, hcr_base + HCONTROL);
750 782
751 temp = ata_wait_register(hcr_base + HSTATUS, ONLINE, 0, 1, 500); 783 temp = ata_wait_register(hcr_base + HSTATUS, ONLINE, 0, 1, 500);
@@ -771,7 +803,8 @@ try_offline_again:
771 ata_port_printk(ap, KERN_WARNING, 803 ata_port_printk(ap, KERN_WARNING,
772 "No Device OR PHYRDY change,Hstatus = 0x%x\n", 804 "No Device OR PHYRDY change,Hstatus = 0x%x\n",
773 ioread32(hcr_base + HSTATUS)); 805 ioread32(hcr_base + HSTATUS));
774 goto err; 806 *class = ATA_DEV_NONE;
807 goto out;
775 } 808 }
776 809
777 /* 810 /*
@@ -783,7 +816,8 @@ try_offline_again:
783 816
784 if ((temp & 0xFF) != 0x18) { 817 if ((temp & 0xFF) != 0x18) {
785 ata_port_printk(ap, KERN_WARNING, "No Signature Update\n"); 818 ata_port_printk(ap, KERN_WARNING, "No Signature Update\n");
786 goto err; 819 *class = ATA_DEV_NONE;
820 goto out;
787 } else { 821 } else {
788 ata_port_printk(ap, KERN_INFO, 822 ata_port_printk(ap, KERN_INFO,
789 "Signature Update detected @ %d msecs\n", 823 "Signature Update detected @ %d msecs\n",
@@ -798,6 +832,7 @@ try_offline_again:
798 * reached here, we can send a command to the target device 832 * reached here, we can send a command to the target device
799 */ 833 */
800 834
835issue_srst:
801 DPRINTK("Sending SRST/device reset\n"); 836 DPRINTK("Sending SRST/device reset\n");
802 837
803 ata_tf_init(link->device, &tf); 838 ata_tf_init(link->device, &tf);
@@ -808,7 +843,7 @@ try_offline_again:
808 SRST_CMD | CMD_DESC_SNOOP_ENABLE, 0, 0, 5); 843 SRST_CMD | CMD_DESC_SNOOP_ENABLE, 0, 0, 5);
809 844
810 tf.ctl |= ATA_SRST; /* setup SRST bit in taskfile control reg */ 845 tf.ctl |= ATA_SRST; /* setup SRST bit in taskfile control reg */
811 ata_tf_to_fis(&tf, 0, 0, cfis); 846 ata_tf_to_fis(&tf, pmp, 0, cfis);
812 847
813 DPRINTK("Dumping cfis : 0x%x, 0x%x, 0x%x, 0x%x\n", 848 DPRINTK("Dumping cfis : 0x%x, 0x%x, 0x%x, 0x%x\n",
814 cfis[0], cfis[1], cfis[2], cfis[3]); 849 cfis[0], cfis[1], cfis[2], cfis[3]);
@@ -854,8 +889,10 @@ try_offline_again:
854 sata_fsl_setup_cmd_hdr_entry(pp, 0, CMD_DESC_SNOOP_ENABLE, 0, 0, 5); 889 sata_fsl_setup_cmd_hdr_entry(pp, 0, CMD_DESC_SNOOP_ENABLE, 0, 0, 5);
855 890
856 tf.ctl &= ~ATA_SRST; /* 2nd H2D Ctl. register FIS */ 891 tf.ctl &= ~ATA_SRST; /* 2nd H2D Ctl. register FIS */
857 ata_tf_to_fis(&tf, 0, 0, cfis); 892 ata_tf_to_fis(&tf, pmp, 0, cfis);
858 893
894 if (pmp != SATA_PMP_CTRL_PORT)
895 iowrite32(pmp, CQPMP + hcr_base);
859 iowrite32(1, CQ + hcr_base); 896 iowrite32(1, CQ + hcr_base);
860 msleep(150); /* ?? */ 897 msleep(150); /* ?? */
861 898
@@ -886,12 +923,21 @@ try_offline_again:
886 VPRINTK("cereg = 0x%x\n", ioread32(hcr_base + CE)); 923 VPRINTK("cereg = 0x%x\n", ioread32(hcr_base + CE));
887 } 924 }
888 925
926out:
889 return 0; 927 return 0;
890 928
891err: 929err:
892 return -EIO; 930 return -EIO;
893} 931}
894 932
933static void sata_fsl_error_handler(struct ata_port *ap)
934{
935
936 DPRINTK("in xx_error_handler\n");
937 sata_pmp_error_handler(ap);
938
939}
940
895static void sata_fsl_post_internal_cmd(struct ata_queued_cmd *qc) 941static void sata_fsl_post_internal_cmd(struct ata_queued_cmd *qc)
896{ 942{
897 if (qc->flags & ATA_QCFLAG_FAILED) 943 if (qc->flags & ATA_QCFLAG_FAILED)
@@ -905,18 +951,21 @@ static void sata_fsl_post_internal_cmd(struct ata_queued_cmd *qc)
905 951
906static void sata_fsl_error_intr(struct ata_port *ap) 952static void sata_fsl_error_intr(struct ata_port *ap)
907{ 953{
908 struct ata_link *link = &ap->link;
909 struct ata_eh_info *ehi = &link->eh_info;
910 struct sata_fsl_host_priv *host_priv = ap->host->private_data; 954 struct sata_fsl_host_priv *host_priv = ap->host->private_data;
911 void __iomem *hcr_base = host_priv->hcr_base; 955 void __iomem *hcr_base = host_priv->hcr_base;
912 u32 hstatus, dereg, cereg = 0, SError = 0; 956 u32 hstatus, dereg=0, cereg = 0, SError = 0;
913 unsigned int err_mask = 0, action = 0; 957 unsigned int err_mask = 0, action = 0;
914 struct ata_queued_cmd *qc; 958 int freeze = 0, abort=0;
915 int freeze = 0; 959 struct ata_link *link = NULL;
960 struct ata_queued_cmd *qc = NULL;
961 struct ata_eh_info *ehi;
916 962
917 hstatus = ioread32(hcr_base + HSTATUS); 963 hstatus = ioread32(hcr_base + HSTATUS);
918 cereg = ioread32(hcr_base + CE); 964 cereg = ioread32(hcr_base + CE);
919 965
966 /* first, analyze and record host port events */
967 link = &ap->link;
968 ehi = &link->eh_info;
920 ata_ehi_clear_desc(ehi); 969 ata_ehi_clear_desc(ehi);
921 970
922 /* 971 /*
@@ -926,42 +975,28 @@ static void sata_fsl_error_intr(struct ata_port *ap)
926 sata_fsl_scr_read(ap, SCR_ERROR, &SError); 975 sata_fsl_scr_read(ap, SCR_ERROR, &SError);
927 if (unlikely(SError & 0xFFFF0000)) { 976 if (unlikely(SError & 0xFFFF0000)) {
928 sata_fsl_scr_write(ap, SCR_ERROR, SError); 977 sata_fsl_scr_write(ap, SCR_ERROR, SError);
929 err_mask |= AC_ERR_ATA_BUS;
930 } 978 }
931 979
932 DPRINTK("error_intr,hStat=0x%x,CE=0x%x,DE =0x%x,SErr=0x%x\n", 980 DPRINTK("error_intr,hStat=0x%x,CE=0x%x,DE =0x%x,SErr=0x%x\n",
933 hstatus, cereg, ioread32(hcr_base + DE), SError); 981 hstatus, cereg, ioread32(hcr_base + DE), SError);
934 982
935 /* handle single device errors */ 983 /* handle fatal errors */
936 if (cereg) { 984 if (hstatus & FATAL_ERROR_DECODE) {
937 /* 985 ehi->err_mask |= AC_ERR_ATA_BUS;
938 * clear the command error, also clears queue to the device 986 ehi->action |= ATA_EH_SOFTRESET;
939 * in error, and we can (re)issue commands to this device.
940 * When a device is in error all commands queued into the
941 * host controller and at the device are considered aborted
942 * and the queue for that device is stopped. Now, after
943 * clearing the device error, we can issue commands to the
944 * device to interrogate it to find the source of the error.
945 */
946 dereg = ioread32(hcr_base + DE);
947 iowrite32(dereg, hcr_base + DE);
948 iowrite32(cereg, hcr_base + CE);
949 987
950 DPRINTK("single device error, CE=0x%x, DE=0x%x\n",
951 ioread32(hcr_base + CE), ioread32(hcr_base + DE));
952 /* 988 /*
953 * We should consider this as non fatal error, and TF must 989 * Ignore serror in case of fatal errors as we always want
954 * be updated as done below. 990 * to do a soft-reset of the FSL SATA controller. Analyzing
991 * serror may cause libata to schedule a hard-reset action,
992 * and hard-reset currently does not do controller
993 * offline/online, causing command timeouts and leads to an
994 * un-recoverable state, hence make libATA ignore
995 * autopsy in case of fatal errors.
955 */ 996 */
956 997
957 err_mask |= AC_ERR_DEV; 998 ehi->flags |= ATA_EHI_NO_AUTOPSY;
958 }
959 999
960 /* handle fatal errors */
961 if (hstatus & FATAL_ERROR_DECODE) {
962 err_mask |= AC_ERR_ATA_BUS;
963 action |= ATA_EH_RESET;
964 /* how will fatal error interrupts be completed ?? */
965 freeze = 1; 1000 freeze = 1;
966 } 1001 }
967 1002
@@ -971,30 +1006,83 @@ static void sata_fsl_error_intr(struct ata_port *ap)
971 1006
972 /* Setup a soft-reset EH action */ 1007 /* Setup a soft-reset EH action */
973 ata_ehi_hotplugged(ehi); 1008 ata_ehi_hotplugged(ehi);
1009 ata_ehi_push_desc(ehi, "%s", "PHY RDY changed");
974 freeze = 1; 1010 freeze = 1;
975 } 1011 }
976 1012
977 /* record error info */ 1013 /* handle single device errors */
978 qc = ata_qc_from_tag(ap, link->active_tag); 1014 if (cereg) {
1015 /*
1016 * clear the command error, also clears queue to the device
1017 * in error, and we can (re)issue commands to this device.
1018 * When a device is in error all commands queued into the
1019 * host controller and at the device are considered aborted
1020 * and the queue for that device is stopped. Now, after
1021 * clearing the device error, we can issue commands to the
1022 * device to interrogate it to find the source of the error.
1023 */
1024 abort = 1;
1025
1026 DPRINTK("single device error, CE=0x%x, DE=0x%x\n",
1027 ioread32(hcr_base + CE), ioread32(hcr_base + DE));
979 1028
980 if (qc) 1029 /* find out the offending link and qc */
1030 if (ap->nr_pmp_links) {
1031 dereg = ioread32(hcr_base + DE);
1032 iowrite32(dereg, hcr_base + DE);
1033 iowrite32(cereg, hcr_base + CE);
1034
1035 if (dereg < ap->nr_pmp_links) {
1036 link = &ap->pmp_link[dereg];
1037 ehi = &link->eh_info;
1038 qc = ata_qc_from_tag(ap, link->active_tag);
1039 /*
1040 * We should consider this as non fatal error,
1041 * and TF must be updated as done below.
1042 */
1043
1044 err_mask |= AC_ERR_DEV;
1045
1046 } else {
1047 err_mask |= AC_ERR_HSM;
1048 action |= ATA_EH_HARDRESET;
1049 freeze = 1;
1050 }
1051 } else {
1052 dereg = ioread32(hcr_base + DE);
1053 iowrite32(dereg, hcr_base + DE);
1054 iowrite32(cereg, hcr_base + CE);
1055
1056 qc = ata_qc_from_tag(ap, link->active_tag);
1057 /*
1058 * We should consider this as non fatal error,
1059 * and TF must be updated as done below.
1060 */
1061 err_mask |= AC_ERR_DEV;
1062 }
1063 }
1064
1065 /* record error info */
1066 if (qc) {
981 qc->err_mask |= err_mask; 1067 qc->err_mask |= err_mask;
982 else 1068 } else
983 ehi->err_mask |= err_mask; 1069 ehi->err_mask |= err_mask;
984 1070
985 ehi->action |= action; 1071 ehi->action |= action;
986 ehi->serror |= SError;
987 1072
988 /* freeze or abort */ 1073 /* freeze or abort */
989 if (freeze) 1074 if (freeze)
990 ata_port_freeze(ap); 1075 ata_port_freeze(ap);
991 else 1076 else if (abort) {
992 ata_port_abort(ap); 1077 if (qc)
1078 ata_link_abort(qc->dev->link);
1079 else
1080 ata_port_abort(ap);
1081 }
993} 1082}
994 1083
995static void sata_fsl_host_intr(struct ata_port *ap) 1084static void sata_fsl_host_intr(struct ata_port *ap)
996{ 1085{
997 struct ata_link *link = &ap->link;
998 struct sata_fsl_host_priv *host_priv = ap->host->private_data; 1086 struct sata_fsl_host_priv *host_priv = ap->host->private_data;
999 void __iomem *hcr_base = host_priv->hcr_base; 1087 void __iomem *hcr_base = host_priv->hcr_base;
1000 u32 hstatus, qc_active = 0; 1088 u32 hstatus, qc_active = 0;
@@ -1017,10 +1105,19 @@ static void sata_fsl_host_intr(struct ata_port *ap)
1017 return; 1105 return;
1018 } 1106 }
1019 1107
1020 if (link->sactive) { /* only true for NCQ commands */ 1108 /* Read command completed register */
1109 qc_active = ioread32(hcr_base + CC);
1110
1111 VPRINTK("Status of all queues :\n");
1112 VPRINTK("qc_active/CC = 0x%x, CA = 0x%x, CE=0x%x,CQ=0x%x,apqa=0x%x\n",
1113 qc_active,
1114 ioread32(hcr_base + CA),
1115 ioread32(hcr_base + CE),
1116 ioread32(hcr_base + CQ),
1117 ap->qc_active);
1118
1119 if (qc_active & ap->qc_active) {
1021 int i; 1120 int i;
1022 /* Read command completed register */
1023 qc_active = ioread32(hcr_base + CC);
1024 /* clear CC bit, this will also complete the interrupt */ 1121 /* clear CC bit, this will also complete the interrupt */
1025 iowrite32(qc_active, hcr_base + CC); 1122 iowrite32(qc_active, hcr_base + CC);
1026 1123
@@ -1032,8 +1129,9 @@ static void sata_fsl_host_intr(struct ata_port *ap)
1032 for (i = 0; i < SATA_FSL_QUEUE_DEPTH; i++) { 1129 for (i = 0; i < SATA_FSL_QUEUE_DEPTH; i++) {
1033 if (qc_active & (1 << i)) { 1130 if (qc_active & (1 << i)) {
1034 qc = ata_qc_from_tag(ap, i); 1131 qc = ata_qc_from_tag(ap, i);
1035 if (qc) 1132 if (qc) {
1036 ata_qc_complete(qc); 1133 ata_qc_complete(qc);
1134 }
1037 DPRINTK 1135 DPRINTK
1038 ("completing ncq cmd,tag=%d,CC=0x%x,CA=0x%x\n", 1136 ("completing ncq cmd,tag=%d,CC=0x%x,CA=0x%x\n",
1039 i, ioread32(hcr_base + CC), 1137 i, ioread32(hcr_base + CC),
@@ -1042,19 +1140,21 @@ static void sata_fsl_host_intr(struct ata_port *ap)
1042 } 1140 }
1043 return; 1141 return;
1044 1142
1045 } else if (ap->qc_active) { 1143 } else if ((ap->qc_active & (1 << ATA_TAG_INTERNAL))) {
1046 iowrite32(1, hcr_base + CC); 1144 iowrite32(1, hcr_base + CC);
1047 qc = ata_qc_from_tag(ap, link->active_tag); 1145 qc = ata_qc_from_tag(ap, ATA_TAG_INTERNAL);
1048 1146
1049 DPRINTK("completing non-ncq cmd, tag=%d,CC=0x%x\n", 1147 DPRINTK("completing non-ncq cmd, CC=0x%x\n",
1050 link->active_tag, ioread32(hcr_base + CC)); 1148 ioread32(hcr_base + CC));
1051 1149
1052 if (qc) 1150 if (qc) {
1053 ata_qc_complete(qc); 1151 ata_qc_complete(qc);
1152 }
1054 } else { 1153 } else {
1055 /* Spurious Interrupt!! */ 1154 /* Spurious Interrupt!! */
1056 DPRINTK("spurious interrupt!!, CC = 0x%x\n", 1155 DPRINTK("spurious interrupt!!, CC = 0x%x\n",
1057 ioread32(hcr_base + CC)); 1156 ioread32(hcr_base + CC));
1157 iowrite32(qc_active, hcr_base + CC);
1058 return; 1158 return;
1059 } 1159 }
1060} 1160}
@@ -1130,9 +1230,6 @@ static int sata_fsl_init_controller(struct ata_host *host)
1130 iowrite32(0x00000FFFF, hcr_base + CE); 1230 iowrite32(0x00000FFFF, hcr_base + CE);
1131 iowrite32(0x00000FFFF, hcr_base + DE); 1231 iowrite32(0x00000FFFF, hcr_base + DE);
1132 1232
1133 /* initially assuming no Port multiplier, set CQPMP to 0 */
1134 iowrite32(0x0, hcr_base + CQPMP);
1135
1136 /* 1233 /*
1137 * host controller will be brought on-line, during xx_port_start() 1234 * host controller will be brought on-line, during xx_port_start()
1138 * callback, that should also initiate the OOB, COMINIT sequence 1235 * callback, that should also initiate the OOB, COMINIT sequence
@@ -1154,8 +1251,8 @@ static struct scsi_host_template sata_fsl_sht = {
1154 .dma_boundary = ATA_DMA_BOUNDARY, 1251 .dma_boundary = ATA_DMA_BOUNDARY,
1155}; 1252};
1156 1253
1157static const struct ata_port_operations sata_fsl_ops = { 1254static struct ata_port_operations sata_fsl_ops = {
1158 .inherits = &sata_port_ops, 1255 .inherits = &sata_pmp_port_ops,
1159 1256
1160 .qc_prep = sata_fsl_qc_prep, 1257 .qc_prep = sata_fsl_qc_prep,
1161 .qc_issue = sata_fsl_qc_issue, 1258 .qc_issue = sata_fsl_qc_issue,
@@ -1168,10 +1265,15 @@ static const struct ata_port_operations sata_fsl_ops = {
1168 .thaw = sata_fsl_thaw, 1265 .thaw = sata_fsl_thaw,
1169 .prereset = sata_fsl_prereset, 1266 .prereset = sata_fsl_prereset,
1170 .softreset = sata_fsl_softreset, 1267 .softreset = sata_fsl_softreset,
1268 .pmp_softreset = sata_fsl_softreset,
1269 .error_handler = sata_fsl_error_handler,
1171 .post_internal_cmd = sata_fsl_post_internal_cmd, 1270 .post_internal_cmd = sata_fsl_post_internal_cmd,
1172 1271
1173 .port_start = sata_fsl_port_start, 1272 .port_start = sata_fsl_port_start,
1174 .port_stop = sata_fsl_port_stop, 1273 .port_stop = sata_fsl_port_stop,
1274
1275 .pmp_attach = sata_fsl_pmp_attach,
1276 .pmp_detach = sata_fsl_pmp_detach,
1175}; 1277};
1176 1278
1177static const struct ata_port_info sata_fsl_port_info[] = { 1279static const struct ata_port_info sata_fsl_port_info[] = {
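
The sata_fsl changes collected above add port-multiplier (PMP) support along three paths: the host flags gain ATA_FLAG_PMP and the port ops now inherit sata_pmp_port_ops (with pmp_attach/pmp_detach toggling HCONTROL_PMP_ATTACHED); per-command routing places qc->dev->link->pmp both into the H2D FIS built by ata_tf_to_fis() and into the CQPMP register before the CQ bit is set; and reset/error handling switch to sata_srst_pmp() and sata_pmp_error_handler(). The stand-alone sketch below shows where that PMP number ends up in the FIS header; it is an illustrative helper following the SATA H2D Register FIS layout that ata_tf_to_fis() implements, not a kernel API.

    #include <stdint.h>

    #define FIS_TYPE_REG_H2D   0x27   /* Host-to-Device Register FIS */
    #define SATA_PMP_CTRL_PORT 15     /* PMP control port, as in libata */

    /* Illustrative only: fill the first bytes of a 20-byte H2D Register FIS. */
    static void h2d_fis_header(uint8_t fis[20], uint8_t pmp, int is_cmd)
    {
            fis[0] = FIS_TYPE_REG_H2D;
            /* byte 1, bits 3:0 = PM Port (0..14 device ports, 15 = control port);
             * bit 7 = C, set when the FIS carries a new command */
            fis[1] = (pmp & 0x0f) | (is_cmd ? 0x80 : 0);
            /* bytes 2..19 carry command, features, LBA, device, count, control */
    }

In the softreset above, sata_srst_pmp(link) returns SATA_PMP_CTRL_PORT for the host link, which is why the controller offline/online sequence is only run in that case and the code jumps straight to issue_srst when resetting a link behind the multiplier.
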
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index bb73b2222627..acf347f71a2f 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -72,7 +72,7 @@
72#include <linux/libata.h> 72#include <linux/libata.h>
73 73
74#define DRV_NAME "sata_mv" 74#define DRV_NAME "sata_mv"
75#define DRV_VERSION "1.20" 75#define DRV_VERSION "1.24"
76 76
77enum { 77enum {
78 /* BAR's are enumerated in terms of pci_resource_start() terms */ 78 /* BAR's are enumerated in terms of pci_resource_start() terms */
@@ -122,14 +122,17 @@ enum {
122 /* Host Flags */ 122 /* Host Flags */
123 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */ 123 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
124 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */ 124 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
125 /* SoC integrated controllers, no PCI interface */
126 MV_FLAG_SOC = (1 << 28),
127 125
128 MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 126 MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
129 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI | 127 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
130 ATA_FLAG_PIO_POLLING, 128 ATA_FLAG_PIO_POLLING,
129
131 MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE, 130 MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,
132 131
132 MV_GENIIE_FLAGS = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
133 ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
134 ATA_FLAG_NCQ | ATA_FLAG_AN,
135
133 CRQB_FLAG_READ = (1 << 0), 136 CRQB_FLAG_READ = (1 << 0),
134 CRQB_TAG_SHIFT = 1, 137 CRQB_TAG_SHIFT = 1,
135 CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */ 138 CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */
@@ -197,13 +200,6 @@ enum {
197 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */ 200 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
198 HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */ 201 HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
199 HC_MAIN_RSVD_SOC = (0x3fffffb << 6), /* bits 31-9, 7-6 */ 202 HC_MAIN_RSVD_SOC = (0x3fffffb << 6), /* bits 31-9, 7-6 */
200 HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
201 PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
202 PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
203 HC_MAIN_RSVD),
204 HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
205 HC_MAIN_RSVD_5),
206 HC_MAIN_MASKED_IRQS_SOC = (PORTS_0_3_COAL_DONE | HC_MAIN_RSVD_SOC),
207 203
208 /* SATAHC registers */ 204 /* SATAHC registers */
209 HC_CFG_OFS = 0, 205 HC_CFG_OFS = 0,
@@ -221,6 +217,7 @@ enum {
221 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */ 217 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
222 SATA_ACTIVE_OFS = 0x350, 218 SATA_ACTIVE_OFS = 0x350,
223 SATA_FIS_IRQ_CAUSE_OFS = 0x364, 219 SATA_FIS_IRQ_CAUSE_OFS = 0x364,
220 SATA_FIS_IRQ_AN = (1 << 9), /* async notification */
224 221
225 LTMODE_OFS = 0x30c, 222 LTMODE_OFS = 0x30c,
226 LTMODE_BIT8 = (1 << 8), /* unknown, but necessary */ 223 LTMODE_BIT8 = (1 << 8), /* unknown, but necessary */
@@ -357,12 +354,12 @@ enum {
357 MV_HP_ERRATA_50XXB2 = (1 << 2), 354 MV_HP_ERRATA_50XXB2 = (1 << 2),
358 MV_HP_ERRATA_60X1B2 = (1 << 3), 355 MV_HP_ERRATA_60X1B2 = (1 << 3),
359 MV_HP_ERRATA_60X1C0 = (1 << 4), 356 MV_HP_ERRATA_60X1C0 = (1 << 4),
360 MV_HP_ERRATA_XX42A0 = (1 << 5),
361 MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */ 357 MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */
362 MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */ 358 MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */
363 MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */ 359 MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */
364 MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */ 360 MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */
365 MV_HP_CUT_THROUGH = (1 << 10), /* can use EDMA cut-through */ 361 MV_HP_CUT_THROUGH = (1 << 10), /* can use EDMA cut-through */
362 MV_HP_FLAG_SOC = (1 << 11), /* SystemOnChip, no PCI */
366 363
367 /* Port private flags (pp_flags) */ 364 /* Port private flags (pp_flags) */
368 MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */ 365 MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
@@ -375,7 +372,7 @@ enum {
375#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II) 372#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
376#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE) 373#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
377#define IS_PCIE(hpriv) ((hpriv)->hp_flags & MV_HP_PCIE) 374#define IS_PCIE(hpriv) ((hpriv)->hp_flags & MV_HP_PCIE)
378#define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC)) 375#define IS_SOC(hpriv) ((hpriv)->hp_flags & MV_HP_FLAG_SOC)
379 376
380#define WINDOW_CTRL(i) (0x20030 + ((i) << 4)) 377#define WINDOW_CTRL(i) (0x20030 + ((i) << 4))
381#define WINDOW_BASE(i) (0x20034 + ((i) << 4)) 378#define WINDOW_BASE(i) (0x20034 + ((i) << 4))
@@ -459,6 +456,7 @@ struct mv_port_signal {
459 456
460struct mv_host_priv { 457struct mv_host_priv {
461 u32 hp_flags; 458 u32 hp_flags;
459 u32 main_irq_mask;
462 struct mv_port_signal signal[8]; 460 struct mv_port_signal signal[8];
463 const struct mv_hw_ops *ops; 461 const struct mv_hw_ops *ops;
464 int n_ports; 462 int n_ports;
@@ -640,25 +638,19 @@ static const struct ata_port_info mv_port_info[] = {
640 .port_ops = &mv6_ops, 638 .port_ops = &mv6_ops,
641 }, 639 },
642 { /* chip_6042 */ 640 { /* chip_6042 */
643 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS | 641 .flags = MV_GENIIE_FLAGS,
644 ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
645 ATA_FLAG_NCQ,
646 .pio_mask = 0x1f, /* pio0-4 */ 642 .pio_mask = 0x1f, /* pio0-4 */
647 .udma_mask = ATA_UDMA6, 643 .udma_mask = ATA_UDMA6,
648 .port_ops = &mv_iie_ops, 644 .port_ops = &mv_iie_ops,
649 }, 645 },
650 { /* chip_7042 */ 646 { /* chip_7042 */
651 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS | 647 .flags = MV_GENIIE_FLAGS,
652 ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
653 ATA_FLAG_NCQ,
654 .pio_mask = 0x1f, /* pio0-4 */ 648 .pio_mask = 0x1f, /* pio0-4 */
655 .udma_mask = ATA_UDMA6, 649 .udma_mask = ATA_UDMA6,
656 .port_ops = &mv_iie_ops, 650 .port_ops = &mv_iie_ops,
657 }, 651 },
658 { /* chip_soc */ 652 { /* chip_soc */
659 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS | 653 .flags = MV_GENIIE_FLAGS,
660 ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
661 ATA_FLAG_NCQ | MV_FLAG_SOC,
662 .pio_mask = 0x1f, /* pio0-4 */ 654 .pio_mask = 0x1f, /* pio0-4 */
663 .udma_mask = ATA_UDMA6, 655 .udma_mask = ATA_UDMA6,
664 .port_ops = &mv_iie_ops, 656 .port_ops = &mv_iie_ops,
@@ -818,12 +810,7 @@ static void mv_set_edma_ptrs(void __iomem *port_mmio,
818 writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS); 810 writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
819 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index, 811 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
820 port_mmio + EDMA_REQ_Q_IN_PTR_OFS); 812 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
821 813 writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
822 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
823 writelfl((pp->crqb_dma & 0xffffffff) | index,
824 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
825 else
826 writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
827 814
828 /* 815 /*
829 * initialize response queue 816 * initialize response queue
@@ -833,17 +820,38 @@ static void mv_set_edma_ptrs(void __iomem *port_mmio,
833 820
834 WARN_ON(pp->crpb_dma & 0xff); 821 WARN_ON(pp->crpb_dma & 0xff);
835 writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS); 822 writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
836 823 writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
837 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
838 writelfl((pp->crpb_dma & 0xffffffff) | index,
839 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
840 else
841 writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
842
843 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index, 824 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
844 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); 825 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
845} 826}
846 827
828static void mv_set_main_irq_mask(struct ata_host *host,
829 u32 disable_bits, u32 enable_bits)
830{
831 struct mv_host_priv *hpriv = host->private_data;
832 u32 old_mask, new_mask;
833
834 old_mask = hpriv->main_irq_mask;
835 new_mask = (old_mask & ~disable_bits) | enable_bits;
836 if (new_mask != old_mask) {
837 hpriv->main_irq_mask = new_mask;
838 writelfl(new_mask, hpriv->main_irq_mask_addr);
839 }
840}
841
842static void mv_enable_port_irqs(struct ata_port *ap,
843 unsigned int port_bits)
844{
845 unsigned int shift, hardport, port = ap->port_no;
846 u32 disable_bits, enable_bits;
847
848 MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
849
850 disable_bits = (DONE_IRQ | ERR_IRQ) << shift;
851 enable_bits = port_bits << shift;
852 mv_set_main_irq_mask(ap->host, disable_bits, enable_bits);
853}
854
847/** 855/**
848 * mv_start_dma - Enable eDMA engine 856 * mv_start_dma - Enable eDMA engine
849 * @base: port base address 857 * @base: port base address
@@ -886,9 +894,11 @@ static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
886 mv_edma_cfg(ap, want_ncq); 894 mv_edma_cfg(ap, want_ncq);
887 895
888 /* clear FIS IRQ Cause */ 896 /* clear FIS IRQ Cause */
889 writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS); 897 if (IS_GEN_IIE(hpriv))
898 writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
890 899
891 mv_set_edma_ptrs(port_mmio, hpriv, pp); 900 mv_set_edma_ptrs(port_mmio, hpriv, pp);
901 mv_enable_port_irqs(ap, DONE_IRQ|ERR_IRQ);
892 902
893 writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS); 903 writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
894 pp->pp_flags |= MV_PP_FLAG_EDMA_EN; 904 pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
@@ -1231,7 +1241,7 @@ static void mv_edma_cfg(struct ata_port *ap, int want_ncq)
1231 1241
1232 cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */ 1242 cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */
1233 cfg |= (1 << 22); /* enab 4-entry host queue cache */ 1243 cfg |= (1 << 22); /* enab 4-entry host queue cache */
1234 if (HAS_PCI(ap->host)) 1244 if (!IS_SOC(hpriv))
1235 cfg |= (1 << 18); /* enab early completion */ 1245 cfg |= (1 << 18); /* enab early completion */
1236 if (hpriv->hp_flags & MV_HP_CUT_THROUGH) 1246 if (hpriv->hp_flags & MV_HP_CUT_THROUGH)
1237 cfg |= (1 << 17); /* enab cut-thru (dis stor&forwrd) */ 1247 cfg |= (1 << 17); /* enab cut-thru (dis stor&forwrd) */
@@ -1341,6 +1351,7 @@ out_port_free_dma_mem:
1341static void mv_port_stop(struct ata_port *ap) 1351static void mv_port_stop(struct ata_port *ap)
1342{ 1352{
1343 mv_stop_edma(ap); 1353 mv_stop_edma(ap);
1354 mv_enable_port_irqs(ap, 0);
1344 mv_port_free_dma_mem(ap); 1355 mv_port_free_dma_mem(ap);
1345} 1356}
1346 1357
@@ -1582,6 +1593,7 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
1582 * shadow block, etc registers. 1593 * shadow block, etc registers.
1583 */ 1594 */
1584 mv_stop_edma(ap); 1595 mv_stop_edma(ap);
1596 mv_enable_port_irqs(ap, ERR_IRQ);
1585 mv_pmp_select(ap, qc->dev->link->pmp); 1597 mv_pmp_select(ap, qc->dev->link->pmp);
1586 return ata_sff_qc_issue(qc); 1598 return ata_sff_qc_issue(qc);
1587 } 1599 }
@@ -1670,6 +1682,18 @@ static void mv_pmp_eh_prep(struct ata_port *ap, unsigned int pmp_map)
1670 } 1682 }
1671} 1683}
1672 1684
1685static int mv_req_q_empty(struct ata_port *ap)
1686{
1687 void __iomem *port_mmio = mv_ap_base(ap);
1688 u32 in_ptr, out_ptr;
1689
1690 in_ptr = (readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS)
1691 >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1692 out_ptr = (readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
1693 >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1694 return (in_ptr == out_ptr); /* 1 == queue_is_empty */
1695}
1696
1673static int mv_handle_fbs_ncq_dev_err(struct ata_port *ap) 1697static int mv_handle_fbs_ncq_dev_err(struct ata_port *ap)
1674{ 1698{
1675 struct mv_port_priv *pp = ap->private_data; 1699 struct mv_port_priv *pp = ap->private_data;
@@ -1703,7 +1727,7 @@ static int mv_handle_fbs_ncq_dev_err(struct ata_port *ap)
1703 ap->qc_active, failed_links, 1727 ap->qc_active, failed_links,
1704 ap->nr_active_links); 1728 ap->nr_active_links);
1705 1729
1706 if (ap->nr_active_links <= failed_links) { 1730 if (ap->nr_active_links <= failed_links && mv_req_q_empty(ap)) {
1707 mv_process_crpb_entries(ap, pp); 1731 mv_process_crpb_entries(ap, pp);
1708 mv_stop_edma(ap); 1732 mv_stop_edma(ap);
1709 mv_eh_freeze(ap); 1733 mv_eh_freeze(ap);
@@ -1812,6 +1836,7 @@ static void mv_err_intr(struct ata_port *ap)
1812{ 1836{
1813 void __iomem *port_mmio = mv_ap_base(ap); 1837 void __iomem *port_mmio = mv_ap_base(ap);
1814 u32 edma_err_cause, eh_freeze_mask, serr = 0; 1838 u32 edma_err_cause, eh_freeze_mask, serr = 0;
1839 u32 fis_cause = 0;
1815 struct mv_port_priv *pp = ap->private_data; 1840 struct mv_port_priv *pp = ap->private_data;
1816 struct mv_host_priv *hpriv = ap->host->private_data; 1841 struct mv_host_priv *hpriv = ap->host->private_data;
1817 unsigned int action = 0, err_mask = 0; 1842 unsigned int action = 0, err_mask = 0;
@@ -1821,16 +1846,19 @@ static void mv_err_intr(struct ata_port *ap)
1821 1846
1822 /* 1847 /*
1823 * Read and clear the SError and err_cause bits. 1848 * Read and clear the SError and err_cause bits.
1849 * For GenIIe, if EDMA_ERR_TRANS_IRQ_7 is set, we also must read/clear
1850 * the FIS_IRQ_CAUSE register before clearing edma_err_cause.
1824 */ 1851 */
1825 sata_scr_read(&ap->link, SCR_ERROR, &serr); 1852 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1826 sata_scr_write_flush(&ap->link, SCR_ERROR, serr); 1853 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
1827 1854
1828 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); 1855 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1856 if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
1857 fis_cause = readl(port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
1858 writelfl(~fis_cause, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
1859 }
1829 writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); 1860 writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1830 1861
1831 ata_port_printk(ap, KERN_INFO, "%s: err_cause=%08x pp_flags=0x%x\n",
1832 __func__, edma_err_cause, pp->pp_flags);
1833
1834 if (edma_err_cause & EDMA_ERR_DEV) { 1862 if (edma_err_cause & EDMA_ERR_DEV) {
1835 /* 1863 /*
1836 * Device errors during FIS-based switching operation 1864 * Device errors during FIS-based switching operation
@@ -1844,6 +1872,18 @@ static void mv_err_intr(struct ata_port *ap)
1844 ata_ehi_clear_desc(ehi); 1872 ata_ehi_clear_desc(ehi);
1845 ata_ehi_push_desc(ehi, "edma_err_cause=%08x pp_flags=%08x", 1873 ata_ehi_push_desc(ehi, "edma_err_cause=%08x pp_flags=%08x",
1846 edma_err_cause, pp->pp_flags); 1874 edma_err_cause, pp->pp_flags);
1875
1876 if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
1877 ata_ehi_push_desc(ehi, "fis_cause=%08x", fis_cause);
1878 if (fis_cause & SATA_FIS_IRQ_AN) {
1879 u32 ec = edma_err_cause &
1880 ~(EDMA_ERR_TRANS_IRQ_7 | EDMA_ERR_IRQ_TRANSIENT);
1881 sata_async_notification(ap);
1882 if (!ec)
1883 return; /* Just an AN; no need for the nukes */
1884 ata_ehi_push_desc(ehi, "SDB notify");
1885 }
1886 }
1847 /* 1887 /*
1848 * All generations share these EDMA error cause bits: 1888 * All generations share these EDMA error cause bits:
1849 */ 1889 */
@@ -2162,20 +2202,20 @@ static irqreturn_t mv_interrupt(int irq, void *dev_instance)
2162 struct ata_host *host = dev_instance; 2202 struct ata_host *host = dev_instance;
2163 struct mv_host_priv *hpriv = host->private_data; 2203 struct mv_host_priv *hpriv = host->private_data;
2164 unsigned int handled = 0; 2204 unsigned int handled = 0;
2165 u32 main_irq_cause, main_irq_mask; 2205 u32 main_irq_cause, pending_irqs;
2166 2206
2167 spin_lock(&host->lock); 2207 spin_lock(&host->lock);
2168 main_irq_cause = readl(hpriv->main_irq_cause_addr); 2208 main_irq_cause = readl(hpriv->main_irq_cause_addr);
2169 main_irq_mask = readl(hpriv->main_irq_mask_addr); 2209 pending_irqs = main_irq_cause & hpriv->main_irq_mask;
2170 /* 2210 /*
2171 * Deal with cases where we either have nothing pending, or have read 2211 * Deal with cases where we either have nothing pending, or have read
2172 * a bogus register value which can indicate HW removal or PCI fault. 2212 * a bogus register value which can indicate HW removal or PCI fault.
2173 */ 2213 */
2174 if ((main_irq_cause & main_irq_mask) && (main_irq_cause != 0xffffffffU)) { 2214 if (pending_irqs && main_irq_cause != 0xffffffffU) {
2175 if (unlikely((main_irq_cause & PCI_ERR) && HAS_PCI(host))) 2215 if (unlikely((pending_irqs & PCI_ERR) && !IS_SOC(hpriv)))
2176 handled = mv_pci_error(host, hpriv->base); 2216 handled = mv_pci_error(host, hpriv->base);
2177 else 2217 else
2178 handled = mv_host_intr(host, main_irq_cause); 2218 handled = mv_host_intr(host, pending_irqs);
2179 } 2219 }
2180 spin_unlock(&host->lock); 2220 spin_unlock(&host->lock);
2181 return IRQ_RETVAL(handled); 2221 return IRQ_RETVAL(handled);
@@ -2373,7 +2413,6 @@ static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
2373 ZERO(MV_PCI_DISC_TIMER); 2413 ZERO(MV_PCI_DISC_TIMER);
2374 ZERO(MV_PCI_MSI_TRIGGER); 2414 ZERO(MV_PCI_MSI_TRIGGER);
2375 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT_OFS); 2415 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT_OFS);
2376 ZERO(PCI_HC_MAIN_IRQ_MASK_OFS);
2377 ZERO(MV_PCI_SERR_MASK); 2416 ZERO(MV_PCI_SERR_MASK);
2378 ZERO(hpriv->irq_cause_ofs); 2417 ZERO(hpriv->irq_cause_ofs);
2379 ZERO(hpriv->irq_mask_ofs); 2418 ZERO(hpriv->irq_mask_ofs);
@@ -2495,7 +2534,7 @@ static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
2495 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0); 2534 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2496 int fix_phy_mode4 = 2535 int fix_phy_mode4 =
2497 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0); 2536 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2498 u32 m2, tmp; 2537 u32 m2, m3;
2499 2538
2500 if (fix_phy_mode2) { 2539 if (fix_phy_mode2) {
2501 m2 = readl(port_mmio + PHY_MODE2); 2540 m2 = readl(port_mmio + PHY_MODE2);
@@ -2512,28 +2551,37 @@ static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
2512 udelay(200); 2551 udelay(200);
2513 } 2552 }
2514 2553
2515 /* who knows what this magic does */ 2554 /*
2516 tmp = readl(port_mmio + PHY_MODE3); 2555 * Gen-II/IIe PHY_MODE3 errata RM#2:
2517 tmp &= ~0x7F800000; 2556 * Achieves better receiver noise performance than the h/w default:
2518 tmp |= 0x2A800000; 2557 */
2519 writel(tmp, port_mmio + PHY_MODE3); 2558 m3 = readl(port_mmio + PHY_MODE3);
2559 m3 = (m3 & 0x1f) | (0x5555601 << 5);
2560
2561 /* Guideline 88F5182 (GL# SATA-S11) */
2562 if (IS_SOC(hpriv))
2563 m3 &= ~0x1c;
2520 2564
2521 if (fix_phy_mode4) { 2565 if (fix_phy_mode4) {
2522 u32 m4; 2566 u32 m4;
2523 2567
2524 m4 = readl(port_mmio + PHY_MODE4); 2568 m4 = readl(port_mmio + PHY_MODE4);
2525 2569
2526 if (hp_flags & MV_HP_ERRATA_60X1B2)
2527 tmp = readl(port_mmio + PHY_MODE3);
2528
2529 /* workaround for errata FEr SATA#10 (part 1) */ 2570 /* workaround for errata FEr SATA#10 (part 1) */
2530 m4 = (m4 & ~(1 << 1)) | (1 << 0); 2571 m4 = (m4 & ~(1 << 1)) | (1 << 0);
2531 2572
2532 writel(m4, port_mmio + PHY_MODE4); 2573 /* enforce bit restrictions on GenIIe devices */
2574 if (IS_GEN_IIE(hpriv))
2575 m4 = (m4 & ~0x5DE3FFFC) | (1 << 2);
2533 2576
2534 if (hp_flags & MV_HP_ERRATA_60X1B2) 2577 writel(m4, port_mmio + PHY_MODE4);
2535 writel(tmp, port_mmio + PHY_MODE3);
2536 } 2578 }
2579 /*
2580 * Workaround for 60x1-B2 errata SATA#13:
2581 * Any write to PHY_MODE4 (above) may corrupt PHY_MODE3,
2582 * so we must always rewrite PHY_MODE3 after PHY_MODE4.
2583 */
2584 writel(m3, port_mmio + PHY_MODE3);
2537 2585
2538 /* Revert values of pre-emphasis and signal amps to the saved ones */ 2586 /* Revert values of pre-emphasis and signal amps to the saved ones */
2539 m2 = readl(port_mmio + PHY_MODE2); 2587 m2 = readl(port_mmio + PHY_MODE2);
@@ -2728,6 +2776,7 @@ static int mv_hardreset(struct ata_link *link, unsigned int *class,
2728 2776
2729 rc = sata_link_hardreset(link, timing, deadline + extra, 2777 rc = sata_link_hardreset(link, timing, deadline + extra,
2730 &online, NULL); 2778 &online, NULL);
2779 rc = online ? -EAGAIN : rc;
2731 if (rc) 2780 if (rc)
2732 return rc; 2781 return rc;
2733 sata_scr_read(link, SCR_STATUS, &sstatus); 2782 sata_scr_read(link, SCR_STATUS, &sstatus);
@@ -2744,32 +2793,18 @@ static int mv_hardreset(struct ata_link *link, unsigned int *class,
2744 2793
2745static void mv_eh_freeze(struct ata_port *ap) 2794static void mv_eh_freeze(struct ata_port *ap)
2746{ 2795{
2747 struct mv_host_priv *hpriv = ap->host->private_data;
2748 unsigned int shift, hardport, port = ap->port_no;
2749 u32 main_irq_mask;
2750
2751 /* FIXME: handle coalescing completion events properly */
2752
2753 mv_stop_edma(ap); 2796 mv_stop_edma(ap);
2754 MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport); 2797 mv_enable_port_irqs(ap, 0);
2755
2756 /* disable assertion of portN err, done events */
2757 main_irq_mask = readl(hpriv->main_irq_mask_addr);
2758 main_irq_mask &= ~((DONE_IRQ | ERR_IRQ) << shift);
2759 writelfl(main_irq_mask, hpriv->main_irq_mask_addr);
2760} 2798}
2761 2799
2762static void mv_eh_thaw(struct ata_port *ap) 2800static void mv_eh_thaw(struct ata_port *ap)
2763{ 2801{
2764 struct mv_host_priv *hpriv = ap->host->private_data; 2802 struct mv_host_priv *hpriv = ap->host->private_data;
2765 unsigned int shift, hardport, port = ap->port_no; 2803 unsigned int port = ap->port_no;
2804 unsigned int hardport = mv_hardport_from_port(port);
2766 void __iomem *hc_mmio = mv_hc_base_from_port(hpriv->base, port); 2805 void __iomem *hc_mmio = mv_hc_base_from_port(hpriv->base, port);
2767 void __iomem *port_mmio = mv_ap_base(ap); 2806 void __iomem *port_mmio = mv_ap_base(ap);
2768 u32 main_irq_mask, hc_irq_cause; 2807 u32 hc_irq_cause;
2769
2770 /* FIXME: handle coalescing completion events properly */
2771
2772 MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
2773 2808
2774 /* clear EDMA errors on this port */ 2809 /* clear EDMA errors on this port */
2775 writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); 2810 writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
@@ -2779,10 +2814,7 @@ static void mv_eh_thaw(struct ata_port *ap)
2779 hc_irq_cause &= ~((DEV_IRQ | DMA_IRQ) << hardport); 2814 hc_irq_cause &= ~((DEV_IRQ | DMA_IRQ) << hardport);
2780 writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS); 2815 writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
2781 2816
2782 /* enable assertion of portN err, done events */ 2817 mv_enable_port_irqs(ap, ERR_IRQ);
2783 main_irq_mask = readl(hpriv->main_irq_mask_addr);
2784 main_irq_mask |= ((DONE_IRQ | ERR_IRQ) << shift);
2785 writelfl(main_irq_mask, hpriv->main_irq_mask_addr);
2786} 2818}
2787 2819
2788/** 2820/**
@@ -2840,7 +2872,7 @@ static unsigned int mv_in_pcix_mode(struct ata_host *host)
2840 void __iomem *mmio = hpriv->base; 2872 void __iomem *mmio = hpriv->base;
2841 u32 reg; 2873 u32 reg;
2842 2874
2843 if (!HAS_PCI(host) || !IS_PCIE(hpriv)) 2875 if (IS_SOC(hpriv) || !IS_PCIE(hpriv))
2844 return 0; /* not PCI-X capable */ 2876 return 0; /* not PCI-X capable */
2845 reg = readl(mmio + MV_PCI_MODE_OFS); 2877 reg = readl(mmio + MV_PCI_MODE_OFS);
2846 if ((reg & MV_PCI_MODE_MASK) == 0) 2878 if ((reg & MV_PCI_MODE_MASK) == 0)
@@ -2967,10 +2999,7 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
2967 hp_flags |= MV_HP_CUT_THROUGH; 2999 hp_flags |= MV_HP_CUT_THROUGH;
2968 3000
2969 switch (pdev->revision) { 3001 switch (pdev->revision) {
2970 case 0x0: 3002 case 0x2: /* Rev.B0: the first/only public release */
2971 hp_flags |= MV_HP_ERRATA_XX42A0;
2972 break;
2973 case 0x1:
2974 hp_flags |= MV_HP_ERRATA_60X1C0; 3003 hp_flags |= MV_HP_ERRATA_60X1C0;
2975 break; 3004 break;
2976 default: 3005 default:
@@ -2982,7 +3011,7 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
2982 break; 3011 break;
2983 case chip_soc: 3012 case chip_soc:
2984 hpriv->ops = &mv_soc_ops; 3013 hpriv->ops = &mv_soc_ops;
2985 hp_flags |= MV_HP_ERRATA_60X1C0; 3014 hp_flags |= MV_HP_FLAG_SOC | MV_HP_ERRATA_60X1C0;
2986 break; 3015 break;
2987 3016
2988 default: 3017 default:
@@ -3026,16 +3055,16 @@ static int mv_init_host(struct ata_host *host, unsigned int board_idx)
3026 if (rc) 3055 if (rc)
3027 goto done; 3056 goto done;
3028 3057
3029 if (HAS_PCI(host)) { 3058 if (IS_SOC(hpriv)) {
3030 hpriv->main_irq_cause_addr = mmio + PCI_HC_MAIN_IRQ_CAUSE_OFS;
3031 hpriv->main_irq_mask_addr = mmio + PCI_HC_MAIN_IRQ_MASK_OFS;
3032 } else {
3033 hpriv->main_irq_cause_addr = mmio + SOC_HC_MAIN_IRQ_CAUSE_OFS; 3059 hpriv->main_irq_cause_addr = mmio + SOC_HC_MAIN_IRQ_CAUSE_OFS;
3034 hpriv->main_irq_mask_addr = mmio + SOC_HC_MAIN_IRQ_MASK_OFS; 3060 hpriv->main_irq_mask_addr = mmio + SOC_HC_MAIN_IRQ_MASK_OFS;
3061 } else {
3062 hpriv->main_irq_cause_addr = mmio + PCI_HC_MAIN_IRQ_CAUSE_OFS;
3063 hpriv->main_irq_mask_addr = mmio + PCI_HC_MAIN_IRQ_MASK_OFS;
3035 } 3064 }
3036 3065
3037 /* global interrupt mask: 0 == mask everything */ 3066 /* global interrupt mask: 0 == mask everything */
3038 writel(0, hpriv->main_irq_mask_addr); 3067 mv_set_main_irq_mask(host, ~0, 0);
3039 3068
3040 n_hc = mv_get_hc_count(host->ports[0]->flags); 3069 n_hc = mv_get_hc_count(host->ports[0]->flags);
3041 3070
@@ -3057,7 +3086,7 @@ static int mv_init_host(struct ata_host *host, unsigned int board_idx)
3057 mv_port_init(&ap->ioaddr, port_mmio); 3086 mv_port_init(&ap->ioaddr, port_mmio);
3058 3087
3059#ifdef CONFIG_PCI 3088#ifdef CONFIG_PCI
3060 if (HAS_PCI(host)) { 3089 if (!IS_SOC(hpriv)) {
3061 unsigned int offset = port_mmio - mmio; 3090 unsigned int offset = port_mmio - mmio;
3062 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio"); 3091 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
3063 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port"); 3092 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
@@ -3077,31 +3106,18 @@ static int mv_init_host(struct ata_host *host, unsigned int board_idx)
3077 writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS); 3106 writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
3078 } 3107 }
3079 3108
3080 if (HAS_PCI(host)) { 3109 if (!IS_SOC(hpriv)) {
3081 /* Clear any currently outstanding host interrupt conditions */ 3110 /* Clear any currently outstanding host interrupt conditions */
3082 writelfl(0, mmio + hpriv->irq_cause_ofs); 3111 writelfl(0, mmio + hpriv->irq_cause_ofs);
3083 3112
3084 /* and unmask interrupt generation for host regs */ 3113 /* and unmask interrupt generation for host regs */
3085 writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs); 3114 writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
3086 if (IS_GEN_I(hpriv)) 3115
3087 writelfl(~HC_MAIN_MASKED_IRQS_5, 3116 /*
3088 hpriv->main_irq_mask_addr); 3117 * enable only global host interrupts for now.
3089 else 3118 * The per-port interrupts get done later as ports are set up.
3090 writelfl(~HC_MAIN_MASKED_IRQS, 3119 */
3091 hpriv->main_irq_mask_addr); 3120 mv_set_main_irq_mask(host, 0, PCI_ERR);
3092
3093 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
3094 "PCI int cause/mask=0x%08x/0x%08x\n",
3095 readl(hpriv->main_irq_cause_addr),
3096 readl(hpriv->main_irq_mask_addr),
3097 readl(mmio + hpriv->irq_cause_ofs),
3098 readl(mmio + hpriv->irq_mask_ofs));
3099 } else {
3100 writelfl(~HC_MAIN_MASKED_IRQS_SOC,
3101 hpriv->main_irq_mask_addr);
3102 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x\n",
3103 readl(hpriv->main_irq_cause_addr),
3104 readl(hpriv->main_irq_mask_addr));
3105 } 3121 }
3106done: 3122done:
3107 return rc; 3123 return rc;
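
Two themes run through the sata_mv changes above: the SoC/PCI distinction moves from the per-port MV_FLAG_SOC flag to the host-private MV_HP_FLAG_SOC bit (tested with IS_SOC()), and the main interrupt mask register is no longer read back and rewritten on every freeze/thaw; instead hpriv->main_irq_mask caches the value and mv_set_main_irq_mask() writes the register only when the effective mask changes, with mv_enable_port_irqs() supplying the per-port DONE/ERR bits. A minimal, self-contained sketch of that cached-mask pattern (the names here are illustrative, not the driver's):

    #include <stdint.h>

    struct irq_mask_cache {
            uint32_t mask;              /* soft copy of the hardware mask register */
            volatile uint32_t *hw_reg;  /* memory-mapped mask register */
    };

    /* Clear disable_bits, set enable_bits, and touch the hardware only when
     * the resulting mask differs from the cached value. */
    static void irq_mask_update(struct irq_mask_cache *c,
                                uint32_t disable_bits, uint32_t enable_bits)
    {
            uint32_t new_mask = (c->mask & ~disable_bits) | enable_bits;

            if (new_mask != c->mask) {
                    c->mask = new_mask;
                    *c->hw_reg = new_mask;  /* the driver uses writelfl() here */
            }
    }

With this in place, freezing a port amounts to disabling its DONE and ERR bits and thawing to re-enabling ERR only, which is the shape mv_eh_freeze() and mv_eh_thaw() take after the patch.
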
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
index 5a10dc5048ad..030665ba76b7 100644
--- a/drivers/ata/sata_promise.c
+++ b/drivers/ata/sata_promise.c
@@ -53,7 +53,15 @@ enum {
53 PDC_MMIO_BAR = 3, 53 PDC_MMIO_BAR = 3,
54 PDC_MAX_PRD = LIBATA_MAX_PRD - 1, /* -1 for ASIC PRD bug workaround */ 54 PDC_MAX_PRD = LIBATA_MAX_PRD - 1, /* -1 for ASIC PRD bug workaround */
55 55
56 /* register offsets */ 56 /* host register offsets (from host->iomap[PDC_MMIO_BAR]) */
57 PDC_INT_SEQMASK = 0x40, /* Mask of asserted SEQ INTs */
58 PDC_FLASH_CTL = 0x44, /* Flash control register */
59 PDC_SATA_PLUG_CSR = 0x6C, /* SATA Plug control/status reg */
60 PDC2_SATA_PLUG_CSR = 0x60, /* SATAII Plug control/status reg */
61 PDC_TBG_MODE = 0x41C, /* TBG mode (not SATAII) */
62 PDC_SLEW_CTL = 0x470, /* slew rate control reg (not SATAII) */
63
64 /* per-port ATA register offsets (from ap->ioaddr.cmd_addr) */
57 PDC_FEATURE = 0x04, /* Feature/Error reg (per port) */ 65 PDC_FEATURE = 0x04, /* Feature/Error reg (per port) */
58 PDC_SECTOR_COUNT = 0x08, /* Sector count reg (per port) */ 66 PDC_SECTOR_COUNT = 0x08, /* Sector count reg (per port) */
59 PDC_SECTOR_NUMBER = 0x0C, /* Sector number reg (per port) */ 67 PDC_SECTOR_NUMBER = 0x0C, /* Sector number reg (per port) */
@@ -63,14 +71,11 @@ enum {
63 PDC_COMMAND = 0x1C, /* Command/status reg (per port) */ 71 PDC_COMMAND = 0x1C, /* Command/status reg (per port) */
64 PDC_ALTSTATUS = 0x38, /* Alternate-status/device-control reg (per port) */ 72 PDC_ALTSTATUS = 0x38, /* Alternate-status/device-control reg (per port) */
65 PDC_PKT_SUBMIT = 0x40, /* Command packet pointer addr */ 73 PDC_PKT_SUBMIT = 0x40, /* Command packet pointer addr */
66 PDC_INT_SEQMASK = 0x40, /* Mask of asserted SEQ INTs */
67 PDC_FLASH_CTL = 0x44, /* Flash control register */
68 PDC_GLOBAL_CTL = 0x48, /* Global control/status (per port) */ 74 PDC_GLOBAL_CTL = 0x48, /* Global control/status (per port) */
69 PDC_CTLSTAT = 0x60, /* IDE control and status (per port) */ 75 PDC_CTLSTAT = 0x60, /* IDE control and status (per port) */
70 PDC_SATA_PLUG_CSR = 0x6C, /* SATA Plug control/status reg */ 76
71 PDC2_SATA_PLUG_CSR = 0x60, /* SATAII Plug control/status reg */ 77 /* per-port SATA register offsets (from ap->ioaddr.scr_addr) */
72 PDC_TBG_MODE = 0x41C, /* TBG mode (not SATAII) */ 78 PDC_PHYMODE4 = 0x14,
73 PDC_SLEW_CTL = 0x470, /* slew rate control reg (not SATAII) */
74 79
75 /* PDC_GLOBAL_CTL bit definitions */ 80 /* PDC_GLOBAL_CTL bit definitions */
76 PDC_PH_ERR = (1 << 8), /* PCI error while loading packet */ 81 PDC_PH_ERR = (1 << 8), /* PCI error while loading packet */
@@ -134,7 +139,7 @@ struct pdc_port_priv {
134 139
135static int pdc_sata_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val); 140static int pdc_sata_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val);
136static int pdc_sata_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val); 141static int pdc_sata_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val);
137static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); 142static int pdc_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
138static int pdc_common_port_start(struct ata_port *ap); 143static int pdc_common_port_start(struct ata_port *ap);
139static int pdc_sata_port_start(struct ata_port *ap); 144static int pdc_sata_port_start(struct ata_port *ap);
140static void pdc_qc_prep(struct ata_queued_cmd *qc); 145static void pdc_qc_prep(struct ata_queued_cmd *qc);
@@ -332,12 +337,12 @@ static int pdc_sata_port_start(struct ata_port *ap)
332 337
333 /* fix up PHYMODE4 align timing */ 338 /* fix up PHYMODE4 align timing */
334 if (ap->flags & PDC_FLAG_GEN_II) { 339 if (ap->flags & PDC_FLAG_GEN_II) {
335 void __iomem *mmio = ap->ioaddr.scr_addr; 340 void __iomem *sata_mmio = ap->ioaddr.scr_addr;
336 unsigned int tmp; 341 unsigned int tmp;
337 342
338 tmp = readl(mmio + 0x014); 343 tmp = readl(sata_mmio + PDC_PHYMODE4);
339 tmp = (tmp & ~3) | 1; /* set bits 1:0 = 0:1 */ 344 tmp = (tmp & ~3) | 1; /* set bits 1:0 = 0:1 */
340 writel(tmp, mmio + 0x014); 345 writel(tmp, sata_mmio + PDC_PHYMODE4);
341 } 346 }
342 347
343 return 0; 348 return 0;
@@ -345,32 +350,32 @@ static int pdc_sata_port_start(struct ata_port *ap)
345 350
346static void pdc_reset_port(struct ata_port *ap) 351static void pdc_reset_port(struct ata_port *ap)
347{ 352{
348 void __iomem *mmio = ap->ioaddr.cmd_addr + PDC_CTLSTAT; 353 void __iomem *ata_ctlstat_mmio = ap->ioaddr.cmd_addr + PDC_CTLSTAT;
349 unsigned int i; 354 unsigned int i;
350 u32 tmp; 355 u32 tmp;
351 356
352 for (i = 11; i > 0; i--) { 357 for (i = 11; i > 0; i--) {
353 tmp = readl(mmio); 358 tmp = readl(ata_ctlstat_mmio);
354 if (tmp & PDC_RESET) 359 if (tmp & PDC_RESET)
355 break; 360 break;
356 361
357 udelay(100); 362 udelay(100);
358 363
359 tmp |= PDC_RESET; 364 tmp |= PDC_RESET;
360 writel(tmp, mmio); 365 writel(tmp, ata_ctlstat_mmio);
361 } 366 }
362 367
363 tmp &= ~PDC_RESET; 368 tmp &= ~PDC_RESET;
364 writel(tmp, mmio); 369 writel(tmp, ata_ctlstat_mmio);
365 readl(mmio); /* flush */ 370 readl(ata_ctlstat_mmio); /* flush */
366} 371}
367 372
368static int pdc_pata_cable_detect(struct ata_port *ap) 373static int pdc_pata_cable_detect(struct ata_port *ap)
369{ 374{
370 u8 tmp; 375 u8 tmp;
371 void __iomem *mmio = ap->ioaddr.cmd_addr + PDC_CTLSTAT + 0x03; 376 void __iomem *ata_mmio = ap->ioaddr.cmd_addr;
372 377
373 tmp = readb(mmio); 378 tmp = readb(ata_mmio + PDC_CTLSTAT + 3);
374 if (tmp & 0x01) 379 if (tmp & 0x01)
375 return ATA_CBL_PATA40; 380 return ATA_CBL_PATA40;
376 return ATA_CBL_PATA80; 381 return ATA_CBL_PATA80;
@@ -557,31 +562,25 @@ static void pdc_qc_prep(struct ata_queued_cmd *qc)
557 switch (qc->tf.protocol) { 562 switch (qc->tf.protocol) {
558 case ATA_PROT_DMA: 563 case ATA_PROT_DMA:
559 pdc_fill_sg(qc); 564 pdc_fill_sg(qc);
560 /* fall through */ 565 /*FALLTHROUGH*/
561
562 case ATA_PROT_NODATA: 566 case ATA_PROT_NODATA:
563 i = pdc_pkt_header(&qc->tf, qc->ap->prd_dma, 567 i = pdc_pkt_header(&qc->tf, qc->ap->prd_dma,
564 qc->dev->devno, pp->pkt); 568 qc->dev->devno, pp->pkt);
565
566 if (qc->tf.flags & ATA_TFLAG_LBA48) 569 if (qc->tf.flags & ATA_TFLAG_LBA48)
567 i = pdc_prep_lba48(&qc->tf, pp->pkt, i); 570 i = pdc_prep_lba48(&qc->tf, pp->pkt, i);
568 else 571 else
569 i = pdc_prep_lba28(&qc->tf, pp->pkt, i); 572 i = pdc_prep_lba28(&qc->tf, pp->pkt, i);
570
571 pdc_pkt_footer(&qc->tf, pp->pkt, i); 573 pdc_pkt_footer(&qc->tf, pp->pkt, i);
572 break; 574 break;
573
574 case ATAPI_PROT_PIO: 575 case ATAPI_PROT_PIO:
575 pdc_fill_sg(qc); 576 pdc_fill_sg(qc);
576 break; 577 break;
577
578 case ATAPI_PROT_DMA: 578 case ATAPI_PROT_DMA:
579 pdc_fill_sg(qc); 579 pdc_fill_sg(qc);
580 /*FALLTHROUGH*/ 580 /*FALLTHROUGH*/
581 case ATAPI_PROT_NODATA: 581 case ATAPI_PROT_NODATA:
582 pdc_atapi_pkt(qc); 582 pdc_atapi_pkt(qc);
583 break; 583 break;
584
585 default: 584 default:
586 break; 585 break;
587 } 586 }
@@ -611,7 +610,7 @@ static unsigned int pdc_sata_ata_port_to_ata_no(const struct ata_port *ap)
611 unsigned int nr_ports = pdc_sata_nr_ports(ap); 610 unsigned int nr_ports = pdc_sata_nr_ports(ap);
612 unsigned int i; 611 unsigned int i;
613 612
614 for(i = 0; i < nr_ports && host->ports[i] != ap; ++i) 613 for (i = 0; i < nr_ports && host->ports[i] != ap; ++i)
615 ; 614 ;
616 BUG_ON(i >= nr_ports); 615 BUG_ON(i >= nr_ports);
617 return pdc_port_no_to_ata_no(i, pdc_is_sataii_tx4(ap->flags)); 616 return pdc_port_no_to_ata_no(i, pdc_is_sataii_tx4(ap->flags));
@@ -624,14 +623,14 @@ static unsigned int pdc_sata_hotplug_offset(const struct ata_port *ap)
624 623
625static void pdc_freeze(struct ata_port *ap) 624static void pdc_freeze(struct ata_port *ap)
626{ 625{
627 void __iomem *mmio = ap->ioaddr.cmd_addr; 626 void __iomem *ata_mmio = ap->ioaddr.cmd_addr;
628 u32 tmp; 627 u32 tmp;
629 628
630 tmp = readl(mmio + PDC_CTLSTAT); 629 tmp = readl(ata_mmio + PDC_CTLSTAT);
631 tmp |= PDC_IRQ_DISABLE; 630 tmp |= PDC_IRQ_DISABLE;
632 tmp &= ~PDC_DMA_ENABLE; 631 tmp &= ~PDC_DMA_ENABLE;
633 writel(tmp, mmio + PDC_CTLSTAT); 632 writel(tmp, ata_mmio + PDC_CTLSTAT);
634 readl(mmio + PDC_CTLSTAT); /* flush */ 633 readl(ata_mmio + PDC_CTLSTAT); /* flush */
635} 634}
636 635
637static void pdc_sata_freeze(struct ata_port *ap) 636static void pdc_sata_freeze(struct ata_port *ap)
@@ -659,17 +658,17 @@ static void pdc_sata_freeze(struct ata_port *ap)
659 658
660static void pdc_thaw(struct ata_port *ap) 659static void pdc_thaw(struct ata_port *ap)
661{ 660{
662 void __iomem *mmio = ap->ioaddr.cmd_addr; 661 void __iomem *ata_mmio = ap->ioaddr.cmd_addr;
663 u32 tmp; 662 u32 tmp;
664 663
665 /* clear IRQ */ 664 /* clear IRQ */
666 readl(mmio + PDC_INT_SEQMASK); 665 readl(ata_mmio + PDC_COMMAND);
667 666
668 /* turn IRQ back on */ 667 /* turn IRQ back on */
669 tmp = readl(mmio + PDC_CTLSTAT); 668 tmp = readl(ata_mmio + PDC_CTLSTAT);
670 tmp &= ~PDC_IRQ_DISABLE; 669 tmp &= ~PDC_IRQ_DISABLE;
671 writel(tmp, mmio + PDC_CTLSTAT); 670 writel(tmp, ata_mmio + PDC_CTLSTAT);
672 readl(mmio + PDC_CTLSTAT); /* flush */ 671 readl(ata_mmio + PDC_CTLSTAT); /* flush */
673} 672}
674 673
675static void pdc_sata_thaw(struct ata_port *ap) 674static void pdc_sata_thaw(struct ata_port *ap)
@@ -743,11 +742,11 @@ static void pdc_error_intr(struct ata_port *ap, struct ata_queued_cmd *qc,
743 ata_port_abort(ap); 742 ata_port_abort(ap);
744} 743}
745 744
746static inline unsigned int pdc_host_intr(struct ata_port *ap, 745static unsigned int pdc_host_intr(struct ata_port *ap,
747 struct ata_queued_cmd *qc) 746 struct ata_queued_cmd *qc)
748{ 747{
749 unsigned int handled = 0; 748 unsigned int handled = 0;
750 void __iomem *port_mmio = ap->ioaddr.cmd_addr; 749 void __iomem *ata_mmio = ap->ioaddr.cmd_addr;
751 u32 port_status, err_mask; 750 u32 port_status, err_mask;
752 751
753 err_mask = PDC_ERR_MASK; 752 err_mask = PDC_ERR_MASK;
@@ -755,7 +754,7 @@ static inline unsigned int pdc_host_intr(struct ata_port *ap,
755 err_mask &= ~PDC1_ERR_MASK; 754 err_mask &= ~PDC1_ERR_MASK;
756 else 755 else
757 err_mask &= ~PDC2_ERR_MASK; 756 err_mask &= ~PDC2_ERR_MASK;
758 port_status = readl(port_mmio + PDC_GLOBAL_CTL); 757 port_status = readl(ata_mmio + PDC_GLOBAL_CTL);
759 if (unlikely(port_status & err_mask)) { 758 if (unlikely(port_status & err_mask)) {
760 pdc_error_intr(ap, qc, port_status, err_mask); 759 pdc_error_intr(ap, qc, port_status, err_mask);
761 return 1; 760 return 1;
@@ -770,7 +769,6 @@ static inline unsigned int pdc_host_intr(struct ata_port *ap,
770 ata_qc_complete(qc); 769 ata_qc_complete(qc);
771 handled = 1; 770 handled = 1;
772 break; 771 break;
773
774 default: 772 default:
775 ap->stats.idle_irq++; 773 ap->stats.idle_irq++;
776 break; 774 break;
@@ -781,10 +779,9 @@ static inline unsigned int pdc_host_intr(struct ata_port *ap,
781 779
782static void pdc_irq_clear(struct ata_port *ap) 780static void pdc_irq_clear(struct ata_port *ap)
783{ 781{
784 struct ata_host *host = ap->host; 782 void __iomem *ata_mmio = ap->ioaddr.cmd_addr;
785 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
786 783
787 readl(mmio + PDC_INT_SEQMASK); 784 readl(ata_mmio + PDC_COMMAND);
788} 785}
789 786
790static irqreturn_t pdc_interrupt(int irq, void *dev_instance) 787static irqreturn_t pdc_interrupt(int irq, void *dev_instance)
@@ -794,7 +791,7 @@ static irqreturn_t pdc_interrupt(int irq, void *dev_instance)
794 u32 mask = 0; 791 u32 mask = 0;
795 unsigned int i, tmp; 792 unsigned int i, tmp;
796 unsigned int handled = 0; 793 unsigned int handled = 0;
797 void __iomem *mmio_base; 794 void __iomem *host_mmio;
798 unsigned int hotplug_offset, ata_no; 795 unsigned int hotplug_offset, ata_no;
799 u32 hotplug_status; 796 u32 hotplug_status;
800 int is_sataii_tx4; 797 int is_sataii_tx4;
@@ -806,7 +803,7 @@ static irqreturn_t pdc_interrupt(int irq, void *dev_instance)
806 return IRQ_NONE; 803 return IRQ_NONE;
807 } 804 }
808 805
809 mmio_base = host->iomap[PDC_MMIO_BAR]; 806 host_mmio = host->iomap[PDC_MMIO_BAR];
810 807
811 spin_lock(&host->lock); 808 spin_lock(&host->lock);
812 809
@@ -815,26 +812,26 @@ static irqreturn_t pdc_interrupt(int irq, void *dev_instance)
815 hotplug_offset = PDC2_SATA_PLUG_CSR; 812 hotplug_offset = PDC2_SATA_PLUG_CSR;
816 else 813 else
817 hotplug_offset = PDC_SATA_PLUG_CSR; 814 hotplug_offset = PDC_SATA_PLUG_CSR;
818 hotplug_status = readl(mmio_base + hotplug_offset); 815 hotplug_status = readl(host_mmio + hotplug_offset);
819 if (hotplug_status & 0xff) 816 if (hotplug_status & 0xff)
820 writel(hotplug_status | 0xff, mmio_base + hotplug_offset); 817 writel(hotplug_status | 0xff, host_mmio + hotplug_offset);
821 hotplug_status &= 0xff; /* clear uninteresting bits */ 818 hotplug_status &= 0xff; /* clear uninteresting bits */
822 819
823 /* reading should also clear interrupts */ 820 /* reading should also clear interrupts */
824 mask = readl(mmio_base + PDC_INT_SEQMASK); 821 mask = readl(host_mmio + PDC_INT_SEQMASK);
825 822
826 if (mask == 0xffffffff && hotplug_status == 0) { 823 if (mask == 0xffffffff && hotplug_status == 0) {
827 VPRINTK("QUICK EXIT 2\n"); 824 VPRINTK("QUICK EXIT 2\n");
828 goto done_irq; 825 goto done_irq;
829 } 826 }
830 827
831 mask &= 0xffff; /* only 16 tags possible */ 828 mask &= 0xffff; /* only 16 SEQIDs possible */
832 if (mask == 0 && hotplug_status == 0) { 829 if (mask == 0 && hotplug_status == 0) {
833 VPRINTK("QUICK EXIT 3\n"); 830 VPRINTK("QUICK EXIT 3\n");
834 goto done_irq; 831 goto done_irq;
835 } 832 }
836 833
837 writel(mask, mmio_base + PDC_INT_SEQMASK); 834 writel(mask, host_mmio + PDC_INT_SEQMASK);
838 835
839 is_sataii_tx4 = pdc_is_sataii_tx4(host->ports[0]->flags); 836 is_sataii_tx4 = pdc_is_sataii_tx4(host->ports[0]->flags);
840 837
@@ -875,23 +872,24 @@ done_irq:
875 return IRQ_RETVAL(handled); 872 return IRQ_RETVAL(handled);
 876 } 873 }
877 874
 878 static inline void pdc_packet_start(struct ata_queued_cmd *qc) 875 static void pdc_packet_start(struct ata_queued_cmd *qc)
 879 { 876 {
880 struct ata_port *ap = qc->ap; 877 struct ata_port *ap = qc->ap;
881 struct pdc_port_priv *pp = ap->private_data; 878 struct pdc_port_priv *pp = ap->private_data;
882 void __iomem *mmio = ap->host->iomap[PDC_MMIO_BAR]; 879 void __iomem *host_mmio = ap->host->iomap[PDC_MMIO_BAR];
880 void __iomem *ata_mmio = ap->ioaddr.cmd_addr;
883 unsigned int port_no = ap->port_no; 881 unsigned int port_no = ap->port_no;
884 u8 seq = (u8) (port_no + 1); 882 u8 seq = (u8) (port_no + 1);
885 883
886 VPRINTK("ENTER, ap %p\n", ap); 884 VPRINTK("ENTER, ap %p\n", ap);
887 885
888 writel(0x00000001, mmio + (seq * 4)); 886 writel(0x00000001, host_mmio + (seq * 4));
889 readl(mmio + (seq * 4)); /* flush */ 887 readl(host_mmio + (seq * 4)); /* flush */
890 888
891 pp->pkt[2] = seq; 889 pp->pkt[2] = seq;
892 wmb(); /* flush PRD, pkt writes */ 890 wmb(); /* flush PRD, pkt writes */
893 writel(pp->pkt_dma, ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); 891 writel(pp->pkt_dma, ata_mmio + PDC_PKT_SUBMIT);
894 readl(ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); /* flush */ 892 readl(ata_mmio + PDC_PKT_SUBMIT); /* flush */
 895 } 893 }
896 894
 897 static unsigned int pdc_qc_issue(struct ata_queued_cmd *qc) 895 static unsigned int pdc_qc_issue(struct ata_queued_cmd *qc)
@@ -909,11 +907,9 @@ static unsigned int pdc_qc_issue(struct ata_queued_cmd *qc)
909 case ATA_PROT_DMA: 907 case ATA_PROT_DMA:
910 pdc_packet_start(qc); 908 pdc_packet_start(qc);
911 return 0; 909 return 0;
912
913 default: 910 default:
914 break; 911 break;
915 } 912 }
916
917 return ata_sff_qc_issue(qc); 913 return ata_sff_qc_issue(qc);
 918 } 914 }
919 915
@@ -987,7 +983,7 @@ static void pdc_ata_setup_port(struct ata_port *ap,
987 983
 988 static void pdc_host_init(struct ata_host *host) 984 static void pdc_host_init(struct ata_host *host)
 989 { 985 {
990 void __iomem *mmio = host->iomap[PDC_MMIO_BAR]; 986 void __iomem *host_mmio = host->iomap[PDC_MMIO_BAR];
991 int is_gen2 = host->ports[0]->flags & PDC_FLAG_GEN_II; 987 int is_gen2 = host->ports[0]->flags & PDC_FLAG_GEN_II;
992 int hotplug_offset; 988 int hotplug_offset;
993 u32 tmp; 989 u32 tmp;
@@ -1004,38 +1000,38 @@ static void pdc_host_init(struct ata_host *host)
1004 */ 1000 */
1005 1001
1006 /* enable BMR_BURST, maybe change FIFO_SHD to 8 dwords */ 1002 /* enable BMR_BURST, maybe change FIFO_SHD to 8 dwords */
1007 tmp = readl(mmio + PDC_FLASH_CTL); 1003 tmp = readl(host_mmio + PDC_FLASH_CTL);
1008 tmp |= 0x02000; /* bit 13 (enable bmr burst) */ 1004 tmp |= 0x02000; /* bit 13 (enable bmr burst) */
1009 if (!is_gen2) 1005 if (!is_gen2)
1010 tmp |= 0x10000; /* bit 16 (fifo threshold at 8 dw) */ 1006 tmp |= 0x10000; /* bit 16 (fifo threshold at 8 dw) */
1011 writel(tmp, mmio + PDC_FLASH_CTL); 1007 writel(tmp, host_mmio + PDC_FLASH_CTL);
1012 1008
1013 /* clear plug/unplug flags for all ports */ 1009 /* clear plug/unplug flags for all ports */
1014 tmp = readl(mmio + hotplug_offset); 1010 tmp = readl(host_mmio + hotplug_offset);
1015 writel(tmp | 0xff, mmio + hotplug_offset); 1011 writel(tmp | 0xff, host_mmio + hotplug_offset);
1016 1012
1017 /* unmask plug/unplug ints */ 1013 /* unmask plug/unplug ints */
1018 tmp = readl(mmio + hotplug_offset); 1014 tmp = readl(host_mmio + hotplug_offset);
1019 writel(tmp & ~0xff0000, mmio + hotplug_offset); 1015 writel(tmp & ~0xff0000, host_mmio + hotplug_offset);
1020 1016
1021 /* don't initialise TBG or SLEW on 2nd generation chips */ 1017 /* don't initialise TBG or SLEW on 2nd generation chips */
1022 if (is_gen2) 1018 if (is_gen2)
1023 return; 1019 return;
1024 1020
1025 /* reduce TBG clock to 133 Mhz. */ 1021 /* reduce TBG clock to 133 Mhz. */
1026 tmp = readl(mmio + PDC_TBG_MODE); 1022 tmp = readl(host_mmio + PDC_TBG_MODE);
1027 tmp &= ~0x30000; /* clear bit 17, 16*/ 1023 tmp &= ~0x30000; /* clear bit 17, 16*/
1028 tmp |= 0x10000; /* set bit 17:16 = 0:1 */ 1024 tmp |= 0x10000; /* set bit 17:16 = 0:1 */
1029 writel(tmp, mmio + PDC_TBG_MODE); 1025 writel(tmp, host_mmio + PDC_TBG_MODE);
1030 1026
1031 readl(mmio + PDC_TBG_MODE); /* flush */ 1027 readl(host_mmio + PDC_TBG_MODE); /* flush */
1032 msleep(10); 1028 msleep(10);
1033 1029
1034 /* adjust slew rate control register. */ 1030 /* adjust slew rate control register. */
1035 tmp = readl(mmio + PDC_SLEW_CTL); 1031 tmp = readl(host_mmio + PDC_SLEW_CTL);
1036 tmp &= 0xFFFFF03F; /* clear bit 11 ~ 6 */ 1032 tmp &= 0xFFFFF03F; /* clear bit 11 ~ 6 */
1037 tmp |= 0x00000900; /* set bit 11-9 = 100b , bit 8-6 = 100 */ 1033 tmp |= 0x00000900; /* set bit 11-9 = 100b , bit 8-6 = 100 */
1038 writel(tmp, mmio + PDC_SLEW_CTL); 1034 writel(tmp, host_mmio + PDC_SLEW_CTL);
 1039 } 1035 }
1040 1036
 1041 static int pdc_ata_init_one(struct pci_dev *pdev, 1037 static int pdc_ata_init_one(struct pci_dev *pdev,
@@ -1045,7 +1041,7 @@ static int pdc_ata_init_one(struct pci_dev *pdev,
1045 const struct ata_port_info *pi = &pdc_port_info[ent->driver_data]; 1041 const struct ata_port_info *pi = &pdc_port_info[ent->driver_data];
1046 const struct ata_port_info *ppi[PDC_MAX_PORTS]; 1042 const struct ata_port_info *ppi[PDC_MAX_PORTS];
1047 struct ata_host *host; 1043 struct ata_host *host;
1048 void __iomem *base; 1044 void __iomem *host_mmio;
1049 int n_ports, i, rc; 1045 int n_ports, i, rc;
1050 int is_sataii_tx4; 1046 int is_sataii_tx4;
1051 1047
@@ -1062,7 +1058,7 @@ static int pdc_ata_init_one(struct pci_dev *pdev,
1062 pcim_pin_device(pdev); 1058 pcim_pin_device(pdev);
1063 if (rc) 1059 if (rc)
1064 return rc; 1060 return rc;
1065 base = pcim_iomap_table(pdev)[PDC_MMIO_BAR]; 1061 host_mmio = pcim_iomap_table(pdev)[PDC_MMIO_BAR];
1066 1062
1067 /* determine port configuration and setup host */ 1063 /* determine port configuration and setup host */
1068 n_ports = 2; 1064 n_ports = 2;
@@ -1072,7 +1068,7 @@ static int pdc_ata_init_one(struct pci_dev *pdev,
1072 ppi[i] = pi; 1068 ppi[i] = pi;
1073 1069
1074 if (pi->flags & PDC_FLAG_SATA_PATA) { 1070 if (pi->flags & PDC_FLAG_SATA_PATA) {
1075 u8 tmp = readb(base + PDC_FLASH_CTL+1); 1071 u8 tmp = readb(host_mmio + PDC_FLASH_CTL + 1);
1076 if (!(tmp & 0x80)) 1072 if (!(tmp & 0x80))
1077 ppi[n_ports++] = pi + 1; 1073 ppi[n_ports++] = pi + 1;
1078 } 1074 }
@@ -1088,13 +1084,13 @@ static int pdc_ata_init_one(struct pci_dev *pdev,
1088 for (i = 0; i < host->n_ports; i++) { 1084 for (i = 0; i < host->n_ports; i++) {
1089 struct ata_port *ap = host->ports[i]; 1085 struct ata_port *ap = host->ports[i];
1090 unsigned int ata_no = pdc_port_no_to_ata_no(i, is_sataii_tx4); 1086 unsigned int ata_no = pdc_port_no_to_ata_no(i, is_sataii_tx4);
1091 unsigned int port_offset = 0x200 + ata_no * 0x80; 1087 unsigned int ata_offset = 0x200 + ata_no * 0x80;
1092 unsigned int scr_offset = 0x400 + ata_no * 0x100; 1088 unsigned int scr_offset = 0x400 + ata_no * 0x100;
1093 1089
1094 pdc_ata_setup_port(ap, base + port_offset, base + scr_offset); 1090 pdc_ata_setup_port(ap, host_mmio + ata_offset, host_mmio + scr_offset);
1095 1091
1096 ata_port_pbar_desc(ap, PDC_MMIO_BAR, -1, "mmio"); 1092 ata_port_pbar_desc(ap, PDC_MMIO_BAR, -1, "mmio");
1097 ata_port_pbar_desc(ap, PDC_MMIO_BAR, port_offset, "port"); 1093 ata_port_pbar_desc(ap, PDC_MMIO_BAR, ata_offset, "ata");
1098 } 1094 }
1099 1095
1100 /* initialize adapter */ 1096 /* initialize adapter */
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index 27a110110077..8ee6b5b4ede7 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -899,14 +899,25 @@ static bool sil24_qc_fill_rtf(struct ata_queued_cmd *qc)
899 899
 900 static void sil24_pmp_attach(struct ata_port *ap) 900 static void sil24_pmp_attach(struct ata_port *ap)
 901 { 901 {
902 u32 *gscr = ap->link.device->gscr;
903
902 sil24_config_pmp(ap, 1); 904 sil24_config_pmp(ap, 1);
903 sil24_init_port(ap); 905 sil24_init_port(ap);
906
907 if (sata_pmp_gscr_vendor(gscr) == 0x11ab &&
908 sata_pmp_gscr_devid(gscr) == 0x4140) {
909 ata_port_printk(ap, KERN_INFO,
910 "disabling NCQ support due to sil24-mv4140 quirk\n");
911 ap->flags &= ~ATA_FLAG_NCQ;
912 }
 904 } 913 }
905 914
 906 static void sil24_pmp_detach(struct ata_port *ap) 915 static void sil24_pmp_detach(struct ata_port *ap)
 907 { 916 {
908 sil24_init_port(ap); 917 sil24_init_port(ap);
909 sil24_config_pmp(ap, 0); 918 sil24_config_pmp(ap, 0);
919
920 ap->flags |= ATA_FLAG_NCQ;
 910 } 921 }
911 922
 912 static int sil24_pmp_hardreset(struct ata_link *link, unsigned int *class, 923 static int sil24_pmp_hardreset(struct ata_link *link, unsigned int *class,