Diffstat (limited to 'drivers/ata')
-rw-r--r--  drivers/ata/Kconfig | 70
-rw-r--r--  drivers/ata/Makefile | 6
-rw-r--r--  drivers/ata/acard-ahci.c | 528
-rw-r--r--  drivers/ata/ahci.c | 24
-rw-r--r--  drivers/ata/ahci.h | 19
-rw-r--r--  drivers/ata/ahci_platform.c | 3
-rw-r--r--  drivers/ata/ata_generic.c | 54
-rw-r--r--  drivers/ata/ata_piix.c | 54
-rw-r--r--  drivers/ata/libahci.c | 246
-rw-r--r--  drivers/ata/libata-acpi.c | 3
-rw-r--r--  drivers/ata/libata-core.c | 479
-rw-r--r--  drivers/ata/libata-eh.c | 349
-rw-r--r--  drivers/ata/libata-pmp.c | 76
-rw-r--r--  drivers/ata/libata-scsi.c | 342
-rw-r--r--  drivers/ata/libata-sff.c | 55
-rw-r--r--  drivers/ata/libata-transport.c | 774
-rw-r--r--  drivers/ata/libata-transport.h | 18
-rw-r--r--  drivers/ata/libata.h | 22
-rw-r--r--  drivers/ata/pata_acpi.c | 2
-rw-r--r--  drivers/ata/pata_amd.c | 2
-rw-r--r--  drivers/ata/pata_arasan_cf.c | 983
-rw-r--r--  drivers/ata/pata_at32.c | 2
-rw-r--r--  drivers/ata/pata_at91.c | 285
-rw-r--r--  drivers/ata/pata_bf54x.c | 14
-rw-r--r--  drivers/ata/pata_cmd640.c | 12
-rw-r--r--  drivers/ata/pata_cmd64x.c | 42
-rw-r--r--  drivers/ata/pata_cs5520.c | 2
-rw-r--r--  drivers/ata/pata_cs5536.c | 20
-rw-r--r--  drivers/ata/pata_hpt366.c | 54
-rw-r--r--  drivers/ata/pata_hpt37x.c | 257
-rw-r--r--  drivers/ata/pata_hpt3x2n.c | 155
-rw-r--r--  drivers/ata/pata_hpt3x3.c | 2
-rw-r--r--  drivers/ata/pata_it821x.c | 8
-rw-r--r--  drivers/ata/pata_ixp4xx_cf.c | 4
-rw-r--r--  drivers/ata/pata_legacy.c | 2
-rw-r--r--  drivers/ata/pata_macio.c | 3
-rw-r--r--  drivers/ata/pata_marvell.c | 5
-rw-r--r--  drivers/ata/pata_mpc52xx.c | 10
-rw-r--r--  drivers/ata/pata_mpiix.c | 2
-rw-r--r--  drivers/ata/pata_ninja32.c | 2
-rw-r--r--  drivers/ata/pata_octeon_cf.c | 7
-rw-r--r--  drivers/ata/pata_of_platform.c | 9
-rw-r--r--  drivers/ata/pata_palmld.c | 45
-rw-r--r--  drivers/ata/pata_pcmcia.c | 108
-rw-r--r--  drivers/ata/pata_pdc2027x.c | 6
-rw-r--r--  drivers/ata/pata_pdc202xx_old.c | 23
-rw-r--r--  drivers/ata/pata_pxa.c | 1
-rw-r--r--  drivers/ata/pata_rb532_cf.c | 5
-rw-r--r--  drivers/ata/pata_rz1000.c | 2
-rw-r--r--  drivers/ata/pata_samsung_cf.c | 3
-rw-r--r--  drivers/ata/pata_scc.c | 6
-rw-r--r--  drivers/ata/pata_sil680.c | 20
-rw-r--r--  drivers/ata/pata_sis.c | 6
-rw-r--r--  drivers/ata/pata_sl82c105.c | 11
-rw-r--r--  drivers/ata/pata_triflex.c | 25
-rw-r--r--  drivers/ata/pdc_adma.c | 4
-rw-r--r--  drivers/ata/sata_dwc_460ex.c | 86
-rw-r--r--  drivers/ata/sata_fsl.c | 87
-rw-r--r--  drivers/ata/sata_inic162x.c | 2
-rw-r--r--  drivers/ata/sata_mv.c | 32
-rw-r--r--  drivers/ata/sata_nv.c | 73
-rw-r--r--  drivers/ata/sata_promise.c | 4
-rw-r--r--  drivers/ata/sata_qstor.c | 3
-rw-r--r--  drivers/ata/sata_sil.c | 3
-rw-r--r--  drivers/ata/sata_sil24.c | 17
-rw-r--r--  drivers/ata/sata_sis.c | 2
-rw-r--r--  drivers/ata/sata_svw.c | 12
-rw-r--r--  drivers/ata/sata_sx4.c | 5
-rw-r--r--  drivers/ata/sata_uli.c | 3
-rw-r--r--  drivers/ata/sata_via.c | 22
-rw-r--r--  drivers/ata/sata_vsc.c | 5
71 files changed, 4325 insertions(+), 1302 deletions(-)
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 11ec911016c6..75afa75a515e 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -2,6 +2,14 @@
2# SATA/PATA driver configuration 2# SATA/PATA driver configuration
3# 3#
4 4
5config HAVE_PATA_PLATFORM
6 bool
7 help
8 This is an internal configuration node for any machine that
9 uses pata-platform driver to enable the relevant driver in the
10 configuration structure without having to submit endless patches
11 to update the PATA_PLATFORM entry.
12
5menuconfig ATA 13menuconfig ATA
6 tristate "Serial ATA and Parallel ATA drivers" 14 tristate "Serial ATA and Parallel ATA drivers"
7 depends on HAS_IOMEM 15 depends on HAS_IOMEM
@@ -90,6 +98,14 @@ config SATA_INIC162X
90 help 98 help
91 This option enables support for Initio 162x Serial ATA. 99 This option enables support for Initio 162x Serial ATA.
92 100
101config SATA_ACARD_AHCI
102 tristate "ACard AHCI variant (ATP 8620)"
103 depends on PCI
104 help
105 This option enables support for Acard.
106
107 If unsure, say N.
108
93config SATA_SIL24 109config SATA_SIL24
94 tristate "Silicon Image 3124/3132 SATA support" 110 tristate "Silicon Image 3124/3132 SATA support"
95 depends on PCI 111 depends on PCI
@@ -128,16 +144,6 @@ config PDC_ADMA
128 144
129 If unsure, say N. 145 If unsure, say N.
130 146
131config PATA_MPC52xx
132 tristate "Freescale MPC52xx SoC internal IDE"
133 depends on PPC_MPC52xx && PPC_BESTCOMM
134 select PPC_BESTCOMM_ATA
135 help
136 This option enables support for integrated IDE controller
137 of the Freescale MPC52xx SoC.
138
139 If unsure, say N.
140
141config PATA_OCTEON_CF 147config PATA_OCTEON_CF
142 tristate "OCTEON Boot Bus Compact Flash support" 148 tristate "OCTEON Boot Bus Compact Flash support"
143 depends on CPU_CAVIUM_OCTEON 149 depends on CPU_CAVIUM_OCTEON
@@ -196,6 +202,18 @@ config SATA_DWC
196 202
197 If unsure, say N. 203 If unsure, say N.
198 204
205config SATA_DWC_DEBUG
206 bool "Debugging driver version"
207 depends on SATA_DWC
208 help
209 This option enables debugging output in the driver.
210
211config SATA_DWC_VDEBUG
212 bool "Verbose debug output"
213 depends on SATA_DWC_DEBUG
214 help
215 This option enables the taskfile dumping and NCQ debugging.
216
199config SATA_MV 217config SATA_MV
200 tristate "Marvell SATA support" 218 tristate "Marvell SATA support"
201 help 219 help
@@ -293,6 +311,12 @@ config PATA_AMD
293 311
294 If unsure, say N. 312 If unsure, say N.
295 313
314config PATA_ARASAN_CF
315 tristate "ARASAN CompactFlash PATA Controller Support"
316 select DMA_ENGINE
317 help
318 Say Y here to support the ARASAN CompactFlash PATA controller
319
296config PATA_ARTOP 320config PATA_ARTOP
297 tristate "ARTOP 6210/6260 PATA support" 321 tristate "ARTOP 6210/6260 PATA support"
298 depends on PCI 322 depends on PCI
@@ -366,7 +390,7 @@ config PATA_CS5535
366 390
367config PATA_CS5536 391config PATA_CS5536
368 tristate "CS5536 PATA support" 392 tristate "CS5536 PATA support"
369 depends on PCI && X86 && !X86_64 393 depends on PCI
370 help 394 help
371 This option enables support for the AMD CS5536 395 This option enables support for the AMD CS5536
372 companion chip used with the Geode LX processor family. 396 companion chip used with the Geode LX processor family.
@@ -410,11 +434,11 @@ config PATA_HPT37X
410 If unsure, say N. 434 If unsure, say N.
411 435
412config PATA_HPT3X2N 436config PATA_HPT3X2N
413 tristate "HPT 372N/302N PATA support" 437 tristate "HPT 371N/372N/302N PATA support"
414 depends on PCI 438 depends on PCI
415 help 439 help
416 This option enables support for the N variant HPT PATA 440 This option enables support for the N variant HPT PATA
417 controllers via the new ATA layer 441 controllers via the new ATA layer.
418 442
419 If unsure, say N. 443 If unsure, say N.
420 444
@@ -491,6 +515,16 @@ config PATA_MARVELL
491 515
492 If unsure, say N. 516 If unsure, say N.
493 517
518config PATA_MPC52xx
519 tristate "Freescale MPC52xx SoC internal IDE"
520 depends on PPC_MPC52xx && PPC_BESTCOMM
521 select PPC_BESTCOMM_ATA
522 help
523 This option enables support for integrated IDE controller
524 of the Freescale MPC52xx SoC.
525
526 If unsure, say N.
527
494config PATA_NETCELL 528config PATA_NETCELL
495 tristate "NETCELL Revolution RAID support" 529 tristate "NETCELL Revolution RAID support"
496 depends on PCI 530 depends on PCI
@@ -765,17 +799,9 @@ config PATA_PCMCIA
765 799
766 If unsure, say N. 800 If unsure, say N.
767 801
768config HAVE_PATA_PLATFORM
769 bool
770 help
771 This is an internal configuration node for any machine that
772 uses pata-platform driver to enable the relevant driver in the
773 configuration structure without having to submit endless patches
774 to update the PATA_PLATFORM entry.
775
776config PATA_PLATFORM 802config PATA_PLATFORM
777 tristate "Generic platform device PATA support" 803 tristate "Generic platform device PATA support"
778 depends on EMBEDDED || PPC || HAVE_PATA_PLATFORM 804 depends on EXPERT || PPC || HAVE_PATA_PLATFORM
779 help 805 help
780 This option enables support for generic directly connected ATA 806 This option enables support for generic directly connected ATA
781 devices commonly found on embedded systems. 807 devices commonly found on embedded systems.
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index d5df04a395ca..8ac64e1aa051 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -3,6 +3,7 @@ obj-$(CONFIG_ATA) += libata.o
3 3
4# non-SFF interface 4# non-SFF interface
5obj-$(CONFIG_SATA_AHCI) += ahci.o libahci.o 5obj-$(CONFIG_SATA_AHCI) += ahci.o libahci.o
6obj-$(CONFIG_SATA_ACARD_AHCI) += acard-ahci.o libahci.o
6obj-$(CONFIG_SATA_AHCI_PLATFORM) += ahci_platform.o libahci.o 7obj-$(CONFIG_SATA_AHCI_PLATFORM) += ahci_platform.o libahci.o
7obj-$(CONFIG_SATA_FSL) += sata_fsl.o 8obj-$(CONFIG_SATA_FSL) += sata_fsl.o
8obj-$(CONFIG_SATA_INIC162X) += sata_inic162x.o 9obj-$(CONFIG_SATA_INIC162X) += sata_inic162x.o
@@ -11,7 +12,7 @@ obj-$(CONFIG_SATA_DWC) += sata_dwc_460ex.o
11 12
12# SFF w/ custom DMA 13# SFF w/ custom DMA
13obj-$(CONFIG_PDC_ADMA) += pdc_adma.o 14obj-$(CONFIG_PDC_ADMA) += pdc_adma.o
14obj-$(CONFIG_PATA_MPC52xx) += pata_mpc52xx.o 15obj-$(CONFIG_PATA_ARASAN_CF) += pata_arasan_cf.o
15obj-$(CONFIG_PATA_OCTEON_CF) += pata_octeon_cf.o 16obj-$(CONFIG_PATA_OCTEON_CF) += pata_octeon_cf.o
16obj-$(CONFIG_SATA_QSTOR) += sata_qstor.o 17obj-$(CONFIG_SATA_QSTOR) += sata_qstor.o
17obj-$(CONFIG_SATA_SX4) += sata_sx4.o 18obj-$(CONFIG_SATA_SX4) += sata_sx4.o
@@ -52,6 +53,7 @@ obj-$(CONFIG_PATA_IT821X) += pata_it821x.o
52obj-$(CONFIG_PATA_JMICRON) += pata_jmicron.o 53obj-$(CONFIG_PATA_JMICRON) += pata_jmicron.o
53obj-$(CONFIG_PATA_MACIO) += pata_macio.o 54obj-$(CONFIG_PATA_MACIO) += pata_macio.o
54obj-$(CONFIG_PATA_MARVELL) += pata_marvell.o 55obj-$(CONFIG_PATA_MARVELL) += pata_marvell.o
56obj-$(CONFIG_PATA_MPC52xx) += pata_mpc52xx.o
55obj-$(CONFIG_PATA_NETCELL) += pata_netcell.o 57obj-$(CONFIG_PATA_NETCELL) += pata_netcell.o
56obj-$(CONFIG_PATA_NINJA32) += pata_ninja32.o 58obj-$(CONFIG_PATA_NINJA32) += pata_ninja32.o
57obj-$(CONFIG_PATA_NS87415) += pata_ns87415.o 59obj-$(CONFIG_PATA_NS87415) += pata_ns87415.o
@@ -99,7 +101,7 @@ obj-$(CONFIG_ATA_GENERIC) += ata_generic.o
99# Should be last libata driver 101# Should be last libata driver
100obj-$(CONFIG_PATA_LEGACY) += pata_legacy.o 102obj-$(CONFIG_PATA_LEGACY) += pata_legacy.o
101 103
102libata-objs := libata-core.o libata-scsi.o libata-eh.o 104libata-y := libata-core.o libata-scsi.o libata-eh.o libata-transport.o
103libata-$(CONFIG_ATA_SFF) += libata-sff.o 105libata-$(CONFIG_ATA_SFF) += libata-sff.o
104libata-$(CONFIG_SATA_PMP) += libata-pmp.o 106libata-$(CONFIG_SATA_PMP) += libata-pmp.o
105libata-$(CONFIG_ATA_ACPI) += libata-acpi.o 107libata-$(CONFIG_ATA_ACPI) += libata-acpi.o
diff --git a/drivers/ata/acard-ahci.c b/drivers/ata/acard-ahci.c
new file mode 100644
index 000000000000..ae22be4157b5
--- /dev/null
+++ b/drivers/ata/acard-ahci.c
@@ -0,0 +1,528 @@
1
2/*
3 * acard-ahci.c - ACard AHCI SATA support
4 *
5 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
6 * Please ALWAYS copy linux-ide@vger.kernel.org
7 * on emails.
8 *
9 * Copyright 2010 Red Hat, Inc.
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * AHCI hardware documentation:
31 * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
32 * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
33 *
34 */
35
36#include <linux/kernel.h>
37#include <linux/module.h>
38#include <linux/pci.h>
39#include <linux/init.h>
40#include <linux/blkdev.h>
41#include <linux/delay.h>
42#include <linux/interrupt.h>
43#include <linux/dma-mapping.h>
44#include <linux/device.h>
45#include <linux/dmi.h>
46#include <linux/gfp.h>
47#include <scsi/scsi_host.h>
48#include <scsi/scsi_cmnd.h>
49#include <linux/libata.h>
50#include "ahci.h"
51
52#define DRV_NAME "acard-ahci"
53#define DRV_VERSION "1.0"
54
55/*
56 Received FIS structure limited to 80h.
57*/
58
59#define ACARD_AHCI_RX_FIS_SZ 128
60
61enum {
62 AHCI_PCI_BAR = 5,
63};
64
65enum board_ids {
66 board_acard_ahci,
67};
68
69struct acard_sg {
70 __le32 addr;
71 __le32 addr_hi;
72 __le32 reserved;
73 __le32 size; /* bit 31 (EOT) max==0x10000 (64k) */
74};
75
76static void acard_ahci_qc_prep(struct ata_queued_cmd *qc);
77static bool acard_ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
78static int acard_ahci_port_start(struct ata_port *ap);
79static int acard_ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
80
81#ifdef CONFIG_PM
82static int acard_ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
83static int acard_ahci_pci_device_resume(struct pci_dev *pdev);
84#endif
85
86static struct scsi_host_template acard_ahci_sht = {
87 AHCI_SHT("acard-ahci"),
88};
89
90static struct ata_port_operations acard_ops = {
91 .inherits = &ahci_ops,
92 .qc_prep = acard_ahci_qc_prep,
93 .qc_fill_rtf = acard_ahci_qc_fill_rtf,
94 .port_start = acard_ahci_port_start,
95};
96
97#define AHCI_HFLAGS(flags) .private_data = (void *)(flags)
98
99static const struct ata_port_info acard_ahci_port_info[] = {
100 [board_acard_ahci] =
101 {
102 AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ),
103 .flags = AHCI_FLAG_COMMON,
104 .pio_mask = ATA_PIO4,
105 .udma_mask = ATA_UDMA6,
106 .port_ops = &acard_ops,
107 },
108};
109
110static const struct pci_device_id acard_ahci_pci_tbl[] = {
111 /* ACard */
112 { PCI_VDEVICE(ARTOP, 0x000d), board_acard_ahci }, /* ATP8620 */
113
114 { } /* terminate list */
115};
116
117static struct pci_driver acard_ahci_pci_driver = {
118 .name = DRV_NAME,
119 .id_table = acard_ahci_pci_tbl,
120 .probe = acard_ahci_init_one,
121 .remove = ata_pci_remove_one,
122#ifdef CONFIG_PM
123 .suspend = acard_ahci_pci_device_suspend,
124 .resume = acard_ahci_pci_device_resume,
125#endif
126};
127
128#ifdef CONFIG_PM
129static int acard_ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
130{
131 struct ata_host *host = dev_get_drvdata(&pdev->dev);
132 struct ahci_host_priv *hpriv = host->private_data;
133 void __iomem *mmio = hpriv->mmio;
134 u32 ctl;
135
136 if (mesg.event & PM_EVENT_SUSPEND &&
137 hpriv->flags & AHCI_HFLAG_NO_SUSPEND) {
138 dev_printk(KERN_ERR, &pdev->dev,
139 "BIOS update required for suspend/resume\n");
140 return -EIO;
141 }
142
143 if (mesg.event & PM_EVENT_SLEEP) {
144 /* AHCI spec rev1.1 section 8.3.3:
145 * Software must disable interrupts prior to requesting a
146 * transition of the HBA to D3 state.
147 */
148 ctl = readl(mmio + HOST_CTL);
149 ctl &= ~HOST_IRQ_EN;
150 writel(ctl, mmio + HOST_CTL);
151 readl(mmio + HOST_CTL); /* flush */
152 }
153
154 return ata_pci_device_suspend(pdev, mesg);
155}
156
157static int acard_ahci_pci_device_resume(struct pci_dev *pdev)
158{
159 struct ata_host *host = dev_get_drvdata(&pdev->dev);
160 int rc;
161
162 rc = ata_pci_device_do_resume(pdev);
163 if (rc)
164 return rc;
165
166 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
167 rc = ahci_reset_controller(host);
168 if (rc)
169 return rc;
170
171 ahci_init_controller(host);
172 }
173
174 ata_host_resume(host);
175
176 return 0;
177}
178#endif
179
180static int acard_ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
181{
182 int rc;
183
184 if (using_dac &&
185 !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
186 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
187 if (rc) {
188 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
189 if (rc) {
190 dev_printk(KERN_ERR, &pdev->dev,
191 "64-bit DMA enable failed\n");
192 return rc;
193 }
194 }
195 } else {
196 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
197 if (rc) {
198 dev_printk(KERN_ERR, &pdev->dev,
199 "32-bit DMA enable failed\n");
200 return rc;
201 }
202 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
203 if (rc) {
204 dev_printk(KERN_ERR, &pdev->dev,
205 "32-bit consistent DMA enable failed\n");
206 return rc;
207 }
208 }
209 return 0;
210}
211
212static void acard_ahci_pci_print_info(struct ata_host *host)
213{
214 struct pci_dev *pdev = to_pci_dev(host->dev);
215 u16 cc;
216 const char *scc_s;
217
218 pci_read_config_word(pdev, 0x0a, &cc);
219 if (cc == PCI_CLASS_STORAGE_IDE)
220 scc_s = "IDE";
221 else if (cc == PCI_CLASS_STORAGE_SATA)
222 scc_s = "SATA";
223 else if (cc == PCI_CLASS_STORAGE_RAID)
224 scc_s = "RAID";
225 else
226 scc_s = "unknown";
227
228 ahci_print_info(host, scc_s);
229}
230
231static unsigned int acard_ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
232{
233 struct scatterlist *sg;
234 struct acard_sg *acard_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
235 unsigned int si, last_si = 0;
236
237 VPRINTK("ENTER\n");
238
239 /*
240 * Next, the S/G list.
241 */
242 for_each_sg(qc->sg, sg, qc->n_elem, si) {
243 dma_addr_t addr = sg_dma_address(sg);
244 u32 sg_len = sg_dma_len(sg);
245
246 /*
247 * ACard note:
248 * We must set an end-of-table (EOT) bit,
249 * and the segment cannot exceed 64k (0x10000)
250 */
251 acard_sg[si].addr = cpu_to_le32(addr & 0xffffffff);
252 acard_sg[si].addr_hi = cpu_to_le32((addr >> 16) >> 16);
253 acard_sg[si].size = cpu_to_le32(sg_len);
254 last_si = si;
255 }
256
257 acard_sg[last_si].size |= cpu_to_le32(1 << 31); /* set EOT */
258
259 return si;
260}
261
262static void acard_ahci_qc_prep(struct ata_queued_cmd *qc)
263{
264 struct ata_port *ap = qc->ap;
265 struct ahci_port_priv *pp = ap->private_data;
266 int is_atapi = ata_is_atapi(qc->tf.protocol);
267 void *cmd_tbl;
268 u32 opts;
269 const u32 cmd_fis_len = 5; /* five dwords */
270 unsigned int n_elem;
271
272 /*
273 * Fill in command table information. First, the header,
274 * a SATA Register - Host to Device command FIS.
275 */
276 cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;
277
278 ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl);
279 if (is_atapi) {
280 memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
281 memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
282 }
283
284 n_elem = 0;
285 if (qc->flags & ATA_QCFLAG_DMAMAP)
286 n_elem = acard_ahci_fill_sg(qc, cmd_tbl);
287
288 /*
289 * Fill in command slot information.
290 *
291 * ACard note: prd table length not filled in
292 */
293 opts = cmd_fis_len | (qc->dev->link->pmp << 12);
294 if (qc->tf.flags & ATA_TFLAG_WRITE)
295 opts |= AHCI_CMD_WRITE;
296 if (is_atapi)
297 opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
298
299 ahci_fill_cmd_slot(pp, qc->tag, opts);
300}
301
302static bool acard_ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
303{
304 struct ahci_port_priv *pp = qc->ap->private_data;
305 u8 *rx_fis = pp->rx_fis;
306
307 if (pp->fbs_enabled)
308 rx_fis += qc->dev->link->pmp * ACARD_AHCI_RX_FIS_SZ;
309
310 /*
311 * After a successful execution of an ATA PIO data-in command,
312 * the device doesn't send D2H Reg FIS to update the TF and
313 * the host should take TF and E_Status from the preceding PIO
314 * Setup FIS.
315 */
316 if (qc->tf.protocol == ATA_PROT_PIO && qc->dma_dir == DMA_FROM_DEVICE &&
317 !(qc->flags & ATA_QCFLAG_FAILED)) {
318 ata_tf_from_fis(rx_fis + RX_FIS_PIO_SETUP, &qc->result_tf);
319 qc->result_tf.command = (rx_fis + RX_FIS_PIO_SETUP)[15];
320 } else
321 ata_tf_from_fis(rx_fis + RX_FIS_D2H_REG, &qc->result_tf);
322
323 return true;
324}
325
326static int acard_ahci_port_start(struct ata_port *ap)
327{
328 struct ahci_host_priv *hpriv = ap->host->private_data;
329 struct device *dev = ap->host->dev;
330 struct ahci_port_priv *pp;
331 void *mem;
332 dma_addr_t mem_dma;
333 size_t dma_sz, rx_fis_sz;
334
335 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
336 if (!pp)
337 return -ENOMEM;
338
339 /* check FBS capability */
340 if ((hpriv->cap & HOST_CAP_FBS) && sata_pmp_supported(ap)) {
341 void __iomem *port_mmio = ahci_port_base(ap);
342 u32 cmd = readl(port_mmio + PORT_CMD);
343 if (cmd & PORT_CMD_FBSCP)
344 pp->fbs_supported = true;
345 else if (hpriv->flags & AHCI_HFLAG_YES_FBS) {
346 dev_printk(KERN_INFO, dev,
347 "port %d can do FBS, forcing FBSCP\n",
348 ap->port_no);
349 pp->fbs_supported = true;
350 } else
351 dev_printk(KERN_WARNING, dev,
352 "port %d is not capable of FBS\n",
353 ap->port_no);
354 }
355
356 if (pp->fbs_supported) {
357 dma_sz = AHCI_PORT_PRIV_FBS_DMA_SZ;
358 rx_fis_sz = ACARD_AHCI_RX_FIS_SZ * 16;
359 } else {
360 dma_sz = AHCI_PORT_PRIV_DMA_SZ;
361 rx_fis_sz = ACARD_AHCI_RX_FIS_SZ;
362 }
363
364 mem = dmam_alloc_coherent(dev, dma_sz, &mem_dma, GFP_KERNEL);
365 if (!mem)
366 return -ENOMEM;
367 memset(mem, 0, dma_sz);
368
369 /*
370 * First item in chunk of DMA memory: 32-slot command table,
371 * 32 bytes each in size
372 */
373 pp->cmd_slot = mem;
374 pp->cmd_slot_dma = mem_dma;
375
376 mem += AHCI_CMD_SLOT_SZ;
377 mem_dma += AHCI_CMD_SLOT_SZ;
378
379 /*
380 * Second item: Received-FIS area
381 */
382 pp->rx_fis = mem;
383 pp->rx_fis_dma = mem_dma;
384
385 mem += rx_fis_sz;
386 mem_dma += rx_fis_sz;
387
388 /*
389 * Third item: data area for storing a single command
390 * and its scatter-gather table
391 */
392 pp->cmd_tbl = mem;
393 pp->cmd_tbl_dma = mem_dma;
394
395 /*
396 * Save off initial list of interrupts to be enabled.
397 * This could be changed later
398 */
399 pp->intr_mask = DEF_PORT_IRQ;
400
401 ap->private_data = pp;
402
403 /* engage engines, captain */
404 return ahci_port_resume(ap);
405}
406
407static int acard_ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
408{
409 static int printed_version;
410 unsigned int board_id = ent->driver_data;
411 struct ata_port_info pi = acard_ahci_port_info[board_id];
412 const struct ata_port_info *ppi[] = { &pi, NULL };
413 struct device *dev = &pdev->dev;
414 struct ahci_host_priv *hpriv;
415 struct ata_host *host;
416 int n_ports, i, rc;
417
418 VPRINTK("ENTER\n");
419
420 WARN_ON((int)ATA_MAX_QUEUE > AHCI_MAX_CMDS);
421
422 if (!printed_version++)
423 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
424
425 /* acquire resources */
426 rc = pcim_enable_device(pdev);
427 if (rc)
428 return rc;
429
430 /* AHCI controllers often implement SFF compatible interface.
431 * Grab all PCI BARs just in case.
432 */
433 rc = pcim_iomap_regions_request_all(pdev, 1 << AHCI_PCI_BAR, DRV_NAME);
434 if (rc == -EBUSY)
435 pcim_pin_device(pdev);
436 if (rc)
437 return rc;
438
439 hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
440 if (!hpriv)
441 return -ENOMEM;
442 hpriv->flags |= (unsigned long)pi.private_data;
443
444 if (!(hpriv->flags & AHCI_HFLAG_NO_MSI))
445 pci_enable_msi(pdev);
446
447 hpriv->mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
448
449 /* save initial config */
450 ahci_save_initial_config(&pdev->dev, hpriv, 0, 0);
451
452 /* prepare host */
453 if (hpriv->cap & HOST_CAP_NCQ)
454 pi.flags |= ATA_FLAG_NCQ;
455
456 if (hpriv->cap & HOST_CAP_PMP)
457 pi.flags |= ATA_FLAG_PMP;
458
459 ahci_set_em_messages(hpriv, &pi);
460
461 /* CAP.NP sometimes indicate the index of the last enabled
462 * port, at other times, that of the last possible port, so
463 * determining the maximum port number requires looking at
464 * both CAP.NP and port_map.
465 */
466 n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));
467
468 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
469 if (!host)
470 return -ENOMEM;
471 host->private_data = hpriv;
472
473 if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
474 host->flags |= ATA_HOST_PARALLEL_SCAN;
475 else
476 printk(KERN_INFO "ahci: SSS flag set, parallel bus scan disabled\n");
477
478 for (i = 0; i < host->n_ports; i++) {
479 struct ata_port *ap = host->ports[i];
480
481 ata_port_pbar_desc(ap, AHCI_PCI_BAR, -1, "abar");
482 ata_port_pbar_desc(ap, AHCI_PCI_BAR,
483 0x100 + ap->port_no * 0x80, "port");
484
485 /* set initial link pm policy */
486 /*
487 ap->pm_policy = NOT_AVAILABLE;
488 */
489 /* disabled/not-implemented port */
490 if (!(hpriv->port_map & (1 << i)))
491 ap->ops = &ata_dummy_port_ops;
492 }
493
494 /* initialize adapter */
495 rc = acard_ahci_configure_dma_masks(pdev, hpriv->cap & HOST_CAP_64);
496 if (rc)
497 return rc;
498
499 rc = ahci_reset_controller(host);
500 if (rc)
501 return rc;
502
503 ahci_init_controller(host);
504 acard_ahci_pci_print_info(host);
505
506 pci_set_master(pdev);
507 return ata_host_activate(host, pdev->irq, ahci_interrupt, IRQF_SHARED,
508 &acard_ahci_sht);
509}
510
511static int __init acard_ahci_init(void)
512{
513 return pci_register_driver(&acard_ahci_pci_driver);
514}
515
516static void __exit acard_ahci_exit(void)
517{
518 pci_unregister_driver(&acard_ahci_pci_driver);
519}
520
521MODULE_AUTHOR("Jeff Garzik");
522MODULE_DESCRIPTION("ACard AHCI SATA low-level driver");
523MODULE_LICENSE("GPL");
524MODULE_DEVICE_TABLE(pci, acard_ahci_pci_tbl);
525MODULE_VERSION(DRV_VERSION);
526
527module_init(acard_ahci_init);
528module_exit(acard_ahci_exit);
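
[Editorial note] A detail worth calling out in acard_ahci_fill_sg() in the new file above: the upper half of each bus address is written as (addr >> 16) >> 16 rather than addr >> 32. The double shift is the usual libata idiom for splitting a dma_addr_t that may be only 32 bits wide, since a direct shift by 32 would be undefined in that case. A minimal sketch of the idiom, with an illustrative helper name that is not part of the patch:

static inline void acard_sg_set_addr(struct acard_sg *acard_sg, dma_addr_t addr)
{
	/* low 32 bits of the bus address */
	acard_sg->addr = cpu_to_le32(addr & 0xffffffff);
	/* high 32 bits; two 16-bit shifts stay well defined even when
	 * dma_addr_t is a 32-bit type (addr >> 32 would not be) */
	acard_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
}
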
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 99d0e5a51148..71afe0371311 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -150,7 +150,7 @@ static const struct ata_port_info ahci_port_info[] = {
150 { 150 {
151 AHCI_HFLAGS (AHCI_HFLAG_NO_FPDMA_AA | AHCI_HFLAG_NO_PMP | 151 AHCI_HFLAGS (AHCI_HFLAG_NO_FPDMA_AA | AHCI_HFLAG_NO_PMP |
152 AHCI_HFLAG_YES_NCQ), 152 AHCI_HFLAG_YES_NCQ),
153 .flags = AHCI_FLAG_COMMON, 153 .flags = AHCI_FLAG_COMMON | ATA_FLAG_NO_DIPM,
154 .pio_mask = ATA_PIO4, 154 .pio_mask = ATA_PIO4,
155 .udma_mask = ATA_UDMA6, 155 .udma_mask = ATA_UDMA6,
156 .port_ops = &ahci_ops, 156 .port_ops = &ahci_ops,
@@ -175,8 +175,7 @@ static const struct ata_port_info ahci_port_info[] = {
175 { 175 {
176 AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_MSI | 176 AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_MSI |
177 AHCI_HFLAG_MV_PATA | AHCI_HFLAG_NO_PMP), 177 AHCI_HFLAG_MV_PATA | AHCI_HFLAG_NO_PMP),
178 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 178 .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
179 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
180 .pio_mask = ATA_PIO4, 179 .pio_mask = ATA_PIO4,
181 .udma_mask = ATA_UDMA6, 180 .udma_mask = ATA_UDMA6,
182 .port_ops = &ahci_ops, 181 .port_ops = &ahci_ops,
@@ -260,6 +259,14 @@ static const struct pci_device_id ahci_pci_tbl[] = {
260 { PCI_VDEVICE(INTEL, 0x1d02), board_ahci }, /* PBG AHCI */ 259 { PCI_VDEVICE(INTEL, 0x1d02), board_ahci }, /* PBG AHCI */
261 { PCI_VDEVICE(INTEL, 0x1d04), board_ahci }, /* PBG RAID */ 260 { PCI_VDEVICE(INTEL, 0x1d04), board_ahci }, /* PBG RAID */
262 { PCI_VDEVICE(INTEL, 0x1d06), board_ahci }, /* PBG RAID */ 261 { PCI_VDEVICE(INTEL, 0x1d06), board_ahci }, /* PBG RAID */
262 { PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* PBG RAID */
263 { PCI_VDEVICE(INTEL, 0x2323), board_ahci }, /* DH89xxCC AHCI */
264 { PCI_VDEVICE(INTEL, 0x1e02), board_ahci }, /* Panther Point AHCI */
265 { PCI_VDEVICE(INTEL, 0x1e03), board_ahci }, /* Panther Point AHCI */
266 { PCI_VDEVICE(INTEL, 0x1e04), board_ahci }, /* Panther Point RAID */
267 { PCI_VDEVICE(INTEL, 0x1e05), board_ahci }, /* Panther Point RAID */
268 { PCI_VDEVICE(INTEL, 0x1e06), board_ahci }, /* Panther Point RAID */
269 { PCI_VDEVICE(INTEL, 0x1e07), board_ahci }, /* Panther Point RAID */
263 270
264 /* JMicron 360/1/3/5/6, match class to avoid IDE function */ 271 /* JMicron 360/1/3/5/6, match class to avoid IDE function */
265 { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, 272 { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
@@ -379,7 +386,13 @@ static const struct pci_device_id ahci_pci_tbl[] = {
379 { PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv }, /* 6145 */ 386 { PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv }, /* 6145 */
380 { PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv }, /* 6121 */ 387 { PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv }, /* 6121 */
381 { PCI_DEVICE(0x1b4b, 0x9123), 388 { PCI_DEVICE(0x1b4b, 0x9123),
389 .class = PCI_CLASS_STORAGE_SATA_AHCI,
390 .class_mask = 0xffffff,
382 .driver_data = board_ahci_yes_fbs }, /* 88se9128 */ 391 .driver_data = board_ahci_yes_fbs }, /* 88se9128 */
392 { PCI_DEVICE(0x1b4b, 0x9125),
393 .driver_data = board_ahci_yes_fbs }, /* 88se9125 */
394 { PCI_DEVICE(0x1b4b, 0x91a3),
395 .driver_data = board_ahci_yes_fbs },
383 396
384 /* Promise */ 397 /* Promise */
385 { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */ 398 { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */
@@ -919,7 +932,7 @@ static bool ahci_broken_suspend(struct pci_dev *pdev)
919 /* 932 /*
920 * Acer eMachines G725 has the same problem. BIOS 933 * Acer eMachines G725 has the same problem. BIOS
921 * V1.03 is known to be broken. V3.04 is known to 934 * V1.03 is known to be broken. V3.04 is known to
922 * work. Inbetween, there are V1.06, V2.06 and V3.03 935 * work. Between, there are V1.06, V2.06 and V3.03
923 * that we don't have much idea about. For now, 936 * that we don't have much idea about. For now,
924 * blacklist anything older than V3.04. 937 * blacklist anything older than V3.04.
925 * 938 *
@@ -1208,9 +1221,6 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1208 ata_port_pbar_desc(ap, AHCI_PCI_BAR, 1221 ata_port_pbar_desc(ap, AHCI_PCI_BAR,
1209 0x100 + ap->port_no * 0x80, "port"); 1222 0x100 + ap->port_no * 0x80, "port");
1210 1223
1211 /* set initial link pm policy */
1212 ap->pm_policy = NOT_AVAILABLE;
1213
1214 /* set enclosure management message type */ 1224 /* set enclosure management message type */
1215 if (ap->flags & ATA_FLAG_EM) 1225 if (ap->flags & ATA_FLAG_EM)
1216 ap->em_message_type = hpriv->em_msg_type; 1226 ap->em_message_type = hpriv->em_msg_type;
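
[Editorial note] In the PCI ID hunk above, the existing 88se9128 entry gains .class and .class_mask qualifiers. Matching on the full 24-bit class code restricts ahci to the function that actually reports the AHCI programming interface, presumably to keep the driver off any companion function of these Marvell parts that is not in AHCI mode. The same entry re-quoted with explanatory comments (the comments are editorial, not from the patch):

	{ PCI_DEVICE(0x1b4b, 0x9123),
	  .class = PCI_CLASS_STORAGE_SATA_AHCI,	/* 0x010601: SATA, AHCI 1.0 interface */
	  .class_mask = 0xffffff,		/* compare all 24 class-code bits */
	  .driver_data = board_ahci_yes_fbs },	/* 88se9128 */
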
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index e5fdeebf9ef0..12c5282e7fca 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -72,6 +72,7 @@ enum {
72 AHCI_CMD_RESET = (1 << 8), 72 AHCI_CMD_RESET = (1 << 8),
73 AHCI_CMD_CLR_BUSY = (1 << 10), 73 AHCI_CMD_CLR_BUSY = (1 << 10),
74 74
75 RX_FIS_PIO_SETUP = 0x20, /* offset of PIO Setup FIS data */
75 RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */ 76 RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */
76 RX_FIS_SDB = 0x58, /* offset of SDB FIS data */ 77 RX_FIS_SDB = 0x58, /* offset of SDB FIS data */
77 RX_FIS_UNK = 0x60, /* offset of Unknown FIS data */ 78 RX_FIS_UNK = 0x60, /* offset of Unknown FIS data */
@@ -201,7 +202,6 @@ enum {
201 AHCI_HFLAG_MV_PATA = (1 << 4), /* PATA port */ 202 AHCI_HFLAG_MV_PATA = (1 << 4), /* PATA port */
202 AHCI_HFLAG_NO_MSI = (1 << 5), /* no PCI MSI */ 203 AHCI_HFLAG_NO_MSI = (1 << 5), /* no PCI MSI */
203 AHCI_HFLAG_NO_PMP = (1 << 6), /* no PMP */ 204 AHCI_HFLAG_NO_PMP = (1 << 6), /* no PMP */
204 AHCI_HFLAG_NO_HOTPLUG = (1 << 7), /* ignore PxSERR.DIAG.N */
205 AHCI_HFLAG_SECT255 = (1 << 8), /* max 255 sectors */ 205 AHCI_HFLAG_SECT255 = (1 << 8), /* max 255 sectors */
206 AHCI_HFLAG_YES_NCQ = (1 << 9), /* force NCQ cap on */ 206 AHCI_HFLAG_YES_NCQ = (1 << 9), /* force NCQ cap on */
207 AHCI_HFLAG_NO_SUSPEND = (1 << 10), /* don't suspend */ 207 AHCI_HFLAG_NO_SUSPEND = (1 << 10), /* don't suspend */
@@ -213,10 +213,8 @@ enum {
213 213
214 /* ap->flags bits */ 214 /* ap->flags bits */
215 215
216 AHCI_FLAG_COMMON = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 216 AHCI_FLAG_COMMON = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
217 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA | 217 ATA_FLAG_ACPI_SATA | ATA_FLAG_AN,
218 ATA_FLAG_ACPI_SATA | ATA_FLAG_AN |
219 ATA_FLAG_IPM,
220 218
221 ICH_MAP = 0x90, /* ICH MAP register */ 219 ICH_MAP = 0x90, /* ICH MAP register */
222 220
@@ -227,10 +225,14 @@ enum {
227 /* em_ctl bits */ 225 /* em_ctl bits */
228 EM_CTL_RST = (1 << 9), /* Reset */ 226 EM_CTL_RST = (1 << 9), /* Reset */
229 EM_CTL_TM = (1 << 8), /* Transmit Message */ 227 EM_CTL_TM = (1 << 8), /* Transmit Message */
230 EM_CTL_MR = (1 << 0), /* Message Recieved */ 228 EM_CTL_MR = (1 << 0), /* Message Received */
231 EM_CTL_ALHD = (1 << 26), /* Activity LED */ 229 EM_CTL_ALHD = (1 << 26), /* Activity LED */
232 EM_CTL_XMT = (1 << 25), /* Transmit Only */ 230 EM_CTL_XMT = (1 << 25), /* Transmit Only */
233 EM_CTL_SMB = (1 << 24), /* Single Message Buffer */ 231 EM_CTL_SMB = (1 << 24), /* Single Message Buffer */
232 EM_CTL_SGPIO = (1 << 19), /* SGPIO messages supported */
233 EM_CTL_SES = (1 << 18), /* SES-2 messages supported */
234 EM_CTL_SAFTE = (1 << 17), /* SAF-TE messages supported */
235 EM_CTL_LED = (1 << 16), /* LED messages supported */
234 236
235 /* em message type */ 237 /* em message type */
236 EM_MSG_TYPE_LED = (1 << 0), /* LED */ 238 EM_MSG_TYPE_LED = (1 << 0), /* LED */
@@ -283,7 +285,7 @@ struct ahci_port_priv {
283}; 285};
284 286
285struct ahci_host_priv { 287struct ahci_host_priv {
286 void __iomem * mmio; /* bus-independant mem map */ 288 void __iomem * mmio; /* bus-independent mem map */
287 unsigned int flags; /* AHCI_HFLAG_* */ 289 unsigned int flags; /* AHCI_HFLAG_* */
288 u32 cap; /* cap to use */ 290 u32 cap; /* cap to use */
289 u32 cap2; /* cap2 to use */ 291 u32 cap2; /* cap2 to use */
@@ -311,6 +313,8 @@ extern struct device_attribute *ahci_sdev_attrs[];
311 313
312extern struct ata_port_operations ahci_ops; 314extern struct ata_port_operations ahci_ops;
313 315
316void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
317 u32 opts);
314void ahci_save_initial_config(struct device *dev, 318void ahci_save_initial_config(struct device *dev,
315 struct ahci_host_priv *hpriv, 319 struct ahci_host_priv *hpriv,
316 unsigned int force_port_map, 320 unsigned int force_port_map,
@@ -326,6 +330,7 @@ int ahci_stop_engine(struct ata_port *ap);
326void ahci_start_engine(struct ata_port *ap); 330void ahci_start_engine(struct ata_port *ap);
327int ahci_check_ready(struct ata_link *link); 331int ahci_check_ready(struct ata_link *link);
328int ahci_kick_engine(struct ata_port *ap); 332int ahci_kick_engine(struct ata_port *ap);
333int ahci_port_resume(struct ata_port *ap);
329void ahci_set_em_messages(struct ahci_host_priv *hpriv, 334void ahci_set_em_messages(struct ahci_host_priv *hpriv,
330 struct ata_port_info *pi); 335 struct ata_port_info *pi);
331int ahci_reset_em(struct ata_host *host); 336int ahci_reset_em(struct ata_host *host);
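
[Editorial note] The new RX_FIS_PIO_SETUP offset slots into the received-FIS area layout defined by the AHCI specification; acard_ahci_qc_fill_rtf() earlier in this patch relies on it to recover the result taskfile from the PIO Setup FIS after a PIO data-in command, where the device sends no D2H Register FIS. A comment-only sketch of that per-port area, with an illustrative struct name (offsets per AHCI 1.x, matching the RX_FIS_* constants in ahci.h):

struct ahci_rx_fis_area {	/* illustrative layout, not from the patch */
	u8 dsfis[0x1c];		/* 0x00: DMA Setup FIS */
	u8 rsv0[0x04];
	u8 psfis[0x14];		/* 0x20: PIO Setup FIS (RX_FIS_PIO_SETUP); byte 15
				 * carries E_Status, which is why qc_fill_rtf reads
				 * (rx_fis + RX_FIS_PIO_SETUP)[15] for the command byte */
	u8 rsv1[0x0c];
	u8 rfis[0x14];		/* 0x40: D2H Register FIS (RX_FIS_D2H_REG) */
	u8 rsv2[0x04];
	u8 sdbfis[0x08];	/* 0x58: Set Device Bits FIS (RX_FIS_SDB) */
	u8 ufis[0x40];		/* 0x60: Unknown FIS (RX_FIS_UNK) */
};
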
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index 84b643270e7a..6fef1fa75c54 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -129,9 +129,6 @@ static int __init ahci_probe(struct platform_device *pdev)
129 ata_port_desc(ap, "mmio %pR", mem); 129 ata_port_desc(ap, "mmio %pR", mem);
130 ata_port_desc(ap, "port 0x%x", 0x100 + ap->port_no * 0x80); 130 ata_port_desc(ap, "port 0x%x", 0x100 + ap->port_no * 0x80);
131 131
132 /* set initial link pm policy */
133 ap->pm_policy = NOT_AVAILABLE;
134
135 /* set enclosure management message type */ 132 /* set enclosure management message type */
136 if (ap->flags & ATA_FLAG_EM) 133 if (ap->flags & ATA_FLAG_EM)
137 ap->em_message_type = hpriv->em_msg_type; 134 ap->em_message_type = hpriv->em_msg_type;
diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c
index cc5f7726bde7..721d38bfa339 100644
--- a/drivers/ata/ata_generic.c
+++ b/drivers/ata/ata_generic.c
@@ -35,6 +35,7 @@
35enum { 35enum {
36 ATA_GEN_CLASS_MATCH = (1 << 0), 36 ATA_GEN_CLASS_MATCH = (1 << 0),
37 ATA_GEN_FORCE_DMA = (1 << 1), 37 ATA_GEN_FORCE_DMA = (1 << 1),
38 ATA_GEN_INTEL_IDER = (1 << 2),
38}; 39};
39 40
40/** 41/**
@@ -109,6 +110,49 @@ static struct ata_port_operations generic_port_ops = {
109static int all_generic_ide; /* Set to claim all devices */ 110static int all_generic_ide; /* Set to claim all devices */
110 111
111/** 112/**
113 * is_intel_ider - identify intel IDE-R devices
114 * @dev: PCI device
115 *
116 * Distinguish Intel IDE-R controller devices from other Intel IDE
117 * devices. IDE-R devices have no timing registers and are in
118 * most respects virtual. They should be driven by the ata_generic
119 * driver.
120 *
121 * IDE-R devices have PCI offset 0xF8.L as zero, later Intel ATA has
122 * it non zero. All Intel ATA has 0x40 writable (timing), but it is
123 * not writable on IDE-R devices (this is guaranteed).
124 */
125
126static int is_intel_ider(struct pci_dev *dev)
127{
128 /* For Intel IDE the value at 0xF8 is only zero on IDE-R
129 interfaces */
130 u32 r;
131 u16 t;
132
133 /* Check the manufacturing ID, it will be zero for IDE-R */
134 pci_read_config_dword(dev, 0xF8, &r);
135 /* Not IDE-R: punt so that ata_(old)piix gets it */
136 if (r != 0)
137 return 0;
138 /* 0xF8 will also be zero on some early Intel IDE devices
139 but they will have a sane timing register */
140 pci_read_config_word(dev, 0x40, &t);
141 if (t != 0)
142 return 0;
143 /* Finally check if the timing register is writable so that
144 we eliminate any early devices hot-docked in a docking
145 station */
146 pci_write_config_word(dev, 0x40, 1);
147 pci_read_config_word(dev, 0x40, &t);
148 if (t) {
149 pci_write_config_word(dev, 0x40, 0);
150 return 0;
151 }
152 return 1;
153}
154
155/**
112 * ata_generic_init - attach generic IDE 156 * ata_generic_init - attach generic IDE
113 * @dev: PCI device found 157 * @dev: PCI device found
114 * @id: match entry 158 * @id: match entry
@@ -134,6 +178,10 @@ static int ata_generic_init_one(struct pci_dev *dev, const struct pci_device_id
134 if ((id->driver_data & ATA_GEN_CLASS_MATCH) && all_generic_ide == 0) 178 if ((id->driver_data & ATA_GEN_CLASS_MATCH) && all_generic_ide == 0)
135 return -ENODEV; 179 return -ENODEV;
136 180
181 if (id->driver_data & ATA_GEN_INTEL_IDER)
182 if (!is_intel_ider(dev))
183 return -ENODEV;
184
137 /* Devices that need care */ 185 /* Devices that need care */
138 if (dev->vendor == PCI_VENDOR_ID_UMC && 186 if (dev->vendor == PCI_VENDOR_ID_UMC &&
139 dev->device == PCI_DEVICE_ID_UMC_UM8886A && 187 dev->device == PCI_DEVICE_ID_UMC_UM8886A &&
@@ -186,7 +234,11 @@ static struct pci_device_id ata_generic[] = {
186 { PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_2), }, 234 { PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_2), },
187 { PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_3), }, 235 { PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_3), },
188 { PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_5), }, 236 { PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_5), },
189#endif 237#endif
238 /* Intel, IDE class device */
239 { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
240 PCI_CLASS_STORAGE_IDE << 8, 0xFFFFFF00UL,
241 .driver_data = ATA_GEN_INTEL_IDER },
190 /* Must come last. If you add entries adjust this table appropriately */ 242 /* Must come last. If you add entries adjust this table appropriately */
191 { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_IDE << 8, 0xFFFFFF00UL), 243 { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_IDE << 8, 0xFFFFFF00UL),
192 .driver_data = ATA_GEN_CLASS_MATCH }, 244 .driver_data = ATA_GEN_CLASS_MATCH },
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index d712675d0a96..6f6e7718b05c 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -38,16 +38,16 @@
38 * Hardware documentation available at http://developer.intel.com/ 38 * Hardware documentation available at http://developer.intel.com/
39 * 39 *
40 * Documentation 40 * Documentation
41 * Publically available from Intel web site. Errata documentation 41 * Publicly available from Intel web site. Errata documentation
42 * is also publically available. As an aide to anyone hacking on this 42 * is also publicly available. As an aide to anyone hacking on this
43 * driver the list of errata that are relevant is below, going back to 43 * driver the list of errata that are relevant is below, going back to
44 * PIIX4. Older device documentation is now a bit tricky to find. 44 * PIIX4. Older device documentation is now a bit tricky to find.
45 * 45 *
46 * The chipsets all follow very much the same design. The original Triton 46 * The chipsets all follow very much the same design. The original Triton
47 * series chipsets do _not_ support independant device timings, but this 47 * series chipsets do _not_ support independent device timings, but this
48 * is fixed in Triton II. With the odd mobile exception the chips then 48 * is fixed in Triton II. With the odd mobile exception the chips then
49 * change little except in gaining more modes until SATA arrives. This 49 * change little except in gaining more modes until SATA arrives. This
50 * driver supports only the chips with independant timing (that is those 50 * driver supports only the chips with independent timing (that is those
51 * with SITRE and the 0x44 timing register). See pata_oldpiix and pata_mpiix 51 * with SITRE and the 0x44 timing register). See pata_oldpiix and pata_mpiix
52 * for the early chip drivers. 52 * for the early chip drivers.
53 * 53 *
@@ -122,7 +122,7 @@ enum {
122 P2 = 2, /* port 2 */ 122 P2 = 2, /* port 2 */
123 P3 = 3, /* port 3 */ 123 P3 = 3, /* port 3 */
124 IDE = -1, /* IDE */ 124 IDE = -1, /* IDE */
125 NA = -2, /* not avaliable */ 125 NA = -2, /* not available */
126 RV = -3, /* reserved */ 126 RV = -3, /* reserved */
127 127
128 PIIX_AHCI_DEVICE = 6, 128 PIIX_AHCI_DEVICE = 6,
@@ -158,7 +158,6 @@ struct piix_map_db {
158struct piix_host_priv { 158struct piix_host_priv {
159 const int *map; 159 const int *map;
160 u32 saved_iocfg; 160 u32 saved_iocfg;
161 spinlock_t sidpr_lock; /* FIXME: remove once locking in EH is fixed */
162 void __iomem *sidpr; 161 void __iomem *sidpr;
163}; 162};
164 163
@@ -175,6 +174,8 @@ static int piix_sidpr_scr_read(struct ata_link *link,
175 unsigned int reg, u32 *val); 174 unsigned int reg, u32 *val);
176static int piix_sidpr_scr_write(struct ata_link *link, 175static int piix_sidpr_scr_write(struct ata_link *link,
177 unsigned int reg, u32 val); 176 unsigned int reg, u32 val);
177static int piix_sidpr_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
178 unsigned hints);
178static bool piix_irq_check(struct ata_port *ap); 179static bool piix_irq_check(struct ata_port *ap);
179#ifdef CONFIG_PM 180#ifdef CONFIG_PM
180static int piix_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg); 181static int piix_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
@@ -209,6 +210,8 @@ static const struct pci_device_id piix_pci_tbl[] = {
209 { 0x8086, 0x248A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 }, 210 { 0x8086, 0x248A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
210 /* Intel ICH3 (E7500/1) UDMA 100 */ 211 /* Intel ICH3 (E7500/1) UDMA 100 */
211 { 0x8086, 0x248B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 }, 212 { 0x8086, 0x248B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
213 /* Intel ICH4-L */
214 { 0x8086, 0x24C1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
212 /* Intel ICH4 (i845GV, i845E, i852, i855) UDMA 100 */ 215 /* Intel ICH4 (i845GV, i845E, i852, i855) UDMA 100 */
213 { 0x8086, 0x24CA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 }, 216 { 0x8086, 0x24CA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
214 { 0x8086, 0x24CB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 }, 217 { 0x8086, 0x24CB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
@@ -227,7 +230,7 @@ static const struct pci_device_id piix_pci_tbl[] = {
227 { 0x8086, 0x2850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 }, 230 { 0x8086, 0x2850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
228 231
229 /* SATA ports */ 232 /* SATA ports */
230 233
231 /* 82801EB (ICH5) */ 234 /* 82801EB (ICH5) */
232 { 0x8086, 0x24d1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata }, 235 { 0x8086, 0x24d1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata },
233 /* 82801EB (ICH5) */ 236 /* 82801EB (ICH5) */
@@ -306,6 +309,14 @@ static const struct pci_device_id piix_pci_tbl[] = {
306 { 0x8086, 0x1d00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata }, 309 { 0x8086, 0x1d00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
307 /* SATA Controller IDE (PBG) */ 310 /* SATA Controller IDE (PBG) */
308 { 0x8086, 0x1d08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, 311 { 0x8086, 0x1d08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
312 /* SATA Controller IDE (Panther Point) */
313 { 0x8086, 0x1e00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
314 /* SATA Controller IDE (Panther Point) */
315 { 0x8086, 0x1e01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
316 /* SATA Controller IDE (Panther Point) */
317 { 0x8086, 0x1e08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
318 /* SATA Controller IDE (Panther Point) */
319 { 0x8086, 0x1e09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
309 { } /* terminate list */ 320 { } /* terminate list */
310}; 321};
311 322
@@ -348,11 +359,22 @@ static struct ata_port_operations ich_pata_ops = {
348 .set_dmamode = ich_set_dmamode, 359 .set_dmamode = ich_set_dmamode,
349}; 360};
350 361
362static struct device_attribute *piix_sidpr_shost_attrs[] = {
363 &dev_attr_link_power_management_policy,
364 NULL
365};
366
367static struct scsi_host_template piix_sidpr_sht = {
368 ATA_BMDMA_SHT(DRV_NAME),
369 .shost_attrs = piix_sidpr_shost_attrs,
370};
371
351static struct ata_port_operations piix_sidpr_sata_ops = { 372static struct ata_port_operations piix_sidpr_sata_ops = {
352 .inherits = &piix_sata_ops, 373 .inherits = &piix_sata_ops,
353 .hardreset = sata_std_hardreset, 374 .hardreset = sata_std_hardreset,
354 .scr_read = piix_sidpr_scr_read, 375 .scr_read = piix_sidpr_scr_read,
355 .scr_write = piix_sidpr_scr_write, 376 .scr_write = piix_sidpr_scr_write,
377 .set_lpm = piix_sidpr_set_lpm,
356}; 378};
357 379
358static const struct piix_map_db ich5_map_db = { 380static const struct piix_map_db ich5_map_db = {
@@ -956,15 +978,12 @@ static int piix_sidpr_scr_read(struct ata_link *link,
956 unsigned int reg, u32 *val) 978 unsigned int reg, u32 *val)
957{ 979{
958 struct piix_host_priv *hpriv = link->ap->host->private_data; 980 struct piix_host_priv *hpriv = link->ap->host->private_data;
959 unsigned long flags;
960 981
961 if (reg >= ARRAY_SIZE(piix_sidx_map)) 982 if (reg >= ARRAY_SIZE(piix_sidx_map))
962 return -EINVAL; 983 return -EINVAL;
963 984
964 spin_lock_irqsave(&hpriv->sidpr_lock, flags);
965 piix_sidpr_sel(link, reg); 985 piix_sidpr_sel(link, reg);
966 *val = ioread32(hpriv->sidpr + PIIX_SIDPR_DATA); 986 *val = ioread32(hpriv->sidpr + PIIX_SIDPR_DATA);
967 spin_unlock_irqrestore(&hpriv->sidpr_lock, flags);
968 return 0; 987 return 0;
969} 988}
970 989
@@ -972,18 +991,21 @@ static int piix_sidpr_scr_write(struct ata_link *link,
972 unsigned int reg, u32 val) 991 unsigned int reg, u32 val)
973{ 992{
974 struct piix_host_priv *hpriv = link->ap->host->private_data; 993 struct piix_host_priv *hpriv = link->ap->host->private_data;
975 unsigned long flags;
976 994
977 if (reg >= ARRAY_SIZE(piix_sidx_map)) 995 if (reg >= ARRAY_SIZE(piix_sidx_map))
978 return -EINVAL; 996 return -EINVAL;
979 997
980 spin_lock_irqsave(&hpriv->sidpr_lock, flags);
981 piix_sidpr_sel(link, reg); 998 piix_sidpr_sel(link, reg);
982 iowrite32(val, hpriv->sidpr + PIIX_SIDPR_DATA); 999 iowrite32(val, hpriv->sidpr + PIIX_SIDPR_DATA);
983 spin_unlock_irqrestore(&hpriv->sidpr_lock, flags);
984 return 0; 1000 return 0;
985} 1001}
986 1002
1003static int piix_sidpr_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
1004 unsigned hints)
1005{
1006 return sata_link_scr_lpm(link, policy, false);
1007}
1008
987static bool piix_irq_check(struct ata_port *ap) 1009static bool piix_irq_check(struct ata_port *ap)
988{ 1010{
989 if (unlikely(!ap->ioaddr.bmdma_addr)) 1011 if (unlikely(!ap->ioaddr.bmdma_addr))
@@ -1543,6 +1565,7 @@ static int __devinit piix_init_one(struct pci_dev *pdev,
1543 struct device *dev = &pdev->dev; 1565 struct device *dev = &pdev->dev;
1544 struct ata_port_info port_info[2]; 1566 struct ata_port_info port_info[2];
1545 const struct ata_port_info *ppi[] = { &port_info[0], &port_info[1] }; 1567 const struct ata_port_info *ppi[] = { &port_info[0], &port_info[1] };
1568 struct scsi_host_template *sht = &piix_sht;
1546 unsigned long port_flags; 1569 unsigned long port_flags;
1547 struct ata_host *host; 1570 struct ata_host *host;
1548 struct piix_host_priv *hpriv; 1571 struct piix_host_priv *hpriv;
@@ -1577,7 +1600,6 @@ static int __devinit piix_init_one(struct pci_dev *pdev,
1577 hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL); 1600 hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
1578 if (!hpriv) 1601 if (!hpriv)
1579 return -ENOMEM; 1602 return -ENOMEM;
1580 spin_lock_init(&hpriv->sidpr_lock);
1581 1603
1582 /* Save IOCFG, this will be used for cable detection, quirk 1604 /* Save IOCFG, this will be used for cable detection, quirk
1583 * detection and restoration on detach. This is necessary 1605 * detection and restoration on detach. This is necessary
@@ -1612,6 +1634,8 @@ static int __devinit piix_init_one(struct pci_dev *pdev,
1612 rc = piix_init_sidpr(host); 1634 rc = piix_init_sidpr(host);
1613 if (rc) 1635 if (rc)
1614 return rc; 1636 return rc;
1637 if (host->ports[0]->ops == &piix_sidpr_sata_ops)
1638 sht = &piix_sidpr_sht;
1615 } 1639 }
1616 1640
1617 /* apply IOCFG bit18 quirk */ 1641 /* apply IOCFG bit18 quirk */
@@ -1638,7 +1662,7 @@ static int __devinit piix_init_one(struct pci_dev *pdev,
1638 host->flags |= ATA_HOST_PARALLEL_SCAN; 1662 host->flags |= ATA_HOST_PARALLEL_SCAN;
1639 1663
1640 pci_set_master(pdev); 1664 pci_set_master(pdev);
1641 return ata_pci_sff_activate_host(host, ata_bmdma_interrupt, &piix_sht); 1665 return ata_pci_sff_activate_host(host, ata_bmdma_interrupt, sht);
1642} 1666}
1643 1667
1644static void piix_remove_one(struct pci_dev *pdev) 1668static void piix_remove_one(struct pci_dev *pdev)
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 8eea309ea212..41223c7f0206 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -56,9 +56,8 @@ MODULE_PARM_DESC(skip_host_reset, "skip global host reset (0=don't skip, 1=skip)
56module_param_named(ignore_sss, ahci_ignore_sss, int, 0444); 56module_param_named(ignore_sss, ahci_ignore_sss, int, 0444);
57MODULE_PARM_DESC(ignore_sss, "Ignore staggered spinup flag (0=don't ignore, 1=ignore)"); 57MODULE_PARM_DESC(ignore_sss, "Ignore staggered spinup flag (0=don't ignore, 1=ignore)");
58 58
59static int ahci_enable_alpm(struct ata_port *ap, 59static int ahci_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
60 enum link_pm policy); 60 unsigned hints);
61static void ahci_disable_alpm(struct ata_port *ap);
62static ssize_t ahci_led_show(struct ata_port *ap, char *buf); 61static ssize_t ahci_led_show(struct ata_port *ap, char *buf);
63static ssize_t ahci_led_store(struct ata_port *ap, const char *buf, 62static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
64 size_t size); 63 size_t size);
@@ -88,10 +87,7 @@ static int ahci_hardreset(struct ata_link *link, unsigned int *class,
88static void ahci_postreset(struct ata_link *link, unsigned int *class); 87static void ahci_postreset(struct ata_link *link, unsigned int *class);
89static void ahci_error_handler(struct ata_port *ap); 88static void ahci_error_handler(struct ata_port *ap);
90static void ahci_post_internal_cmd(struct ata_queued_cmd *qc); 89static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
91static int ahci_port_resume(struct ata_port *ap);
92static void ahci_dev_config(struct ata_device *dev); 90static void ahci_dev_config(struct ata_device *dev);
93static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
94 u32 opts);
95#ifdef CONFIG_PM 91#ifdef CONFIG_PM
96static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg); 92static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg);
97#endif 93#endif
@@ -113,6 +109,8 @@ static ssize_t ahci_read_em_buffer(struct device *dev,
113static ssize_t ahci_store_em_buffer(struct device *dev, 109static ssize_t ahci_store_em_buffer(struct device *dev,
114 struct device_attribute *attr, 110 struct device_attribute *attr,
115 const char *buf, size_t size); 111 const char *buf, size_t size);
112static ssize_t ahci_show_em_supported(struct device *dev,
113 struct device_attribute *attr, char *buf);
116 114
117static DEVICE_ATTR(ahci_host_caps, S_IRUGO, ahci_show_host_caps, NULL); 115static DEVICE_ATTR(ahci_host_caps, S_IRUGO, ahci_show_host_caps, NULL);
118static DEVICE_ATTR(ahci_host_cap2, S_IRUGO, ahci_show_host_cap2, NULL); 116static DEVICE_ATTR(ahci_host_cap2, S_IRUGO, ahci_show_host_cap2, NULL);
@@ -120,6 +118,7 @@ static DEVICE_ATTR(ahci_host_version, S_IRUGO, ahci_show_host_version, NULL);
120static DEVICE_ATTR(ahci_port_cmd, S_IRUGO, ahci_show_port_cmd, NULL); 118static DEVICE_ATTR(ahci_port_cmd, S_IRUGO, ahci_show_port_cmd, NULL);
121static DEVICE_ATTR(em_buffer, S_IWUSR | S_IRUGO, 119static DEVICE_ATTR(em_buffer, S_IWUSR | S_IRUGO,
122 ahci_read_em_buffer, ahci_store_em_buffer); 120 ahci_read_em_buffer, ahci_store_em_buffer);
121static DEVICE_ATTR(em_message_supported, S_IRUGO, ahci_show_em_supported, NULL);
123 122
124struct device_attribute *ahci_shost_attrs[] = { 123struct device_attribute *ahci_shost_attrs[] = {
125 &dev_attr_link_power_management_policy, 124 &dev_attr_link_power_management_policy,
@@ -130,6 +129,7 @@ struct device_attribute *ahci_shost_attrs[] = {
130 &dev_attr_ahci_host_version, 129 &dev_attr_ahci_host_version,
131 &dev_attr_ahci_port_cmd, 130 &dev_attr_ahci_port_cmd,
132 &dev_attr_em_buffer, 131 &dev_attr_em_buffer,
132 &dev_attr_em_message_supported,
133 NULL 133 NULL
134}; 134};
135EXPORT_SYMBOL_GPL(ahci_shost_attrs); 135EXPORT_SYMBOL_GPL(ahci_shost_attrs);
@@ -164,8 +164,7 @@ struct ata_port_operations ahci_ops = {
164 .pmp_attach = ahci_pmp_attach, 164 .pmp_attach = ahci_pmp_attach,
165 .pmp_detach = ahci_pmp_detach, 165 .pmp_detach = ahci_pmp_detach,
166 166
167 .enable_pm = ahci_enable_alpm, 167 .set_lpm = ahci_set_lpm,
168 .disable_pm = ahci_disable_alpm,
169 .em_show = ahci_led_show, 168 .em_show = ahci_led_show,
170 .em_store = ahci_led_store, 169 .em_store = ahci_led_store,
171 .sw_activity_show = ahci_activity_show, 170 .sw_activity_show = ahci_activity_show,
@@ -348,6 +347,24 @@ static ssize_t ahci_store_em_buffer(struct device *dev,
348 return size; 347 return size;
349} 348}
350 349
350static ssize_t ahci_show_em_supported(struct device *dev,
351 struct device_attribute *attr, char *buf)
352{
353 struct Scsi_Host *shost = class_to_shost(dev);
354 struct ata_port *ap = ata_shost_to_port(shost);
355 struct ahci_host_priv *hpriv = ap->host->private_data;
356 void __iomem *mmio = hpriv->mmio;
357 u32 em_ctl;
358
359 em_ctl = readl(mmio + HOST_EM_CTL);
360
361 return sprintf(buf, "%s%s%s%s\n",
362 em_ctl & EM_CTL_LED ? "led " : "",
363 em_ctl & EM_CTL_SAFTE ? "saf-te " : "",
364 em_ctl & EM_CTL_SES ? "ses-2 " : "",
365 em_ctl & EM_CTL_SGPIO ? "sgpio " : "");
366}
367
351/** 368/**
352 * ahci_save_initial_config - Save and fixup initial config values 369 * ahci_save_initial_config - Save and fixup initial config values
353 * @dev: target AHCI device 370 * @dev: target AHCI device
@@ -435,7 +452,7 @@ void ahci_save_initial_config(struct device *dev,
435 } 452 }
436 453
437 if (mask_port_map) { 454 if (mask_port_map) {
438 dev_printk(KERN_ERR, dev, "masking port_map 0x%x -> 0x%x\n", 455 dev_printk(KERN_WARNING, dev, "masking port_map 0x%x -> 0x%x\n",
439 port_map, 456 port_map,
440 port_map & mask_port_map); 457 port_map & mask_port_map);
441 port_map &= mask_port_map; 458 port_map &= mask_port_map;
@@ -569,7 +586,7 @@ int ahci_stop_engine(struct ata_port *ap)
569 writel(tmp, port_mmio + PORT_CMD); 586 writel(tmp, port_mmio + PORT_CMD);
570 587
571 /* wait for engine to stop. This could be as long as 500 msec */ 588 /* wait for engine to stop. This could be as long as 500 msec */
572 tmp = ata_wait_register(port_mmio + PORT_CMD, 589 tmp = ata_wait_register(ap, port_mmio + PORT_CMD,
573 PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500); 590 PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500);
574 if (tmp & PORT_CMD_LIST_ON) 591 if (tmp & PORT_CMD_LIST_ON)
575 return -EIO; 592 return -EIO;
@@ -616,7 +633,7 @@ static int ahci_stop_fis_rx(struct ata_port *ap)
616 writel(tmp, port_mmio + PORT_CMD); 633 writel(tmp, port_mmio + PORT_CMD);
617 634
618 /* wait for completion, spec says 500ms, give it 1000 */ 635 /* wait for completion, spec says 500ms, give it 1000 */
619 tmp = ata_wait_register(port_mmio + PORT_CMD, PORT_CMD_FIS_ON, 636 tmp = ata_wait_register(ap, port_mmio + PORT_CMD, PORT_CMD_FIS_ON,
620 PORT_CMD_FIS_ON, 10, 1000); 637 PORT_CMD_FIS_ON, 10, 1000);
621 if (tmp & PORT_CMD_FIS_ON) 638 if (tmp & PORT_CMD_FIS_ON)
622 return -EBUSY; 639 return -EBUSY;
@@ -642,127 +659,56 @@ static void ahci_power_up(struct ata_port *ap)
642 writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD); 659 writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD);
643} 660}
644 661
645static void ahci_disable_alpm(struct ata_port *ap) 662static int ahci_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
663 unsigned int hints)
646{ 664{
665 struct ata_port *ap = link->ap;
647 struct ahci_host_priv *hpriv = ap->host->private_data; 666 struct ahci_host_priv *hpriv = ap->host->private_data;
648 void __iomem *port_mmio = ahci_port_base(ap);
649 u32 cmd;
650 struct ahci_port_priv *pp = ap->private_data; 667 struct ahci_port_priv *pp = ap->private_data;
651
652 /* IPM bits should be disabled by libata-core */
653 /* get the existing command bits */
654 cmd = readl(port_mmio + PORT_CMD);
655
656 /* disable ALPM and ASP */
657 cmd &= ~PORT_CMD_ASP;
658 cmd &= ~PORT_CMD_ALPE;
659
660 /* force the interface back to active */
661 cmd |= PORT_CMD_ICC_ACTIVE;
662
663 /* write out new cmd value */
664 writel(cmd, port_mmio + PORT_CMD);
665 cmd = readl(port_mmio + PORT_CMD);
666
667 /* wait 10ms to be sure we've come out of any low power state */
668 msleep(10);
669
670 /* clear out any PhyRdy stuff from interrupt status */
671 writel(PORT_IRQ_PHYRDY, port_mmio + PORT_IRQ_STAT);
672
673 /* go ahead and clean out PhyRdy Change from Serror too */
674 ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));
675
676 /*
677 * Clear flag to indicate that we should ignore all PhyRdy
678 * state changes
679 */
680 hpriv->flags &= ~AHCI_HFLAG_NO_HOTPLUG;
681
682 /*
683 * Enable interrupts on Phy Ready.
684 */
685 pp->intr_mask |= PORT_IRQ_PHYRDY;
686 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
687
688 /*
689 * don't change the link pm policy - we can be called
690 * just to turn of link pm temporarily
691 */
692}
693
694static int ahci_enable_alpm(struct ata_port *ap,
695 enum link_pm policy)
696{
697 struct ahci_host_priv *hpriv = ap->host->private_data;
698 void __iomem *port_mmio = ahci_port_base(ap); 668 void __iomem *port_mmio = ahci_port_base(ap);
699 u32 cmd;
700 struct ahci_port_priv *pp = ap->private_data;
701 u32 asp;
702 669
703 /* Make sure the host is capable of link power management */ 670 if (policy != ATA_LPM_MAX_POWER) {
704 if (!(hpriv->cap & HOST_CAP_ALPM))
705 return -EINVAL;
706
707 switch (policy) {
708 case MAX_PERFORMANCE:
709 case NOT_AVAILABLE:
710 /* 671 /*
711 * if we came here with NOT_AVAILABLE, 672 * Disable interrupts on Phy Ready. This keeps us from
712 * it just means this is the first time we 673 * getting woken up due to spurious phy ready
713 * have tried to enable - default to max performance, 674 * interrupts.
714 * and let the user go to lower power modes on request.
715 */ 675 */
716 ahci_disable_alpm(ap); 676 pp->intr_mask &= ~PORT_IRQ_PHYRDY;
717 return 0; 677 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
718 case MIN_POWER: 678
719 /* configure HBA to enter SLUMBER */ 679 sata_link_scr_lpm(link, policy, false);
720 asp = PORT_CMD_ASP;
721 break;
722 case MEDIUM_POWER:
723 /* configure HBA to enter PARTIAL */
724 asp = 0;
725 break;
726 default:
727 return -EINVAL;
728 } 680 }
729 681
730 /* 682 if (hpriv->cap & HOST_CAP_ALPM) {
731 * Disable interrupts on Phy Ready. This keeps us from 683 u32 cmd = readl(port_mmio + PORT_CMD);
732 * getting woken up due to spurious phy ready interrupts
733 * TBD - Hot plug should be done via polling now, is
734 * that even supported?
735 */
736 pp->intr_mask &= ~PORT_IRQ_PHYRDY;
737 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
738 684
739 /* 685 if (policy == ATA_LPM_MAX_POWER || !(hints & ATA_LPM_HIPM)) {
740 * Set a flag to indicate that we should ignore all PhyRdy 686 cmd &= ~(PORT_CMD_ASP | PORT_CMD_ALPE);
741 * state changes since these can happen now whenever we 687 cmd |= PORT_CMD_ICC_ACTIVE;
742 * change link state
743 */
744 hpriv->flags |= AHCI_HFLAG_NO_HOTPLUG;
745 688
746 /* get the existing command bits */ 689 writel(cmd, port_mmio + PORT_CMD);
747 cmd = readl(port_mmio + PORT_CMD); 690 readl(port_mmio + PORT_CMD);
748 691
749 /* 692 /* wait 10ms to be sure we've come out of LPM state */
750 * Set ASP based on Policy 693 ata_msleep(ap, 10);
751 */ 694 } else {
752 cmd |= asp; 695 cmd |= PORT_CMD_ALPE;
696 if (policy == ATA_LPM_MIN_POWER)
697 cmd |= PORT_CMD_ASP;
753 698
754 /* 699 /* write out new cmd value */
755 * Setting this bit will instruct the HBA to aggressively 700 writel(cmd, port_mmio + PORT_CMD);
756 * enter a lower power link state when it's appropriate and 701 }
757 * based on the value set above for ASP 702 }
758 */
759 cmd |= PORT_CMD_ALPE;
760 703
761 /* write out new cmd value */ 704 if (policy == ATA_LPM_MAX_POWER) {
762 writel(cmd, port_mmio + PORT_CMD); 705 sata_link_scr_lpm(link, policy, false);
763 cmd = readl(port_mmio + PORT_CMD); 706
707 /* turn PHYRDY IRQ back on */
708 pp->intr_mask |= PORT_IRQ_PHYRDY;
709 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
710 }
764 711
765 /* IPM bits should be set by libata-core */
766 return 0; 712 return 0;
767} 713}
768 714
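
The two ALPM hooks are folded into the single ->set_lpm() callback wired up in ahci_ops above; it receives the requested ata_lpm_policy plus ATA_LPM_* hints and leaves the SControl-level work to sata_link_scr_lpm() (added later in this series in libata-core.c). Purely to show how the pieces connect, here is a hedged sketch of a minimal driver-side callback for a controller that needs no host-side LPM programming of its own; the foo_* names are invented and nothing below is taken from the patch itself.

	/* sketch: minimal ->set_lpm() that only drives the link's SControl IPM
	 * bits; a controller with real host-side LPM support would also touch
	 * its own registers here, the way ahci_set_lpm() above does. */
	static int foo_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
			       unsigned int hints)
	{
		return sata_link_scr_lpm(link, policy, false);
	}

	static struct ata_port_operations foo_ops = {
		.inherits	= &sata_port_ops,
		.set_lpm	= foo_set_lpm,
	};
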
@@ -813,7 +759,7 @@ static void ahci_start_port(struct ata_port *ap)
813 emp->led_state, 759 emp->led_state,
814 4); 760 4);
815 if (rc == -EBUSY) 761 if (rc == -EBUSY)
816 msleep(1); 762 ata_msleep(ap, 1);
817 else 763 else
818 break; 764 break;
819 } 765 }
@@ -872,7 +818,7 @@ int ahci_reset_controller(struct ata_host *host)
872 * reset must complete within 1 second, or 818 * reset must complete within 1 second, or
873 * the hardware should be considered fried. 819 * the hardware should be considered fried.
874 */ 820 */
875 tmp = ata_wait_register(mmio + HOST_CTL, HOST_RESET, 821 tmp = ata_wait_register(NULL, mmio + HOST_CTL, HOST_RESET,
876 HOST_RESET, 10, 1000); 822 HOST_RESET, 10, 1000);
877 823
878 if (tmp & HOST_RESET) { 824 if (tmp & HOST_RESET) {
@@ -1206,8 +1152,8 @@ static unsigned int ahci_dev_classify(struct ata_port *ap)
1206 return ata_dev_classify(&tf); 1152 return ata_dev_classify(&tf);
1207} 1153}
1208 1154
1209static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag, 1155void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
1210 u32 opts) 1156 u32 opts)
1211{ 1157{
1212 dma_addr_t cmd_tbl_dma; 1158 dma_addr_t cmd_tbl_dma;
1213 1159
@@ -1218,6 +1164,7 @@ static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
1218 pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff); 1164 pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff);
1219 pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16); 1165 pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
1220} 1166}
1167EXPORT_SYMBOL_GPL(ahci_fill_cmd_slot);
1221 1168
1222int ahci_kick_engine(struct ata_port *ap) 1169int ahci_kick_engine(struct ata_port *ap)
1223{ 1170{
@@ -1252,7 +1199,7 @@ int ahci_kick_engine(struct ata_port *ap)
1252 writel(tmp, port_mmio + PORT_CMD); 1199 writel(tmp, port_mmio + PORT_CMD);
1253 1200
1254 rc = 0; 1201 rc = 0;
1255 tmp = ata_wait_register(port_mmio + PORT_CMD, 1202 tmp = ata_wait_register(ap, port_mmio + PORT_CMD,
1256 PORT_CMD_CLO, PORT_CMD_CLO, 1, 500); 1203 PORT_CMD_CLO, PORT_CMD_CLO, 1, 500);
1257 if (tmp & PORT_CMD_CLO) 1204 if (tmp & PORT_CMD_CLO)
1258 rc = -EIO; 1205 rc = -EIO;
@@ -1282,8 +1229,8 @@ static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
1282 writel(1, port_mmio + PORT_CMD_ISSUE); 1229 writel(1, port_mmio + PORT_CMD_ISSUE);
1283 1230
1284 if (timeout_msec) { 1231 if (timeout_msec) {
1285 tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1, 1232 tmp = ata_wait_register(ap, port_mmio + PORT_CMD_ISSUE,
1286 1, timeout_msec); 1233 0x1, 0x1, 1, timeout_msec);
1287 if (tmp & 0x1) { 1234 if (tmp & 0x1) {
1288 ahci_kick_engine(ap); 1235 ahci_kick_engine(ap);
1289 return -EBUSY; 1236 return -EBUSY;
@@ -1330,7 +1277,7 @@ int ahci_do_softreset(struct ata_link *link, unsigned int *class,
1330 } 1277 }
1331 1278
1332 /* spec says at least 5us, but be generous and sleep for 1ms */ 1279 /* spec says at least 5us, but be generous and sleep for 1ms */
1333 msleep(1); 1280 ata_msleep(ap, 1);
1334 1281
1335 /* issue the second D2H Register FIS */ 1282 /* issue the second D2H Register FIS */
1336 tf.ctl &= ~ATA_SRST; 1283 tf.ctl &= ~ATA_SRST;
@@ -1660,15 +1607,10 @@ static void ahci_port_intr(struct ata_port *ap)
1660 if (unlikely(resetting)) 1607 if (unlikely(resetting))
1661 status &= ~PORT_IRQ_BAD_PMP; 1608 status &= ~PORT_IRQ_BAD_PMP;
1662 1609
1663 /* If we are getting PhyRdy, this is 1610 /* if LPM is enabled, PHYRDY doesn't mean anything */
1664 * just a power state change, we should 1611 if (ap->link.lpm_policy > ATA_LPM_MAX_POWER) {
1665 * clear out this, plus the PhyRdy/Comm
1666 * Wake bits from Serror
1667 */
1668 if ((hpriv->flags & AHCI_HFLAG_NO_HOTPLUG) &&
1669 (status & PORT_IRQ_PHYRDY)) {
1670 status &= ~PORT_IRQ_PHYRDY; 1612 status &= ~PORT_IRQ_PHYRDY;
1671 ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18))); 1613 ahci_scr_write(&ap->link, SCR_ERROR, SERR_PHYRDY_CHG);
1672 } 1614 }
1673 1615
1674 if (unlikely(status & PORT_IRQ_ERROR)) { 1616 if (unlikely(status & PORT_IRQ_ERROR)) {
@@ -1830,12 +1772,24 @@ static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
1830static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc) 1772static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
1831{ 1773{
1832 struct ahci_port_priv *pp = qc->ap->private_data; 1774 struct ahci_port_priv *pp = qc->ap->private_data;
1833 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG; 1775 u8 *rx_fis = pp->rx_fis;
1834 1776
1835 if (pp->fbs_enabled) 1777 if (pp->fbs_enabled)
1836 d2h_fis += qc->dev->link->pmp * AHCI_RX_FIS_SZ; 1778 rx_fis += qc->dev->link->pmp * AHCI_RX_FIS_SZ;
1779
1780 /*
1781 * After a successful execution of an ATA PIO data-in command,
1782 * the device doesn't send D2H Reg FIS to update the TF and
1783 * the host should take TF and E_Status from the preceding PIO
1784 * Setup FIS.
1785 */
1786 if (qc->tf.protocol == ATA_PROT_PIO && qc->dma_dir == DMA_FROM_DEVICE &&
1787 !(qc->flags & ATA_QCFLAG_FAILED)) {
1788 ata_tf_from_fis(rx_fis + RX_FIS_PIO_SETUP, &qc->result_tf);
1789 qc->result_tf.command = (rx_fis + RX_FIS_PIO_SETUP)[15];
1790 } else
1791 ata_tf_from_fis(rx_fis + RX_FIS_D2H_REG, &qc->result_tf);
1837 1792
1838 ata_tf_from_fis(d2h_fis, &qc->result_tf);
1839 return true; 1793 return true;
1840} 1794}
1841 1795
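
The ahci_qc_fill_rtf() change above leans on the AHCI receive-FIS area: after a successful PIO data-in command the device sends no D2H Register FIS, so the ending status has to be lifted from the PIO Setup FIS instead. As a reminder of the layout the code assumes (offsets per the AHCI spec; the real RX_FIS_PIO_SETUP and RX_FIS_D2H_REG macros in ahci.h remain the authority, the names below are illustrative):

	/* illustration only: per-device 256-byte receive-FIS area layout */
	enum {
		FOO_RX_DSFIS	= 0x00,	/* DMA Setup FIS       */
		FOO_RX_PSFIS	= 0x20,	/* PIO Setup FIS       */
		FOO_RX_RFIS	= 0x40,	/* D2H Register FIS    */
		FOO_RX_SDBFIS	= 0x58,	/* Set Device Bits FIS */
	};

	/* byte 15 of the PIO Setup FIS carries E_Status, which is what the
	 * hunk above copies into result_tf.command for PIO data-in */
	static u8 foo_pio_e_status(const u8 *rx_fis)
	{
		return rx_fis[FOO_RX_PSFIS + 15];
	}
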
@@ -1965,7 +1919,17 @@ static void ahci_pmp_attach(struct ata_port *ap)
1965 ahci_enable_fbs(ap); 1919 ahci_enable_fbs(ap);
1966 1920
1967 pp->intr_mask |= PORT_IRQ_BAD_PMP; 1921 pp->intr_mask |= PORT_IRQ_BAD_PMP;
1968 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK); 1922
1923 /*
1924 * We must not change the port interrupt mask register if the
1925 * port is marked frozen, the value in pp->intr_mask will be
1926 * restored later when the port is thawed.
1927 *
1928 * Note that during initialization, the port is marked as
1929 * frozen since the irq handler is not yet registered.
1930 */
1931 if (!(ap->pflags & ATA_PFLAG_FROZEN))
1932 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
1969} 1933}
1970 1934
1971static void ahci_pmp_detach(struct ata_port *ap) 1935static void ahci_pmp_detach(struct ata_port *ap)
@@ -1981,10 +1945,13 @@ static void ahci_pmp_detach(struct ata_port *ap)
1981 writel(cmd, port_mmio + PORT_CMD); 1945 writel(cmd, port_mmio + PORT_CMD);
1982 1946
1983 pp->intr_mask &= ~PORT_IRQ_BAD_PMP; 1947 pp->intr_mask &= ~PORT_IRQ_BAD_PMP;
1984 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK); 1948
1949 /* see comment above in ahci_pmp_attach() */
1950 if (!(ap->pflags & ATA_PFLAG_FROZEN))
1951 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
1985} 1952}
1986 1953
1987static int ahci_port_resume(struct ata_port *ap) 1954int ahci_port_resume(struct ata_port *ap)
1988{ 1955{
1989 ahci_power_up(ap); 1956 ahci_power_up(ap);
1990 ahci_start_port(ap); 1957 ahci_start_port(ap);
@@ -1996,6 +1963,7 @@ static int ahci_port_resume(struct ata_port *ap)
1996 1963
1997 return 0; 1964 return 0;
1998} 1965}
1966EXPORT_SYMBOL_GPL(ahci_port_resume);
1999 1967
2000#ifdef CONFIG_PM 1968#ifdef CONFIG_PM
2001static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg) 1969static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
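
ahci_fill_cmd_slot() and ahci_port_resume() lose their file-local status here so AHCI-derived drivers built on libahci can reuse the command-slot bookkeeping while supplying their own command tables. A rough sketch of such a caller, assuming the usual AHCI option-word encoding (command-FIS length in DWORDs, PRDT entry count in bits 16-31, PMP number in bits 12-15); the foo_* names are placeholders, not code from this series.

	/* sketch: a derived driver's qc_prep delegating slot setup to libahci */
	static void foo_qc_prep(struct ata_queued_cmd *qc)
	{
		struct ahci_port_priv *pp = qc->ap->private_data;
		u32 opts;

		foo_fill_cmd_table(qc);			/* driver-specific CFIS + PRDT */

		opts = 5;				/* H2D Register FIS, 5 DWORDs */
		opts |= qc->n_elem << 16;		/* PRDT entries */
		opts |= qc->dev->link->pmp << 12;	/* PMP routing */
		if (qc->tf.flags & ATA_TFLAG_WRITE)
			opts |= AHCI_CMD_WRITE;

		ahci_fill_cmd_slot(pp, qc->tag, opts);
	}
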
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index 8b5ea399a4f4..a791b8ce6294 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -660,8 +660,7 @@ static int ata_acpi_filter_tf(struct ata_device *dev,
660 * @dev: target ATA device 660 * @dev: target ATA device
661 * @gtf: raw ATA taskfile register set (0x1f1 - 0x1f7) 661 * @gtf: raw ATA taskfile register set (0x1f1 - 0x1f7)
662 * 662 *
663 * Outputs ATA taskfile to standard ATA host controller using MMIO 663 * Outputs ATA taskfile to standard ATA host controller.
664 * or PIO as indicated by the ATA_FLAG_MMIO flag.
665 * Writes the control, feature, nsect, lbal, lbam, and lbah registers. 664 * Writes the control, feature, nsect, lbal, lbam, and lbah registers.
666 * Optionally (ATA_TFLAG_LBA48) writes hob_feature, hob_nsect, 665 * Optionally (ATA_TFLAG_LBA48) writes hob_feature, hob_nsect,
667 * hob_lbal, hob_lbam, and hob_lbah. 666 * hob_lbal, hob_lbam, and hob_lbah.
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 932eaee50245..000d03ae6653 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -68,7 +68,7 @@
68#include <linux/ratelimit.h> 68#include <linux/ratelimit.h>
69 69
70#include "libata.h" 70#include "libata.h"
71 71#include "libata-transport.h"
72 72
73/* debounce timing parameters in msecs { interval, duration, timeout } */ 73/* debounce timing parameters in msecs { interval, duration, timeout } */
74const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 }; 74const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
@@ -91,8 +91,6 @@ const struct ata_port_operations sata_port_ops = {
91static unsigned int ata_dev_init_params(struct ata_device *dev, 91static unsigned int ata_dev_init_params(struct ata_device *dev,
92 u16 heads, u16 sectors); 92 u16 heads, u16 sectors);
93static unsigned int ata_dev_set_xfermode(struct ata_device *dev); 93static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
94static unsigned int ata_dev_set_feature(struct ata_device *dev,
95 u8 enable, u8 feature);
96static void ata_dev_xfermask(struct ata_device *dev); 94static void ata_dev_xfermask(struct ata_device *dev);
97static unsigned long ata_dev_blacklisted(const struct ata_device *dev); 95static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
98 96
@@ -1017,7 +1015,7 @@ const char *ata_mode_string(unsigned long xfer_mask)
1017 return "<n/a>"; 1015 return "<n/a>";
1018} 1016}
1019 1017
1020static const char *sata_spd_string(unsigned int spd) 1018const char *sata_spd_string(unsigned int spd)
1021{ 1019{
1022 static const char * const spd_str[] = { 1020 static const char * const spd_str[] = {
1023 "1.5 Gbps", 1021 "1.5 Gbps",
@@ -1030,182 +1028,6 @@ static const char *sata_spd_string(unsigned int spd)
1030 return spd_str[spd - 1]; 1028 return spd_str[spd - 1];
1031} 1029}
1032 1030
1033static int ata_dev_set_dipm(struct ata_device *dev, enum link_pm policy)
1034{
1035 struct ata_link *link = dev->link;
1036 struct ata_port *ap = link->ap;
1037 u32 scontrol;
1038 unsigned int err_mask;
1039 int rc;
1040
1041 /*
1042 * disallow DIPM for drivers which haven't set
1043 * ATA_FLAG_IPM. This is because when DIPM is enabled,
1044 * phy ready will be set in the interrupt status on
1045 * state changes, which will cause some drivers to
1046 * think there are errors - additionally drivers will
1047 * need to disable hot plug.
1048 */
1049 if (!(ap->flags & ATA_FLAG_IPM) || !ata_dev_enabled(dev)) {
1050 ap->pm_policy = NOT_AVAILABLE;
1051 return -EINVAL;
1052 }
1053
1054 /*
1055 * For DIPM, we will only enable it for the
1056 * min_power setting.
1057 *
1058 * Why? Because Disks are too stupid to know that
1059 * If the host rejects a request to go to SLUMBER
1060 * they should retry at PARTIAL, and instead it
1061 * just would give up. So, for medium_power to
1062 * work at all, we need to only allow HIPM.
1063 */
1064 rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
1065 if (rc)
1066 return rc;
1067
1068 switch (policy) {
1069 case MIN_POWER:
1070 /* no restrictions on IPM transitions */
1071 scontrol &= ~(0x3 << 8);
1072 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
1073 if (rc)
1074 return rc;
1075
1076 /* enable DIPM */
1077 if (dev->flags & ATA_DFLAG_DIPM)
1078 err_mask = ata_dev_set_feature(dev,
1079 SETFEATURES_SATA_ENABLE, SATA_DIPM);
1080 break;
1081 case MEDIUM_POWER:
1082 /* allow IPM to PARTIAL */
1083 scontrol &= ~(0x1 << 8);
1084 scontrol |= (0x2 << 8);
1085 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
1086 if (rc)
1087 return rc;
1088
1089 /*
1090 * we don't have to disable DIPM since IPM flags
1091 * disallow transitions to SLUMBER, which effectively
1092 * disable DIPM if it does not support PARTIAL
1093 */
1094 break;
1095 case NOT_AVAILABLE:
1096 case MAX_PERFORMANCE:
1097 /* disable all IPM transitions */
1098 scontrol |= (0x3 << 8);
1099 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
1100 if (rc)
1101 return rc;
1102
1103 /*
1104 * we don't have to disable DIPM since IPM flags
1105 * disallow all transitions which effectively
1106 * disable DIPM anyway.
1107 */
1108 break;
1109 }
1110
1111 /* FIXME: handle SET FEATURES failure */
1112 (void) err_mask;
1113
1114 return 0;
1115}
1116
1117/**
1118 * ata_dev_enable_pm - enable SATA interface power management
1119 * @dev: device to enable power management
1120 * @policy: the link power management policy
1121 *
1122 * Enable SATA Interface power management. This will enable
1123 * Device Interface Power Management (DIPM) for min_power
1124 * policy, and then call driver specific callbacks for
1125 * enabling Host Initiated Power management.
1126 *
1127 * Locking: Caller.
1128 * Returns: -EINVAL if IPM is not supported, 0 otherwise.
1129 */
1130void ata_dev_enable_pm(struct ata_device *dev, enum link_pm policy)
1131{
1132 int rc = 0;
1133 struct ata_port *ap = dev->link->ap;
1134
1135 /* set HIPM first, then DIPM */
1136 if (ap->ops->enable_pm)
1137 rc = ap->ops->enable_pm(ap, policy);
1138 if (rc)
1139 goto enable_pm_out;
1140 rc = ata_dev_set_dipm(dev, policy);
1141
1142enable_pm_out:
1143 if (rc)
1144 ap->pm_policy = MAX_PERFORMANCE;
1145 else
1146 ap->pm_policy = policy;
1147 return /* rc */; /* hopefully we can use 'rc' eventually */
1148}
1149
1150#ifdef CONFIG_PM
1151/**
1152 * ata_dev_disable_pm - disable SATA interface power management
1153 * @dev: device to disable power management
1154 *
1155 * Disable SATA Interface power management. This will disable
1156 * Device Interface Power Management (DIPM) without changing
1157 * policy, call driver specific callbacks for disabling Host
1158 * Initiated Power management.
1159 *
1160 * Locking: Caller.
1161 * Returns: void
1162 */
1163static void ata_dev_disable_pm(struct ata_device *dev)
1164{
1165 struct ata_port *ap = dev->link->ap;
1166
1167 ata_dev_set_dipm(dev, MAX_PERFORMANCE);
1168 if (ap->ops->disable_pm)
1169 ap->ops->disable_pm(ap);
1170}
1171#endif /* CONFIG_PM */
1172
1173void ata_lpm_schedule(struct ata_port *ap, enum link_pm policy)
1174{
1175 ap->pm_policy = policy;
1176 ap->link.eh_info.action |= ATA_EH_LPM;
1177 ap->link.eh_info.flags |= ATA_EHI_NO_AUTOPSY;
1178 ata_port_schedule_eh(ap);
1179}
1180
1181#ifdef CONFIG_PM
1182static void ata_lpm_enable(struct ata_host *host)
1183{
1184 struct ata_link *link;
1185 struct ata_port *ap;
1186 struct ata_device *dev;
1187 int i;
1188
1189 for (i = 0; i < host->n_ports; i++) {
1190 ap = host->ports[i];
1191 ata_for_each_link(link, ap, EDGE) {
1192 ata_for_each_dev(dev, link, ALL)
1193 ata_dev_disable_pm(dev);
1194 }
1195 }
1196}
1197
1198static void ata_lpm_disable(struct ata_host *host)
1199{
1200 int i;
1201
1202 for (i = 0; i < host->n_ports; i++) {
1203 struct ata_port *ap = host->ports[i];
1204 ata_lpm_schedule(ap, ap->pm_policy);
1205 }
1206}
1207#endif /* CONFIG_PM */
1208
1209/** 1031/**
1210 * ata_dev_classify - determine device type based on ATA-spec signature 1032 * ata_dev_classify - determine device type based on ATA-spec signature
1211 * @tf: ATA taskfile register set for device to be identified 1033 * @tf: ATA taskfile register set for device to be identified
@@ -1806,8 +1628,14 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,
1806 } 1628 }
1807 } 1629 }
1808 1630
1631 if (ap->ops->error_handler)
1632 ata_eh_release(ap);
1633
1809 rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout)); 1634 rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));
1810 1635
1636 if (ap->ops->error_handler)
1637 ata_eh_acquire(ap);
1638
1811 ata_sff_flush_pio_task(ap); 1639 ata_sff_flush_pio_task(ap);
1812 1640
1813 if (!rc) { 1641 if (!rc) {
@@ -2412,7 +2240,7 @@ int ata_dev_configure(struct ata_device *dev)
2412 if (id[ATA_ID_CFA_KEY_MGMT] & 1) 2240 if (id[ATA_ID_CFA_KEY_MGMT] & 1)
2413 ata_dev_printk(dev, KERN_WARNING, 2241 ata_dev_printk(dev, KERN_WARNING,
2414 "supports DRM functions and may " 2242 "supports DRM functions and may "
2415 "not be fully accessable.\n"); 2243 "not be fully accessible.\n");
2416 snprintf(revbuf, 7, "CFA"); 2244 snprintf(revbuf, 7, "CFA");
2417 } else { 2245 } else {
2418 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id)); 2246 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
@@ -2420,7 +2248,7 @@ int ata_dev_configure(struct ata_device *dev)
2420 if (ata_id_has_tpm(id)) 2248 if (ata_id_has_tpm(id))
2421 ata_dev_printk(dev, KERN_WARNING, 2249 ata_dev_printk(dev, KERN_WARNING,
2422 "supports DRM functions and may " 2250 "supports DRM functions and may "
2423 "not be fully accessable.\n"); 2251 "not be fully accessible.\n");
2424 } 2252 }
2425 2253
2426 dev->n_sectors = ata_id_n_sectors(id); 2254 dev->n_sectors = ata_id_n_sectors(id);
@@ -2564,13 +2392,6 @@ int ata_dev_configure(struct ata_device *dev)
2564 if (dev->flags & ATA_DFLAG_LBA48) 2392 if (dev->flags & ATA_DFLAG_LBA48)
2565 dev->max_sectors = ATA_MAX_SECTORS_LBA48; 2393 dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2566 2394
2567 if (!(dev->horkage & ATA_HORKAGE_IPM)) {
2568 if (ata_id_has_hipm(dev->id))
2569 dev->flags |= ATA_DFLAG_HIPM;
2570 if (ata_id_has_dipm(dev->id))
2571 dev->flags |= ATA_DFLAG_DIPM;
2572 }
2573
2574 /* Limit PATA drive on SATA cable bridge transfers to udma5, 2395 /* Limit PATA drive on SATA cable bridge transfers to udma5,
2575 200 sectors */ 2396 200 sectors */
2576 if (ata_dev_knobble(dev)) { 2397 if (ata_dev_knobble(dev)) {
@@ -2591,13 +2412,6 @@ int ata_dev_configure(struct ata_device *dev)
2591 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128, 2412 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2592 dev->max_sectors); 2413 dev->max_sectors);
2593 2414
2594 if (ata_dev_blacklisted(dev) & ATA_HORKAGE_IPM) {
2595 dev->horkage |= ATA_HORKAGE_IPM;
2596
2597 /* reset link pm_policy for this port to no pm */
2598 ap->pm_policy = MAX_PERFORMANCE;
2599 }
2600
2601 if (ap->ops->dev_config) 2415 if (ap->ops->dev_config)
2602 ap->ops->dev_config(dev); 2416 ap->ops->dev_config(dev);
2603 2417
@@ -3596,7 +3410,7 @@ int ata_wait_ready(struct ata_link *link, unsigned long deadline,
3596 warned = 1; 3410 warned = 1;
3597 } 3411 }
3598 3412
3599 msleep(50); 3413 ata_msleep(link->ap, 50);
3600 } 3414 }
3601} 3415}
3602 3416
@@ -3617,7 +3431,7 @@ int ata_wait_ready(struct ata_link *link, unsigned long deadline,
3617int ata_wait_after_reset(struct ata_link *link, unsigned long deadline, 3431int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
3618 int (*check_ready)(struct ata_link *link)) 3432 int (*check_ready)(struct ata_link *link))
3619{ 3433{
3620 msleep(ATA_WAIT_AFTER_RESET); 3434 ata_msleep(link->ap, ATA_WAIT_AFTER_RESET);
3621 3435
3622 return ata_wait_ready(link, deadline, check_ready); 3436 return ata_wait_ready(link, deadline, check_ready);
3623} 3437}
@@ -3628,7 +3442,7 @@ int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
3628 * @params: timing parameters { interval, duration, timeout } in msec 3442 * @params: timing parameters { interval, duration, timeout } in msec
3629 * @deadline: deadline jiffies for the operation 3443 * @deadline: deadline jiffies for the operation
3630 * 3444 *
3631* Make sure SStatus of @link reaches stable state, determined by 3445 * Make sure SStatus of @link reaches stable state, determined by
3632 * holding the same value where DET is not 1 for @duration polled 3446 * holding the same value where DET is not 1 for @duration polled
3633 * every @interval, before @timeout. Timeout constrains the 3447 * every @interval, before @timeout. Timeout constrains the
3634 * beginning of the stable state. Because DET gets stuck at 1 on 3448 * beginning of the stable state. Because DET gets stuck at 1 on
@@ -3665,7 +3479,7 @@ int sata_link_debounce(struct ata_link *link, const unsigned long *params,
3665 last_jiffies = jiffies; 3479 last_jiffies = jiffies;
3666 3480
3667 while (1) { 3481 while (1) {
3668 msleep(interval); 3482 ata_msleep(link->ap, interval);
3669 if ((rc = sata_scr_read(link, SCR_STATUS, &cur))) 3483 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3670 return rc; 3484 return rc;
3671 cur &= 0xf; 3485 cur &= 0xf;
@@ -3730,7 +3544,7 @@ int sata_link_resume(struct ata_link *link, const unsigned long *params,
3730 * immediately after resuming. Delay 200ms before 3544 * immediately after resuming. Delay 200ms before
3731 * debouncing. 3545 * debouncing.
3732 */ 3546 */
3733 msleep(200); 3547 ata_msleep(link->ap, 200);
3734 3548
3735 /* is SControl restored correctly? */ 3549 /* is SControl restored correctly? */
3736 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) 3550 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
@@ -3760,6 +3574,78 @@ int sata_link_resume(struct ata_link *link, const unsigned long *params,
3760} 3574}
3761 3575
3762/** 3576/**
3577 * sata_link_scr_lpm - manipulate SControl IPM and SPM fields
3578 * @link: ATA link to manipulate SControl for
3579 * @policy: LPM policy to configure
3580 * @spm_wakeup: initiate LPM transition to active state
3581 *
3582 * Manipulate the IPM field of the SControl register of @link
3583 * according to @policy. If @policy is ATA_LPM_MAX_POWER and
3584 * @spm_wakeup is %true, the SPM field is manipulated to wake up
3585 * the link. This function also clears PHYRDY_CHG before
3586 * returning.
3587 *
3588 * LOCKING:
3589 * EH context.
3590 *
3591 * RETURNS:
3592 * 0 on success, -errno otherwise.
3593 */
3594int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,
3595 bool spm_wakeup)
3596{
3597 struct ata_eh_context *ehc = &link->eh_context;
3598 bool woken_up = false;
3599 u32 scontrol;
3600 int rc;
3601
3602 rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
3603 if (rc)
3604 return rc;
3605
3606 switch (policy) {
3607 case ATA_LPM_MAX_POWER:
3608 /* disable all LPM transitions */
3609 scontrol |= (0x3 << 8);
3610 /* initiate transition to active state */
3611 if (spm_wakeup) {
3612 scontrol |= (0x4 << 12);
3613 woken_up = true;
3614 }
3615 break;
3616 case ATA_LPM_MED_POWER:
3617 /* allow LPM to PARTIAL */
3618 scontrol &= ~(0x1 << 8);
3619 scontrol |= (0x2 << 8);
3620 break;
3621 case ATA_LPM_MIN_POWER:
3622 if (ata_link_nr_enabled(link) > 0)
3623 /* no restrictions on LPM transitions */
3624 scontrol &= ~(0x3 << 8);
3625 else {
3626 /* empty port, power off */
3627 scontrol &= ~0xf;
3628 scontrol |= (0x1 << 2);
3629 }
3630 break;
3631 default:
3632 WARN_ON(1);
3633 }
3634
3635 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
3636 if (rc)
3637 return rc;
3638
3639 /* give the link time to transit out of LPM state */
3640 if (woken_up)
3641 msleep(10);
3642
3643 /* clear PHYRDY_CHG from SError */
3644 ehc->i.serror &= ~SERR_PHYRDY_CHG;
3645 return sata_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG);
3646}
3647
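
For reference while reading sata_link_scr_lpm() above, the shifts correspond to fields of the SATA SControl register; the summary below is an annotated reminder based on the SControl layout, with illustrative names rather than definitions from this patch.

	/* SControl bits as used by sata_link_scr_lpm() (illustrative names) */
	enum {
		FOO_SCTL_DET		= 0xf << 0,	/* PHY detection/control; 0x4 = offline */
		FOO_SCTL_SPD		= 0xf << 4,	/* speed limit, untouched here          */
		FOO_SCTL_IPM_NO_PARTIAL	= 0x1 << 8,	/* forbid transitions to PARTIAL        */
		FOO_SCTL_IPM_NO_SLUMBER	= 0x2 << 8,	/* forbid transitions to SLUMBER        */
		FOO_SCTL_SPM_ACTIVE	= 0x4 << 12,	/* request transition back to active    */
	};

So ATA_LPM_MAX_POWER sets both IPM bits (and optionally the SPM wakeup), ATA_LPM_MED_POWER forbids only SLUMBER, and ATA_LPM_MIN_POWER either clears both IPM bits or, on an empty port, writes DET = 0x4 to take the PHY offline.
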
3648/**
3763 * ata_std_prereset - prepare for reset 3649 * ata_std_prereset - prepare for reset
3764 * @link: ATA link to be reset 3650 * @link: ATA link to be reset
3765 * @deadline: deadline jiffies for the operation 3651 * @deadline: deadline jiffies for the operation
@@ -3868,7 +3754,7 @@ int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
3868 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1 3754 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
3869 * 10.4.2 says at least 1 ms. 3755 * 10.4.2 says at least 1 ms.
3870 */ 3756 */
3871 msleep(1); 3757 ata_msleep(link->ap, 1);
3872 3758
3873 /* bring link back */ 3759 /* bring link back */
3874 rc = sata_link_resume(link, timing, deadline); 3760 rc = sata_link_resume(link, timing, deadline);
@@ -4257,7 +4143,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
4257 * Devices which choke on SETXFER. Applies only if both the 4143 * Devices which choke on SETXFER. Applies only if both the
4258 * device and controller are SATA. 4144 * device and controller are SATA.
4259 */ 4145 */
4260 { "PIONEER DVD-RW DVRTD08", "1.00", ATA_HORKAGE_NOSETXFER }, 4146 { "PIONEER DVD-RW DVRTD08", NULL, ATA_HORKAGE_NOSETXFER },
4147 { "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER },
4148 { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },
4261 4149
4262 /* End Marker */ 4150 /* End Marker */
4263 { } 4151 { }
@@ -4329,7 +4217,7 @@ static int glob_match (const char *text, const char *pattern)
4329 return 0; /* End of both strings: match */ 4217 return 0; /* End of both strings: match */
4330 return 1; /* No match */ 4218 return 1; /* No match */
4331} 4219}
4332 4220
4333static unsigned long ata_dev_blacklisted(const struct ata_device *dev) 4221static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
4334{ 4222{
4335 unsigned char model_num[ATA_ID_PROD_LEN + 1]; 4223 unsigned char model_num[ATA_ID_PROD_LEN + 1];
@@ -4551,6 +4439,7 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
4551 DPRINTK("EXIT, err_mask=%x\n", err_mask); 4439 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4552 return err_mask; 4440 return err_mask;
4553} 4441}
4442
4554/** 4443/**
4555 * ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES 4444 * ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
4556 * @dev: Device to which command will be sent 4445 * @dev: Device to which command will be sent
@@ -4566,8 +4455,7 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
4566 * RETURNS: 4455 * RETURNS:
4567 * 0 on success, AC_ERR_* mask otherwise. 4456 * 0 on success, AC_ERR_* mask otherwise.
4568 */ 4457 */
4569static unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, 4458unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, u8 feature)
4570 u8 feature)
4571{ 4459{
4572 struct ata_taskfile tf; 4460 struct ata_taskfile tf;
4573 unsigned int err_mask; 4461 unsigned int err_mask;
@@ -4927,9 +4815,6 @@ static void ata_verify_xfer(struct ata_queued_cmd *qc)
4927{ 4815{
4928 struct ata_device *dev = qc->dev; 4816 struct ata_device *dev = qc->dev;
4929 4817
4930 if (ata_tag_internal(qc->tag))
4931 return;
4932
4933 if (ata_is_nodata(qc->tf.protocol)) 4818 if (ata_is_nodata(qc->tf.protocol))
4934 return; 4819 return;
4935 4820
@@ -4943,8 +4828,13 @@ static void ata_verify_xfer(struct ata_queued_cmd *qc)
4943 * ata_qc_complete - Complete an active ATA command 4828 * ata_qc_complete - Complete an active ATA command
4944 * @qc: Command to complete 4829 * @qc: Command to complete
4945 * 4830 *
4946 * Indicate to the mid and upper layers that an ATA 4831 * Indicate to the mid and upper layers that an ATA command has
4947 * command has completed, with either an ok or not-ok status. 4832 * completed, with either an ok or not-ok status.
4833 *
4834 * Refrain from calling this function multiple times when
4835 * successfully completing multiple NCQ commands.
4836 * ata_qc_complete_multiple() should be used instead, which will
4837 * properly update IRQ expect state.
4948 * 4838 *
4949 * LOCKING: 4839 * LOCKING:
4950 * spin_lock_irqsave(host lock) 4840 * spin_lock_irqsave(host lock)
@@ -4973,14 +4863,23 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
4973 if (unlikely(qc->err_mask)) 4863 if (unlikely(qc->err_mask))
4974 qc->flags |= ATA_QCFLAG_FAILED; 4864 qc->flags |= ATA_QCFLAG_FAILED;
4975 4865
4976 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) { 4866 /*
4977 /* always fill result TF for failed qc */ 4867 * Finish internal commands without any further processing
4868 * and always with the result TF filled.
4869 */
4870 if (unlikely(ata_tag_internal(qc->tag))) {
4978 fill_result_tf(qc); 4871 fill_result_tf(qc);
4872 __ata_qc_complete(qc);
4873 return;
4874 }
4979 4875
4980 if (!ata_tag_internal(qc->tag)) 4876 /*
4981 ata_qc_schedule_eh(qc); 4877 * Non-internal qc has failed. Fill the result TF and
4982 else 4878 * summon EH.
4983 __ata_qc_complete(qc); 4879 */
4880 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
4881 fill_result_tf(qc);
4882 ata_qc_schedule_eh(qc);
4984 return; 4883 return;
4985 } 4884 }
4986 4885
@@ -5037,6 +4936,10 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
5037 * requests normally. ap->qc_active and @qc_active is compared 4936 * requests normally. ap->qc_active and @qc_active is compared
5038 * and commands are completed accordingly. 4937 * and commands are completed accordingly.
5039 * 4938 *
4939 * Always use this function when completing multiple NCQ commands
4940 * from IRQ handlers instead of calling ata_qc_complete()
4941 * multiple times to keep IRQ expect status properly in sync.
4942 *
5040 * LOCKING: 4943 * LOCKING:
5041 * spin_lock_irqsave(host lock) 4944 * spin_lock_irqsave(host lock)
5042 * 4945 *
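
The added paragraph above is the key usage rule for NCQ-capable drivers: complete a batch of queued commands through ata_qc_complete_multiple() with the controller's view of still-active tags, instead of looping over ata_qc_complete(). A hedged sketch of the call pattern; FOO_ACTIVE_TAGS and the surrounding handler are invented stand-ins for however a given controller reports outstanding commands.

	/* sketch: NCQ completion from an interrupt handler */
	static irqreturn_t foo_interrupt(int irq, void *dev_instance)
	{
		struct ata_port *ap = dev_instance;
		void __iomem *mmio = ap->ioaddr.cmd_addr;	/* stand-in mapping */
		unsigned long flags;
		u32 qc_active;

		spin_lock_irqsave(ap->lock, flags);
		/* bits still set = tags the hardware has not finished yet */
		qc_active = readl(mmio + FOO_ACTIVE_TAGS);
		/* libata completes every command whose bit has cleared */
		ata_qc_complete_multiple(ap, qc_active);
		spin_unlock_irqrestore(ap->lock, flags);

		return IRQ_HANDLED;
	}
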
@@ -5422,12 +5325,6 @@ int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
5422 int rc; 5325 int rc;
5423 5326
5424 /* 5327 /*
5425 * disable link pm on all ports before requesting
5426 * any pm activity
5427 */
5428 ata_lpm_enable(host);
5429
5430 /*
5431 * On some hardware, device fails to respond after spun down 5328 * On some hardware, device fails to respond after spun down
5432 * for suspend. As the device won't be used before being 5329 * for suspend. As the device won't be used before being
5433 * resumed, we don't need to touch the device. Ask EH to skip 5330 * resumed, we don't need to touch the device. Ask EH to skip
@@ -5450,7 +5347,7 @@ int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
5450 * 5347 *
5451 * Resume @host. Actual operation is performed by EH. This 5348 * Resume @host. Actual operation is performed by EH. This
5452 * function requests EH to perform PM operations and returns. 5349 * function requests EH to perform PM operations and returns.
5453 * Note that all resume operations are performed parallely. 5350 * Note that all resume operations are performed parallelly.
5454 * 5351 *
5455 * LOCKING: 5352 * LOCKING:
5456 * Kernel thread context (may sleep). 5353 * Kernel thread context (may sleep).
@@ -5460,9 +5357,6 @@ void ata_host_resume(struct ata_host *host)
5460 ata_host_request_pm(host, PMSG_ON, ATA_EH_RESET, 5357 ata_host_request_pm(host, PMSG_ON, ATA_EH_RESET,
5461 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0); 5358 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
5462 host->dev->power.power_state = PMSG_ON; 5359 host->dev->power.power_state = PMSG_ON;
5463
5464 /* reenable link pm */
5465 ata_lpm_disable(host);
5466} 5360}
5467#endif 5361#endif
5468 5362
@@ -5517,7 +5411,8 @@ void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
5517 int i; 5411 int i;
5518 5412
5519 /* clear everything except for devices */ 5413 /* clear everything except for devices */
5520 memset(link, 0, offsetof(struct ata_link, device[0])); 5414 memset((void *)link + ATA_LINK_CLEAR_BEGIN, 0,
5415 ATA_LINK_CLEAR_END - ATA_LINK_CLEAR_BEGIN);
5521 5416
5522 link->ap = ap; 5417 link->ap = ap;
5523 link->pmp = pmp; 5418 link->pmp = pmp;
@@ -5592,7 +5487,7 @@ struct ata_port *ata_port_alloc(struct ata_host *host)
5592 if (!ap) 5487 if (!ap)
5593 return NULL; 5488 return NULL;
5594 5489
5595 ap->pflags |= ATA_PFLAG_INITIALIZING; 5490 ap->pflags |= ATA_PFLAG_INITIALIZING | ATA_PFLAG_FROZEN;
5596 ap->lock = &host->lock; 5491 ap->lock = &host->lock;
5597 ap->print_id = -1; 5492 ap->print_id = -1;
5598 ap->host = host; 5493 ap->host = host;
@@ -5695,6 +5590,7 @@ struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
5695 dev_set_drvdata(dev, host); 5590 dev_set_drvdata(dev, host);
5696 5591
5697 spin_lock_init(&host->lock); 5592 spin_lock_init(&host->lock);
5593 mutex_init(&host->eh_mutex);
5698 host->dev = dev; 5594 host->dev = dev;
5699 host->n_ports = max_ports; 5595 host->n_ports = max_ports;
5700 5596
@@ -5992,26 +5888,15 @@ void ata_host_init(struct ata_host *host, struct device *dev,
5992 unsigned long flags, struct ata_port_operations *ops) 5888 unsigned long flags, struct ata_port_operations *ops)
5993{ 5889{
5994 spin_lock_init(&host->lock); 5890 spin_lock_init(&host->lock);
5891 mutex_init(&host->eh_mutex);
5995 host->dev = dev; 5892 host->dev = dev;
5996 host->flags = flags; 5893 host->flags = flags;
5997 host->ops = ops; 5894 host->ops = ops;
5998} 5895}
5999 5896
6000 5897int ata_port_probe(struct ata_port *ap)
6001static void async_port_probe(void *data, async_cookie_t cookie)
6002{ 5898{
6003 int rc; 5899 int rc = 0;
6004 struct ata_port *ap = data;
6005
6006 /*
6007 * If we're not allowed to scan this host in parallel,
6008 * we need to wait until all previous scans have completed
6009 * before going further.
6010 * Jeff Garzik says this is only within a controller, so we
6011 * don't need to wait for port 0, only for later ports.
6012 */
6013 if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0)
6014 async_synchronize_cookie(cookie);
6015 5900
6016 /* probe */ 5901 /* probe */
6017 if (ap->ops->error_handler) { 5902 if (ap->ops->error_handler) {
@@ -6022,7 +5907,7 @@ static void async_port_probe(void *data, async_cookie_t cookie)
6022 spin_lock_irqsave(ap->lock, flags); 5907 spin_lock_irqsave(ap->lock, flags);
6023 5908
6024 ehi->probe_mask |= ATA_ALL_DEVICES; 5909 ehi->probe_mask |= ATA_ALL_DEVICES;
6025 ehi->action |= ATA_EH_RESET | ATA_EH_LPM; 5910 ehi->action |= ATA_EH_RESET;
6026 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET; 5911 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
6027 5912
6028 ap->pflags &= ~ATA_PFLAG_INITIALIZING; 5913 ap->pflags &= ~ATA_PFLAG_INITIALIZING;
@@ -6037,23 +5922,33 @@ static void async_port_probe(void *data, async_cookie_t cookie)
6037 DPRINTK("ata%u: bus probe begin\n", ap->print_id); 5922 DPRINTK("ata%u: bus probe begin\n", ap->print_id);
6038 rc = ata_bus_probe(ap); 5923 rc = ata_bus_probe(ap);
6039 DPRINTK("ata%u: bus probe end\n", ap->print_id); 5924 DPRINTK("ata%u: bus probe end\n", ap->print_id);
6040
6041 if (rc) {
6042 /* FIXME: do something useful here?
6043 * Current libata behavior will
6044 * tear down everything when
6045 * the module is removed
6046 * or the h/w is unplugged.
6047 */
6048 }
6049 } 5925 }
5926 return rc;
5927}
5928
5929
5930static void async_port_probe(void *data, async_cookie_t cookie)
5931{
5932 struct ata_port *ap = data;
5933
5934 /*
5935 * If we're not allowed to scan this host in parallel,
5936 * we need to wait until all previous scans have completed
5937 * before going further.
5938 * Jeff Garzik says this is only within a controller, so we
5939 * don't need to wait for port 0, only for later ports.
5940 */
5941 if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0)
5942 async_synchronize_cookie(cookie);
5943
5944 (void)ata_port_probe(ap);
6050 5945
6051 /* in order to keep device order, we need to synchronize at this point */ 5946 /* in order to keep device order, we need to synchronize at this point */
6052 async_synchronize_cookie(cookie); 5947 async_synchronize_cookie(cookie);
6053 5948
6054 ata_scsi_scan_host(ap, 1); 5949 ata_scsi_scan_host(ap, 1);
6055
6056} 5950}
5951
6057/** 5952/**
6058 * ata_host_register - register initialized ATA host 5953 * ata_host_register - register initialized ATA host
6059 * @host: ATA host to register 5954 * @host: ATA host to register
@@ -6093,9 +5988,18 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
6093 for (i = 0; i < host->n_ports; i++) 5988 for (i = 0; i < host->n_ports; i++)
6094 host->ports[i]->print_id = ata_print_id++; 5989 host->ports[i]->print_id = ata_print_id++;
6095 5990
5991
5992 /* Create associated sysfs transport objects */
5993 for (i = 0; i < host->n_ports; i++) {
5994 rc = ata_tport_add(host->dev,host->ports[i]);
5995 if (rc) {
5996 goto err_tadd;
5997 }
5998 }
5999
6096 rc = ata_scsi_add_hosts(host, sht); 6000 rc = ata_scsi_add_hosts(host, sht);
6097 if (rc) 6001 if (rc)
6098 return rc; 6002 goto err_tadd;
6099 6003
6100 /* associate with ACPI nodes */ 6004 /* associate with ACPI nodes */
6101 ata_acpi_associate(host); 6005 ata_acpi_associate(host);
@@ -6136,6 +6040,13 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
6136 } 6040 }
6137 6041
6138 return 0; 6042 return 0;
6043
6044 err_tadd:
6045 while (--i >= 0) {
6046 ata_tport_delete(host->ports[i]);
6047 }
6048 return rc;
6049
6139} 6050}
6140 6051
6141/** 6052/**
@@ -6223,9 +6134,16 @@ static void ata_port_detach(struct ata_port *ap)
6223 /* it better be dead now */ 6134 /* it better be dead now */
6224 WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED)); 6135 WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED));
6225 6136
6226 cancel_rearming_delayed_work(&ap->hotplug_task); 6137 cancel_delayed_work_sync(&ap->hotplug_task);
6227 6138
6228 skip_eh: 6139 skip_eh:
6140 if (ap->pmp_link) {
6141 int i;
6142 for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
6143 ata_tlink_delete(&ap->pmp_link[i]);
6144 }
6145 ata_tport_delete(ap);
6146
6229 /* remove the associated SCSI host */ 6147 /* remove the associated SCSI host */
6230 scsi_remove_host(ap->scsi_host); 6148 scsi_remove_host(ap->scsi_host);
6231} 6149}
@@ -6542,7 +6460,7 @@ static void __init ata_parse_force_param(void)
6542 6460
6543static int __init ata_init(void) 6461static int __init ata_init(void)
6544{ 6462{
6545 int rc = -ENOMEM; 6463 int rc;
6546 6464
6547 ata_parse_force_param(); 6465 ata_parse_force_param();
6548 6466
@@ -6552,12 +6470,25 @@ static int __init ata_init(void)
6552 return rc; 6470 return rc;
6553 } 6471 }
6554 6472
6473 libata_transport_init();
6474 ata_scsi_transport_template = ata_attach_transport();
6475 if (!ata_scsi_transport_template) {
6476 ata_sff_exit();
6477 rc = -ENOMEM;
6478 goto err_out;
6479 }
6480
6555 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n"); 6481 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6556 return 0; 6482 return 0;
6483
6484err_out:
6485 return rc;
6557} 6486}
6558 6487
6559static void __exit ata_exit(void) 6488static void __exit ata_exit(void)
6560{ 6489{
6490 ata_release_transport(ata_scsi_transport_template);
6491 libata_transport_exit();
6561 ata_sff_exit(); 6492 ata_sff_exit();
6562 kfree(ata_force_tbl); 6493 kfree(ata_force_tbl);
6563} 6494}
@@ -6573,7 +6504,35 @@ int ata_ratelimit(void)
6573} 6504}
6574 6505
6575/** 6506/**
6507 * ata_msleep - ATA EH owner aware msleep
6508 * @ap: ATA port to attribute the sleep to
6509 * @msecs: duration to sleep in milliseconds
6510 *
6511 * Sleeps @msecs. If the current task is owner of @ap's EH, the
6512 * ownership is released before going to sleep and reacquired
6513 * after the sleep is complete. IOW, other ports sharing the
6514 * @ap->host will be allowed to own the EH while this task is
6515 * sleeping.
6516 *
6517 * LOCKING:
6518 * Might sleep.
6519 */
6520void ata_msleep(struct ata_port *ap, unsigned int msecs)
6521{
6522 bool owns_eh = ap && ap->host->eh_owner == current;
6523
6524 if (owns_eh)
6525 ata_eh_release(ap);
6526
6527 msleep(msecs);
6528
6529 if (owns_eh)
6530 ata_eh_acquire(ap);
6531}
6532
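
ata_msleep() is what makes the msleep() conversions scattered through this patch meaningful: a sleeping EH owner now yields host->eh_mutex so sibling ports can run their own recovery, then takes it back. A sketch of the intended call-site shape in EH context; FOO_READY and the polling loop are illustrative only, and ata_wait_register() below already wraps this pattern for register polling.

	/* sketch: polling a hypothetical ready bit from EH context */
	static int foo_wait_ready(struct ata_port *ap, void __iomem *reg)
	{
		int tries = 100;

		while (!(readl(reg) & FOO_READY) && --tries)
			ata_msleep(ap, 10);	/* EH-owner aware sleep */

		return tries ? 0 : -EBUSY;
	}
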
6533/**
6576 * ata_wait_register - wait until register value changes 6534 * ata_wait_register - wait until register value changes
6535 * @ap: ATA port to wait register for, can be NULL
6577 * @reg: IO-mapped register 6536 * @reg: IO-mapped register
6578 * @mask: Mask to apply to read register value 6537 * @mask: Mask to apply to read register value
6579 * @val: Wait condition 6538 * @val: Wait condition
@@ -6595,7 +6554,7 @@ int ata_ratelimit(void)
6595 * RETURNS: 6554 * RETURNS:
6596 * The final register value. 6555 * The final register value.
6597 */ 6556 */
6598u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val, 6557u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val,
6599 unsigned long interval, unsigned long timeout) 6558 unsigned long interval, unsigned long timeout)
6600{ 6559{
6601 unsigned long deadline; 6560 unsigned long deadline;
@@ -6610,7 +6569,7 @@ u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
6610 deadline = ata_deadline(jiffies, timeout); 6569 deadline = ata_deadline(jiffies, timeout);
6611 6570
6612 while ((tmp & mask) == val && time_before(jiffies, deadline)) { 6571 while ((tmp & mask) == val && time_before(jiffies, deadline)) {
6613 msleep(interval); 6572 ata_msleep(ap, interval);
6614 tmp = ioread32(reg); 6573 tmp = ioread32(reg);
6615 } 6574 }
6616 6575
@@ -6686,6 +6645,7 @@ EXPORT_SYMBOL_GPL(sata_set_spd);
6686EXPORT_SYMBOL_GPL(ata_wait_after_reset); 6645EXPORT_SYMBOL_GPL(ata_wait_after_reset);
6687EXPORT_SYMBOL_GPL(sata_link_debounce); 6646EXPORT_SYMBOL_GPL(sata_link_debounce);
6688EXPORT_SYMBOL_GPL(sata_link_resume); 6647EXPORT_SYMBOL_GPL(sata_link_resume);
6648EXPORT_SYMBOL_GPL(sata_link_scr_lpm);
6689EXPORT_SYMBOL_GPL(ata_std_prereset); 6649EXPORT_SYMBOL_GPL(ata_std_prereset);
6690EXPORT_SYMBOL_GPL(sata_link_hardreset); 6650EXPORT_SYMBOL_GPL(sata_link_hardreset);
6691EXPORT_SYMBOL_GPL(sata_std_hardreset); 6651EXPORT_SYMBOL_GPL(sata_std_hardreset);
@@ -6693,6 +6653,7 @@ EXPORT_SYMBOL_GPL(ata_std_postreset);
6693EXPORT_SYMBOL_GPL(ata_dev_classify); 6653EXPORT_SYMBOL_GPL(ata_dev_classify);
6694EXPORT_SYMBOL_GPL(ata_dev_pair); 6654EXPORT_SYMBOL_GPL(ata_dev_pair);
6695EXPORT_SYMBOL_GPL(ata_ratelimit); 6655EXPORT_SYMBOL_GPL(ata_ratelimit);
6656EXPORT_SYMBOL_GPL(ata_msleep);
6696EXPORT_SYMBOL_GPL(ata_wait_register); 6657EXPORT_SYMBOL_GPL(ata_wait_register);
6697EXPORT_SYMBOL_GPL(ata_scsi_queuecmd); 6658EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
6698EXPORT_SYMBOL_GPL(ata_scsi_slave_config); 6659EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index e48302eae55f..7f099d6e4e0b 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -57,6 +57,7 @@ enum {
57 /* error flags */ 57 /* error flags */
58 ATA_EFLAG_IS_IO = (1 << 0), 58 ATA_EFLAG_IS_IO = (1 << 0),
59 ATA_EFLAG_DUBIOUS_XFER = (1 << 1), 59 ATA_EFLAG_DUBIOUS_XFER = (1 << 1),
60 ATA_EFLAG_OLD_ER = (1 << 31),
60 61
61 /* error categories */ 62 /* error categories */
62 ATA_ECAT_NONE = 0, 63 ATA_ECAT_NONE = 0,
@@ -396,14 +397,9 @@ static struct ata_ering_entry *ata_ering_top(struct ata_ering *ering)
396 return NULL; 397 return NULL;
397} 398}
398 399
399static void ata_ering_clear(struct ata_ering *ering) 400int ata_ering_map(struct ata_ering *ering,
400{ 401 int (*map_fn)(struct ata_ering_entry *, void *),
401 memset(ering, 0, sizeof(*ering)); 402 void *arg)
402}
403
404static int ata_ering_map(struct ata_ering *ering,
405 int (*map_fn)(struct ata_ering_entry *, void *),
406 void *arg)
407{ 403{
408 int idx, rc = 0; 404 int idx, rc = 0;
409 struct ata_ering_entry *ent; 405 struct ata_ering_entry *ent;
@@ -422,6 +418,17 @@ static int ata_ering_map(struct ata_ering *ering,
422 return rc; 418 return rc;
423} 419}
424 420
421int ata_ering_clear_cb(struct ata_ering_entry *ent, void *void_arg)
422{
423 ent->eflags |= ATA_EFLAG_OLD_ER;
424 return 0;
425}
426
427static void ata_ering_clear(struct ata_ering *ering)
428{
429 ata_ering_map(ering, ata_ering_clear_cb, NULL);
430}
431
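
ata_ering_clear() no longer erases the ring; it maps ata_ering_clear_cb() over it so stale records are merely tagged ATA_EFLAG_OLD_ER and skipped by later verdict passes (see the speed_down_verdict_cb() change further down), while the history itself survives for inspection. The same map-with-callback shape works for ad-hoc queries; a purely hypothetical example that counts recent I/O errors:

	/* hypothetical callback, shown only to illustrate ata_ering_map() use */
	static int foo_count_io_errors(struct ata_ering_entry *ent, void *arg)
	{
		int *count = arg;

		if (!(ent->eflags & ATA_EFLAG_OLD_ER) &&
		    (ent->eflags & ATA_EFLAG_IS_IO))
			(*count)++;
		return 0;	/* keep walking the ring */
	}

	/* usage: int n = 0; ata_ering_map(&dev->ering, foo_count_io_errors, &n); */
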
425static unsigned int ata_eh_dev_action(struct ata_device *dev) 432static unsigned int ata_eh_dev_action(struct ata_device *dev)
426{ 433{
427 struct ata_eh_context *ehc = &dev->link->eh_context; 434 struct ata_eh_context *ehc = &dev->link->eh_context;
@@ -456,6 +463,41 @@ static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev,
456} 463}
457 464
458/** 465/**
466 * ata_eh_acquire - acquire EH ownership
467 * @ap: ATA port to acquire EH ownership for
468 *
469 * Acquire EH ownership for @ap. This is the basic exclusion
470 * mechanism for ports sharing a host. Only one port hanging off
471 * the same host can claim the ownership of EH.
472 *
473 * LOCKING:
474 * EH context.
475 */
476void ata_eh_acquire(struct ata_port *ap)
477{
478 mutex_lock(&ap->host->eh_mutex);
479 WARN_ON_ONCE(ap->host->eh_owner);
480 ap->host->eh_owner = current;
481}
482
483/**
484 * ata_eh_release - release EH ownership
485 * @ap: ATA port to release EH ownership for
486 *
487 * Release EH ownership for @ap. The caller must
488 * have acquired EH ownership using ata_eh_acquire() previously.
489 *
490 * LOCKING:
491 * EH context.
492 */
493void ata_eh_release(struct ata_port *ap)
494{
495 WARN_ON_ONCE(ap->host->eh_owner != current);
496 ap->host->eh_owner = NULL;
497 mutex_unlock(&ap->host->eh_mutex);
498}
499
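
These two helpers implement a host-wide hand-off: only the eh_owner may run EH, and anything that sleeps for a long time is expected to drop ownership and retake it, exactly as the ata_exec_internal_sg() hunk earlier releases EH around wait_for_completion() and as ata_msleep() does in libata-core.c. The skeleton of that pattern, as a sketch rather than a copy of any one call site:

	/* sketch: yield EH ownership across a potentially long wait */
	static void foo_eh_wait(struct ata_port *ap, struct completion *done)
	{
		if (ap->ops->error_handler)	/* new-style EH only */
			ata_eh_release(ap);

		wait_for_completion(done);	/* may sleep for seconds */

		if (ap->ops->error_handler)
			ata_eh_acquire(ap);
	}
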
500/**
459 * ata_scsi_timed_out - SCSI layer time out callback 501 * ata_scsi_timed_out - SCSI layer time out callback
460 * @cmd: timed out SCSI command 502 * @cmd: timed out SCSI command
461 * 503 *
@@ -545,11 +587,43 @@ static void ata_eh_unload(struct ata_port *ap)
545void ata_scsi_error(struct Scsi_Host *host) 587void ata_scsi_error(struct Scsi_Host *host)
546{ 588{
547 struct ata_port *ap = ata_shost_to_port(host); 589 struct ata_port *ap = ata_shost_to_port(host);
548 int i;
549 unsigned long flags; 590 unsigned long flags;
591 LIST_HEAD(eh_work_q);
550 592
551 DPRINTK("ENTER\n"); 593 DPRINTK("ENTER\n");
552 594
595 spin_lock_irqsave(host->host_lock, flags);
596 list_splice_init(&host->eh_cmd_q, &eh_work_q);
597 spin_unlock_irqrestore(host->host_lock, flags);
598
599 ata_scsi_cmd_error_handler(host, ap, &eh_work_q);
600
601 /* If we raced with normal completion and there is nothing to
602 recover (nr_timedout == 0), why exactly are we doing error recovery? */
603 ata_scsi_port_error_handler(host, ap);
604
605 /* finish or retry handled scmd's and clean up */
606 WARN_ON(host->host_failed || !list_empty(&eh_work_q));
607
608 DPRINTK("EXIT\n");
609}
610
611/**
612 * ata_scsi_cmd_error_handler - error callback for a list of commands
613 * @host: scsi host containing the port
614 * @ap: ATA port within the host
615 * @eh_work_q: list of commands to process
616 *
617 * process the given list of commands and return those finished to the
618 * ap->eh_done_q. This function is the first part of the libata error
619 * handler which processes a given list of failed commands.
620 */
621void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap,
622 struct list_head *eh_work_q)
623{
624 int i;
625 unsigned long flags;
626
553 /* make sure sff pio task is not running */ 627 /* make sure sff pio task is not running */
554 ata_sff_flush_pio_task(ap); 628 ata_sff_flush_pio_task(ap);
555 629
@@ -572,20 +646,20 @@ void ata_scsi_error(struct Scsi_Host *host)
572 int nr_timedout = 0; 646 int nr_timedout = 0;
573 647
574 spin_lock_irqsave(ap->lock, flags); 648 spin_lock_irqsave(ap->lock, flags);
575 649
576 /* This must occur under the ap->lock as we don't want 650 /* This must occur under the ap->lock as we don't want
577 a polled recovery to race the real interrupt handler 651 a polled recovery to race the real interrupt handler
578 652
579 The lost_interrupt handler checks for any completed but 653 The lost_interrupt handler checks for any completed but
580 non-notified command and completes much like an IRQ handler. 654 non-notified command and completes much like an IRQ handler.
581 655
582 We then fall into the error recovery code which will treat 656 We then fall into the error recovery code which will treat
583 this as if normal completion won the race */ 657 this as if normal completion won the race */
584 658
585 if (ap->ops->lost_interrupt) 659 if (ap->ops->lost_interrupt)
586 ap->ops->lost_interrupt(ap); 660 ap->ops->lost_interrupt(ap);
587 661
588 list_for_each_entry_safe(scmd, tmp, &host->eh_cmd_q, eh_entry) { 662 list_for_each_entry_safe(scmd, tmp, eh_work_q, eh_entry) {
589 struct ata_queued_cmd *qc; 663 struct ata_queued_cmd *qc;
590 664
591 for (i = 0; i < ATA_MAX_QUEUE; i++) { 665 for (i = 0; i < ATA_MAX_QUEUE; i++) {
@@ -628,15 +702,29 @@ void ata_scsi_error(struct Scsi_Host *host)
628 ap->eh_tries = ATA_EH_MAX_TRIES; 702 ap->eh_tries = ATA_EH_MAX_TRIES;
629 } else 703 } else
630 spin_unlock_wait(ap->lock); 704 spin_unlock_wait(ap->lock);
631
632 /* If we timed raced normal completion and there is nothing to
633 recover nr_timedout == 0 why exactly are we doing error recovery ? */
634 705
635 repeat: 706}
707EXPORT_SYMBOL(ata_scsi_cmd_error_handler);
708
709/**
710 * ata_scsi_port_error_handler - recover the port after the commands
711 * @host: SCSI host containing the port
712 * @ap: the ATA port
713 *
714 * Handle the recovery of the port @ap after all the commands
715 * have been recovered.
716 */
717void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap)
718{
719 unsigned long flags;
720
636 /* invoke error handler */ 721 /* invoke error handler */
637 if (ap->ops->error_handler) { 722 if (ap->ops->error_handler) {
638 struct ata_link *link; 723 struct ata_link *link;
639 724
725 /* acquire EH ownership */
726 ata_eh_acquire(ap);
727 repeat:
640 /* kill fast drain timer */ 728 /* kill fast drain timer */
641 del_timer_sync(&ap->fastdrain_timer); 729 del_timer_sync(&ap->fastdrain_timer);
642 730
@@ -683,7 +771,7 @@ void ata_scsi_error(struct Scsi_Host *host)
683 /* process port suspend request */ 771 /* process port suspend request */
684 ata_eh_handle_port_suspend(ap); 772 ata_eh_handle_port_suspend(ap);
685 773
686 /* Exception might have happend after ->error_handler 774 /* Exception might have happened after ->error_handler
687 * recovered the port but before this point. Repeat 775 * recovered the port but before this point. Repeat
688 * EH in such case. 776 * EH in such case.
689 */ 777 */
@@ -711,14 +799,12 @@ void ata_scsi_error(struct Scsi_Host *host)
711 host->host_eh_scheduled = 0; 799 host->host_eh_scheduled = 0;
712 800
713 spin_unlock_irqrestore(ap->lock, flags); 801 spin_unlock_irqrestore(ap->lock, flags);
802 ata_eh_release(ap);
714 } else { 803 } else {
715 WARN_ON(ata_qc_from_tag(ap, ap->link.active_tag) == NULL); 804 WARN_ON(ata_qc_from_tag(ap, ap->link.active_tag) == NULL);
716 ap->ops->eng_timeout(ap); 805 ap->ops->eng_timeout(ap);
717 } 806 }
718 807
719 /* finish or retry handled scmd's and clean up */
720 WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q));
721
722 scsi_eh_flush_done_q(&ap->eh_done_q); 808 scsi_eh_flush_done_q(&ap->eh_done_q);
723 809
724 /* clean up */ 810 /* clean up */
@@ -739,9 +825,8 @@ void ata_scsi_error(struct Scsi_Host *host)
739 wake_up_all(&ap->eh_wait_q); 825 wake_up_all(&ap->eh_wait_q);
740 826
741 spin_unlock_irqrestore(ap->lock, flags); 827 spin_unlock_irqrestore(ap->lock, flags);
742
743 DPRINTK("EXIT\n");
744} 828}
829EXPORT_SYMBOL_GPL(ata_scsi_port_error_handler);
745 830
746/** 831/**
747 * ata_port_wait_eh - Wait for the currently pending EH to complete 832 * ata_port_wait_eh - Wait for the currently pending EH to complete
@@ -772,7 +857,7 @@ void ata_port_wait_eh(struct ata_port *ap)
772 857
773 /* make sure SCSI EH is complete */ 858 /* make sure SCSI EH is complete */
774 if (scsi_host_in_recovery(ap->scsi_host)) { 859 if (scsi_host_in_recovery(ap->scsi_host)) {
775 msleep(10); 860 ata_msleep(ap, 10);
776 goto retry; 861 goto retry;
777 } 862 }
778} 863}
@@ -1573,9 +1658,9 @@ static void ata_eh_analyze_serror(struct ata_link *link)
1573 * host links. For disabled PMP links, only N bit is 1658 * host links. For disabled PMP links, only N bit is
1574 * considered as X bit is left at 1 for link plugging. 1659 * considered as X bit is left at 1 for link plugging.
1575 */ 1660 */
1576 hotplug_mask = 0; 1661 if (link->lpm_policy > ATA_LPM_MAX_POWER)
1577 1662 hotplug_mask = 0; /* hotplug doesn't work w/ LPM */
1578 if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link)) 1663 else if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link))
1579 hotplug_mask = SERR_PHYRDY_CHG | SERR_DEV_XCHG; 1664 hotplug_mask = SERR_PHYRDY_CHG | SERR_DEV_XCHG;
1580 else 1665 else
1581 hotplug_mask = SERR_PHYRDY_CHG; 1666 hotplug_mask = SERR_PHYRDY_CHG;
@@ -1657,7 +1742,7 @@ void ata_eh_analyze_ncq_error(struct ata_link *link)
1657 * 1742 *
1658 * Analyze taskfile of @qc and further determine cause of 1743 * Analyze taskfile of @qc and further determine cause of
1659 * failure. This function also requests ATAPI sense data if 1744 * failure. This function also requests ATAPI sense data if
1660 * avaliable. 1745 * available.
1661 * 1746 *
1662 * LOCKING: 1747 * LOCKING:
1663 * Kernel thread context (may sleep). 1748 * Kernel thread context (may sleep).
@@ -1755,7 +1840,7 @@ static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg)
1755 struct speed_down_verdict_arg *arg = void_arg; 1840 struct speed_down_verdict_arg *arg = void_arg;
1756 int cat; 1841 int cat;
1757 1842
1758 if (ent->timestamp < arg->since) 1843 if ((ent->eflags & ATA_EFLAG_OLD_ER) || (ent->timestamp < arg->since))
1759 return -1; 1844 return -1;
1760 1845
1761 cat = ata_eh_categorize_error(ent->eflags, ent->err_mask, 1846 cat = ata_eh_categorize_error(ent->eflags, ent->err_mask,
@@ -1808,7 +1893,7 @@ static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg)
1808 * occurred during last 5 mins, NCQ_OFF. 1893 * occurred during last 5 mins, NCQ_OFF.
1809 * 1894 *
1810 * 3. If more than 8 ATA_BUS, TOUT_HSM or UNK_DEV errors 1895 * 3. If more than 8 ATA_BUS, TOUT_HSM or UNK_DEV errors
1811 * ocurred during last 5 mins, FALLBACK_TO_PIO 1896 * occurred during last 5 mins, FALLBACK_TO_PIO
1812 * 1897 *
1813 * 4. If more than 3 TOUT_HSM or UNK_DEV errors occurred 1898 * 4. If more than 3 TOUT_HSM or UNK_DEV errors occurred
1814 * during last 10 mins, NCQ_OFF. 1899 * during last 10 mins, NCQ_OFF.
@@ -2492,7 +2577,7 @@ int ata_eh_reset(struct ata_link *link, int classify,
2492 if (link->flags & ATA_LFLAG_NO_SRST) 2577 if (link->flags & ATA_LFLAG_NO_SRST)
2493 softreset = NULL; 2578 softreset = NULL;
2494 2579
2495 /* make sure each reset attemp is at least COOL_DOWN apart */ 2580 /* make sure each reset attempt is at least COOL_DOWN apart */
2496 if (ehc->i.flags & ATA_EHI_DID_RESET) { 2581 if (ehc->i.flags & ATA_EHI_DID_RESET) {
2497 now = jiffies; 2582 now = jiffies;
2498 WARN_ON(time_after(ehc->last_reset, now)); 2583 WARN_ON(time_after(ehc->last_reset, now));
@@ -2651,7 +2736,7 @@ int ata_eh_reset(struct ata_link *link, int classify,
2651 if (!reset) { 2736 if (!reset) {
2652 ata_link_printk(link, KERN_ERR, 2737 ata_link_printk(link, KERN_ERR,
2653 "follow-up softreset required " 2738 "follow-up softreset required "
2654 "but no softreset avaliable\n"); 2739 "but no softreset available\n");
2655 failed_link = link; 2740 failed_link = link;
2656 rc = -EINVAL; 2741 rc = -EINVAL;
2657 goto fail; 2742 goto fail;
@@ -2717,10 +2802,11 @@ int ata_eh_reset(struct ata_link *link, int classify,
2717 } 2802 }
2718 2803
2719 /* 2804 /*
2720 * Some controllers can't be frozen very well and may set 2805 * Some controllers can't be frozen very well and may set spurious
2721 * spuruious error conditions during reset. Clear accumulated 2806 * error conditions during reset. Clear accumulated error
2722 * error information. As reset is the final recovery action, 2807 * information and re-thaw the port if frozen. As reset is the
2723 * nothing is lost by doing this. 2808 * final recovery action and we cross check link onlineness against
2809 * device classification later, no hotplug event is lost by this.
2724 */ 2810 */
2725 spin_lock_irqsave(link->ap->lock, flags); 2811 spin_lock_irqsave(link->ap->lock, flags);
2726 memset(&link->eh_info, 0, sizeof(link->eh_info)); 2812 memset(&link->eh_info, 0, sizeof(link->eh_info));
@@ -2729,6 +2815,9 @@ int ata_eh_reset(struct ata_link *link, int classify,
2729 ap->pflags &= ~ATA_PFLAG_EH_PENDING; 2815 ap->pflags &= ~ATA_PFLAG_EH_PENDING;
2730 spin_unlock_irqrestore(link->ap->lock, flags); 2816 spin_unlock_irqrestore(link->ap->lock, flags);
2731 2817
2818 if (ap->pflags & ATA_PFLAG_FROZEN)
2819 ata_eh_thaw_port(ap);
2820
2732 /* 2821 /*
2733 * Make sure onlineness and classification result correspond. 2822 * Make sure onlineness and classification result correspond.
2734 * Hotplug could have happened during reset and some 2823 * Hotplug could have happened during reset and some
@@ -2777,8 +2866,9 @@ int ata_eh_reset(struct ata_link *link, int classify,
2777 ata_eh_done(link, NULL, ATA_EH_RESET); 2866 ata_eh_done(link, NULL, ATA_EH_RESET);
2778 if (slave) 2867 if (slave)
2779 ata_eh_done(slave, NULL, ATA_EH_RESET); 2868 ata_eh_done(slave, NULL, ATA_EH_RESET);
2780 ehc->last_reset = jiffies; /* update to completion time */ 2869 ehc->last_reset = jiffies; /* update to completion time */
2781 ehc->i.action |= ATA_EH_REVALIDATE; 2870 ehc->i.action |= ATA_EH_REVALIDATE;
2871 link->lpm_policy = ATA_LPM_UNKNOWN; /* reset LPM state */
2782 2872
2783 rc = 0; 2873 rc = 0;
2784 out: 2874 out:
@@ -2810,8 +2900,10 @@ int ata_eh_reset(struct ata_link *link, int classify,
2810 "reset failed (errno=%d), retrying in %u secs\n", 2900 "reset failed (errno=%d), retrying in %u secs\n",
2811 rc, DIV_ROUND_UP(jiffies_to_msecs(delta), 1000)); 2901 rc, DIV_ROUND_UP(jiffies_to_msecs(delta), 1000));
2812 2902
2903 ata_eh_release(ap);
2813 while (delta) 2904 while (delta)
2814 delta = schedule_timeout_uninterruptible(delta); 2905 delta = schedule_timeout_uninterruptible(delta);
2906 ata_eh_acquire(ap);
2815 } 2907 }
2816 2908
2817 if (try == max_tries - 1) { 2909 if (try == max_tries - 1) {
@@ -3204,7 +3296,138 @@ static int ata_eh_maybe_retry_flush(struct ata_device *dev)
3204 return rc; 3296 return rc;
3205} 3297}
3206 3298
3207static int ata_link_nr_enabled(struct ata_link *link) 3299/**
3300 * ata_eh_set_lpm - configure SATA interface power management
3301 * @link: link to configure power management
3302 * @policy: the link power management policy
3303 * @r_failed_dev: out parameter for failed device
3304 *
3305 * Enable SATA Interface power management. This will enable
3306 * Device Interface Power Management (DIPM) for min_power
3307 * policy, and then call driver specific callbacks for
3308 * enabling Host Initiated Power management.
3309 *
3310 * LOCKING:
3311 * EH context.
3312 *
3313 * RETURNS:
3314 * 0 on success, -errno on failure.
3315 */
3316static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
3317 struct ata_device **r_failed_dev)
3318{
3319 struct ata_port *ap = ata_is_host_link(link) ? link->ap : NULL;
3320 struct ata_eh_context *ehc = &link->eh_context;
3321 struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL;
3322 enum ata_lpm_policy old_policy = link->lpm_policy;
3323 bool no_dipm = link->ap->flags & ATA_FLAG_NO_DIPM;
3324 unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM;
3325 unsigned int err_mask;
3326 int rc;
3327
3328 /* if the link or host doesn't do LPM, noop */
3329 if ((link->flags & ATA_LFLAG_NO_LPM) || (ap && !ap->ops->set_lpm))
3330 return 0;
3331
3332 /*
3333 * DIPM is enabled only for MIN_POWER as some devices
3334 * misbehave when the host NACKs transition to SLUMBER. Order
3335 * device and link configurations such that the host always
3336 * allows DIPM requests.
3337 */
3338 ata_for_each_dev(dev, link, ENABLED) {
3339 bool hipm = ata_id_has_hipm(dev->id);
3340 bool dipm = ata_id_has_dipm(dev->id) && !no_dipm;
3341
 3342 /* find the first enabled device and the first LPM-enabled device */
3343 if (!link_dev)
3344 link_dev = dev;
3345
3346 if (!lpm_dev && (hipm || dipm))
3347 lpm_dev = dev;
3348
3349 hints &= ~ATA_LPM_EMPTY;
3350 if (!hipm)
3351 hints &= ~ATA_LPM_HIPM;
3352
3353 /* disable DIPM before changing link config */
3354 if (policy != ATA_LPM_MIN_POWER && dipm) {
3355 err_mask = ata_dev_set_feature(dev,
3356 SETFEATURES_SATA_DISABLE, SATA_DIPM);
3357 if (err_mask && err_mask != AC_ERR_DEV) {
3358 ata_dev_printk(dev, KERN_WARNING,
3359 "failed to disable DIPM, Emask 0x%x\n",
3360 err_mask);
3361 rc = -EIO;
3362 goto fail;
3363 }
3364 }
3365 }
3366
3367 if (ap) {
3368 rc = ap->ops->set_lpm(link, policy, hints);
3369 if (!rc && ap->slave_link)
3370 rc = ap->ops->set_lpm(ap->slave_link, policy, hints);
3371 } else
3372 rc = sata_pmp_set_lpm(link, policy, hints);
3373
3374 /*
3375 * Attribute link config failure to the first (LPM) enabled
3376 * device on the link.
3377 */
3378 if (rc) {
3379 if (rc == -EOPNOTSUPP) {
3380 link->flags |= ATA_LFLAG_NO_LPM;
3381 return 0;
3382 }
3383 dev = lpm_dev ? lpm_dev : link_dev;
3384 goto fail;
3385 }
3386
3387 /*
3388 * Low level driver acked the transition. Issue DIPM command
3389 * with the new policy set.
3390 */
3391 link->lpm_policy = policy;
3392 if (ap && ap->slave_link)
3393 ap->slave_link->lpm_policy = policy;
3394
3395 /* host config updated, enable DIPM if transitioning to MIN_POWER */
3396 ata_for_each_dev(dev, link, ENABLED) {
3397 if (policy == ATA_LPM_MIN_POWER && !no_dipm &&
3398 ata_id_has_dipm(dev->id)) {
3399 err_mask = ata_dev_set_feature(dev,
3400 SETFEATURES_SATA_ENABLE, SATA_DIPM);
3401 if (err_mask && err_mask != AC_ERR_DEV) {
3402 ata_dev_printk(dev, KERN_WARNING,
3403 "failed to enable DIPM, Emask 0x%x\n",
3404 err_mask);
3405 rc = -EIO;
3406 goto fail;
3407 }
3408 }
3409 }
3410
3411 return 0;
3412
3413fail:
3414 /* restore the old policy */
3415 link->lpm_policy = old_policy;
3416 if (ap && ap->slave_link)
3417 ap->slave_link->lpm_policy = old_policy;
3418
3419 /* if no device or only one more chance is left, disable LPM */
3420 if (!dev || ehc->tries[dev->devno] <= 2) {
3421 ata_link_printk(link, KERN_WARNING,
3422 "disabling LPM on the link\n");
3423 link->flags |= ATA_LFLAG_NO_LPM;
3424 }
3425 if (r_failed_dev)
3426 *r_failed_dev = dev;
3427 return rc;
3428}
3429
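ata_eh_set_lpm() above defers the controller-side work to ap->ops->set_lpm(). As a rough illustration of what such a hook can look like for a controller that needs nothing beyond PHY programming, here is a hypothetical implementation that simply forwards to sata_link_scr_lpm(), the same helper sata_pmp_set_lpm() uses in the libata-pmp.c hunk below; real drivers (ahci, for instance) also program controller-specific LPM state.

    /* Hypothetical minimal ->set_lpm() hook; sketch only, hints ignored. */
    static int example_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
                               unsigned hints)
    {
            /* adjust the IPM bits in SControl; mirrors sata_pmp_set_lpm() */
            return sata_link_scr_lpm(link, policy, true);
    }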
3430int ata_link_nr_enabled(struct ata_link *link)
3208{ 3431{
3209 struct ata_device *dev; 3432 struct ata_device *dev;
3210 int cnt = 0; 3433 int cnt = 0;
@@ -3288,6 +3511,16 @@ static int ata_eh_schedule_probe(struct ata_device *dev)
3288 ehc->saved_xfer_mode[dev->devno] = 0; 3511 ehc->saved_xfer_mode[dev->devno] = 0;
3289 ehc->saved_ncq_enabled &= ~(1 << dev->devno); 3512 ehc->saved_ncq_enabled &= ~(1 << dev->devno);
3290 3513
 3514 /* the link may be in deep sleep, wake it up */
3515 if (link->lpm_policy > ATA_LPM_MAX_POWER) {
3516 if (ata_is_host_link(link))
3517 link->ap->ops->set_lpm(link, ATA_LPM_MAX_POWER,
3518 ATA_LPM_EMPTY);
3519 else
3520 sata_pmp_set_lpm(link, ATA_LPM_MAX_POWER,
3521 ATA_LPM_EMPTY);
3522 }
3523
3291 /* Record and count probe trials on the ering. The specific 3524 /* Record and count probe trials on the ering. The specific
3292 * error mask used is irrelevant. Because a successful device 3525 * error mask used is irrelevant. Because a successful device
3293 * detection clears the ering, this count accumulates only if 3526 * detection clears the ering, this count accumulates only if
@@ -3389,8 +3622,7 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
3389{ 3622{
3390 struct ata_link *link; 3623 struct ata_link *link;
3391 struct ata_device *dev; 3624 struct ata_device *dev;
3392 int nr_failed_devs; 3625 int rc, nr_fails;
3393 int rc;
3394 unsigned long flags, deadline; 3626 unsigned long flags, deadline;
3395 3627
3396 DPRINTK("ENTER\n"); 3628 DPRINTK("ENTER\n");
@@ -3431,7 +3663,6 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
3431 3663
3432 retry: 3664 retry:
3433 rc = 0; 3665 rc = 0;
3434 nr_failed_devs = 0;
3435 3666
3436 /* if UNLOADING, finish immediately */ 3667 /* if UNLOADING, finish immediately */
3437 if (ap->pflags & ATA_PFLAG_UNLOADING) 3668 if (ap->pflags & ATA_PFLAG_UNLOADING)
@@ -3501,8 +3732,10 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
3501 if (time_before_eq(deadline, now)) 3732 if (time_before_eq(deadline, now))
3502 break; 3733 break;
3503 3734
3735 ata_eh_release(ap);
3504 deadline = wait_for_completion_timeout(&ap->park_req_pending, 3736 deadline = wait_for_completion_timeout(&ap->park_req_pending,
3505 deadline - now); 3737 deadline - now);
3738 ata_eh_acquire(ap);
3506 } while (deadline); 3739 } while (deadline);
3507 ata_for_each_link(link, ap, EDGE) { 3740 ata_for_each_link(link, ap, EDGE) {
3508 ata_for_each_dev(dev, link, ALL) { 3741 ata_for_each_dev(dev, link, ALL) {
@@ -3516,13 +3749,17 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
3516 } 3749 }
3517 3750
3518 /* the rest */ 3751 /* the rest */
3519 ata_for_each_link(link, ap, EDGE) { 3752 nr_fails = 0;
3753 ata_for_each_link(link, ap, PMP_FIRST) {
3520 struct ata_eh_context *ehc = &link->eh_context; 3754 struct ata_eh_context *ehc = &link->eh_context;
3521 3755
3756 if (sata_pmp_attached(ap) && ata_is_host_link(link))
3757 goto config_lpm;
3758
3522 /* revalidate existing devices and attach new ones */ 3759 /* revalidate existing devices and attach new ones */
3523 rc = ata_eh_revalidate_and_attach(link, &dev); 3760 rc = ata_eh_revalidate_and_attach(link, &dev);
3524 if (rc) 3761 if (rc)
3525 goto dev_fail; 3762 goto rest_fail;
3526 3763
3527 /* if PMP got attached, return, pmp EH will take care of it */ 3764 /* if PMP got attached, return, pmp EH will take care of it */
3528 if (link->device->class == ATA_DEV_PMP) { 3765 if (link->device->class == ATA_DEV_PMP) {
@@ -3534,7 +3771,7 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
3534 if (ehc->i.flags & ATA_EHI_SETMODE) { 3771 if (ehc->i.flags & ATA_EHI_SETMODE) {
3535 rc = ata_set_mode(link, &dev); 3772 rc = ata_set_mode(link, &dev);
3536 if (rc) 3773 if (rc)
3537 goto dev_fail; 3774 goto rest_fail;
3538 ehc->i.flags &= ~ATA_EHI_SETMODE; 3775 ehc->i.flags &= ~ATA_EHI_SETMODE;
3539 } 3776 }
3540 3777
@@ -3547,7 +3784,7 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
3547 continue; 3784 continue;
3548 rc = atapi_eh_clear_ua(dev); 3785 rc = atapi_eh_clear_ua(dev);
3549 if (rc) 3786 if (rc)
3550 goto dev_fail; 3787 goto rest_fail;
3551 } 3788 }
3552 } 3789 }
3553 3790
@@ -3557,21 +3794,25 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
3557 continue; 3794 continue;
3558 rc = ata_eh_maybe_retry_flush(dev); 3795 rc = ata_eh_maybe_retry_flush(dev);
3559 if (rc) 3796 if (rc)
3560 goto dev_fail; 3797 goto rest_fail;
3561 } 3798 }
3562 3799
3800 config_lpm:
3563 /* configure link power saving */ 3801 /* configure link power saving */
3564 if (ehc->i.action & ATA_EH_LPM) 3802 if (link->lpm_policy != ap->target_lpm_policy) {
3565 ata_for_each_dev(dev, link, ALL) 3803 rc = ata_eh_set_lpm(link, ap->target_lpm_policy, &dev);
3566 ata_dev_enable_pm(dev, ap->pm_policy); 3804 if (rc)
3805 goto rest_fail;
3806 }
3567 3807
3568 /* this link is okay now */ 3808 /* this link is okay now */
3569 ehc->i.flags = 0; 3809 ehc->i.flags = 0;
3570 continue; 3810 continue;
3571 3811
3572dev_fail: 3812 rest_fail:
3573 nr_failed_devs++; 3813 nr_fails++;
3574 ata_eh_handle_dev_fail(dev, rc); 3814 if (dev)
3815 ata_eh_handle_dev_fail(dev, rc);
3575 3816
3576 if (ap->pflags & ATA_PFLAG_FROZEN) { 3817 if (ap->pflags & ATA_PFLAG_FROZEN) {
3577 /* PMP reset requires working host port. 3818 /* PMP reset requires working host port.
@@ -3583,7 +3824,7 @@ dev_fail:
3583 } 3824 }
3584 } 3825 }
3585 3826
3586 if (nr_failed_devs) 3827 if (nr_fails)
3587 goto retry; 3828 goto retry;
3588 3829
3589 out: 3830 out:
diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
index 224faabd7b7e..f06b7ea590d3 100644
--- a/drivers/ata/libata-pmp.c
+++ b/drivers/ata/libata-pmp.c
@@ -11,6 +11,7 @@
11#include <linux/libata.h> 11#include <linux/libata.h>
12#include <linux/slab.h> 12#include <linux/slab.h>
13#include "libata.h" 13#include "libata.h"
14#include "libata-transport.h"
14 15
15const struct ata_port_operations sata_pmp_port_ops = { 16const struct ata_port_operations sata_pmp_port_ops = {
16 .inherits = &sata_port_ops, 17 .inherits = &sata_port_ops,
@@ -185,6 +186,27 @@ int sata_pmp_scr_write(struct ata_link *link, int reg, u32 val)
185} 186}
186 187
187/** 188/**
189 * sata_pmp_set_lpm - configure LPM for a PMP link
190 * @link: PMP link to configure LPM for
191 * @policy: target LPM policy
192 * @hints: LPM hints
193 *
194 * Configure LPM for @link. This function will contain any PMP
195 * specific workarounds if necessary.
196 *
197 * LOCKING:
198 * EH context.
199 *
200 * RETURNS:
201 * 0 on success, -errno on failure.
202 */
203int sata_pmp_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
204 unsigned hints)
205{
206 return sata_link_scr_lpm(link, policy, true);
207}
208
209/**
188 * sata_pmp_read_gscr - read GSCR block of SATA PMP 210 * sata_pmp_read_gscr - read GSCR block of SATA PMP
189 * @dev: PMP device 211 * @dev: PMP device
190 * @gscr: buffer to read GSCR block into 212 * @gscr: buffer to read GSCR block into
@@ -312,10 +334,10 @@ static int sata_pmp_configure(struct ata_device *dev, int print_info)
312 return rc; 334 return rc;
313} 335}
314 336
315static int sata_pmp_init_links(struct ata_port *ap, int nr_ports) 337static int sata_pmp_init_links(struct ata_port *ap, int nr_ports)
316{ 338{
317 struct ata_link *pmp_link = ap->pmp_link; 339 struct ata_link *pmp_link = ap->pmp_link;
318 int i; 340 int i, err;
319 341
320 if (!pmp_link) { 342 if (!pmp_link) {
321 pmp_link = kzalloc(sizeof(pmp_link[0]) * SATA_PMP_MAX_PORTS, 343 pmp_link = kzalloc(sizeof(pmp_link[0]) * SATA_PMP_MAX_PORTS,
@@ -327,6 +349,13 @@ static int sata_pmp_init_links(struct ata_port *ap, int nr_ports)
327 ata_link_init(ap, &pmp_link[i], i); 349 ata_link_init(ap, &pmp_link[i], i);
328 350
329 ap->pmp_link = pmp_link; 351 ap->pmp_link = pmp_link;
352
353 for (i = 0; i < SATA_PMP_MAX_PORTS; i++) {
354 err = ata_tlink_add(&pmp_link[i]);
355 if (err) {
356 goto err_tlink;
357 }
358 }
330 } 359 }
331 360
332 for (i = 0; i < nr_ports; i++) { 361 for (i = 0; i < nr_ports; i++) {
@@ -339,6 +368,12 @@ static int sata_pmp_init_links(struct ata_port *ap, int nr_ports)
339 } 368 }
340 369
341 return 0; 370 return 0;
371 err_tlink:
372 while (--i >= 0)
373 ata_tlink_delete(&pmp_link[i]);
374 kfree(pmp_link);
375 ap->pmp_link = NULL;
376 return err;
342} 377}
343 378
344static void sata_pmp_quirks(struct ata_port *ap) 379static void sata_pmp_quirks(struct ata_port *ap)
@@ -351,6 +386,9 @@ static void sata_pmp_quirks(struct ata_port *ap)
351 if (vendor == 0x1095 && devid == 0x3726) { 386 if (vendor == 0x1095 && devid == 0x3726) {
352 /* sil3726 quirks */ 387 /* sil3726 quirks */
353 ata_for_each_link(link, ap, EDGE) { 388 ata_for_each_link(link, ap, EDGE) {
389 /* link reports offline after LPM */
390 link->flags |= ATA_LFLAG_NO_LPM;
391
354 /* Class code report is unreliable and SRST 392 /* Class code report is unreliable and SRST
355 * times out under certain configurations. 393 * times out under certain configurations.
356 */ 394 */
@@ -366,6 +404,9 @@ static void sata_pmp_quirks(struct ata_port *ap)
366 } else if (vendor == 0x1095 && devid == 0x4723) { 404 } else if (vendor == 0x1095 && devid == 0x4723) {
367 /* sil4723 quirks */ 405 /* sil4723 quirks */
368 ata_for_each_link(link, ap, EDGE) { 406 ata_for_each_link(link, ap, EDGE) {
407 /* link reports offline after LPM */
408 link->flags |= ATA_LFLAG_NO_LPM;
409
369 /* class code report is unreliable */ 410 /* class code report is unreliable */
370 if (link->pmp < 2) 411 if (link->pmp < 2)
371 link->flags |= ATA_LFLAG_ASSUME_ATA; 412 link->flags |= ATA_LFLAG_ASSUME_ATA;
@@ -378,6 +419,9 @@ static void sata_pmp_quirks(struct ata_port *ap)
378 } else if (vendor == 0x1095 && devid == 0x4726) { 419 } else if (vendor == 0x1095 && devid == 0x4726) {
379 /* sil4726 quirks */ 420 /* sil4726 quirks */
380 ata_for_each_link(link, ap, EDGE) { 421 ata_for_each_link(link, ap, EDGE) {
422 /* link reports offline after LPM */
423 link->flags |= ATA_LFLAG_NO_LPM;
424
381 /* Class code report is unreliable and SRST 425 /* Class code report is unreliable and SRST
382 * times out under certain configurations. 426 * times out under certain configurations.
383 * Config device can be at port 0 or 5 and 427 * Config device can be at port 0 or 5 and
@@ -405,6 +449,16 @@ static void sata_pmp_quirks(struct ata_port *ap)
405 * otherwise. Don't try hard to recover it. 449 * otherwise. Don't try hard to recover it.
406 */ 450 */
407 ap->pmp_link[ap->nr_pmp_links - 1].flags |= ATA_LFLAG_NO_RETRY; 451 ap->pmp_link[ap->nr_pmp_links - 1].flags |= ATA_LFLAG_NO_RETRY;
452 } else if (vendor == 0x197b && devid == 0x2352) {
453 /* chip found in Thermaltake BlackX Duet, jmicron JMB350? */
454 ata_for_each_link(link, ap, EDGE) {
 455 /* SRST breaks detection and disks get misclassified;
 456 * LPM is disabled to avoid potential problems.
 457 */
458 link->flags |= ATA_LFLAG_NO_LPM |
459 ATA_LFLAG_NO_SRST |
460 ATA_LFLAG_ASSUME_ATA;
461 }
408 } 462 }
409} 463}
410 464
@@ -938,15 +992,25 @@ static int sata_pmp_eh_recover(struct ata_port *ap)
938 if (rc) 992 if (rc)
939 goto link_fail; 993 goto link_fail;
940 994
941 /* Connection status might have changed while resetting other
942 * links, check SATA_PMP_GSCR_ERROR before returning.
943 */
944
945 /* clear SNotification */ 995 /* clear SNotification */
946 rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf); 996 rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf);
947 if (rc == 0) 997 if (rc == 0)
948 sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf); 998 sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf);
949 999
1000 /*
1001 * If LPM is active on any fan-out port, hotplug wouldn't
1002 * work. Return w/ PHY event notification disabled.
1003 */
1004 ata_for_each_link(link, ap, EDGE)
1005 if (link->lpm_policy > ATA_LPM_MAX_POWER)
1006 return 0;
1007
1008 /*
1009 * Connection status might have changed while resetting other
1010 * links, enable notification and check SATA_PMP_GSCR_ERROR
1011 * before returning.
1012 */
1013
950 /* enable notification */ 1014 /* enable notification */
951 if (pmp_dev->flags & ATA_DFLAG_AN) { 1015 if (pmp_dev->flags & ATA_DFLAG_AN) {
952 gscr[SATA_PMP_GSCR_FEAT_EN] |= SATA_PMP_FEAT_NOTIFY; 1016 gscr[SATA_PMP_GSCR_FEAT_EN] |= SATA_PMP_FEAT_NOTIFY;
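The sata_pmp_init_links() change above registers one transport object per possible fan-out link and has to unwind partial success on failure. The idiom is the usual register-forward, unwind-backward loop; a stripped-down sketch with hypothetical register_one()/unregister_one() standing in for ata_tlink_add()/ata_tlink_delete():

    /* Sketch only; register_one/unregister_one are stand-in names. */
    static int example_register_all(struct ata_link *links, int n)
    {
            int i, err;

            for (i = 0; i < n; i++) {
                    err = register_one(&links[i]);
                    if (err)
                            goto unwind;
            }
            return 0;

    unwind:
            while (--i >= 0)                /* undo only what succeeded */
                    unregister_one(&links[i]);
            return err;
    }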
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index a89172c100f5..927f968e99d9 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -51,8 +51,8 @@
51#include <asm/unaligned.h> 51#include <asm/unaligned.h>
52 52
53#include "libata.h" 53#include "libata.h"
54#include "libata-transport.h"
54 55
55#define SECTOR_SIZE 512
56#define ATA_SCSI_RBUF_SIZE 4096 56#define ATA_SCSI_RBUF_SIZE 4096
57 57
58static DEFINE_SPINLOCK(ata_scsi_rbuf_lock); 58static DEFINE_SPINLOCK(ata_scsi_rbuf_lock);
@@ -64,9 +64,6 @@ static struct ata_device *__ata_scsi_find_dev(struct ata_port *ap,
64 const struct scsi_device *scsidev); 64 const struct scsi_device *scsidev);
65static struct ata_device *ata_scsi_find_dev(struct ata_port *ap, 65static struct ata_device *ata_scsi_find_dev(struct ata_port *ap,
66 const struct scsi_device *scsidev); 66 const struct scsi_device *scsidev);
67static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
68 unsigned int id, unsigned int lun);
69
70 67
71#define RW_RECOVERY_MPAGE 0x1 68#define RW_RECOVERY_MPAGE 0x1
72#define RW_RECOVERY_MPAGE_LEN 12 69#define RW_RECOVERY_MPAGE_LEN 12
@@ -106,83 +103,55 @@ static const u8 def_control_mpage[CONTROL_MPAGE_LEN] = {
106 0, 30 /* extended self test time, see 05-359r1 */ 103 0, 30 /* extended self test time, see 05-359r1 */
107}; 104};
108 105
109/* 106static const char *ata_lpm_policy_names[] = {
110 * libata transport template. libata doesn't do real transport stuff. 107 [ATA_LPM_UNKNOWN] = "max_performance",
111 * It just needs the eh_timed_out hook. 108 [ATA_LPM_MAX_POWER] = "max_performance",
112 */ 109 [ATA_LPM_MED_POWER] = "medium_power",
113static struct scsi_transport_template ata_scsi_transport_template = { 110 [ATA_LPM_MIN_POWER] = "min_power",
114 .eh_strategy_handler = ata_scsi_error,
115 .eh_timed_out = ata_scsi_timed_out,
116 .user_scan = ata_scsi_user_scan,
117}; 111};
118 112
119 113static ssize_t ata_scsi_lpm_store(struct device *dev,
120static const struct { 114 struct device_attribute *attr,
121 enum link_pm value; 115 const char *buf, size_t count)
122 const char *name;
123} link_pm_policy[] = {
124 { NOT_AVAILABLE, "max_performance" },
125 { MIN_POWER, "min_power" },
126 { MAX_PERFORMANCE, "max_performance" },
127 { MEDIUM_POWER, "medium_power" },
128};
129
130static const char *ata_scsi_lpm_get(enum link_pm policy)
131{
132 int i;
133
134 for (i = 0; i < ARRAY_SIZE(link_pm_policy); i++)
135 if (link_pm_policy[i].value == policy)
136 return link_pm_policy[i].name;
137
138 return NULL;
139}
140
141static ssize_t ata_scsi_lpm_put(struct device *dev,
142 struct device_attribute *attr,
143 const char *buf, size_t count)
144{ 116{
145 struct Scsi_Host *shost = class_to_shost(dev); 117 struct Scsi_Host *shost = class_to_shost(dev);
146 struct ata_port *ap = ata_shost_to_port(shost); 118 struct ata_port *ap = ata_shost_to_port(shost);
147 enum link_pm policy = 0; 119 enum ata_lpm_policy policy;
148 int i; 120 unsigned long flags;
149 121
150 /* 122 /* UNKNOWN is internal state, iterate from MAX_POWER */
151 * we are skipping array location 0 on purpose - this 123 for (policy = ATA_LPM_MAX_POWER;
152 * is because a value of NOT_AVAILABLE is displayed 124 policy < ARRAY_SIZE(ata_lpm_policy_names); policy++) {
153 * to the user as max_performance, but when the user 125 const char *name = ata_lpm_policy_names[policy];
154 * writes "max_performance", they actually want the 126
155 * value to match MAX_PERFORMANCE. 127 if (strncmp(name, buf, strlen(name)) == 0)
156 */
157 for (i = 1; i < ARRAY_SIZE(link_pm_policy); i++) {
158 const int len = strlen(link_pm_policy[i].name);
159 if (strncmp(link_pm_policy[i].name, buf, len) == 0) {
160 policy = link_pm_policy[i].value;
161 break; 128 break;
162 }
163 } 129 }
164 if (!policy) 130 if (policy == ARRAY_SIZE(ata_lpm_policy_names))
165 return -EINVAL; 131 return -EINVAL;
166 132
167 ata_lpm_schedule(ap, policy); 133 spin_lock_irqsave(ap->lock, flags);
134 ap->target_lpm_policy = policy;
135 ata_port_schedule_eh(ap);
136 spin_unlock_irqrestore(ap->lock, flags);
137
168 return count; 138 return count;
169} 139}
170 140
171static ssize_t 141static ssize_t ata_scsi_lpm_show(struct device *dev,
172ata_scsi_lpm_show(struct device *dev, struct device_attribute *attr, char *buf) 142 struct device_attribute *attr, char *buf)
173{ 143{
174 struct Scsi_Host *shost = class_to_shost(dev); 144 struct Scsi_Host *shost = class_to_shost(dev);
175 struct ata_port *ap = ata_shost_to_port(shost); 145 struct ata_port *ap = ata_shost_to_port(shost);
176 const char *policy =
177 ata_scsi_lpm_get(ap->pm_policy);
178 146
179 if (!policy) 147 if (ap->target_lpm_policy >= ARRAY_SIZE(ata_lpm_policy_names))
180 return -EINVAL; 148 return -EINVAL;
181 149
182 return snprintf(buf, 23, "%s\n", policy); 150 return snprintf(buf, PAGE_SIZE, "%s\n",
151 ata_lpm_policy_names[ap->target_lpm_policy]);
183} 152}
184DEVICE_ATTR(link_power_management_policy, S_IRUGO | S_IWUSR, 153DEVICE_ATTR(link_power_management_policy, S_IRUGO | S_IWUSR,
185 ata_scsi_lpm_show, ata_scsi_lpm_put); 154 ata_scsi_lpm_show, ata_scsi_lpm_store);
186EXPORT_SYMBOL_GPL(dev_attr_link_power_management_policy); 155EXPORT_SYMBOL_GPL(dev_attr_link_power_management_policy);
187 156
188static ssize_t ata_scsi_park_show(struct device *device, 157static ssize_t ata_scsi_park_show(struct device *device,
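One subtlety in ata_scsi_lpm_store() above: ATA_LPM_UNKNOWN and ATA_LPM_MAX_POWER share the name "max_performance", and the scan deliberately starts at MAX_POWER, so writing that string always yields the real policy rather than the internal UNKNOWN state. A self-contained sketch of the same matching rule (hypothetical helper, illustration only):

    static enum ata_lpm_policy example_parse_lpm_name(const char *buf)
    {
            enum ata_lpm_policy policy;

            /* skip ATA_LPM_UNKNOWN; it is internal and shares a name
             * with MAX_POWER */
            for (policy = ATA_LPM_MAX_POWER;
                 policy < ARRAY_SIZE(ata_lpm_policy_names); policy++) {
                    const char *name = ata_lpm_policy_names[policy];

                    if (strncmp(name, buf, strlen(name)) == 0)
                            return policy;
            }
            return ATA_LPM_UNKNOWN;         /* caller maps this to -EINVAL */
    }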
@@ -377,12 +346,11 @@ struct device_attribute *ata_common_sdev_attrs[] = {
377}; 346};
378EXPORT_SYMBOL_GPL(ata_common_sdev_attrs); 347EXPORT_SYMBOL_GPL(ata_common_sdev_attrs);
379 348
380static void ata_scsi_invalid_field(struct scsi_cmnd *cmd, 349static void ata_scsi_invalid_field(struct scsi_cmnd *cmd)
381 void (*done)(struct scsi_cmnd *))
382{ 350{
383 ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, 0x24, 0x0); 351 ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, 0x24, 0x0);
384 /* "Invalid field in cbd" */ 352 /* "Invalid field in cbd" */
385 done(cmd); 353 cmd->scsi_done(cmd);
386} 354}
387 355
388/** 356/**
@@ -516,7 +484,7 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
516 memset(scsi_cmd, 0, sizeof(scsi_cmd)); 484 memset(scsi_cmd, 0, sizeof(scsi_cmd));
517 485
518 if (args[3]) { 486 if (args[3]) {
519 argsize = SECTOR_SIZE * args[3]; 487 argsize = ATA_SECT_SIZE * args[3];
520 argbuf = kmalloc(argsize, GFP_KERNEL); 488 argbuf = kmalloc(argsize, GFP_KERNEL);
521 if (argbuf == NULL) { 489 if (argbuf == NULL) {
522 rc = -ENOMEM; 490 rc = -ENOMEM;
@@ -750,7 +718,6 @@ EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
750 * ata_scsi_qc_new - acquire new ata_queued_cmd reference 718 * ata_scsi_qc_new - acquire new ata_queued_cmd reference
751 * @dev: ATA device to which the new command is attached 719 * @dev: ATA device to which the new command is attached
752 * @cmd: SCSI command that originated this ATA command 720 * @cmd: SCSI command that originated this ATA command
753 * @done: SCSI command completion function
754 * 721 *
755 * Obtain a reference to an unused ata_queued_cmd structure, 722 * Obtain a reference to an unused ata_queued_cmd structure,
756 * which is the basic libata structure representing a single 723 * which is the basic libata structure representing a single
@@ -767,21 +734,20 @@ EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
767 * Command allocated, or %NULL if none available. 734 * Command allocated, or %NULL if none available.
768 */ 735 */
769static struct ata_queued_cmd *ata_scsi_qc_new(struct ata_device *dev, 736static struct ata_queued_cmd *ata_scsi_qc_new(struct ata_device *dev,
770 struct scsi_cmnd *cmd, 737 struct scsi_cmnd *cmd)
771 void (*done)(struct scsi_cmnd *))
772{ 738{
773 struct ata_queued_cmd *qc; 739 struct ata_queued_cmd *qc;
774 740
775 qc = ata_qc_new_init(dev); 741 qc = ata_qc_new_init(dev);
776 if (qc) { 742 if (qc) {
777 qc->scsicmd = cmd; 743 qc->scsicmd = cmd;
778 qc->scsidone = done; 744 qc->scsidone = cmd->scsi_done;
779 745
780 qc->sg = scsi_sglist(cmd); 746 qc->sg = scsi_sglist(cmd);
781 qc->n_elem = scsi_sg_count(cmd); 747 qc->n_elem = scsi_sg_count(cmd);
782 } else { 748 } else {
783 cmd->result = (DID_OK << 16) | (QUEUE_FULL << 1); 749 cmd->result = (DID_OK << 16) | (QUEUE_FULL << 1);
784 done(cmd); 750 cmd->scsi_done(cmd);
785 } 751 }
786 752
787 return qc; 753 return qc;
@@ -1033,7 +999,7 @@ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc)
1033 * @qc: Command that we are erroring out 999 * @qc: Command that we are erroring out
1034 * 1000 *
1035 * Generate sense block for a failed ATA command @qc. Descriptor 1001 * Generate sense block for a failed ATA command @qc. Descriptor
1036 * format is used to accomodate LBA48 block address. 1002 * format is used to accommodate LBA48 block address.
1037 * 1003 *
1038 * LOCKING: 1004 * LOCKING:
1039 * None. 1005 * None.
@@ -1123,21 +1089,21 @@ static int atapi_drain_needed(struct request *rq)
1123static int ata_scsi_dev_config(struct scsi_device *sdev, 1089static int ata_scsi_dev_config(struct scsi_device *sdev,
1124 struct ata_device *dev) 1090 struct ata_device *dev)
1125{ 1091{
1092 struct request_queue *q = sdev->request_queue;
1093
1126 if (!ata_id_has_unload(dev->id)) 1094 if (!ata_id_has_unload(dev->id))
1127 dev->flags |= ATA_DFLAG_NO_UNLOAD; 1095 dev->flags |= ATA_DFLAG_NO_UNLOAD;
1128 1096
1129 /* configure max sectors */ 1097 /* configure max sectors */
1130 blk_queue_max_hw_sectors(sdev->request_queue, dev->max_sectors); 1098 blk_queue_max_hw_sectors(q, dev->max_sectors);
1131 1099
1132 if (dev->class == ATA_DEV_ATAPI) { 1100 if (dev->class == ATA_DEV_ATAPI) {
1133 struct request_queue *q = sdev->request_queue;
1134 void *buf; 1101 void *buf;
1135 1102
1136 /* set the min alignment and padding */ 1103 sdev->sector_size = ATA_SECT_SIZE;
1137 blk_queue_update_dma_alignment(sdev->request_queue, 1104
1138 ATA_DMA_PAD_SZ - 1); 1105 /* set DMA padding */
1139 blk_queue_update_dma_pad(sdev->request_queue, 1106 blk_queue_update_dma_pad(q, ATA_DMA_PAD_SZ - 1);
1140 ATA_DMA_PAD_SZ - 1);
1141 1107
1142 /* configure draining */ 1108 /* configure draining */
1143 buf = kmalloc(ATAPI_MAX_DRAIN, q->bounce_gfp | GFP_KERNEL); 1109 buf = kmalloc(ATAPI_MAX_DRAIN, q->bounce_gfp | GFP_KERNEL);
@@ -1149,12 +1115,24 @@ static int ata_scsi_dev_config(struct scsi_device *sdev,
1149 1115
1150 blk_queue_dma_drain(q, atapi_drain_needed, buf, ATAPI_MAX_DRAIN); 1116 blk_queue_dma_drain(q, atapi_drain_needed, buf, ATAPI_MAX_DRAIN);
1151 } else { 1117 } else {
1152 /* ATA devices must be sector aligned */ 1118 sdev->sector_size = ata_id_logical_sector_size(dev->id);
1153 blk_queue_update_dma_alignment(sdev->request_queue,
1154 ATA_SECT_SIZE - 1);
1155 sdev->manage_start_stop = 1; 1119 sdev->manage_start_stop = 1;
1156 } 1120 }
1157 1121
1122 /*
 1123 * ata_pio_sectors() expects the buffer for each sector not to cross
 1124 * a page boundary. Enforce this by requiring buffers to be sector
 1125 * aligned, which works iff sector_size is not larger than
 1126 * PAGE_SIZE. ATAPI devices also need the alignment as
 1127 * IDENTIFY_PACKET is executed as ATA_PROT_PIO.
1128 */
1129 if (sdev->sector_size > PAGE_SIZE)
1130 ata_dev_printk(dev, KERN_WARNING,
1131 "sector_size=%u > PAGE_SIZE, PIO may malfunction\n",
1132 sdev->sector_size);
1133
1134 blk_queue_update_dma_alignment(q, sdev->sector_size - 1);
1135
1158 if (dev->flags & ATA_DFLAG_AN) 1136 if (dev->flags & ATA_DFLAG_AN)
1159 set_bit(SDEV_EVT_MEDIA_CHANGE, sdev->supported_events); 1137 set_bit(SDEV_EVT_MEDIA_CHANGE, sdev->supported_events);
1160 1138
@@ -1166,6 +1144,9 @@ static int ata_scsi_dev_config(struct scsi_device *sdev,
1166 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, depth); 1144 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, depth);
1167 } 1145 }
1168 1146
1147 blk_queue_flush_queueable(q, false);
1148
1149 dev->sdev = sdev;
1169 return 0; 1150 return 0;
1170} 1151}
1171 1152
@@ -1696,7 +1677,7 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
1696 goto nothing_to_do; 1677 goto nothing_to_do;
1697 1678
1698 qc->flags |= ATA_QCFLAG_IO; 1679 qc->flags |= ATA_QCFLAG_IO;
1699 qc->nbytes = n_block * ATA_SECT_SIZE; 1680 qc->nbytes = n_block * scmd->device->sector_size;
1700 1681
1701 rc = ata_build_rw_tf(&qc->tf, qc->dev, block, n_block, tf_flags, 1682 rc = ata_build_rw_tf(&qc->tf, qc->dev, block, n_block, tf_flags,
1702 qc->tag); 1683 qc->tag);
@@ -1764,7 +1745,6 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
1764 * ata_scsi_translate - Translate then issue SCSI command to ATA device 1745 * ata_scsi_translate - Translate then issue SCSI command to ATA device
1765 * @dev: ATA device to which the command is addressed 1746 * @dev: ATA device to which the command is addressed
1766 * @cmd: SCSI command to execute 1747 * @cmd: SCSI command to execute
1767 * @done: SCSI command completion function
1768 * @xlat_func: Actor which translates @cmd to an ATA taskfile 1748 * @xlat_func: Actor which translates @cmd to an ATA taskfile
1769 * 1749 *
1770 * Our ->queuecommand() function has decided that the SCSI 1750 * Our ->queuecommand() function has decided that the SCSI
@@ -1788,7 +1768,6 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
1788 * needs to be deferred. 1768 * needs to be deferred.
1789 */ 1769 */
1790static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd, 1770static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd,
1791 void (*done)(struct scsi_cmnd *),
1792 ata_xlat_func_t xlat_func) 1771 ata_xlat_func_t xlat_func)
1793{ 1772{
1794 struct ata_port *ap = dev->link->ap; 1773 struct ata_port *ap = dev->link->ap;
@@ -1797,7 +1776,7 @@ static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd,
1797 1776
1798 VPRINTK("ENTER\n"); 1777 VPRINTK("ENTER\n");
1799 1778
1800 qc = ata_scsi_qc_new(dev, cmd, done); 1779 qc = ata_scsi_qc_new(dev, cmd);
1801 if (!qc) 1780 if (!qc)
1802 goto err_mem; 1781 goto err_mem;
1803 1782
@@ -1833,14 +1812,14 @@ static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd,
1833 1812
1834early_finish: 1813early_finish:
1835 ata_qc_free(qc); 1814 ata_qc_free(qc);
1836 qc->scsidone(cmd); 1815 cmd->scsi_done(cmd);
1837 DPRINTK("EXIT - early finish (good or error)\n"); 1816 DPRINTK("EXIT - early finish (good or error)\n");
1838 return 0; 1817 return 0;
1839 1818
1840err_did: 1819err_did:
1841 ata_qc_free(qc); 1820 ata_qc_free(qc);
1842 cmd->result = (DID_ERROR << 16); 1821 cmd->result = (DID_ERROR << 16);
1843 qc->scsidone(cmd); 1822 cmd->scsi_done(cmd);
1844err_mem: 1823err_mem:
1845 DPRINTK("EXIT - internal\n"); 1824 DPRINTK("EXIT - internal\n");
1846 return 0; 1825 return 0;
@@ -2001,6 +1980,7 @@ static unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf)
2001 0x89, /* page 0x89, ata info page */ 1980 0x89, /* page 0x89, ata info page */
2002 0xb0, /* page 0xb0, block limits page */ 1981 0xb0, /* page 0xb0, block limits page */
2003 0xb1, /* page 0xb1, block device characteristics page */ 1982 0xb1, /* page 0xb1, block device characteristics page */
1983 0xb2, /* page 0xb2, thin provisioning page */
2004 }; 1984 };
2005 1985
2006 rbuf[3] = sizeof(pages); /* number of supported VPD pages */ 1986 rbuf[3] = sizeof(pages); /* number of supported VPD pages */
@@ -2077,6 +2057,17 @@ static unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf)
2077 ATA_ID_SERNO_LEN); 2057 ATA_ID_SERNO_LEN);
2078 num += ATA_ID_SERNO_LEN; 2058 num += ATA_ID_SERNO_LEN;
2079 2059
2060 if (ata_id_has_wwn(args->id)) {
2061 /* SAT defined lu world wide name */
2062 /* piv=0, assoc=lu, code_set=binary, designator=NAA */
2063 rbuf[num + 0] = 1;
2064 rbuf[num + 1] = 3;
2065 rbuf[num + 3] = ATA_ID_WWN_LEN;
2066 num += 4;
2067 ata_id_string(args->id, (unsigned char *) rbuf + num,
2068 ATA_ID_WWN, ATA_ID_WWN_LEN);
2069 num += ATA_ID_WWN_LEN;
2070 }
2080 rbuf[3] = num - 4; /* page len (assume less than 256 bytes) */ 2071 rbuf[3] = num - 4; /* page len (assume less than 256 bytes) */
2081 return 0; 2072 return 0;
2082} 2073}
@@ -2123,7 +2114,7 @@ static unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf)
2123 2114
2124static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf) 2115static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf)
2125{ 2116{
2126 u32 min_io_sectors; 2117 u16 min_io_sectors;
2127 2118
2128 rbuf[1] = 0xb0; 2119 rbuf[1] = 0xb0;
2129 rbuf[3] = 0x3c; /* required VPD size with unmap support */ 2120 rbuf[3] = 0x3c; /* required VPD size with unmap support */
@@ -2135,10 +2126,7 @@ static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf)
2135 * logical than physical sector size we need to figure out what the 2126 * logical than physical sector size we need to figure out what the
2136 * latter is. 2127 * latter is.
2137 */ 2128 */
2138 if (ata_id_has_large_logical_sectors(args->id)) 2129 min_io_sectors = 1 << ata_id_log2_per_physical_sector(args->id);
2139 min_io_sectors = ata_id_logical_per_physical_sectors(args->id);
2140 else
2141 min_io_sectors = 1;
2142 put_unaligned_be16(min_io_sectors, &rbuf[6]); 2130 put_unaligned_be16(min_io_sectors, &rbuf[6]);
2143 2131
2144 /* 2132 /*
@@ -2151,7 +2139,7 @@ static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf)
2151 * with the unmap bit set. 2139 * with the unmap bit set.
2152 */ 2140 */
2153 if (ata_id_has_trim(args->id)) { 2141 if (ata_id_has_trim(args->id)) {
2154 put_unaligned_be32(65535 * 512 / 8, &rbuf[20]); 2142 put_unaligned_be64(65535 * 512 / 8, &rbuf[36]);
2155 put_unaligned_be32(1, &rbuf[28]); 2143 put_unaligned_be32(1, &rbuf[28]);
2156 } 2144 }
2157 2145
@@ -2172,6 +2160,16 @@ static unsigned int ata_scsiop_inq_b1(struct ata_scsi_args *args, u8 *rbuf)
2172 return 0; 2160 return 0;
2173} 2161}
2174 2162
2163static unsigned int ata_scsiop_inq_b2(struct ata_scsi_args *args, u8 *rbuf)
2164{
2165 /* SCSI Thin Provisioning VPD page: SBC-3 rev 22 or later */
2166 rbuf[1] = 0xb2;
2167 rbuf[3] = 0x4;
2168 rbuf[5] = 1 << 6; /* TPWS */
2169
2170 return 0;
2171}
2172
2175/** 2173/**
2176 * ata_scsiop_noop - Command handler that simply returns success. 2174 * ata_scsiop_noop - Command handler that simply returns success.
2177 * @args: device IDENTIFY data / SCSI command of interest. 2175 * @args: device IDENTIFY data / SCSI command of interest.
@@ -2397,21 +2395,13 @@ static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
2397{ 2395{
2398 struct ata_device *dev = args->dev; 2396 struct ata_device *dev = args->dev;
2399 u64 last_lba = dev->n_sectors - 1; /* LBA of the last block */ 2397 u64 last_lba = dev->n_sectors - 1; /* LBA of the last block */
2400 u8 log_per_phys = 0; 2398 u32 sector_size; /* physical sector size in bytes */
2401 u16 lowest_aligned = 0; 2399 u8 log2_per_phys;
2402 u16 word_106 = dev->id[106]; 2400 u16 lowest_aligned;
2403 u16 word_209 = dev->id[209]; 2401
2404 2402 sector_size = ata_id_logical_sector_size(dev->id);
2405 if ((word_106 & 0xc000) == 0x4000) { 2403 log2_per_phys = ata_id_log2_per_physical_sector(dev->id);
2406 /* Number and offset of logical sectors per physical sector */ 2404 lowest_aligned = ata_id_logical_sector_offset(dev->id, log2_per_phys);
2407 if (word_106 & (1 << 13))
2408 log_per_phys = word_106 & 0xf;
2409 if ((word_209 & 0xc000) == 0x4000) {
2410 u16 first = dev->id[209] & 0x3fff;
2411 if (first > 0)
2412 lowest_aligned = (1 << log_per_phys) - first;
2413 }
2414 }
2415 2405
2416 VPRINTK("ENTER\n"); 2406 VPRINTK("ENTER\n");
2417 2407
@@ -2426,8 +2416,10 @@ static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
2426 rbuf[3] = last_lba; 2416 rbuf[3] = last_lba;
2427 2417
2428 /* sector size */ 2418 /* sector size */
2429 rbuf[6] = ATA_SECT_SIZE >> 8; 2419 rbuf[4] = sector_size >> (8 * 3);
2430 rbuf[7] = ATA_SECT_SIZE & 0xff; 2420 rbuf[5] = sector_size >> (8 * 2);
2421 rbuf[6] = sector_size >> (8 * 1);
2422 rbuf[7] = sector_size;
2431 } else { 2423 } else {
2432 /* sector count, 64-bit */ 2424 /* sector count, 64-bit */
2433 rbuf[0] = last_lba >> (8 * 7); 2425 rbuf[0] = last_lba >> (8 * 7);
@@ -2440,11 +2432,13 @@ static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
2440 rbuf[7] = last_lba; 2432 rbuf[7] = last_lba;
2441 2433
2442 /* sector size */ 2434 /* sector size */
2443 rbuf[10] = ATA_SECT_SIZE >> 8; 2435 rbuf[ 8] = sector_size >> (8 * 3);
2444 rbuf[11] = ATA_SECT_SIZE & 0xff; 2436 rbuf[ 9] = sector_size >> (8 * 2);
2437 rbuf[10] = sector_size >> (8 * 1);
2438 rbuf[11] = sector_size;
2445 2439
2446 rbuf[12] = 0; 2440 rbuf[12] = 0;
2447 rbuf[13] = log_per_phys; 2441 rbuf[13] = log2_per_phys;
2448 rbuf[14] = (lowest_aligned >> 8) & 0x3f; 2442 rbuf[14] = (lowest_aligned >> 8) & 0x3f;
2449 rbuf[15] = lowest_aligned; 2443 rbuf[15] = lowest_aligned;
2450 2444
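The shift-by-(8 * n) expressions above simply serialize sector_size big-endian into the READ CAPACITY(16) payload. A worked example assuming a 4096-byte logical sector (the value is hypothetical, not taken from the patch):

    /* 4096 == 0x00001000, serialized big-endian into bytes 8..11 */
    u32 sector_size = 4096;

    rbuf[ 8] = sector_size >> (8 * 3);      /* 0x00 */
    rbuf[ 9] = sector_size >> (8 * 2);      /* 0x00 */
    rbuf[10] = sector_size >> (8 * 1);      /* 0x10 */
    rbuf[11] = sector_size;                 /* 0x00 */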
@@ -2577,8 +2571,11 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc)
2577 * 2571 *
2578 * If door lock fails, always clear sdev->locked to 2572 * If door lock fails, always clear sdev->locked to
2579 * avoid this infinite loop. 2573 * avoid this infinite loop.
2574 *
2575 * This may happen before SCSI scan is complete. Make
2576 * sure qc->dev->sdev isn't NULL before dereferencing.
2580 */ 2577 */
2581 if (qc->cdb[0] == ALLOW_MEDIUM_REMOVAL) 2578 if (qc->cdb[0] == ALLOW_MEDIUM_REMOVAL && qc->dev->sdev)
2582 qc->dev->sdev->locked = 0; 2579 qc->dev->sdev->locked = 0;
2583 2580
2584 qc->scsicmd->result = SAM_STAT_CHECK_CONDITION; 2581 qc->scsicmd->result = SAM_STAT_CHECK_CONDITION;
@@ -2888,9 +2885,8 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
2888 tf->device = dev->devno ? 2885 tf->device = dev->devno ?
2889 tf->device | ATA_DEV1 : tf->device & ~ATA_DEV1; 2886 tf->device | ATA_DEV1 : tf->device & ~ATA_DEV1;
2890 2887
2891 /* READ/WRITE LONG use a non-standard sect_size */
2892 qc->sect_size = ATA_SECT_SIZE;
2893 switch (tf->command) { 2888 switch (tf->command) {
2889 /* READ/WRITE LONG use a non-standard sect_size */
2894 case ATA_CMD_READ_LONG: 2890 case ATA_CMD_READ_LONG:
2895 case ATA_CMD_READ_LONG_ONCE: 2891 case ATA_CMD_READ_LONG_ONCE:
2896 case ATA_CMD_WRITE_LONG: 2892 case ATA_CMD_WRITE_LONG:
@@ -2898,6 +2894,45 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
2898 if (tf->protocol != ATA_PROT_PIO || tf->nsect != 1) 2894 if (tf->protocol != ATA_PROT_PIO || tf->nsect != 1)
2899 goto invalid_fld; 2895 goto invalid_fld;
2900 qc->sect_size = scsi_bufflen(scmd); 2896 qc->sect_size = scsi_bufflen(scmd);
2897 break;
2898
2899 /* commands using reported Logical Block size (e.g. 512 or 4K) */
2900 case ATA_CMD_CFA_WRITE_NE:
2901 case ATA_CMD_CFA_TRANS_SECT:
2902 case ATA_CMD_CFA_WRITE_MULT_NE:
2903 /* XXX: case ATA_CMD_CFA_WRITE_SECTORS_WITHOUT_ERASE: */
2904 case ATA_CMD_READ:
2905 case ATA_CMD_READ_EXT:
2906 case ATA_CMD_READ_QUEUED:
2907 /* XXX: case ATA_CMD_READ_QUEUED_EXT: */
2908 case ATA_CMD_FPDMA_READ:
2909 case ATA_CMD_READ_MULTI:
2910 case ATA_CMD_READ_MULTI_EXT:
2911 case ATA_CMD_PIO_READ:
2912 case ATA_CMD_PIO_READ_EXT:
2913 case ATA_CMD_READ_STREAM_DMA_EXT:
2914 case ATA_CMD_READ_STREAM_EXT:
2915 case ATA_CMD_VERIFY:
2916 case ATA_CMD_VERIFY_EXT:
2917 case ATA_CMD_WRITE:
2918 case ATA_CMD_WRITE_EXT:
2919 case ATA_CMD_WRITE_FUA_EXT:
2920 case ATA_CMD_WRITE_QUEUED:
2921 case ATA_CMD_WRITE_QUEUED_FUA_EXT:
2922 case ATA_CMD_FPDMA_WRITE:
2923 case ATA_CMD_WRITE_MULTI:
2924 case ATA_CMD_WRITE_MULTI_EXT:
2925 case ATA_CMD_WRITE_MULTI_FUA_EXT:
2926 case ATA_CMD_PIO_WRITE:
2927 case ATA_CMD_PIO_WRITE_EXT:
2928 case ATA_CMD_WRITE_STREAM_DMA_EXT:
2929 case ATA_CMD_WRITE_STREAM_EXT:
2930 qc->sect_size = scmd->device->sector_size;
2931 break;
2932
2933 /* Everything else uses 512 byte "sectors" */
2934 default:
2935 qc->sect_size = ATA_SECT_SIZE;
2901 } 2936 }
2902 2937
2903 /* 2938 /*
@@ -3100,7 +3135,6 @@ static inline void ata_scsi_dump_cdb(struct ata_port *ap,
3100} 3135}
3101 3136
3102static inline int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, 3137static inline int __ata_scsi_queuecmd(struct scsi_cmnd *scmd,
3103 void (*done)(struct scsi_cmnd *),
3104 struct ata_device *dev) 3138 struct ata_device *dev)
3105{ 3139{
3106 u8 scsi_op = scmd->cmnd[0]; 3140 u8 scsi_op = scmd->cmnd[0];
@@ -3134,9 +3168,9 @@ static inline int __ata_scsi_queuecmd(struct scsi_cmnd *scmd,
3134 } 3168 }
3135 3169
3136 if (xlat_func) 3170 if (xlat_func)
3137 rc = ata_scsi_translate(dev, scmd, done, xlat_func); 3171 rc = ata_scsi_translate(dev, scmd, xlat_func);
3138 else 3172 else
3139 ata_scsi_simulate(dev, scmd, done); 3173 ata_scsi_simulate(dev, scmd);
3140 3174
3141 return rc; 3175 return rc;
3142 3176
@@ -3144,14 +3178,14 @@ static inline int __ata_scsi_queuecmd(struct scsi_cmnd *scmd,
3144 DPRINTK("bad CDB len=%u, scsi_op=0x%02x, max=%u\n", 3178 DPRINTK("bad CDB len=%u, scsi_op=0x%02x, max=%u\n",
3145 scmd->cmd_len, scsi_op, dev->cdb_len); 3179 scmd->cmd_len, scsi_op, dev->cdb_len);
3146 scmd->result = DID_ERROR << 16; 3180 scmd->result = DID_ERROR << 16;
3147 done(scmd); 3181 scmd->scsi_done(scmd);
3148 return 0; 3182 return 0;
3149} 3183}
3150 3184
3151/** 3185/**
3152 * ata_scsi_queuecmd - Issue SCSI cdb to libata-managed device 3186 * ata_scsi_queuecmd - Issue SCSI cdb to libata-managed device
3187 * @shost: SCSI host of command to be sent
3153 * @cmd: SCSI command to be sent 3188 * @cmd: SCSI command to be sent
3154 * @done: Completion function, called when command is complete
3155 * 3189 *
3156 * In some cases, this function translates SCSI commands into 3190 * In some cases, this function translates SCSI commands into
3157 * ATA taskfiles, and queues the taskfiles to be sent to 3191 * ATA taskfiles, and queues the taskfiles to be sent to
@@ -3161,37 +3195,36 @@ static inline int __ata_scsi_queuecmd(struct scsi_cmnd *scmd,
3161 * ATA and ATAPI devices appearing as SCSI devices. 3195 * ATA and ATAPI devices appearing as SCSI devices.
3162 * 3196 *
3163 * LOCKING: 3197 * LOCKING:
3164 * Releases scsi-layer-held lock, and obtains host lock. 3198 * ATA host lock
3165 * 3199 *
3166 * RETURNS: 3200 * RETURNS:
3167 * Return value from __ata_scsi_queuecmd() if @cmd can be queued, 3201 * Return value from __ata_scsi_queuecmd() if @cmd can be queued,
3168 * 0 otherwise. 3202 * 0 otherwise.
3169 */ 3203 */
3170int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) 3204int ata_scsi_queuecmd(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
3171{ 3205{
3172 struct ata_port *ap; 3206 struct ata_port *ap;
3173 struct ata_device *dev; 3207 struct ata_device *dev;
3174 struct scsi_device *scsidev = cmd->device; 3208 struct scsi_device *scsidev = cmd->device;
3175 struct Scsi_Host *shost = scsidev->host;
3176 int rc = 0; 3209 int rc = 0;
3210 unsigned long irq_flags;
3177 3211
3178 ap = ata_shost_to_port(shost); 3212 ap = ata_shost_to_port(shost);
3179 3213
3180 spin_unlock(shost->host_lock); 3214 spin_lock_irqsave(ap->lock, irq_flags);
3181 spin_lock(ap->lock);
3182 3215
3183 ata_scsi_dump_cdb(ap, cmd); 3216 ata_scsi_dump_cdb(ap, cmd);
3184 3217
3185 dev = ata_scsi_find_dev(ap, scsidev); 3218 dev = ata_scsi_find_dev(ap, scsidev);
3186 if (likely(dev)) 3219 if (likely(dev))
3187 rc = __ata_scsi_queuecmd(cmd, done, dev); 3220 rc = __ata_scsi_queuecmd(cmd, dev);
3188 else { 3221 else {
3189 cmd->result = (DID_BAD_TARGET << 16); 3222 cmd->result = (DID_BAD_TARGET << 16);
3190 done(cmd); 3223 cmd->scsi_done(cmd);
3191 } 3224 }
3192 3225
3193 spin_unlock(ap->lock); 3226 spin_unlock_irqrestore(ap->lock, irq_flags);
3194 spin_lock(shost->host_lock); 3227
3195 return rc; 3228 return rc;
3196} 3229}
3197 3230
@@ -3199,7 +3232,6 @@ int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
3199 * ata_scsi_simulate - simulate SCSI command on ATA device 3232 * ata_scsi_simulate - simulate SCSI command on ATA device
3200 * @dev: the target device 3233 * @dev: the target device
3201 * @cmd: SCSI command being sent to device. 3234 * @cmd: SCSI command being sent to device.
3202 * @done: SCSI command completion function.
3203 * 3235 *
3204 * Interprets and directly executes a select list of SCSI commands 3236 * Interprets and directly executes a select list of SCSI commands
3205 * that can be handled internally. 3237 * that can be handled internally.
@@ -3208,8 +3240,7 @@ int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
3208 * spin_lock_irqsave(host lock) 3240 * spin_lock_irqsave(host lock)
3209 */ 3241 */
3210 3242
3211void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd, 3243void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd)
3212 void (*done)(struct scsi_cmnd *))
3213{ 3244{
3214 struct ata_scsi_args args; 3245 struct ata_scsi_args args;
3215 const u8 *scsicmd = cmd->cmnd; 3246 const u8 *scsicmd = cmd->cmnd;
@@ -3218,17 +3249,17 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd,
3218 args.dev = dev; 3249 args.dev = dev;
3219 args.id = dev->id; 3250 args.id = dev->id;
3220 args.cmd = cmd; 3251 args.cmd = cmd;
3221 args.done = done; 3252 args.done = cmd->scsi_done;
3222 3253
3223 switch(scsicmd[0]) { 3254 switch(scsicmd[0]) {
3224 /* TODO: worth improving? */ 3255 /* TODO: worth improving? */
3225 case FORMAT_UNIT: 3256 case FORMAT_UNIT:
3226 ata_scsi_invalid_field(cmd, done); 3257 ata_scsi_invalid_field(cmd);
3227 break; 3258 break;
3228 3259
3229 case INQUIRY: 3260 case INQUIRY:
3230 if (scsicmd[1] & 2) /* is CmdDt set? */ 3261 if (scsicmd[1] & 2) /* is CmdDt set? */
3231 ata_scsi_invalid_field(cmd, done); 3262 ata_scsi_invalid_field(cmd);
3232 else if ((scsicmd[1] & 1) == 0) /* is EVPD clear? */ 3263 else if ((scsicmd[1] & 1) == 0) /* is EVPD clear? */
3233 ata_scsi_rbuf_fill(&args, ata_scsiop_inq_std); 3264 ata_scsi_rbuf_fill(&args, ata_scsiop_inq_std);
3234 else switch (scsicmd[2]) { 3265 else switch (scsicmd[2]) {
@@ -3250,8 +3281,11 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd,
3250 case 0xb1: 3281 case 0xb1:
3251 ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b1); 3282 ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b1);
3252 break; 3283 break;
3284 case 0xb2:
3285 ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b2);
3286 break;
3253 default: 3287 default:
3254 ata_scsi_invalid_field(cmd, done); 3288 ata_scsi_invalid_field(cmd);
3255 break; 3289 break;
3256 } 3290 }
3257 break; 3291 break;
@@ -3263,7 +3297,7 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd,
3263 3297
3264 case MODE_SELECT: /* unconditionally return */ 3298 case MODE_SELECT: /* unconditionally return */
3265 case MODE_SELECT_10: /* bad-field-in-cdb */ 3299 case MODE_SELECT_10: /* bad-field-in-cdb */
3266 ata_scsi_invalid_field(cmd, done); 3300 ata_scsi_invalid_field(cmd);
3267 break; 3301 break;
3268 3302
3269 case READ_CAPACITY: 3303 case READ_CAPACITY:
@@ -3274,7 +3308,7 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd,
3274 if ((scsicmd[1] & 0x1f) == SAI_READ_CAPACITY_16) 3308 if ((scsicmd[1] & 0x1f) == SAI_READ_CAPACITY_16)
3275 ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap); 3309 ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap);
3276 else 3310 else
3277 ata_scsi_invalid_field(cmd, done); 3311 ata_scsi_invalid_field(cmd);
3278 break; 3312 break;
3279 3313
3280 case REPORT_LUNS: 3314 case REPORT_LUNS:
@@ -3284,7 +3318,7 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd,
3284 case REQUEST_SENSE: 3318 case REQUEST_SENSE:
3285 ata_scsi_set_sense(cmd, 0, 0, 0); 3319 ata_scsi_set_sense(cmd, 0, 0, 0);
3286 cmd->result = (DRIVER_SENSE << 24); 3320 cmd->result = (DRIVER_SENSE << 24);
3287 done(cmd); 3321 cmd->scsi_done(cmd);
3288 break; 3322 break;
3289 3323
3290 /* if we reach this, then writeback caching is disabled, 3324 /* if we reach this, then writeback caching is disabled,
@@ -3306,14 +3340,14 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd,
3306 if ((tmp8 == 0x4) && (!scsicmd[3]) && (!scsicmd[4])) 3340 if ((tmp8 == 0x4) && (!scsicmd[3]) && (!scsicmd[4]))
3307 ata_scsi_rbuf_fill(&args, ata_scsiop_noop); 3341 ata_scsi_rbuf_fill(&args, ata_scsiop_noop);
3308 else 3342 else
3309 ata_scsi_invalid_field(cmd, done); 3343 ata_scsi_invalid_field(cmd);
3310 break; 3344 break;
3311 3345
3312 /* all other commands */ 3346 /* all other commands */
3313 default: 3347 default:
3314 ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, 0x20, 0x0); 3348 ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, 0x20, 0x0);
3315 /* "Invalid command operation code" */ 3349 /* "Invalid command operation code" */
3316 done(cmd); 3350 cmd->scsi_done(cmd);
3317 break; 3351 break;
3318 } 3352 }
3319} 3353}
@@ -3334,7 +3368,7 @@ int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht)
3334 *(struct ata_port **)&shost->hostdata[0] = ap; 3368 *(struct ata_port **)&shost->hostdata[0] = ap;
3335 ap->scsi_host = shost; 3369 ap->scsi_host = shost;
3336 3370
3337 shost->transportt = &ata_scsi_transport_template; 3371 shost->transportt = ata_scsi_transport_template;
3338 shost->unique_id = ap->print_id; 3372 shost->unique_id = ap->print_id;
3339 shost->max_id = 16; 3373 shost->max_id = 16;
3340 shost->max_lun = 1; 3374 shost->max_lun = 1;
@@ -3393,6 +3427,8 @@ void ata_scsi_scan_host(struct ata_port *ap, int sync)
3393 if (!IS_ERR(sdev)) { 3427 if (!IS_ERR(sdev)) {
3394 dev->sdev = sdev; 3428 dev->sdev = sdev;
3395 scsi_device_put(sdev); 3429 scsi_device_put(sdev);
3430 } else {
3431 dev->sdev = NULL;
3396 } 3432 }
3397 } 3433 }
3398 } 3434 }
@@ -3616,8 +3652,8 @@ void ata_scsi_hotplug(struct work_struct *work)
3616 * RETURNS: 3652 * RETURNS:
3617 * Zero. 3653 * Zero.
3618 */ 3654 */
3619static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel, 3655int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
3620 unsigned int id, unsigned int lun) 3656 unsigned int id, unsigned int lun)
3621{ 3657{
3622 struct ata_port *ap = ata_shost_to_port(shost); 3658 struct ata_port *ap = ata_shost_to_port(shost);
3623 unsigned long flags; 3659 unsigned long flags;
@@ -3735,7 +3771,7 @@ struct ata_port *ata_sas_port_alloc(struct ata_host *host,
3735 return NULL; 3771 return NULL;
3736 3772
3737 ap->port_no = 0; 3773 ap->port_no = 0;
3738 ap->lock = shost->host_lock; 3774 ap->lock = &host->lock;
3739 ap->pio_mask = port_info->pio_mask; 3775 ap->pio_mask = port_info->pio_mask;
3740 ap->mwdma_mask = port_info->mwdma_mask; 3776 ap->mwdma_mask = port_info->mwdma_mask;
3741 ap->udma_mask = port_info->udma_mask; 3777 ap->udma_mask = port_info->udma_mask;
@@ -3761,6 +3797,12 @@ EXPORT_SYMBOL_GPL(ata_sas_port_alloc);
3761 */ 3797 */
3762int ata_sas_port_start(struct ata_port *ap) 3798int ata_sas_port_start(struct ata_port *ap)
3763{ 3799{
3800 /*
 3801 * The port is marked frozen at allocation time, but if we don't
 3802 * have new EH, it will never be thawed, so clear the flag here.
3803 */
3804 if (!ap->ops->error_handler)
3805 ap->pflags &= ~ATA_PFLAG_FROZEN;
3764 return 0; 3806 return 0;
3765} 3807}
3766EXPORT_SYMBOL_GPL(ata_sas_port_start); 3808EXPORT_SYMBOL_GPL(ata_sas_port_start);
@@ -3797,7 +3839,7 @@ int ata_sas_port_init(struct ata_port *ap)
3797 3839
3798 if (!rc) { 3840 if (!rc) {
3799 ap->print_id = ata_print_id++; 3841 ap->print_id = ata_print_id++;
3800 rc = ata_bus_probe(ap); 3842 rc = ata_port_probe(ap);
3801 } 3843 }
3802 3844
3803 return rc; 3845 return rc;
@@ -3838,7 +3880,6 @@ EXPORT_SYMBOL_GPL(ata_sas_slave_configure);
3838/** 3880/**
3839 * ata_sas_queuecmd - Issue SCSI cdb to libata-managed device 3881 * ata_sas_queuecmd - Issue SCSI cdb to libata-managed device
3840 * @cmd: SCSI command to be sent 3882 * @cmd: SCSI command to be sent
3841 * @done: Completion function, called when command is complete
3842 * @ap: ATA port to which the command is being sent 3883 * @ap: ATA port to which the command is being sent
3843 * 3884 *
3844 * RETURNS: 3885 * RETURNS:
@@ -3846,18 +3887,17 @@ EXPORT_SYMBOL_GPL(ata_sas_slave_configure);
3846 * 0 otherwise. 3887 * 0 otherwise.
3847 */ 3888 */
3848 3889
3849int ata_sas_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *), 3890int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap)
3850 struct ata_port *ap)
3851{ 3891{
3852 int rc = 0; 3892 int rc = 0;
3853 3893
3854 ata_scsi_dump_cdb(ap, cmd); 3894 ata_scsi_dump_cdb(ap, cmd);
3855 3895
3856 if (likely(ata_dev_enabled(ap->link.device))) 3896 if (likely(ata_dev_enabled(ap->link.device)))
3857 rc = __ata_scsi_queuecmd(cmd, done, ap->link.device); 3897 rc = __ata_scsi_queuecmd(cmd, ap->link.device);
3858 else { 3898 else {
3859 cmd->result = (DID_BAD_TARGET << 16); 3899 cmd->result = (DID_BAD_TARGET << 16);
3860 done(cmd); 3900 cmd->scsi_done(cmd);
3861 } 3901 }
3862 return rc; 3902 return rc;
3863} 3903}
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index e30c537cce32..b1b926c55a72 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -222,7 +222,7 @@ int ata_sff_busy_sleep(struct ata_port *ap,
222 timeout = ata_deadline(timer_start, tmout_pat); 222 timeout = ata_deadline(timer_start, tmout_pat);
223 while (status != 0xff && (status & ATA_BUSY) && 223 while (status != 0xff && (status & ATA_BUSY) &&
224 time_before(jiffies, timeout)) { 224 time_before(jiffies, timeout)) {
225 msleep(50); 225 ata_msleep(ap, 50);
226 status = ata_sff_busy_wait(ap, ATA_BUSY, 3); 226 status = ata_sff_busy_wait(ap, ATA_BUSY, 3);
227 } 227 }
228 228
@@ -234,7 +234,7 @@ int ata_sff_busy_sleep(struct ata_port *ap,
234 timeout = ata_deadline(timer_start, tmout); 234 timeout = ata_deadline(timer_start, tmout);
235 while (status != 0xff && (status & ATA_BUSY) && 235 while (status != 0xff && (status & ATA_BUSY) &&
236 time_before(jiffies, timeout)) { 236 time_before(jiffies, timeout)) {
237 msleep(50); 237 ata_msleep(ap, 50);
238 status = ap->ops->sff_check_status(ap); 238 status = ap->ops->sff_check_status(ap);
239 } 239 }
240 240
@@ -360,7 +360,7 @@ static void ata_dev_select(struct ata_port *ap, unsigned int device,
360 360
361 if (wait) { 361 if (wait) {
362 if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI) 362 if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
363 msleep(150); 363 ata_msleep(ap, 150);
364 ata_wait_idle(ap); 364 ata_wait_idle(ap);
365 } 365 }
366} 366}
@@ -1302,6 +1302,18 @@ fsm_start:
1302} 1302}
1303EXPORT_SYMBOL_GPL(ata_sff_hsm_move); 1303EXPORT_SYMBOL_GPL(ata_sff_hsm_move);
1304 1304
1305void ata_sff_queue_work(struct work_struct *work)
1306{
1307 queue_work(ata_sff_wq, work);
1308}
1309EXPORT_SYMBOL_GPL(ata_sff_queue_work);
1310
1311void ata_sff_queue_delayed_work(struct delayed_work *dwork, unsigned long delay)
1312{
1313 queue_delayed_work(ata_sff_wq, dwork, delay);
1314}
1315EXPORT_SYMBOL_GPL(ata_sff_queue_delayed_work);
1316
1305void ata_sff_queue_pio_task(struct ata_link *link, unsigned long delay) 1317void ata_sff_queue_pio_task(struct ata_link *link, unsigned long delay)
1306{ 1318{
1307 struct ata_port *ap = link->ap; 1319 struct ata_port *ap = link->ap;
@@ -1311,8 +1323,7 @@ void ata_sff_queue_pio_task(struct ata_link *link, unsigned long delay)
1311 ap->sff_pio_task_link = link; 1323 ap->sff_pio_task_link = link;
1312 1324
1313 /* may fail if ata_sff_flush_pio_task() in progress */ 1325 /* may fail if ata_sff_flush_pio_task() in progress */
1314 queue_delayed_work(ata_sff_wq, &ap->sff_pio_task, 1326 ata_sff_queue_delayed_work(&ap->sff_pio_task, msecs_to_jiffies(delay));
1315 msecs_to_jiffies(delay));
1316} 1327}
1317EXPORT_SYMBOL_GPL(ata_sff_queue_pio_task); 1328EXPORT_SYMBOL_GPL(ata_sff_queue_pio_task);
1318 1329
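The two helpers added above simply wrap the private ata_sff_wq workqueue so that SFF-based drivers can queue their own work items on it rather than reaching into libata internals. As a minimal usage sketch, modelled on the pata_arasan_cf.c code added later in this patch (acdev and its work/dwork members belong to that driver, not to the exported API):

	/* kick off the driver's DMA transfer worker immediately ... */
	ata_sff_queue_work(&acdev->work);

	/* ... or poll again in one jiffy while the device is still busy */
	if (status & (ATA_BUSY | ATA_DRQ))
		ata_sff_queue_delayed_work(&acdev->dwork, 1);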
@@ -1320,7 +1331,7 @@ void ata_sff_flush_pio_task(struct ata_port *ap)
1320{ 1331{
1321 DPRINTK("ENTER\n"); 1332 DPRINTK("ENTER\n");
1322 1333
1323 cancel_rearming_delayed_work(&ap->sff_pio_task); 1334 cancel_delayed_work_sync(&ap->sff_pio_task);
1324 ap->hsm_task_state = HSM_ST_IDLE; 1335 ap->hsm_task_state = HSM_ST_IDLE;
1325 1336
1326 if (ata_msg_ctl(ap)) 1337 if (ata_msg_ctl(ap))
@@ -1336,7 +1347,7 @@ static void ata_sff_pio_task(struct work_struct *work)
1336 u8 status; 1347 u8 status;
1337 int poll_next; 1348 int poll_next;
1338 1349
1339 BUG_ON(ap->sff_pio_task_link == NULL); 1350 BUG_ON(ap->sff_pio_task_link == NULL);
1340 /* qc can be NULL if timeout occurred */ 1351 /* qc can be NULL if timeout occurred */
1341 qc = ata_qc_from_tag(ap, link->active_tag); 1352 qc = ata_qc_from_tag(ap, link->active_tag);
1342 if (!qc) { 1353 if (!qc) {
@@ -1356,7 +1367,7 @@ fsm_start:
1356 */ 1367 */
1357 status = ata_sff_busy_wait(ap, ATA_BUSY, 5); 1368 status = ata_sff_busy_wait(ap, ATA_BUSY, 5);
1358 if (status & ATA_BUSY) { 1369 if (status & ATA_BUSY) {
1359 msleep(2); 1370 ata_msleep(ap, 2);
1360 status = ata_sff_busy_wait(ap, ATA_BUSY, 10); 1371 status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
1361 if (status & ATA_BUSY) { 1372 if (status & ATA_BUSY) {
1362 ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE); 1373 ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE);
@@ -1532,11 +1543,10 @@ static unsigned int __ata_sff_port_intr(struct ata_port *ap,
1532 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) 1543 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
1533 return ata_sff_idle_irq(ap); 1544 return ata_sff_idle_irq(ap);
1534 break; 1545 break;
1535 case HSM_ST: 1546 case HSM_ST_IDLE:
1536 case HSM_ST_LAST:
1537 break;
1538 default:
1539 return ata_sff_idle_irq(ap); 1547 return ata_sff_idle_irq(ap);
1548 default:
1549 break;
1540 } 1550 }
1541 1551
1542 /* check main status, clearing INTRQ if needed */ 1552 /* check main status, clearing INTRQ if needed */
@@ -1937,7 +1947,7 @@ int ata_sff_wait_after_reset(struct ata_link *link, unsigned int devmask,
1937 unsigned int dev1 = devmask & (1 << 1); 1947 unsigned int dev1 = devmask & (1 << 1);
1938 int rc, ret = 0; 1948 int rc, ret = 0;
1939 1949
1940 msleep(ATA_WAIT_AFTER_RESET); 1950 ata_msleep(ap, ATA_WAIT_AFTER_RESET);
1941 1951
1942 /* always check readiness of the master device */ 1952 /* always check readiness of the master device */
1943 rc = ata_sff_wait_ready(link, deadline); 1953 rc = ata_sff_wait_ready(link, deadline);
@@ -1966,7 +1976,7 @@ int ata_sff_wait_after_reset(struct ata_link *link, unsigned int devmask,
1966 lbal = ioread8(ioaddr->lbal_addr); 1976 lbal = ioread8(ioaddr->lbal_addr);
1967 if ((nsect == 1) && (lbal == 1)) 1977 if ((nsect == 1) && (lbal == 1))
1968 break; 1978 break;
1969 msleep(50); /* give drive a breather */ 1979 ata_msleep(ap, 50); /* give drive a breather */
1970 } 1980 }
1971 1981
1972 rc = ata_sff_wait_ready(link, deadline); 1982 rc = ata_sff_wait_ready(link, deadline);
@@ -2437,13 +2447,18 @@ int ata_pci_sff_activate_host(struct ata_host *host,
2437 return -ENOMEM; 2447 return -ENOMEM;
2438 2448
2439 if (!legacy_mode && pdev->irq) { 2449 if (!legacy_mode && pdev->irq) {
2450 int i;
2451
2440 rc = devm_request_irq(dev, pdev->irq, irq_handler, 2452 rc = devm_request_irq(dev, pdev->irq, irq_handler,
2441 IRQF_SHARED, drv_name, host); 2453 IRQF_SHARED, drv_name, host);
2442 if (rc) 2454 if (rc)
2443 goto out; 2455 goto out;
2444 2456
2445 ata_port_desc(host->ports[0], "irq %d", pdev->irq); 2457 for (i = 0; i < 2; i++) {
2446 ata_port_desc(host->ports[1], "irq %d", pdev->irq); 2458 if (ata_port_is_dummy(host->ports[i]))
2459 continue;
2460 ata_port_desc(host->ports[i], "irq %d", pdev->irq);
2461 }
2447 } else if (legacy_mode) { 2462 } else if (legacy_mode) {
2448 if (!ata_port_is_dummy(host->ports[0])) { 2463 if (!ata_port_is_dummy(host->ports[0])) {
2449 rc = devm_request_irq(dev, ATA_PRIMARY_IRQ(pdev), 2464 rc = devm_request_irq(dev, ATA_PRIMARY_IRQ(pdev),
@@ -2829,7 +2844,7 @@ unsigned int ata_bmdma_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
2829 bmdma_stopped = true; 2844 bmdma_stopped = true;
2830 2845
2831 if (unlikely(host_stat & ATA_DMA_ERR)) { 2846 if (unlikely(host_stat & ATA_DMA_ERR)) {
2832 /* error when transfering data to/from memory */ 2847 /* error when transferring data to/from memory */
2833 qc->err_mask |= AC_ERR_HOST_BUS; 2848 qc->err_mask |= AC_ERR_HOST_BUS;
2834 ap->hsm_task_state = HSM_ST_ERR; 2849 ap->hsm_task_state = HSM_ST_ERR;
2835 } 2850 }
@@ -3022,7 +3037,7 @@ void ata_bmdma_start(struct ata_queued_cmd *qc)
3022 * Or maybe I'm just being paranoid. 3037 * Or maybe I'm just being paranoid.
3023 * 3038 *
3024 * FIXME: The posting of this write means I/O starts are 3039 * FIXME: The posting of this write means I/O starts are
3025 * unneccessarily delayed for MMIO 3040 * unnecessarily delayed for MMIO
3026 */ 3041 */
3027} 3042}
3028EXPORT_SYMBOL_GPL(ata_bmdma_start); 3043EXPORT_SYMBOL_GPL(ata_bmdma_start);
@@ -3335,14 +3350,14 @@ void ata_sff_port_init(struct ata_port *ap)
3335 3350
3336int __init ata_sff_init(void) 3351int __init ata_sff_init(void)
3337{ 3352{
3338 ata_sff_wq = alloc_workqueue("ata_sff", WQ_RESCUER, WQ_MAX_ACTIVE); 3353 ata_sff_wq = alloc_workqueue("ata_sff", WQ_MEM_RECLAIM, WQ_MAX_ACTIVE);
3339 if (!ata_sff_wq) 3354 if (!ata_sff_wq)
3340 return -ENOMEM; 3355 return -ENOMEM;
3341 3356
3342 return 0; 3357 return 0;
3343} 3358}
3344 3359
3345void __exit ata_sff_exit(void) 3360void ata_sff_exit(void)
3346{ 3361{
3347 destroy_workqueue(ata_sff_wq); 3362 destroy_workqueue(ata_sff_wq);
3348} 3363}
diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c
new file mode 100644
index 000000000000..ce9dc6207f37
--- /dev/null
+++ b/drivers/ata/libata-transport.c
@@ -0,0 +1,774 @@
1/*
2 * Copyright 2008 ioogle, Inc. All rights reserved.
3 * Released under GPL v2.
4 *
5 * Libata transport class.
6 *
7 * The ATA transport class contains common code to deal with ATA HBAs,
8 * an approximated representation of ATA topologies in the driver model,
9 * and various sysfs attributes to expose these topologies and management
10 * interfaces to user-space.
11 *
12 * There are 3 objects defined in this class:
13 * - ata_port
14 * - ata_link
15 * - ata_device
16 * Each port has a link object. Each link can have up to two devices for PATA
17 * and generally one for SATA.
18 * If there is SATA port multiplier [PMP], 15 additional ata_link object are
19 * created.
20 *
21 * These objects are created when the ata host is initialized and when a PMP is
22 * found. They are removed only when the HBA is removed and are cleaned up
23 * before the error handler runs.
24 */
25
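Purely as an illustration of the comment above (names assume a host port whose print_id is 1, carrying one host link and a single device; actual paths depend on how the transport classes are registered), the resulting objects would show up in sysfs roughly as:

	/sys/class/ata_port/ata1
	/sys/class/ata_link/link1
	/sys/class/ata_device/dev1.0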
26
27#include <linux/kernel.h>
28#include <linux/blkdev.h>
29#include <linux/spinlock.h>
30#include <linux/slab.h>
31#include <scsi/scsi_transport.h>
32#include <linux/libata.h>
33#include <linux/hdreg.h>
34#include <linux/uaccess.h>
35
36#include "libata.h"
37#include "libata-transport.h"
38
39#define ATA_PORT_ATTRS 2
40#define ATA_LINK_ATTRS 3
41#define ATA_DEV_ATTRS 9
42
43struct scsi_transport_template;
44struct scsi_transport_template *ata_scsi_transport_template;
45
46struct ata_internal {
47 struct scsi_transport_template t;
48
49 struct device_attribute private_port_attrs[ATA_PORT_ATTRS];
50 struct device_attribute private_link_attrs[ATA_LINK_ATTRS];
51 struct device_attribute private_dev_attrs[ATA_DEV_ATTRS];
52
53 struct transport_container link_attr_cont;
54 struct transport_container dev_attr_cont;
55
56 /*
57 * The null terminated arrays of attribute pointers
58 * needed by scsi_sysfs.c
59 */
60 struct device_attribute *link_attrs[ATA_LINK_ATTRS + 1];
61 struct device_attribute *port_attrs[ATA_PORT_ATTRS + 1];
62 struct device_attribute *dev_attrs[ATA_DEV_ATTRS + 1];
63};
64#define to_ata_internal(tmpl) container_of(tmpl, struct ata_internal, t)
65
66
67#define tdev_to_device(d) \
68 container_of((d), struct ata_device, tdev)
69#define transport_class_to_dev(dev) \
70 tdev_to_device((dev)->parent)
71
72#define tdev_to_link(d) \
73 container_of((d), struct ata_link, tdev)
74#define transport_class_to_link(dev) \
75 tdev_to_link((dev)->parent)
76
77#define tdev_to_port(d) \
78 container_of((d), struct ata_port, tdev)
79#define transport_class_to_port(dev) \
80 tdev_to_port((dev)->parent)
81
82
83/* Device objects are always created with link objects */
84static int ata_tdev_add(struct ata_device *dev);
85static void ata_tdev_delete(struct ata_device *dev);
86
87
88/*
89 * Hack to allow attributes of the same name in different objects.
90 */
91#define ATA_DEVICE_ATTR(_prefix,_name,_mode,_show,_store) \
92 struct device_attribute device_attr_##_prefix##_##_name = \
93 __ATTR(_name,_mode,_show,_store)
94
95#define ata_bitfield_name_match(title, table) \
96static ssize_t \
97get_ata_##title##_names(u32 table_key, char *buf) \
98{ \
99 char *prefix = ""; \
100 ssize_t len = 0; \
101 int i; \
102 \
103 for (i = 0; i < ARRAY_SIZE(table); i++) { \
104 if (table[i].value & table_key) { \
105 len += sprintf(buf + len, "%s%s", \
106 prefix, table[i].name); \
107 prefix = ", "; \
108 } \
109 } \
110 len += sprintf(buf + len, "\n"); \
111 return len; \
112}
113
114#define ata_bitfield_name_search(title, table) \
115static ssize_t \
116get_ata_##title##_names(u32 table_key, char *buf) \
117{ \
118 ssize_t len = 0; \
119 int i; \
120 \
121 for (i = 0; i < ARRAY_SIZE(table); i++) { \
122 if (table[i].value == table_key) { \
123 len += sprintf(buf + len, "%s", \
124 table[i].name); \
125 break; \
126 } \
127 } \
128 len += sprintf(buf + len, "\n"); \
129 return len; \
130}
131
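As a concrete illustration (derived mechanically from the macro above, not additional code in the patch), the invocation ata_bitfield_name_search(class, ata_class_names) just below expands to a lookup helper along these lines:

	static ssize_t
	get_ata_class_names(u32 table_key, char *buf)
	{
		ssize_t len = 0;
		int i;

		for (i = 0; i < ARRAY_SIZE(ata_class_names); i++) {
			if (ata_class_names[i].value == table_key) {
				len += sprintf(buf + len, "%s",
					       ata_class_names[i].name);
				break;
			}
		}
		len += sprintf(buf + len, "\n");
		return len;
	}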
132static struct {
133 u32 value;
134 char *name;
135} ata_class_names[] = {
136 { ATA_DEV_UNKNOWN, "unknown" },
137 { ATA_DEV_ATA, "ata" },
138 { ATA_DEV_ATA_UNSUP, "ata" },
139 { ATA_DEV_ATAPI, "atapi" },
140 { ATA_DEV_ATAPI_UNSUP, "atapi" },
141 { ATA_DEV_PMP, "pmp" },
142 { ATA_DEV_PMP_UNSUP, "pmp" },
143 { ATA_DEV_SEMB, "semb" },
144 { ATA_DEV_SEMB_UNSUP, "semb" },
145 { ATA_DEV_NONE, "none" }
146};
147ata_bitfield_name_search(class, ata_class_names)
148
149
150static struct {
151 u32 value;
152 char *name;
153} ata_err_names[] = {
154 { AC_ERR_DEV, "DeviceError" },
155 { AC_ERR_HSM, "HostStateMachineError" },
156 { AC_ERR_TIMEOUT, "Timeout" },
157 { AC_ERR_MEDIA, "MediaError" },
158 { AC_ERR_ATA_BUS, "BusError" },
159 { AC_ERR_HOST_BUS, "HostBusError" },
160 { AC_ERR_SYSTEM, "SystemError" },
161 { AC_ERR_INVALID, "InvalidArg" },
162 { AC_ERR_OTHER, "Unknown" },
163 { AC_ERR_NODEV_HINT, "NoDeviceHint" },
164 { AC_ERR_NCQ, "NCQError" }
165};
166ata_bitfield_name_match(err, ata_err_names)
167
168static struct {
169 u32 value;
170 char *name;
171} ata_xfer_names[] = {
172 { XFER_UDMA_7, "XFER_UDMA_7" },
173 { XFER_UDMA_6, "XFER_UDMA_6" },
174 { XFER_UDMA_5, "XFER_UDMA_5" },
175 { XFER_UDMA_4, "XFER_UDMA_4" },
176 { XFER_UDMA_3, "XFER_UDMA_3" },
177 { XFER_UDMA_2, "XFER_UDMA_2" },
178 { XFER_UDMA_1, "XFER_UDMA_1" },
179 { XFER_UDMA_0, "XFER_UDMA_0" },
180 { XFER_MW_DMA_4, "XFER_MW_DMA_4" },
181 { XFER_MW_DMA_3, "XFER_MW_DMA_3" },
182 { XFER_MW_DMA_2, "XFER_MW_DMA_2" },
183 { XFER_MW_DMA_1, "XFER_MW_DMA_1" },
184 { XFER_MW_DMA_0, "XFER_MW_DMA_0" },
185 { XFER_SW_DMA_2, "XFER_SW_DMA_2" },
186 { XFER_SW_DMA_1, "XFER_SW_DMA_1" },
187 { XFER_SW_DMA_0, "XFER_SW_DMA_0" },
188 { XFER_PIO_6, "XFER_PIO_6" },
189 { XFER_PIO_5, "XFER_PIO_5" },
190 { XFER_PIO_4, "XFER_PIO_4" },
191 { XFER_PIO_3, "XFER_PIO_3" },
192 { XFER_PIO_2, "XFER_PIO_2" },
193 { XFER_PIO_1, "XFER_PIO_1" },
194 { XFER_PIO_0, "XFER_PIO_0" },
195 { XFER_PIO_SLOW, "XFER_PIO_SLOW" }
196};
197ata_bitfield_name_match(xfer, ata_xfer_names)
198
199/*
200 * ATA Port attributes
201 */
202#define ata_port_show_simple(field, name, format_string, cast) \
203static ssize_t \
204show_ata_port_##name(struct device *dev, \
205 struct device_attribute *attr, char *buf) \
206{ \
207 struct ata_port *ap = transport_class_to_port(dev); \
208 \
209 return snprintf(buf, 20, format_string, cast ap->field); \
210}
211
212#define ata_port_simple_attr(field, name, format_string, type) \
213 ata_port_show_simple(field, name, format_string, (type)) \
214static DEVICE_ATTR(name, S_IRUGO, show_ata_port_##name, NULL)
215
216ata_port_simple_attr(nr_pmp_links, nr_pmp_links, "%d\n", int);
217ata_port_simple_attr(stats.idle_irq, idle_irq, "%ld\n", unsigned long);
218
219static DECLARE_TRANSPORT_CLASS(ata_port_class,
220 "ata_port", NULL, NULL, NULL);
221
222static void ata_tport_release(struct device *dev)
223{
224 put_device(dev->parent);
225}
226
227/**
228 * ata_is_port -- check if a struct device represents an ATA port
229 * @dev: device to check
230 *
231 * Returns:
232 * %1 if the device represents an ATA Port, %0 else
233 */
234int ata_is_port(const struct device *dev)
235{
236 return dev->release == ata_tport_release;
237}
238
239static int ata_tport_match(struct attribute_container *cont,
240 struct device *dev)
241{
242 if (!ata_is_port(dev))
243 return 0;
244 return &ata_scsi_transport_template->host_attrs.ac == cont;
245}
246
247/**
248 * ata_tport_delete -- remove ATA PORT
249 * @ap: ATA PORT to remove
250 *
251 * Removes the specified ATA PORT. The associated link is removed as well.
252 */
253void ata_tport_delete(struct ata_port *ap)
254{
255 struct device *dev = &ap->tdev;
256
257 ata_tlink_delete(&ap->link);
258
259 transport_remove_device(dev);
260 device_del(dev);
261 transport_destroy_device(dev);
262 put_device(dev);
263}
264
265/** ata_tport_add - initialize a transport ATA port structure
266 *
267 * @parent: parent device
268 * @ap: existing ata_port structure
269 *
270 * Initialize an ATA port structure for sysfs. It will be added to the device
271 * tree below the device specified by @parent which could be a PCI device.
272 *
273 * Returns %0 on success
274 */
275int ata_tport_add(struct device *parent,
276 struct ata_port *ap)
277{
278 int error;
279 struct device *dev = &ap->tdev;
280
281 device_initialize(dev);
282
283 dev->parent = get_device(parent);
284 dev->release = ata_tport_release;
285 dev_set_name(dev, "ata%d", ap->print_id);
286 transport_setup_device(dev);
287 error = device_add(dev);
288 if (error) {
289 goto tport_err;
290 }
291
292 transport_add_device(dev);
293 transport_configure_device(dev);
294
295 error = ata_tlink_add(&ap->link);
296 if (error) {
297 goto tport_link_err;
298 }
299 return 0;
300
301 tport_link_err:
302 transport_remove_device(dev);
303 device_del(dev);
304
305 tport_err:
306 transport_destroy_device(dev);
307 put_device(dev);
308 return error;
309}
310
311
312/*
313 * ATA link attributes
314 */
315
316
317#define ata_link_show_linkspeed(field) \
318static ssize_t \
319show_ata_link_##field(struct device *dev, \
320 struct device_attribute *attr, char *buf) \
321{ \
322 struct ata_link *link = transport_class_to_link(dev); \
323 \
324 return sprintf(buf,"%s\n", sata_spd_string(fls(link->field))); \
325}
326
327#define ata_link_linkspeed_attr(field) \
328 ata_link_show_linkspeed(field) \
329static DEVICE_ATTR(field, S_IRUGO, show_ata_link_##field, NULL)
330
331ata_link_linkspeed_attr(hw_sata_spd_limit);
332ata_link_linkspeed_attr(sata_spd_limit);
333ata_link_linkspeed_attr(sata_spd);
334
335
336static DECLARE_TRANSPORT_CLASS(ata_link_class,
337 "ata_link", NULL, NULL, NULL);
338
339static void ata_tlink_release(struct device *dev)
340{
341 put_device(dev->parent);
342}
343
344/**
345 * ata_is_link -- check if a struct device represents an ATA link
346 * @dev: device to check
347 *
348 * Returns:
349 * %1 if the device represents an ATA link, %0 else
350 */
351int ata_is_link(const struct device *dev)
352{
353 return dev->release == ata_tlink_release;
354}
355
356static int ata_tlink_match(struct attribute_container *cont,
357 struct device *dev)
358{
359 struct ata_internal* i = to_ata_internal(ata_scsi_transport_template);
360 if (!ata_is_link(dev))
361 return 0;
362 return &i->link_attr_cont.ac == cont;
363}
364
365/**
366 * ata_tlink_delete -- remove ATA LINK
367 * @link: ATA LINK to remove
368 *
369 * Removes the specified ATA LINK. Associated ATA device(s) are removed as well.
370 */
371void ata_tlink_delete(struct ata_link *link)
372{
373 struct device *dev = &link->tdev;
374 struct ata_device *ata_dev;
375
376 ata_for_each_dev(ata_dev, link, ALL) {
377 ata_tdev_delete(ata_dev);
378 }
379
380 transport_remove_device(dev);
381 device_del(dev);
382 transport_destroy_device(dev);
383 put_device(dev);
384}
385
386/**
387 * ata_tlink_add -- initialize a transport ATA link structure
388 * @link: allocated ata_link structure.
389 *
390 * Initialize an ATA LINK structure for sysfs. It will be added in the
391 * device tree below the ATA PORT it belongs to.
392 *
393 * Returns %0 on success
394 */
395int ata_tlink_add(struct ata_link *link)
396{
397 struct device *dev = &link->tdev;
398 struct ata_port *ap = link->ap;
399 struct ata_device *ata_dev;
400 int error;
401
402 device_initialize(dev);
403 dev->parent = get_device(&ap->tdev);
404 dev->release = ata_tlink_release;
405 if (ata_is_host_link(link))
406 dev_set_name(dev, "link%d", ap->print_id);
407 else
408 dev_set_name(dev, "link%d.%d", ap->print_id, link->pmp);
409
410 transport_setup_device(dev);
411
412 error = device_add(dev);
413 if (error) {
414 goto tlink_err;
415 }
416
417 transport_add_device(dev);
418 transport_configure_device(dev);
419
420 ata_for_each_dev(ata_dev, link, ALL) {
421 error = ata_tdev_add(ata_dev);
422 if (error) {
423 goto tlink_dev_err;
424 }
425 }
426 return 0;
427 tlink_dev_err:
428 while (--ata_dev >= link->device) {
429 ata_tdev_delete(ata_dev);
430 }
431 transport_remove_device(dev);
432 device_del(dev);
433 tlink_err:
434 transport_destroy_device(dev);
435 put_device(dev);
436 return error;
437}
438
439/*
440 * ATA device attributes
441 */
442
443#define ata_dev_show_class(title, field) \
444static ssize_t \
445show_ata_dev_##field(struct device *dev, \
446 struct device_attribute *attr, char *buf) \
447{ \
448 struct ata_device *ata_dev = transport_class_to_dev(dev); \
449 \
450 return get_ata_##title##_names(ata_dev->field, buf); \
451}
452
453#define ata_dev_attr(title, field) \
454 ata_dev_show_class(title, field) \
455static DEVICE_ATTR(field, S_IRUGO, show_ata_dev_##field, NULL)
456
457ata_dev_attr(class, class);
458ata_dev_attr(xfer, pio_mode);
459ata_dev_attr(xfer, dma_mode);
460ata_dev_attr(xfer, xfer_mode);
461
462
463#define ata_dev_show_simple(field, format_string, cast) \
464static ssize_t \
465show_ata_dev_##field(struct device *dev, \
466 struct device_attribute *attr, char *buf) \
467{ \
468 struct ata_device *ata_dev = transport_class_to_dev(dev); \
469 \
470 return snprintf(buf, 20, format_string, cast ata_dev->field); \
471}
472
473#define ata_dev_simple_attr(field, format_string, type) \
474 ata_dev_show_simple(field, format_string, (type)) \
475static DEVICE_ATTR(field, S_IRUGO, \
476 show_ata_dev_##field, NULL)
477
478ata_dev_simple_attr(spdn_cnt, "%d\n", int);
479
480struct ata_show_ering_arg {
481 char* buf;
482 int written;
483};
484
485static int ata_show_ering(struct ata_ering_entry *ent, void *void_arg)
486{
487 struct ata_show_ering_arg* arg = void_arg;
488 struct timespec time;
489
490 jiffies_to_timespec(ent->timestamp,&time);
491 arg->written += sprintf(arg->buf + arg->written,
492 "[%5lu.%06lu]",
493 time.tv_sec, time.tv_nsec);
494 arg->written += get_ata_err_names(ent->err_mask,
495 arg->buf + arg->written);
496 return 0;
497}
498
499static ssize_t
500show_ata_dev_ering(struct device *dev,
501 struct device_attribute *attr, char *buf)
502{
503 struct ata_device *ata_dev = transport_class_to_dev(dev);
504 struct ata_show_ering_arg arg = { buf, 0 };
505
506 ata_ering_map(&ata_dev->ering, ata_show_ering, &arg);
507 return arg.written;
508}
509
510
511static DEVICE_ATTR(ering, S_IRUGO, show_ata_dev_ering, NULL);
512
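For illustration only (the timestamp and error mask here are made up), reading the resulting "ering" attribute produces one bracketed jiffies-derived timestamp per ring entry followed by the comma separated names from ata_err_names, e.g. a line shaped like:

	[  100.123456]MediaError, Timeout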
513static ssize_t
514show_ata_dev_id(struct device *dev,
515 struct device_attribute *attr, char *buf)
516{
517 struct ata_device *ata_dev = transport_class_to_dev(dev);
518 int written = 0, i = 0;
519
520 if (ata_dev->class == ATA_DEV_PMP)
521 return 0;
522 for(i=0;i<ATA_ID_WORDS;i++) {
523 written += snprintf(buf+written, 20, "%04x%c",
524 ata_dev->id[i],
525 ((i+1) & 7) ? ' ' : '\n');
526 }
527 return written;
528}
529
530static DEVICE_ATTR(id, S_IRUGO, show_ata_dev_id, NULL);
531
532static ssize_t
533show_ata_dev_gscr(struct device *dev,
534 struct device_attribute *attr, char *buf)
535{
536 struct ata_device *ata_dev = transport_class_to_dev(dev);
537 int written = 0, i = 0;
538
539 if (ata_dev->class != ATA_DEV_PMP)
540 return 0;
541 for(i=0;i<SATA_PMP_GSCR_DWORDS;i++) {
542 written += snprintf(buf+written, 20, "%08x%c",
543 ata_dev->gscr[i],
544 ((i+1) & 3) ? ' ' : '\n');
545 }
546 if (SATA_PMP_GSCR_DWORDS & 3)
547 buf[written-1] = '\n';
548 return written;
549}
550
551static DEVICE_ATTR(gscr, S_IRUGO, show_ata_dev_gscr, NULL);
552
553static DECLARE_TRANSPORT_CLASS(ata_dev_class,
554 "ata_device", NULL, NULL, NULL);
555
556static void ata_tdev_release(struct device *dev)
557{
558 put_device(dev->parent);
559}
560
561/**
562 * ata_is_ata_dev -- check if a struct device represents an ATA device
563 * @dev: device to check
564 *
565 * Returns:
566 * %1 if the device represents an ATA device, %0 else
567 */
568int ata_is_ata_dev(const struct device *dev)
569{
570 return dev->release == ata_tdev_release;
571}
572
573static int ata_tdev_match(struct attribute_container *cont,
574 struct device *dev)
575{
576 struct ata_internal* i = to_ata_internal(ata_scsi_transport_template);
577 if (!ata_is_ata_dev(dev))
578 return 0;
579 return &i->dev_attr_cont.ac == cont;
580}
581
582/**
583 * ata_tdev_free -- free an ATA device
584 * @dev: ATA device to free
585 *
586 * Frees the specified ATA device.
587 *
588 * Note:
589 * This function must only be called on a device that has not
590 * successfully been added using ata_tdev_add().
591 */
592static void ata_tdev_free(struct ata_device *dev)
593{
594 transport_destroy_device(&dev->tdev);
595 put_device(&dev->tdev);
596}
597
598/**
599 * ata_tdev_delete -- remove ATA device
600 * @ata_dev: ATA device to remove
601 *
602 * Removes the specified ATA device.
603 */
604static void ata_tdev_delete(struct ata_device *ata_dev)
605{
606 struct device *dev = &ata_dev->tdev;
607
608 transport_remove_device(dev);
609 device_del(dev);
610 ata_tdev_free(ata_dev);
611}
612
613
614/**
615 * ata_tdev_add -- initialize a transport ATA device structure.
616 * @ata_dev: ata_dev structure.
617 *
618 * Initialize an ATA device structure for sysfs. It will be added in the
619 * device tree below the ATA LINK device it belongs to.
620 *
621 * Returns %0 on success
622 */
623static int ata_tdev_add(struct ata_device *ata_dev)
624{
625 struct device *dev = &ata_dev->tdev;
626 struct ata_link *link = ata_dev->link;
627 struct ata_port *ap = link->ap;
628 int error;
629
630 device_initialize(dev);
631 dev->parent = get_device(&link->tdev);
632 dev->release = ata_tdev_release;
633 if (ata_is_host_link(link))
634 dev_set_name(dev, "dev%d.%d", ap->print_id,ata_dev->devno);
635 else
636 dev_set_name(dev, "dev%d.%d.0", ap->print_id, link->pmp);
637
638 transport_setup_device(dev);
639 error = device_add(dev);
640 if (error) {
641 ata_tdev_free(ata_dev);
642 return error;
643 }
644
645 transport_add_device(dev);
646 transport_configure_device(dev);
647 return 0;
648}
649
650
651/*
652 * Setup / Teardown code
653 */
654
655#define SETUP_TEMPLATE(attrb, field, perm, test) \
656 i->private_##attrb[count] = dev_attr_##field; \
657 i->private_##attrb[count].attr.mode = perm; \
658 i->attrb[count] = &i->private_##attrb[count]; \
659 if (test) \
660 count++
661
662#define SETUP_LINK_ATTRIBUTE(field) \
663 SETUP_TEMPLATE(link_attrs, field, S_IRUGO, 1)
664
665#define SETUP_PORT_ATTRIBUTE(field) \
666 SETUP_TEMPLATE(port_attrs, field, S_IRUGO, 1)
667
668#define SETUP_DEV_ATTRIBUTE(field) \
669 SETUP_TEMPLATE(dev_attrs, field, S_IRUGO, 1)
670
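For reference, a single invocation such as SETUP_PORT_ATTRIBUTE(nr_pmp_links) inside ata_attach_transport() below expands, per the SETUP_TEMPLATE macro above, to roughly:

	i->private_port_attrs[count] = dev_attr_nr_pmp_links;
	i->private_port_attrs[count].attr.mode = S_IRUGO;
	i->port_attrs[count] = &i->private_port_attrs[count];
	if (1)
		count++;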
671/**
672 * ata_attach_transport -- instantiate ATA transport template
673 */
674struct scsi_transport_template *ata_attach_transport(void)
675{
676 struct ata_internal *i;
677 int count;
678
679 i = kzalloc(sizeof(struct ata_internal), GFP_KERNEL);
680 if (!i)
681 return NULL;
682
683 i->t.eh_strategy_handler = ata_scsi_error;
684 i->t.eh_timed_out = ata_scsi_timed_out;
685 i->t.user_scan = ata_scsi_user_scan;
686
687 i->t.host_attrs.ac.attrs = &i->port_attrs[0];
688 i->t.host_attrs.ac.class = &ata_port_class.class;
689 i->t.host_attrs.ac.match = ata_tport_match;
690 transport_container_register(&i->t.host_attrs);
691
692 i->link_attr_cont.ac.class = &ata_link_class.class;
693 i->link_attr_cont.ac.attrs = &i->link_attrs[0];
694 i->link_attr_cont.ac.match = ata_tlink_match;
695 transport_container_register(&i->link_attr_cont);
696
697 i->dev_attr_cont.ac.class = &ata_dev_class.class;
698 i->dev_attr_cont.ac.attrs = &i->dev_attrs[0];
699 i->dev_attr_cont.ac.match = ata_tdev_match;
700 transport_container_register(&i->dev_attr_cont);
701
702 count = 0;
703 SETUP_PORT_ATTRIBUTE(nr_pmp_links);
704 SETUP_PORT_ATTRIBUTE(idle_irq);
705 BUG_ON(count > ATA_PORT_ATTRS);
706 i->port_attrs[count] = NULL;
707
708 count = 0;
709 SETUP_LINK_ATTRIBUTE(hw_sata_spd_limit);
710 SETUP_LINK_ATTRIBUTE(sata_spd_limit);
711 SETUP_LINK_ATTRIBUTE(sata_spd);
712 BUG_ON(count > ATA_LINK_ATTRS);
713 i->link_attrs[count] = NULL;
714
715 count = 0;
716 SETUP_DEV_ATTRIBUTE(class);
717 SETUP_DEV_ATTRIBUTE(pio_mode);
718 SETUP_DEV_ATTRIBUTE(dma_mode);
719 SETUP_DEV_ATTRIBUTE(xfer_mode);
720 SETUP_DEV_ATTRIBUTE(spdn_cnt);
721 SETUP_DEV_ATTRIBUTE(ering);
722 SETUP_DEV_ATTRIBUTE(id);
723 SETUP_DEV_ATTRIBUTE(gscr);
724 BUG_ON(count > ATA_DEV_ATTRS);
725 i->dev_attrs[count] = NULL;
726
727 return &i->t;
728}
729
730/**
731 * ata_release_transport -- release ATA transport template instance
732 * @t: transport template instance
733 */
734void ata_release_transport(struct scsi_transport_template *t)
735{
736 struct ata_internal *i = to_ata_internal(t);
737
738 transport_container_unregister(&i->t.host_attrs);
739 transport_container_unregister(&i->link_attr_cont);
740 transport_container_unregister(&i->dev_attr_cont);
741
742 kfree(i);
743}
744
745__init int libata_transport_init(void)
746{
747 int error;
748
749 error = transport_class_register(&ata_link_class);
750 if (error)
751 goto out_unregister_transport;
752 error = transport_class_register(&ata_port_class);
753 if (error)
754 goto out_unregister_link;
755 error = transport_class_register(&ata_dev_class);
756 if (error)
757 goto out_unregister_port;
758 return 0;
759
760 out_unregister_port:
761 transport_class_unregister(&ata_port_class);
762 out_unregister_link:
763 transport_class_unregister(&ata_link_class);
764 out_unregister_transport:
765 return error;
766
767}
768
769void __exit libata_transport_exit(void)
770{
771 transport_class_unregister(&ata_link_class);
772 transport_class_unregister(&ata_port_class);
773 transport_class_unregister(&ata_dev_class);
774}
diff --git a/drivers/ata/libata-transport.h b/drivers/ata/libata-transport.h
new file mode 100644
index 000000000000..2820cf864f11
--- /dev/null
+++ b/drivers/ata/libata-transport.h
@@ -0,0 +1,18 @@
1#ifndef _LIBATA_TRANSPORT_H
2#define _LIBATA_TRANSPORT_H
3
4
5extern struct scsi_transport_template *ata_scsi_transport_template;
6
7int ata_tlink_add(struct ata_link *link);
8void ata_tlink_delete(struct ata_link *link);
9
10int ata_tport_add(struct device *parent, struct ata_port *ap);
11void ata_tport_delete(struct ata_port *ap);
12
13struct scsi_transport_template *ata_attach_transport(void);
14void ata_release_transport(struct scsi_transport_template *t);
15
16__init int libata_transport_init(void);
17void __exit libata_transport_exit(void);
18#endif
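How these entry points are wired together is not visible in this excerpt (the callers live in libata-core.c, outside the hunks shown here); a minimal sketch of the intended ordering, with the surrounding init/exit context assumed rather than taken from the patch:

	/* at libata module init, before any host is registered */
	rc = libata_transport_init();
	if (rc)
		return rc;

	ata_scsi_transport_template = ata_attach_transport();
	if (!ata_scsi_transport_template) {
		libata_transport_exit();
		return -ENOMEM;
	}

	/* at module exit, after all hosts are gone */
	ata_release_transport(ata_scsi_transport_template);
	libata_transport_exit();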
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 9ce1ecc63e39..773de97988a2 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -86,6 +86,8 @@ extern int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
86extern int ata_dev_configure(struct ata_device *dev); 86extern int ata_dev_configure(struct ata_device *dev);
87extern int sata_down_spd_limit(struct ata_link *link, u32 spd_limit); 87extern int sata_down_spd_limit(struct ata_link *link, u32 spd_limit);
88extern int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel); 88extern int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel);
89extern unsigned int ata_dev_set_feature(struct ata_device *dev,
90 u8 enable, u8 feature);
89extern void ata_sg_clean(struct ata_queued_cmd *qc); 91extern void ata_sg_clean(struct ata_queued_cmd *qc);
90extern void ata_qc_free(struct ata_queued_cmd *qc); 92extern void ata_qc_free(struct ata_queued_cmd *qc);
91extern void ata_qc_issue(struct ata_queued_cmd *qc); 93extern void ata_qc_issue(struct ata_queued_cmd *qc);
@@ -100,8 +102,8 @@ extern int sata_link_init_spd(struct ata_link *link);
100extern int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg); 102extern int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg);
101extern int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg); 103extern int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg);
102extern struct ata_port *ata_port_alloc(struct ata_host *host); 104extern struct ata_port *ata_port_alloc(struct ata_host *host);
103extern void ata_dev_enable_pm(struct ata_device *dev, enum link_pm policy); 105extern const char *sata_spd_string(unsigned int spd);
104extern void ata_lpm_schedule(struct ata_port *ap, enum link_pm); 106extern int ata_port_probe(struct ata_port *ap);
105 107
106/* libata-acpi.c */ 108/* libata-acpi.c */
107#ifdef CONFIG_ATA_ACPI 109#ifdef CONFIG_ATA_ACPI
@@ -137,10 +139,15 @@ extern void ata_scsi_hotplug(struct work_struct *work);
137extern void ata_schedule_scsi_eh(struct Scsi_Host *shost); 139extern void ata_schedule_scsi_eh(struct Scsi_Host *shost);
138extern void ata_scsi_dev_rescan(struct work_struct *work); 140extern void ata_scsi_dev_rescan(struct work_struct *work);
139extern int ata_bus_probe(struct ata_port *ap); 141extern int ata_bus_probe(struct ata_port *ap);
142extern int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
143 unsigned int id, unsigned int lun);
144
140 145
141/* libata-eh.c */ 146/* libata-eh.c */
142extern unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd); 147extern unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd);
143extern void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd); 148extern void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd);
149extern void ata_eh_acquire(struct ata_port *ap);
150extern void ata_eh_release(struct ata_port *ap);
144extern enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd); 151extern enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd);
145extern void ata_scsi_error(struct Scsi_Host *host); 152extern void ata_scsi_error(struct Scsi_Host *host);
146extern void ata_port_wait_eh(struct ata_port *ap); 153extern void ata_port_wait_eh(struct ata_port *ap);
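ata_eh_acquire()/ata_eh_release() pair with the msleep() -> ata_msleep() conversions seen earlier in libata-sff.c: a port gives up its host-wide EH exclusion while it sleeps so that other ports sharing the host can make progress. A hedged sketch of what such a sleep helper can look like (the eh_owner bookkeeping is an assumption about the implementation, which is not part of the hunks shown here):

	void ata_msleep(struct ata_port *ap, unsigned int msecs)
	{
		/* assumed: the host records which task currently owns EH */
		bool owns_eh = ap && ap->host->eh_owner == current;

		/* let other ports on the same host run their EH while we sleep */
		if (owns_eh)
			ata_eh_release(ap);

		msleep(msecs);

		if (owns_eh)
			ata_eh_acquire(ap);
	}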
@@ -164,11 +171,16 @@ extern int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
164 ata_postreset_fn_t postreset, 171 ata_postreset_fn_t postreset,
165 struct ata_link **r_failed_disk); 172 struct ata_link **r_failed_disk);
166extern void ata_eh_finish(struct ata_port *ap); 173extern void ata_eh_finish(struct ata_port *ap);
174extern int ata_ering_map(struct ata_ering *ering,
175 int (*map_fn)(struct ata_ering_entry *, void *),
176 void *arg);
167 177
168/* libata-pmp.c */ 178/* libata-pmp.c */
169#ifdef CONFIG_SATA_PMP 179#ifdef CONFIG_SATA_PMP
170extern int sata_pmp_scr_read(struct ata_link *link, int reg, u32 *val); 180extern int sata_pmp_scr_read(struct ata_link *link, int reg, u32 *val);
171extern int sata_pmp_scr_write(struct ata_link *link, int reg, u32 val); 181extern int sata_pmp_scr_write(struct ata_link *link, int reg, u32 val);
182extern int sata_pmp_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
183 unsigned hints);
172extern int sata_pmp_attach(struct ata_device *dev); 184extern int sata_pmp_attach(struct ata_device *dev);
173#else /* CONFIG_SATA_PMP */ 185#else /* CONFIG_SATA_PMP */
174static inline int sata_pmp_scr_read(struct ata_link *link, int reg, u32 *val) 186static inline int sata_pmp_scr_read(struct ata_link *link, int reg, u32 *val)
@@ -181,6 +193,12 @@ static inline int sata_pmp_scr_write(struct ata_link *link, int reg, u32 val)
181 return -EINVAL; 193 return -EINVAL;
182} 194}
183 195
196static inline int sata_pmp_set_lpm(struct ata_link *link,
197 enum ata_lpm_policy policy, unsigned hints)
198{
199 return -EINVAL;
200}
201
184static inline int sata_pmp_attach(struct ata_device *dev) 202static inline int sata_pmp_attach(struct ata_device *dev)
185{ 203{
186 return -EINVAL; 204 return -EINVAL;
diff --git a/drivers/ata/pata_acpi.c b/drivers/ata/pata_acpi.c
index c8d47034d5e9..91949d997555 100644
--- a/drivers/ata/pata_acpi.c
+++ b/drivers/ata/pata_acpi.c
@@ -245,7 +245,7 @@ static struct ata_port_operations pacpi_ops = {
245static int pacpi_init_one (struct pci_dev *pdev, const struct pci_device_id *id) 245static int pacpi_init_one (struct pci_dev *pdev, const struct pci_device_id *id)
246{ 246{
247 static const struct ata_port_info info = { 247 static const struct ata_port_info info = {
248 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, 248 .flags = ATA_FLAG_SLAVE_POSS,
249 249
250 .pio_mask = ATA_PIO4, 250 .pio_mask = ATA_PIO4,
251 .mwdma_mask = ATA_MWDMA2, 251 .mwdma_mask = ATA_MWDMA2,
diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
index 620a07cabe31..b0975a5ad8c4 100644
--- a/drivers/ata/pata_amd.c
+++ b/drivers/ata/pata_amd.c
@@ -11,7 +11,7 @@
11 * Power management on ports 11 * Power management on ports
12 * 12 *
13 * 13 *
14 * Documentation publically available. 14 * Documentation publicly available.
15 */ 15 */
16 16
17#include <linux/kernel.h> 17#include <linux/kernel.h>
diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
new file mode 100644
index 000000000000..719bb73a73e0
--- /dev/null
+++ b/drivers/ata/pata_arasan_cf.c
@@ -0,0 +1,983 @@
1/*
2 * drivers/ata/pata_arasan_cf.c
3 *
4 * Arasan Compact Flash host controller source file
5 *
6 * Copyright (C) 2011 ST Microelectronics
7 * Viresh Kumar <viresh.kumar@st.com>
8 *
9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any
11 * warranty of any kind, whether express or implied.
12 */
13
14/*
15 * The Arasan CompactFlash Device Controller IP core has three basic modes of
16 * operation: PC card ATA using I/O mode, PC card ATA using memory mode, PC card
17 * ATA using True IDE mode. This driver currently supports only True IDE mode.
18 *
19 * The Arasan CF controller shares a global IRQ register with the Arasan XD controller.
20 *
21 * Tested on arch/arm/mach-spear13xx
22 */
23
24#include <linux/ata.h>
25#include <linux/clk.h>
26#include <linux/completion.h>
27#include <linux/delay.h>
28#include <linux/dmaengine.h>
29#include <linux/io.h>
30#include <linux/irq.h>
31#include <linux/kernel.h>
32#include <linux/libata.h>
33#include <linux/module.h>
34#include <linux/pata_arasan_cf_data.h>
35#include <linux/platform_device.h>
36#include <linux/pm.h>
37#include <linux/slab.h>
38#include <linux/spinlock.h>
39#include <linux/types.h>
40#include <linux/workqueue.h>
41
42#define DRIVER_NAME "arasan_cf"
43#define TIMEOUT msecs_to_jiffies(3000)
44
45/* Registers */
46/* CompactFlash Interface Status */
47#define CFI_STS 0x000
48 #define STS_CHG (1)
49 #define BIN_AUDIO_OUT (1 << 1)
50 #define CARD_DETECT1 (1 << 2)
51 #define CARD_DETECT2 (1 << 3)
52 #define INP_ACK (1 << 4)
53 #define CARD_READY (1 << 5)
54 #define IO_READY (1 << 6)
55 #define B16_IO_PORT_SEL (1 << 7)
56/* IRQ */
57#define IRQ_STS 0x004
58/* Interrupt Enable */
59#define IRQ_EN 0x008
60 #define CARD_DETECT_IRQ (1)
61 #define STATUS_CHNG_IRQ (1 << 1)
62 #define MEM_MODE_IRQ (1 << 2)
63 #define IO_MODE_IRQ (1 << 3)
64 #define TRUE_IDE_MODE_IRQ (1 << 8)
65 #define PIO_XFER_ERR_IRQ (1 << 9)
66 #define BUF_AVAIL_IRQ (1 << 10)
67 #define XFER_DONE_IRQ (1 << 11)
68 #define IGNORED_IRQS (STATUS_CHNG_IRQ | MEM_MODE_IRQ | IO_MODE_IRQ |\
69 TRUE_IDE_MODE_IRQ)
70 #define TRUE_IDE_IRQS (CARD_DETECT_IRQ | PIO_XFER_ERR_IRQ |\
71 BUF_AVAIL_IRQ | XFER_DONE_IRQ)
72/* Operation Mode */
73#define OP_MODE 0x00C
74 #define CARD_MODE_MASK (0x3)
75 #define MEM_MODE (0x0)
76 #define IO_MODE (0x1)
77 #define TRUE_IDE_MODE (0x2)
78
79 #define CARD_TYPE_MASK (1 << 2)
80 #define CF_CARD (0)
81 #define CF_PLUS_CARD (1 << 2)
82
83 #define CARD_RESET (1 << 3)
84 #define CFHOST_ENB (1 << 4)
85 #define OUTPUTS_TRISTATE (1 << 5)
86 #define ULTRA_DMA_ENB (1 << 8)
87 #define MULTI_WORD_DMA_ENB (1 << 9)
88 #define DRQ_BLOCK_SIZE_MASK (0x3 << 11)
89 #define DRQ_BLOCK_SIZE_512 (0)
90 #define DRQ_BLOCK_SIZE_1024 (1 << 11)
91 #define DRQ_BLOCK_SIZE_2048 (2 << 11)
92 #define DRQ_BLOCK_SIZE_4096 (3 << 11)
93/* CF Interface Clock Configuration */
94#define CLK_CFG 0x010
95 #define CF_IF_CLK_MASK (0XF)
96/* CF Timing Mode Configuration */
97#define TM_CFG 0x014
98 #define MEM_MODE_TIMING_MASK (0x3)
99 #define MEM_MODE_TIMING_250NS (0x0)
100 #define MEM_MODE_TIMING_120NS (0x1)
101 #define MEM_MODE_TIMING_100NS (0x2)
102 #define MEM_MODE_TIMING_80NS (0x3)
103
104 #define IO_MODE_TIMING_MASK (0x3 << 2)
105 #define IO_MODE_TIMING_250NS (0x0 << 2)
106 #define IO_MODE_TIMING_120NS (0x1 << 2)
107 #define IO_MODE_TIMING_100NS (0x2 << 2)
108 #define IO_MODE_TIMING_80NS (0x3 << 2)
109
110 #define TRUEIDE_PIO_TIMING_MASK (0x7 << 4)
111 #define TRUEIDE_PIO_TIMING_SHIFT 4
112
113 #define TRUEIDE_MWORD_DMA_TIMING_MASK (0x7 << 7)
114 #define TRUEIDE_MWORD_DMA_TIMING_SHIFT 7
115
116 #define ULTRA_DMA_TIMING_MASK (0x7 << 10)
117 #define ULTRA_DMA_TIMING_SHIFT 10
118/* CF Transfer Address */
119#define XFER_ADDR 0x014
120 #define XFER_ADDR_MASK (0x7FF)
121 #define MAX_XFER_COUNT 0x20000u
122/* Transfer Control */
123#define XFER_CTR 0x01C
124 #define XFER_COUNT_MASK (0x3FFFF)
125 #define ADDR_INC_DISABLE (1 << 24)
126 #define XFER_WIDTH_MASK (1 << 25)
127 #define XFER_WIDTH_8B (0)
128 #define XFER_WIDTH_16B (1 << 25)
129
130 #define MEM_TYPE_MASK (1 << 26)
131 #define MEM_TYPE_COMMON (0)
132 #define MEM_TYPE_ATTRIBUTE (1 << 26)
133
134 #define MEM_IO_XFER_MASK (1 << 27)
135 #define MEM_XFER (0)
136 #define IO_XFER (1 << 27)
137
138 #define DMA_XFER_MODE (1 << 28)
139
140 #define AHB_BUS_NORMAL_PIO_OPRTN (~(1 << 29))
141 #define XFER_DIR_MASK (1 << 30)
142 #define XFER_READ (0)
143 #define XFER_WRITE (1 << 30)
144
145 #define XFER_START (1 << 31)
146/* Write Data Port */
147#define WRITE_PORT 0x024
148/* Read Data Port */
149#define READ_PORT 0x028
150/* ATA Data Port */
151#define ATA_DATA_PORT 0x030
152 #define ATA_DATA_PORT_MASK (0xFFFF)
153/* ATA Error/Features */
154#define ATA_ERR_FTR 0x034
155/* ATA Sector Count */
156#define ATA_SC 0x038
157/* ATA Sector Number */
158#define ATA_SN 0x03C
159/* ATA Cylinder Low */
160#define ATA_CL 0x040
161/* ATA Cylinder High */
162#define ATA_CH 0x044
163/* ATA Select Card/Head */
164#define ATA_SH 0x048
165/* ATA Status-Command */
166#define ATA_STS_CMD 0x04C
167/* ATA Alternate Status/Device Control */
168#define ATA_ASTS_DCTR 0x050
169/* Extended Write Data Port 0x200-0x3FC */
170#define EXT_WRITE_PORT 0x200
171/* Extended Read Data Port 0x400-0x5FC */
172#define EXT_READ_PORT 0x400
173 #define FIFO_SIZE 0x200u
174/* Global Interrupt Status */
175#define GIRQ_STS 0x800
176/* Global Interrupt Status enable */
177#define GIRQ_STS_EN 0x804
178/* Global Interrupt Signal enable */
179#define GIRQ_SGN_EN 0x808
180 #define GIRQ_CF (1)
181 #define GIRQ_XD (1 << 1)
182
183/* Compact Flash Controller Dev Structure */
184struct arasan_cf_dev {
185 /* pointer to ata_host structure */
186 struct ata_host *host;
187 /* clk structure, only if HAVE_CLK is defined */
188#ifdef CONFIG_HAVE_CLK
189 struct clk *clk;
190#endif
191
192 /* physical base address of controller */
193 dma_addr_t pbase;
194 /* virtual base address of controller */
195 void __iomem *vbase;
196 /* irq number*/
197 int irq;
198
199 /* status to be updated to framework regarding DMA transfer */
200 u8 dma_status;
201 /* Card is present or Not */
202 u8 card_present;
203
204 /* dma specific */
205 /* Completion for transfer complete interrupt from controller */
206 struct completion cf_completion;
207 /* Completion for DMA transfer complete. */
208 struct completion dma_completion;
209 /* Dma channel allocated */
210 struct dma_chan *dma_chan;
211 /* Mask for DMA transfers */
212 dma_cap_mask_t mask;
213 /* dma channel private data */
214 void *dma_priv;
215 /* DMA transfer work */
216 struct work_struct work;
217 /* DMA delayed finish work */
218 struct delayed_work dwork;
219 /* qc to be transferred using DMA */
220 struct ata_queued_cmd *qc;
221};
222
223static struct scsi_host_template arasan_cf_sht = {
224 ATA_BASE_SHT(DRIVER_NAME),
225 .sg_tablesize = SG_NONE,
226 .dma_boundary = 0xFFFFFFFFUL,
227};
228
229static void cf_dumpregs(struct arasan_cf_dev *acdev)
230{
231 struct device *dev = acdev->host->dev;
232
233 dev_dbg(dev, ": =========== REGISTER DUMP ===========");
234 dev_dbg(dev, ": CFI_STS: %x", readl(acdev->vbase + CFI_STS));
235 dev_dbg(dev, ": IRQ_STS: %x", readl(acdev->vbase + IRQ_STS));
236 dev_dbg(dev, ": IRQ_EN: %x", readl(acdev->vbase + IRQ_EN));
237 dev_dbg(dev, ": OP_MODE: %x", readl(acdev->vbase + OP_MODE));
238 dev_dbg(dev, ": CLK_CFG: %x", readl(acdev->vbase + CLK_CFG));
239 dev_dbg(dev, ": TM_CFG: %x", readl(acdev->vbase + TM_CFG));
240 dev_dbg(dev, ": XFER_CTR: %x", readl(acdev->vbase + XFER_CTR));
241 dev_dbg(dev, ": GIRQ_STS: %x", readl(acdev->vbase + GIRQ_STS));
242 dev_dbg(dev, ": GIRQ_STS_EN: %x", readl(acdev->vbase + GIRQ_STS_EN));
243 dev_dbg(dev, ": GIRQ_SGN_EN: %x", readl(acdev->vbase + GIRQ_SGN_EN));
244 dev_dbg(dev, ": =====================================");
245}
246
247/* Enable/Disable global interrupts shared between CF and XD ctrlr. */
248static void cf_ginterrupt_enable(struct arasan_cf_dev *acdev, bool enable)
249{
250 /* enable should be 0 or 1 */
251 writel(enable, acdev->vbase + GIRQ_STS_EN);
252 writel(enable, acdev->vbase + GIRQ_SGN_EN);
253}
254
255/* Enable/Disable CF interrupts */
256static inline void
257cf_interrupt_enable(struct arasan_cf_dev *acdev, u32 mask, bool enable)
258{
259 u32 val = readl(acdev->vbase + IRQ_EN);
260 /* clear & enable/disable irqs */
261 if (enable) {
262 writel(mask, acdev->vbase + IRQ_STS);
263 writel(val | mask, acdev->vbase + IRQ_EN);
264 } else
265 writel(val & ~mask, acdev->vbase + IRQ_EN);
266}
267
268static inline void cf_card_reset(struct arasan_cf_dev *acdev)
269{
270 u32 val = readl(acdev->vbase + OP_MODE);
271
272 writel(val | CARD_RESET, acdev->vbase + OP_MODE);
273 udelay(200);
274 writel(val & ~CARD_RESET, acdev->vbase + OP_MODE);
275}
276
277static inline void cf_ctrl_reset(struct arasan_cf_dev *acdev)
278{
279 writel(readl(acdev->vbase + OP_MODE) & ~CFHOST_ENB,
280 acdev->vbase + OP_MODE);
281 writel(readl(acdev->vbase + OP_MODE) | CFHOST_ENB,
282 acdev->vbase + OP_MODE);
283}
284
285static void cf_card_detect(struct arasan_cf_dev *acdev, bool hotplugged)
286{
287 struct ata_port *ap = acdev->host->ports[0];
288 struct ata_eh_info *ehi = &ap->link.eh_info;
289 u32 val = readl(acdev->vbase + CFI_STS);
290
291 /* Both CD1 & CD2 should be low if card inserted completely */
292 if (!(val & (CARD_DETECT1 | CARD_DETECT2))) {
293 if (acdev->card_present)
294 return;
295 acdev->card_present = 1;
296 cf_card_reset(acdev);
297 } else {
298 if (!acdev->card_present)
299 return;
300 acdev->card_present = 0;
301 }
302
303 if (hotplugged) {
304 ata_ehi_hotplugged(ehi);
305 ata_port_freeze(ap);
306 }
307}
308
309static int cf_init(struct arasan_cf_dev *acdev)
310{
311 struct arasan_cf_pdata *pdata = dev_get_platdata(acdev->host->dev);
312 unsigned long flags;
313 int ret = 0;
314
315#ifdef CONFIG_HAVE_CLK
316 ret = clk_enable(acdev->clk);
317 if (ret) {
318 dev_dbg(acdev->host->dev, "clock enable failed");
319 return ret;
320 }
321#endif
322
323 spin_lock_irqsave(&acdev->host->lock, flags);
324 /* configure CF interface clock */
325 writel((pdata->cf_if_clk <= CF_IF_CLK_200M) ? pdata->cf_if_clk :
326 CF_IF_CLK_166M, acdev->vbase + CLK_CFG);
327
328 writel(TRUE_IDE_MODE | CFHOST_ENB, acdev->vbase + OP_MODE);
329 cf_interrupt_enable(acdev, CARD_DETECT_IRQ, 1);
330 cf_ginterrupt_enable(acdev, 1);
331 spin_unlock_irqrestore(&acdev->host->lock, flags);
332
333 return ret;
334}
335
336static void cf_exit(struct arasan_cf_dev *acdev)
337{
338 unsigned long flags;
339
340 spin_lock_irqsave(&acdev->host->lock, flags);
341 cf_ginterrupt_enable(acdev, 0);
342 cf_interrupt_enable(acdev, TRUE_IDE_IRQS, 0);
343 cf_card_reset(acdev);
344 writel(readl(acdev->vbase + OP_MODE) & ~CFHOST_ENB,
345 acdev->vbase + OP_MODE);
346 spin_unlock_irqrestore(&acdev->host->lock, flags);
347#ifdef CONFIG_HAVE_CLK
348 clk_disable(acdev->clk);
349#endif
350}
351
352static void dma_callback(void *dev)
353{
354 struct arasan_cf_dev *acdev = (struct arasan_cf_dev *) dev;
355
356 complete(&acdev->dma_completion);
357}
358
359static bool filter(struct dma_chan *chan, void *slave)
360{
361 chan->private = slave;
362 return true;
363}
364
365static inline void dma_complete(struct arasan_cf_dev *acdev)
366{
367 struct ata_queued_cmd *qc = acdev->qc;
368 unsigned long flags;
369
370 acdev->qc = NULL;
371 ata_sff_interrupt(acdev->irq, acdev->host);
372
373 spin_lock_irqsave(&acdev->host->lock, flags);
374 if (unlikely(qc->err_mask) && ata_is_dma(qc->tf.protocol))
375 ata_ehi_push_desc(&qc->ap->link.eh_info, "DMA Failed: Timeout");
376 spin_unlock_irqrestore(&acdev->host->lock, flags);
377}
378
379static inline int wait4buf(struct arasan_cf_dev *acdev)
380{
381 if (!wait_for_completion_timeout(&acdev->cf_completion, TIMEOUT)) {
382 u32 rw = acdev->qc->tf.flags & ATA_TFLAG_WRITE;
383
384 dev_err(acdev->host->dev, "%s TimeOut", rw ? "write" : "read");
385 return -ETIMEDOUT;
386 }
387
388 /* Check if PIO Error interrupt has occurred */
389 if (acdev->dma_status & ATA_DMA_ERR)
390 return -EAGAIN;
391
392 return 0;
393}
394
395static int
396dma_xfer(struct arasan_cf_dev *acdev, dma_addr_t src, dma_addr_t dest, u32 len)
397{
398 struct dma_async_tx_descriptor *tx;
399 struct dma_chan *chan = acdev->dma_chan;
400 dma_cookie_t cookie;
401 unsigned long flags = DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP |
402 DMA_COMPL_SKIP_DEST_UNMAP;
403 int ret = 0;
404
405 tx = chan->device->device_prep_dma_memcpy(chan, dest, src, len, flags);
406 if (!tx) {
407 dev_err(acdev->host->dev, "device_prep_dma_memcpy failed\n");
408 return -EAGAIN;
409 }
410
411 tx->callback = dma_callback;
412 tx->callback_param = acdev;
413 cookie = tx->tx_submit(tx);
414
415 ret = dma_submit_error(cookie);
416 if (ret) {
417 dev_err(acdev->host->dev, "dma_submit_error\n");
418 return ret;
419 }
420
421 chan->device->device_issue_pending(chan);
422
423 /* Wait for DMA to complete */
424 if (!wait_for_completion_timeout(&acdev->dma_completion, TIMEOUT)) {
425 chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
426 dev_err(acdev->host->dev, "wait_for_completion_timeout\n");
427 return -ETIMEDOUT;
428 }
429
430 return ret;
431}
432
433static int sg_xfer(struct arasan_cf_dev *acdev, struct scatterlist *sg)
434{
435 dma_addr_t dest = 0, src = 0;
436 u32 xfer_cnt, sglen, dma_len, xfer_ctr;
437 u32 write = acdev->qc->tf.flags & ATA_TFLAG_WRITE;
438 unsigned long flags;
439 int ret = 0;
440
441 sglen = sg_dma_len(sg);
442 if (write) {
443 src = sg_dma_address(sg);
444 dest = acdev->pbase + EXT_WRITE_PORT;
445 } else {
446 dest = sg_dma_address(sg);
447 src = acdev->pbase + EXT_READ_PORT;
448 }
449
450 /*
451 * For each sg:
452 * at most MAX_XFER_COUNT data is transferred before we get the transfer
453 * complete interrupt. In between, a buffer available interrupt is generated
454 * after every FIFO_SIZE of data, at which point we refill the FIFO with
455 * at most FIFO_SIZE of data.
456 */
457 while (sglen) {
458 xfer_cnt = min(sglen, MAX_XFER_COUNT);
459 spin_lock_irqsave(&acdev->host->lock, flags);
460 xfer_ctr = readl(acdev->vbase + XFER_CTR) &
461 ~XFER_COUNT_MASK;
462 writel(xfer_ctr | xfer_cnt | XFER_START,
463 acdev->vbase + XFER_CTR);
464 spin_unlock_irqrestore(&acdev->host->lock, flags);
465
466 /* continue dma xfers until current sg is completed */
467 while (xfer_cnt) {
468 /* wait for read to complete */
469 if (!write) {
470 ret = wait4buf(acdev);
471 if (ret)
472 goto fail;
473 }
474
475 /* read/write FIFO in chunk of FIFO_SIZE */
476 dma_len = min(xfer_cnt, FIFO_SIZE);
477 ret = dma_xfer(acdev, src, dest, dma_len);
478 if (ret) {
479 dev_err(acdev->host->dev, "dma failed");
480 goto fail;
481 }
482
483 if (write)
484 src += dma_len;
485 else
486 dest += dma_len;
487
488 sglen -= dma_len;
489 xfer_cnt -= dma_len;
490
491 /* wait for write to complete */
492 if (write) {
493 ret = wait4buf(acdev);
494 if (ret)
495 goto fail;
496 }
497 }
498 }
499
500fail:
501 spin_lock_irqsave(&acdev->host->lock, flags);
502 writel(readl(acdev->vbase + XFER_CTR) & ~XFER_START,
503 acdev->vbase + XFER_CTR);
504 spin_unlock_irqrestore(&acdev->host->lock, flags);
505
506 return ret;
507}
508
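To make the loop structure above concrete, treating all counts in bytes as the code's min()/subtraction arithmetic does: assuming a single scatterlist segment of 64 KiB (a hypothetical size, not taken from the patch), sg_xfer() programs xfer_cnt = min(65536, MAX_XFER_COUNT) = 65536 into XFER_CTR once, and the inner loop then issues 65536 / FIFO_SIZE = 128 DMA memcpy transfers of 512 bytes each, waiting on the buffer-available completion around every chunk (before each chunk for reads, after it for writes).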
509/*
510 * This routine uses an external DMA controller to read/write data to/from the
511 * FIFO of the CF controller. Two transfer related interrupts are supported:
512 * - buf_avail: this interrupt is generated as soon as we have a buffer of 512
513 * bytes available for reading or an empty buffer available for writing.
514 * - xfer_done: This interrupt is generated on transfer of "xfer_size" amount of
515 * data to/from FIFO. xfer_size is programmed in XFER_CTR register.
516 *
517 * Max buffer size = FIFO_SIZE = 512 Bytes.
518 * Max xfer_size = MAX_XFER_COUNT = 256 KB.
519 */
520static void data_xfer(struct work_struct *work)
521{
522 struct arasan_cf_dev *acdev = container_of(work, struct arasan_cf_dev,
523 work);
524 struct ata_queued_cmd *qc = acdev->qc;
525 struct scatterlist *sg;
526 unsigned long flags;
527 u32 temp;
528 int ret = 0;
529
530 /* request dma channels */
531 /* dma_request_channel may sleep, so calling from process context */
532 acdev->dma_chan = dma_request_channel(acdev->mask, filter,
533 acdev->dma_priv);
534 if (!acdev->dma_chan) {
535 dev_err(acdev->host->dev, "Unable to get dma_chan\n");
536 goto chan_request_fail;
537 }
538
539 for_each_sg(qc->sg, sg, qc->n_elem, temp) {
540 ret = sg_xfer(acdev, sg);
541 if (ret)
542 break;
543 }
544
545 dma_release_channel(acdev->dma_chan);
546
547 /* data xferred successfully */
548 if (!ret) {
549 u32 status;
550
551 spin_lock_irqsave(&acdev->host->lock, flags);
552 status = ioread8(qc->ap->ioaddr.altstatus_addr);
553 spin_unlock_irqrestore(&acdev->host->lock, flags);
554 if (status & (ATA_BUSY | ATA_DRQ)) {
555 ata_sff_queue_delayed_work(&acdev->dwork, 1);
556 return;
557 }
558
559 goto sff_intr;
560 }
561
562 cf_dumpregs(acdev);
563
564chan_request_fail:
565 spin_lock_irqsave(&acdev->host->lock, flags);
566 /* error when transferring data to/from memory */
567 qc->err_mask |= AC_ERR_HOST_BUS;
568 qc->ap->hsm_task_state = HSM_ST_ERR;
569
570 cf_ctrl_reset(acdev);
571 spin_unlock_irqrestore(qc->ap->lock, flags);
572sff_intr:
573 dma_complete(acdev);
574}
575
576static void delayed_finish(struct work_struct *work)
577{
578 struct arasan_cf_dev *acdev = container_of(work, struct arasan_cf_dev,
579 dwork.work);
580 struct ata_queued_cmd *qc = acdev->qc;
581 unsigned long flags;
582 u8 status;
583
584 spin_lock_irqsave(&acdev->host->lock, flags);
585 status = ioread8(qc->ap->ioaddr.altstatus_addr);
586 spin_unlock_irqrestore(&acdev->host->lock, flags);
587
588 if (status & (ATA_BUSY | ATA_DRQ))
589 ata_sff_queue_delayed_work(&acdev->dwork, 1);
590 else
591 dma_complete(acdev);
592}
593
594static irqreturn_t arasan_cf_interrupt(int irq, void *dev)
595{
596 struct arasan_cf_dev *acdev = ((struct ata_host *)dev)->private_data;
597 unsigned long flags;
598 u32 irqsts;
599
600 irqsts = readl(acdev->vbase + GIRQ_STS);
601 if (!(irqsts & GIRQ_CF))
602 return IRQ_NONE;
603
604 spin_lock_irqsave(&acdev->host->lock, flags);
605 irqsts = readl(acdev->vbase + IRQ_STS);
606 writel(irqsts, acdev->vbase + IRQ_STS); /* clear irqs */
607 writel(GIRQ_CF, acdev->vbase + GIRQ_STS); /* clear girqs */
608
609 /* handle only relevant interrupts */
610 irqsts &= ~IGNORED_IRQS;
611
612 if (irqsts & CARD_DETECT_IRQ) {
613 cf_card_detect(acdev, 1);
614 spin_unlock_irqrestore(&acdev->host->lock, flags);
615 return IRQ_HANDLED;
616 }
617
618 if (irqsts & PIO_XFER_ERR_IRQ) {
619 acdev->dma_status = ATA_DMA_ERR;
620 writel(readl(acdev->vbase + XFER_CTR) & ~XFER_START,
621 acdev->vbase + XFER_CTR);
622 spin_unlock_irqrestore(&acdev->host->lock, flags);
623 complete(&acdev->cf_completion);
624 dev_err(acdev->host->dev, "pio xfer err irq\n");
625 return IRQ_HANDLED;
626 }
627
628 spin_unlock_irqrestore(&acdev->host->lock, flags);
629
630 if (irqsts & BUF_AVAIL_IRQ) {
631 complete(&acdev->cf_completion);
632 return IRQ_HANDLED;
633 }
634
635 if (irqsts & XFER_DONE_IRQ) {
636 struct ata_queued_cmd *qc = acdev->qc;
637
638 /* Send Complete only for write */
639 if (qc->tf.flags & ATA_TFLAG_WRITE)
640 complete(&acdev->cf_completion);
641 }
642
643 return IRQ_HANDLED;
644}
645
646static void arasan_cf_freeze(struct ata_port *ap)
647{
648 struct arasan_cf_dev *acdev = ap->host->private_data;
649
650 /* stop transfer and reset controller */
651 writel(readl(acdev->vbase + XFER_CTR) & ~XFER_START,
652 acdev->vbase + XFER_CTR);
653 cf_ctrl_reset(acdev);
654 acdev->dma_status = ATA_DMA_ERR;
655
656 ata_sff_dma_pause(ap);
657 ata_sff_freeze(ap);
658}
659
660void arasan_cf_error_handler(struct ata_port *ap)
661{
662 struct arasan_cf_dev *acdev = ap->host->private_data;
663
664 /*
665 * DMA transfers using an external DMA controller may be scheduled.
666 * Abort them before handling the error. Refer to data_xfer() for further
667 * details.
668 */
669 cancel_work_sync(&acdev->work);
670 cancel_delayed_work_sync(&acdev->dwork);
671 return ata_sff_error_handler(ap);
672}
673
674static void arasan_cf_dma_start(struct arasan_cf_dev *acdev)
675{
676 u32 xfer_ctr = readl(acdev->vbase + XFER_CTR) & ~XFER_DIR_MASK;
677 u32 write = acdev->qc->tf.flags & ATA_TFLAG_WRITE;
678
679 xfer_ctr |= write ? XFER_WRITE : XFER_READ;
680 writel(xfer_ctr, acdev->vbase + XFER_CTR);
681
682 acdev->qc->ap->ops->sff_exec_command(acdev->qc->ap, &acdev->qc->tf);
683 ata_sff_queue_work(&acdev->work);
684}
685
686unsigned int arasan_cf_qc_issue(struct ata_queued_cmd *qc)
687{
688 struct ata_port *ap = qc->ap;
689 struct arasan_cf_dev *acdev = ap->host->private_data;
690
691 /* defer PIO handling to sff_qc_issue */
692 if (!ata_is_dma(qc->tf.protocol))
693 return ata_sff_qc_issue(qc);
694
695 /* select the device */
696 ata_wait_idle(ap);
697 ata_sff_dev_select(ap, qc->dev->devno);
698 ata_wait_idle(ap);
699
700 /* start the command */
701 switch (qc->tf.protocol) {
702 case ATA_PROT_DMA:
703 WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);
704
705 ap->ops->sff_tf_load(ap, &qc->tf);
706 acdev->dma_status = 0;
707 acdev->qc = qc;
708 arasan_cf_dma_start(acdev);
709 ap->hsm_task_state = HSM_ST_LAST;
710 break;
711
712 default:
713 WARN_ON(1);
714 return AC_ERR_SYSTEM;
715 }
716
717 return 0;
718}
719
720static void arasan_cf_set_piomode(struct ata_port *ap, struct ata_device *adev)
721{
722 struct arasan_cf_dev *acdev = ap->host->private_data;
723 u8 pio = adev->pio_mode - XFER_PIO_0;
724 unsigned long flags;
725 u32 val;
726
727 /* Arasan ctrl supports Mode0 -> Mode6 */
728 if (pio > 6) {
729 dev_err(ap->dev, "Unknown PIO mode\n");
730 return;
731 }
732
733 spin_lock_irqsave(&acdev->host->lock, flags);
734 val = readl(acdev->vbase + OP_MODE) &
735 ~(ULTRA_DMA_ENB | MULTI_WORD_DMA_ENB | DRQ_BLOCK_SIZE_MASK);
736 writel(val, acdev->vbase + OP_MODE);
737 val = readl(acdev->vbase + TM_CFG) & ~TRUEIDE_PIO_TIMING_MASK;
738 val |= pio << TRUEIDE_PIO_TIMING_SHIFT;
739 writel(val, acdev->vbase + TM_CFG);
740
741 cf_interrupt_enable(acdev, BUF_AVAIL_IRQ | XFER_DONE_IRQ, 0);
742 cf_interrupt_enable(acdev, PIO_XFER_ERR_IRQ, 1);
743 spin_unlock_irqrestore(&acdev->host->lock, flags);
744}
745
746static void arasan_cf_set_dmamode(struct ata_port *ap, struct ata_device *adev)
747{
748 struct arasan_cf_dev *acdev = ap->host->private_data;
749 u32 opmode, tmcfg, dma_mode = adev->dma_mode;
750 unsigned long flags;
751
752 spin_lock_irqsave(&acdev->host->lock, flags);
753 opmode = readl(acdev->vbase + OP_MODE) &
754 ~(MULTI_WORD_DMA_ENB | ULTRA_DMA_ENB);
755 tmcfg = readl(acdev->vbase + TM_CFG);
756
757 if ((dma_mode >= XFER_UDMA_0) && (dma_mode <= XFER_UDMA_6)) {
758 opmode |= ULTRA_DMA_ENB;
759 tmcfg &= ~ULTRA_DMA_TIMING_MASK;
760 tmcfg |= (dma_mode - XFER_UDMA_0) << ULTRA_DMA_TIMING_SHIFT;
761 } else if ((dma_mode >= XFER_MW_DMA_0) && (dma_mode <= XFER_MW_DMA_4)) {
762 opmode |= MULTI_WORD_DMA_ENB;
763 tmcfg &= ~TRUEIDE_MWORD_DMA_TIMING_MASK;
764 tmcfg |= (dma_mode - XFER_MW_DMA_0) <<
765 TRUEIDE_MWORD_DMA_TIMING_SHIFT;
766 } else {
767 dev_err(ap->dev, "Unknown DMA mode\n");
768 spin_unlock_irqrestore(&acdev->host->lock, flags);
769 return;
770 }
771
772 writel(opmode, acdev->vbase + OP_MODE);
773 writel(tmcfg, acdev->vbase + TM_CFG);
774 writel(DMA_XFER_MODE, acdev->vbase + XFER_CTR);
775
776 cf_interrupt_enable(acdev, PIO_XFER_ERR_IRQ, 0);
777 cf_interrupt_enable(acdev, BUF_AVAIL_IRQ | XFER_DONE_IRQ, 1);
778 spin_unlock_irqrestore(&acdev->host->lock, flags);
779}
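Both arasan_cf_set_piomode() and arasan_cf_set_dmamode() follow the usual read-modify-write pattern for timing fields: clear the field with its mask, then OR in the mode index shifted into position. A tiny standalone sketch of that pattern, using made-up mask and shift values rather than the controller's real register layout:

#include <stdio.h>

/* Hypothetical field layout, for illustration only. */
#define TIMING_MASK    (0xfu << 4)
#define TIMING_SHIFT   4

static unsigned int set_timing_field(unsigned int reg, unsigned int mode_index)
{
        reg &= ~TIMING_MASK;                   /* clear the old timing value */
        reg |= mode_index << TIMING_SHIFT;     /* program the new one */
        return reg;
}

int main(void)
{
        unsigned int reg = 0xabcd;

        printf("0x%08x -> 0x%08x\n", reg, set_timing_field(reg, 5));
        return 0;
}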
780
781static struct ata_port_operations arasan_cf_ops = {
782 .inherits = &ata_sff_port_ops,
783 .freeze = arasan_cf_freeze,
784 .error_handler = arasan_cf_error_handler,
785 .qc_issue = arasan_cf_qc_issue,
786 .set_piomode = arasan_cf_set_piomode,
787 .set_dmamode = arasan_cf_set_dmamode,
788};
789
790static int __devinit arasan_cf_probe(struct platform_device *pdev)
791{
792 struct arasan_cf_dev *acdev;
793 struct arasan_cf_pdata *pdata = dev_get_platdata(&pdev->dev);
794 struct ata_host *host;
795 struct ata_port *ap;
796 struct resource *res;
797 irq_handler_t irq_handler = NULL;
798 int ret = 0;
799
800 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
801 if (!res)
802 return -EINVAL;
803
804 if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res),
805 DRIVER_NAME)) {
806 dev_warn(&pdev->dev, "Failed to get memory region resource\n");
807 return -ENOENT;
808 }
809
810 acdev = devm_kzalloc(&pdev->dev, sizeof(*acdev), GFP_KERNEL);
811 if (!acdev) {
812 dev_warn(&pdev->dev, "kzalloc fail\n");
813 return -ENOMEM;
814 }
815
816 /* if irq is 0, support only PIO */
817 acdev->irq = platform_get_irq(pdev, 0);
818 if (acdev->irq)
819 irq_handler = arasan_cf_interrupt;
820 else
821 pdata->quirk |= CF_BROKEN_MWDMA | CF_BROKEN_UDMA;
822
823 acdev->pbase = res->start;
824 acdev->vbase = devm_ioremap_nocache(&pdev->dev, res->start,
825 resource_size(res));
826 if (!acdev->vbase) {
827 dev_warn(&pdev->dev, "ioremap fail\n");
828 return -ENOMEM;
829 }
830
831#ifdef CONFIG_HAVE_CLK
832 acdev->clk = clk_get(&pdev->dev, NULL);
833 if (IS_ERR(acdev->clk)) {
834 dev_warn(&pdev->dev, "Clock not found\n");
835 return PTR_ERR(acdev->clk);
836 }
837#endif
838
839 /* allocate host */
840 host = ata_host_alloc(&pdev->dev, 1);
841 if (!host) {
842 ret = -ENOMEM;
843 dev_warn(&pdev->dev, "alloc host fail\n");
844 goto free_clk;
845 }
846
847 ap = host->ports[0];
848 host->private_data = acdev;
849 acdev->host = host;
850 ap->ops = &arasan_cf_ops;
851 ap->pio_mask = ATA_PIO6;
852 ap->mwdma_mask = ATA_MWDMA4;
853 ap->udma_mask = ATA_UDMA6;
854
855 init_completion(&acdev->cf_completion);
856 init_completion(&acdev->dma_completion);
857 INIT_WORK(&acdev->work, data_xfer);
858 INIT_DELAYED_WORK(&acdev->dwork, delayed_finish);
859 dma_cap_set(DMA_MEMCPY, acdev->mask);
860 acdev->dma_priv = pdata->dma_priv;
861
862 /* Handle platform specific quirks */
863 if (pdata->quirk) {
864 if (pdata->quirk & CF_BROKEN_PIO) {
865 ap->ops->set_piomode = NULL;
866 ap->pio_mask = 0;
867 }
868 if (pdata->quirk & CF_BROKEN_MWDMA)
869 ap->mwdma_mask = 0;
870 if (pdata->quirk & CF_BROKEN_UDMA)
871 ap->udma_mask = 0;
872 }
873 ap->flags |= ATA_FLAG_PIO_POLLING | ATA_FLAG_NO_ATAPI;
874
875 ap->ioaddr.cmd_addr = acdev->vbase + ATA_DATA_PORT;
876 ap->ioaddr.data_addr = acdev->vbase + ATA_DATA_PORT;
877 ap->ioaddr.error_addr = acdev->vbase + ATA_ERR_FTR;
878 ap->ioaddr.feature_addr = acdev->vbase + ATA_ERR_FTR;
879 ap->ioaddr.nsect_addr = acdev->vbase + ATA_SC;
880 ap->ioaddr.lbal_addr = acdev->vbase + ATA_SN;
881 ap->ioaddr.lbam_addr = acdev->vbase + ATA_CL;
882 ap->ioaddr.lbah_addr = acdev->vbase + ATA_CH;
883 ap->ioaddr.device_addr = acdev->vbase + ATA_SH;
884 ap->ioaddr.status_addr = acdev->vbase + ATA_STS_CMD;
885 ap->ioaddr.command_addr = acdev->vbase + ATA_STS_CMD;
886 ap->ioaddr.altstatus_addr = acdev->vbase + ATA_ASTS_DCTR;
887 ap->ioaddr.ctl_addr = acdev->vbase + ATA_ASTS_DCTR;
888
889 ata_port_desc(ap, "phy_addr %llx virt_addr %p",
890 (unsigned long long) res->start, acdev->vbase);
891
892 ret = cf_init(acdev);
893 if (ret)
894 goto free_clk;
895
896 cf_card_detect(acdev, 0);
897
898 return ata_host_activate(host, acdev->irq, irq_handler, 0,
899 &arasan_cf_sht);
900
901free_clk:
902#ifdef CONFIG_HAVE_CLK
903 clk_put(acdev->clk);
904#endif
905 return ret;
906}
907
908static int __devexit arasan_cf_remove(struct platform_device *pdev)
909{
910 struct ata_host *host = dev_get_drvdata(&pdev->dev);
911 struct arasan_cf_dev *acdev = host->ports[0]->private_data;
912
913 ata_host_detach(host);
914 cf_exit(acdev);
915#ifdef CONFIG_HAVE_CLK
916 clk_put(acdev->clk);
917#endif
918
919 return 0;
920}
921
922#ifdef CONFIG_PM
923static int arasan_cf_suspend(struct device *dev)
924{
925 struct platform_device *pdev = to_platform_device(dev);
926 struct ata_host *host = dev_get_drvdata(&pdev->dev);
927 struct arasan_cf_dev *acdev = host->ports[0]->private_data;
928
929 if (acdev->dma_chan) {
930 acdev->dma_chan->device->device_control(acdev->dma_chan,
931 DMA_TERMINATE_ALL, 0);
932 dma_release_channel(acdev->dma_chan);
933 }
934 cf_exit(acdev);
935 return ata_host_suspend(host, PMSG_SUSPEND);
936}
937
938static int arasan_cf_resume(struct device *dev)
939{
940 struct platform_device *pdev = to_platform_device(dev);
941 struct ata_host *host = dev_get_drvdata(&pdev->dev);
942 struct arasan_cf_dev *acdev = host->ports[0]->private_data;
943
944 cf_init(acdev);
945 ata_host_resume(host);
946
947 return 0;
948}
949
950static const struct dev_pm_ops arasan_cf_pm_ops = {
951 .suspend = arasan_cf_suspend,
952 .resume = arasan_cf_resume,
953};
954#endif
955
956static struct platform_driver arasan_cf_driver = {
957 .probe = arasan_cf_probe,
958 .remove = __devexit_p(arasan_cf_remove),
959 .driver = {
960 .name = DRIVER_NAME,
961 .owner = THIS_MODULE,
962#ifdef CONFIG_PM
963 .pm = &arasan_cf_pm_ops,
964#endif
965 },
966};
967
968static int __init arasan_cf_init(void)
969{
970 return platform_driver_register(&arasan_cf_driver);
971}
972module_init(arasan_cf_init);
973
974static void __exit arasan_cf_exit(void)
975{
976 platform_driver_unregister(&arasan_cf_driver);
977}
978module_exit(arasan_cf_exit);
979
980MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
981MODULE_DESCRIPTION("Arasan ATA Compact Flash driver");
982MODULE_LICENSE("GPL");
983MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/ata/pata_at32.c b/drivers/ata/pata_at32.c
index 66ce6a526f27..36f189c7ee8c 100644
--- a/drivers/ata/pata_at32.c
+++ b/drivers/ata/pata_at32.c
@@ -194,7 +194,7 @@ static int __init pata_at32_init_one(struct device *dev,
194 /* Setup ATA bindings */ 194 /* Setup ATA bindings */
195 ap->ops = &at32_port_ops; 195 ap->ops = &at32_port_ops;
196 ap->pio_mask = PIO_MASK; 196 ap->pio_mask = PIO_MASK;
197 ap->flags |= ATA_FLAG_MMIO | ATA_FLAG_SLAVE_POSS; 197 ap->flags |= ATA_FLAG_SLAVE_POSS;
198 198
199 /* 199 /*
200 * Since all 8-bit taskfile transfers has to go on the lower 200 * Since all 8-bit taskfile transfers has to go on the lower
diff --git a/drivers/ata/pata_at91.c b/drivers/ata/pata_at91.c
index 0da0dcc7dd08..960c72571395 100644
--- a/drivers/ata/pata_at91.c
+++ b/drivers/ata/pata_at91.c
@@ -3,6 +3,7 @@
3 * with CompactFlash interface in True IDE mode 3 * with CompactFlash interface in True IDE mode
4 * 4 *
5 * Copyright (C) 2009 Matyukevich Sergey 5 * Copyright (C) 2009 Matyukevich Sergey
6 * 2011 Igor Plyatov
6 * 7 *
7 * Based on: 8 * Based on:
8 * * generic platform driver by Paul Mundt: drivers/ata/pata_platform.c 9 * * generic platform driver by Paul Mundt: drivers/ata/pata_platform.c
@@ -31,26 +32,149 @@
31#include <mach/board.h> 32#include <mach/board.h>
32#include <mach/gpio.h> 33#include <mach/gpio.h>
33 34
35#define DRV_NAME "pata_at91"
36#define DRV_VERSION "0.3"
34 37
35#define DRV_NAME "pata_at91" 38#define CF_IDE_OFFSET 0x00c00000
36#define DRV_VERSION "0.1" 39#define CF_ALT_IDE_OFFSET 0x00e00000
37 40#define CF_IDE_RES_SIZE 0x08
38#define CF_IDE_OFFSET 0x00c00000 41#define CS_PULSE_MAXIMUM 319
39#define CF_ALT_IDE_OFFSET 0x00e00000 42#define ER_SMC_CALC 1
40#define CF_IDE_RES_SIZE 0x08 43#define ER_SMC_RECALC 2
41 44
42struct at91_ide_info { 45struct at91_ide_info {
43 unsigned long mode; 46 unsigned long mode;
44 unsigned int cs; 47 unsigned int cs;
45
46 struct clk *mck; 48 struct clk *mck;
47
48 void __iomem *ide_addr; 49 void __iomem *ide_addr;
49 void __iomem *alt_addr; 50 void __iomem *alt_addr;
50}; 51};
51 52
52static const struct ata_timing initial_timing = 53/**
53 {XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0}; 54 * struct smc_range - range of valid values for SMC register.
55 */
56struct smc_range {
57 int min;
58 int max;
59};
60
61/**
62 * adjust_smc_value - adjust value for one of SMC registers.
63 * @value: adjusted value
64 * @range: array of SMC ranges with valid values
65 * @size: SMC ranges array size
66 *
 67 * This returns the difference between the input and output value, or a negative
 68 * number in case of an invalid input value.
 69 * If a negative value is returned, the output is set to the maximum allowed by the ranges.
70 */
71static int adjust_smc_value(int *value, struct smc_range *range, int size)
72{
73 int maximum = (range + size - 1)->max;
74 int remainder;
75
76 do {
77 if (*value < range->min) {
78 remainder = range->min - *value;
79 *value = range->min; /* nearest valid value */
80 return remainder;
81 } else if ((range->min <= *value) && (*value <= range->max))
82 return 0;
83
84 range++;
85 } while (--size);
86 *value = maximum;
87
88 return -1; /* invalid value */
89}
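For a feel of what adjust_smc_value() does with the discontinuous SMC ranges, the sketch below (standalone, illustrative only) applies the same walk to the SMC_SETUP ranges {0..31, 128..159}: 20 stays put, 40 is raised to 128 with 88 returned, and 200 overflows every range, is clamped to 159, and -1 is returned.

#include <stdio.h>

struct range { int min, max; };

/* Same walk as adjust_smc_value(): clamp *value into a valid range and
 * return the amount it was raised by (>= 0), or -1 if no range can hold it. */
static int clamp_to_ranges(int *value, const struct range *r, int n)
{
        int max = r[n - 1].max;

        for (; n; n--, r++) {
                if (*value < r->min) {
                        int raised = r->min - *value;
                        *value = r->min;
                        return raised;
                }
                if (*value <= r->max)
                        return 0;
        }
        *value = max;
        return -1;
}

int main(void)
{
        const struct range setup_ranges[] = { { 0, 31 }, { 128, 159 } };
        int inputs[] = { 20, 40, 200 };
        unsigned int i;

        for (i = 0; i < sizeof(inputs) / sizeof(inputs[0]); i++) {
                int v = inputs[i];
                int ret = clamp_to_ranges(&v, setup_ranges, 2);

                printf("in=%d -> value=%d, ret=%d\n", inputs[i], v, ret);
        }
        return 0;
}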
90
91/**
92 * calc_smc_vals - calculate SMC register values
93 * @dev: ATA device
94 * @setup: SMC_SETUP register value
95 * @pulse: SMC_PULSE register value
96 * @cycle: SMC_CYCLE register value
97 *
98 * This returns negative in case of invalid values for SMC registers:
99 * -ER_SMC_RECALC - recalculation required for SMC values,
100 * -ER_SMC_CALC - calculation failed (invalid input values).
101 *
 102 * The SMC uses a special coding scheme; see the "Coding and Range of Timing
103 * Parameters" table from AT91SAM9 datasheets.
104 *
105 * SMC_SETUP = 128*setup[5] + setup[4:0]
106 * SMC_PULSE = 256*pulse[6] + pulse[5:0]
107 * SMC_CYCLE = 256*cycle[8:7] + cycle[6:0]
108 */
109static int calc_smc_vals(struct device *dev,
110 int *setup, int *pulse, int *cycle, int *cs_pulse)
111{
112 int ret_val;
113 int err = 0;
114 struct smc_range range_setup[] = { /* SMC_SETUP valid values */
115 {.min = 0, .max = 31}, /* first range */
116 {.min = 128, .max = 159} /* second range */
117 };
118 struct smc_range range_pulse[] = { /* SMC_PULSE valid values */
119 {.min = 0, .max = 63}, /* first range */
120 {.min = 256, .max = 319} /* second range */
121 };
122 struct smc_range range_cycle[] = { /* SMC_CYCLE valid values */
123 {.min = 0, .max = 127}, /* first range */
124 {.min = 256, .max = 383}, /* second range */
125 {.min = 512, .max = 639}, /* third range */
126 {.min = 768, .max = 895} /* fourth range */
127 };
128
129 ret_val = adjust_smc_value(setup, range_setup, ARRAY_SIZE(range_setup));
130 if (ret_val < 0)
131 dev_warn(dev, "maximal SMC Setup value\n");
132 else
133 *cycle += ret_val;
134
135 ret_val = adjust_smc_value(pulse, range_pulse, ARRAY_SIZE(range_pulse));
136 if (ret_val < 0)
137 dev_warn(dev, "maximal SMC Pulse value\n");
138 else
139 *cycle += ret_val;
140
141 ret_val = adjust_smc_value(cycle, range_cycle, ARRAY_SIZE(range_cycle));
142 if (ret_val < 0)
143 dev_warn(dev, "maximal SMC Cycle value\n");
144
145 *cs_pulse = *cycle;
146 if (*cs_pulse > CS_PULSE_MAXIMUM) {
147 dev_err(dev, "unable to calculate valid SMC settings\n");
148 return -ER_SMC_CALC;
149 }
150
151 ret_val = adjust_smc_value(cs_pulse, range_pulse,
152 ARRAY_SIZE(range_pulse));
153 if (ret_val < 0) {
154 dev_warn(dev, "maximal SMC CS Pulse value\n");
155 } else if (ret_val != 0) {
156 *cycle = *cs_pulse;
157 dev_warn(dev, "SMC Cycle extended\n");
158 err = -ER_SMC_RECALC;
159 }
160
161 return err;
162}
163
164/**
165 * to_smc_format - convert values into SMC format
166 * @setup: SETUP value of SMC Setup Register
167 * @pulse: PULSE value of SMC Pulse Register
168 * @cycle: CYCLE value of SMC Cycle Register
169 * @cs_pulse: NCS_PULSE value of SMC Pulse Register
170 */
171static void to_smc_format(int *setup, int *pulse, int *cycle, int *cs_pulse)
172{
173 *setup = (*setup & 0x1f) | ((*setup & 0x80) >> 2);
174 *pulse = (*pulse & 0x3f) | ((*pulse & 0x100) >> 2);
175 *cycle = (*cycle & 0x7f) | ((*cycle & 0x300) >> 1);
176 *cs_pulse = (*cs_pulse & 0x3f) | ((*cs_pulse & 0x100) >> 2);
177}
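As a worked example of the coding scheme quoted above calc_smc_vals() (SMC_SETUP = 128*setup[5] + setup[4:0], and similarly for the other fields), the standalone sketch below packs a raw setup value of 130 exactly as to_smc_format() does and decodes it back; it is illustrative only and not part of the driver.

#include <stdio.h>

/* Same bit packing as to_smc_format(), shown standalone for clarity. */
static int encode_setup(int setup)
{
        return (setup & 0x1f) | ((setup & 0x80) >> 2);
}

static int decode_setup(int field)
{
        /* SMC_SETUP = 128 * setup[5] + setup[4:0] */
        return 128 * ((field >> 5) & 1) + (field & 0x1f);
}

int main(void)
{
        int raw = 130;          /* valid: lies in the 128..159 range */
        int field = encode_setup(raw);

        printf("raw=%d -> field=0x%02x -> decoded=%d\n",
               raw, field, decode_setup(field));
        return 0;
}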
54 178
55static unsigned long calc_mck_cycles(unsigned long ns, unsigned long mck_hz) 179static unsigned long calc_mck_cycles(unsigned long ns, unsigned long mck_hz)
56{ 180{
@@ -69,80 +193,77 @@ static unsigned long calc_mck_cycles(unsigned long ns, unsigned long mck_hz)
69 return (ns * mul + 65536) >> 16; /* rounding */ 193 return (ns * mul + 65536) >> 16; /* rounding */
70} 194}
71 195
72static void set_smc_mode(struct at91_ide_info *info) 196/**
73{ 197 * set_smc_timing - SMC timings setup.
74 at91_sys_write(AT91_SMC_MODE(info->cs), info->mode); 198 * @dev: device
75 return; 199 * @info: AT91 IDE info
76} 200 * @ata: ATA timings
77 201 *
78static void set_smc_timing(struct device *dev, 202 * It's assumed that write timings are the same as read timings,
203 * cs_setup = 0 and cs_pulse = cycle.
204 */
205static void set_smc_timing(struct device *dev, struct ata_device *adev,
79 struct at91_ide_info *info, const struct ata_timing *ata) 206 struct at91_ide_info *info, const struct ata_timing *ata)
80{ 207{
81 unsigned long read_cycle, write_cycle, active, recover; 208 int ret = 0;
82 unsigned long nrd_setup, nrd_pulse, nrd_recover; 209 int use_iordy;
83 unsigned long nwe_setup, nwe_pulse; 210 unsigned int t6z; /* data tristate time in ns */
84 211 unsigned int cycle; /* SMC Cycle width in MCK ticks */
85 unsigned long ncs_write_setup, ncs_write_pulse; 212 unsigned int setup; /* SMC Setup width in MCK ticks */
86 unsigned long ncs_read_setup, ncs_read_pulse; 213 unsigned int pulse; /* CFIOR and CFIOW pulse width in MCK ticks */
87 214 unsigned int cs_setup = 0;/* CS4 or CS5 setup width in MCK ticks */
88 unsigned long mck_hz; 215 unsigned int cs_pulse; /* CS4 or CS5 pulse width in MCK ticks*/
89 216 unsigned int tdf_cycles; /* SMC TDF MCK ticks */
90 read_cycle = ata->cyc8b; 217 unsigned long mck_hz; /* MCK frequency in Hz */
91 nrd_setup = ata->setup; 218
92 nrd_pulse = ata->act8b; 219 t6z = (ata->mode < XFER_PIO_5) ? 30 : 20;
93 nrd_recover = ata->rec8b;
94
95 mck_hz = clk_get_rate(info->mck); 220 mck_hz = clk_get_rate(info->mck);
221 cycle = calc_mck_cycles(ata->cyc8b, mck_hz);
222 setup = calc_mck_cycles(ata->setup, mck_hz);
223 pulse = calc_mck_cycles(ata->act8b, mck_hz);
224 tdf_cycles = calc_mck_cycles(t6z, mck_hz);
225
226 do {
227 ret = calc_smc_vals(dev, &setup, &pulse, &cycle, &cs_pulse);
228 } while (ret == -ER_SMC_RECALC);
229
230 if (ret == -ER_SMC_CALC)
231 dev_err(dev, "Interface may not operate correctly\n");
232
233 dev_dbg(dev, "SMC Setup=%u, Pulse=%u, Cycle=%u, CS Pulse=%u\n",
234 setup, pulse, cycle, cs_pulse);
235 to_smc_format(&setup, &pulse, &cycle, &cs_pulse);
236 /* disable or enable waiting for IORDY signal */
237 use_iordy = ata_pio_need_iordy(adev);
238 if (use_iordy)
239 info->mode |= AT91_SMC_EXNWMODE_READY;
240
241 if (tdf_cycles > 15) {
242 tdf_cycles = 15;
243 dev_warn(dev, "maximal SMC TDF Cycles value\n");
244 }
96 245
97 read_cycle = calc_mck_cycles(read_cycle, mck_hz); 246 dev_dbg(dev, "Use IORDY=%u, TDF Cycles=%u\n", use_iordy, tdf_cycles);
98 nrd_setup = calc_mck_cycles(nrd_setup, mck_hz); 247 info->mode |= AT91_SMC_TDF_(tdf_cycles);
99 nrd_pulse = calc_mck_cycles(nrd_pulse, mck_hz);
100 nrd_recover = calc_mck_cycles(nrd_recover, mck_hz);
101
102 active = nrd_setup + nrd_pulse;
103 recover = read_cycle - active;
104
105 /* Need at least two cycles recovery */
106 if (recover < 2)
107 read_cycle = active + 2;
108
109 /* (CS0, CS1, DIR, OE) <= (CFCE1, CFCE2, CFRNW, NCSX) timings */
110 ncs_read_setup = 1;
111 ncs_read_pulse = read_cycle - 2;
112
113 /* Write timings same as read timings */
114 write_cycle = read_cycle;
115 nwe_setup = nrd_setup;
116 nwe_pulse = nrd_pulse;
117 ncs_write_setup = ncs_read_setup;
118 ncs_write_pulse = ncs_read_pulse;
119
120 dev_dbg(dev, "ATA timings: nrd_setup = %lu nrd_pulse = %lu nrd_cycle = %lu\n",
121 nrd_setup, nrd_pulse, read_cycle);
122 dev_dbg(dev, "ATA timings: nwe_setup = %lu nwe_pulse = %lu nwe_cycle = %lu\n",
123 nwe_setup, nwe_pulse, write_cycle);
124 dev_dbg(dev, "ATA timings: ncs_read_setup = %lu ncs_read_pulse = %lu\n",
125 ncs_read_setup, ncs_read_pulse);
126 dev_dbg(dev, "ATA timings: ncs_write_setup = %lu ncs_write_pulse = %lu\n",
127 ncs_write_setup, ncs_write_pulse);
128 248
249 /* write SMC Setup Register */
129 at91_sys_write(AT91_SMC_SETUP(info->cs), 250 at91_sys_write(AT91_SMC_SETUP(info->cs),
130 AT91_SMC_NWESETUP_(nwe_setup) | 251 AT91_SMC_NWESETUP_(setup) |
131 AT91_SMC_NRDSETUP_(nrd_setup) | 252 AT91_SMC_NRDSETUP_(setup) |
132 AT91_SMC_NCS_WRSETUP_(ncs_write_setup) | 253 AT91_SMC_NCS_WRSETUP_(cs_setup) |
133 AT91_SMC_NCS_RDSETUP_(ncs_read_setup)); 254 AT91_SMC_NCS_RDSETUP_(cs_setup));
134 255 /* write SMC Pulse Register */
135 at91_sys_write(AT91_SMC_PULSE(info->cs), 256 at91_sys_write(AT91_SMC_PULSE(info->cs),
136 AT91_SMC_NWEPULSE_(nwe_pulse) | 257 AT91_SMC_NWEPULSE_(pulse) |
137 AT91_SMC_NRDPULSE_(nrd_pulse) | 258 AT91_SMC_NRDPULSE_(pulse) |
138 AT91_SMC_NCS_WRPULSE_(ncs_write_pulse) | 259 AT91_SMC_NCS_WRPULSE_(cs_pulse) |
139 AT91_SMC_NCS_RDPULSE_(ncs_read_pulse)); 260 AT91_SMC_NCS_RDPULSE_(cs_pulse));
140 261 /* write SMC Cycle Register */
141 at91_sys_write(AT91_SMC_CYCLE(info->cs), 262 at91_sys_write(AT91_SMC_CYCLE(info->cs),
142 AT91_SMC_NWECYCLE_(write_cycle) | 263 AT91_SMC_NWECYCLE_(cycle) |
143 AT91_SMC_NRDCYCLE_(read_cycle)); 264 AT91_SMC_NRDCYCLE_(cycle));
144 265 /* write SMC Mode Register*/
145 return; 266 at91_sys_write(AT91_SMC_MODE(info->cs), info->mode);
146} 267}
147 268
148static void pata_at91_set_piomode(struct ata_port *ap, struct ata_device *adev) 269static void pata_at91_set_piomode(struct ata_port *ap, struct ata_device *adev)
@@ -156,15 +277,9 @@ static void pata_at91_set_piomode(struct ata_port *ap, struct ata_device *adev)
156 if (ret) { 277 if (ret) {
157 dev_warn(ap->dev, "Failed to compute ATA timing %d, " 278 dev_warn(ap->dev, "Failed to compute ATA timing %d, "
158 "set PIO_0 timing\n", ret); 279 "set PIO_0 timing\n", ret);
159 set_smc_timing(ap->dev, info, &initial_timing); 280 timing = *ata_timing_find_mode(XFER_PIO_0);
160 } else {
161 set_smc_timing(ap->dev, info, &timing);
162 } 281 }
163 282 set_smc_timing(ap->dev, adev, info, &timing);
164 /* Setup SMC mode */
165 set_smc_mode(info);
166
167 return;
168} 283}
169 284
170static unsigned int pata_at91_data_xfer_noirq(struct ata_device *dev, 285static unsigned int pata_at91_data_xfer_noirq(struct ata_device *dev,
@@ -330,7 +445,7 @@ static int __devexit pata_at91_remove(struct platform_device *pdev)
330static struct platform_driver pata_at91_driver = { 445static struct platform_driver pata_at91_driver = {
331 .probe = pata_at91_probe, 446 .probe = pata_at91_probe,
332 .remove = __devexit_p(pata_at91_remove), 447 .remove = __devexit_p(pata_at91_remove),
333 .driver = { 448 .driver = {
334 .name = DRV_NAME, 449 .name = DRV_NAME,
335 .owner = THIS_MODULE, 450 .owner = THIS_MODULE,
336 }, 451 },
diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c
index 9cae65de750e..ea64967000ff 100644
--- a/drivers/ata/pata_bf54x.c
+++ b/drivers/ata/pata_bf54x.c
@@ -826,7 +826,7 @@ static void bfin_dev_select(struct ata_port *ap, unsigned int device)
826 * @ctl: value to write 826 * @ctl: value to write
827 */ 827 */
828 828
829static u8 bfin_set_devctl(struct ata_port *ap, u8 ctl) 829static void bfin_set_devctl(struct ata_port *ap, u8 ctl)
830{ 830{
831 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; 831 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
832 write_atapi_register(base, ATA_REG_CTRL, ctl); 832 write_atapi_register(base, ATA_REG_CTRL, ctl);
@@ -1046,7 +1046,7 @@ static void bfin_bus_post_reset(struct ata_port *ap, unsigned int devmask)
1046 dev1 = 0; 1046 dev1 = 0;
1047 break; 1047 break;
1048 } 1048 }
1049 msleep(50); /* give drive a breather */ 1049 ata_msleep(ap, 50); /* give drive a breather */
1050 } 1050 }
1051 if (dev1) 1051 if (dev1)
1052 ata_sff_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT); 1052 ata_sff_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
@@ -1087,7 +1087,7 @@ static unsigned int bfin_bus_softreset(struct ata_port *ap,
1087 * 1087 *
1088 * Old drivers/ide uses the 2mS rule and then waits for ready 1088 * Old drivers/ide uses the 2mS rule and then waits for ready
1089 */ 1089 */
1090 msleep(150); 1090 ata_msleep(ap, 150);
1091 1091
1092 /* Before we perform post reset processing we want to see if 1092 /* Before we perform post reset processing we want to see if
1093 * the bus shows 0xFF because the odd clown forgets the D7 1093 * the bus shows 0xFF because the odd clown forgets the D7
@@ -1342,7 +1342,7 @@ static unsigned int bfin_ata_host_intr(struct ata_port *ap,
1342 ap->ops->bmdma_stop(qc); 1342 ap->ops->bmdma_stop(qc);
1343 1343
1344 if (unlikely(host_stat & ATA_DMA_ERR)) { 1344 if (unlikely(host_stat & ATA_DMA_ERR)) {
1345 /* error when transfering data to/from memory */ 1345 /* error when transferring data to/from memory */
1346 qc->err_mask |= AC_ERR_HOST_BUS; 1346 qc->err_mask |= AC_ERR_HOST_BUS;
1347 ap->hsm_task_state = HSM_ST_ERR; 1347 ap->hsm_task_state = HSM_ST_ERR;
1348 } 1348 }
@@ -1454,9 +1454,7 @@ static struct ata_port_operations bfin_pata_ops = {
1454 1454
1455static struct ata_port_info bfin_port_info[] = { 1455static struct ata_port_info bfin_port_info[] = {
1456 { 1456 {
1457 .flags = ATA_FLAG_SLAVE_POSS 1457 .flags = ATA_FLAG_SLAVE_POSS,
1458 | ATA_FLAG_MMIO
1459 | ATA_FLAG_NO_LEGACY,
1460 .pio_mask = ATA_PIO4, 1458 .pio_mask = ATA_PIO4,
1461 .mwdma_mask = 0, 1459 .mwdma_mask = 0,
1462 .udma_mask = 0, 1460 .udma_mask = 0,
@@ -1588,7 +1586,7 @@ static int __devinit bfin_atapi_probe(struct platform_device *pdev)
1588 host->ports[0]->ioaddr.ctl_addr = (void *)res->start; 1586 host->ports[0]->ioaddr.ctl_addr = (void *)res->start;
1589 1587
1590 if (peripheral_request_list(atapi_io_port, "atapi-io-port")) { 1588 if (peripheral_request_list(atapi_io_port, "atapi-io-port")) {
1591 dev_err(&pdev->dev, "Requesting Peripherals faild\n"); 1589 dev_err(&pdev->dev, "Requesting Peripherals failed\n");
1592 return -EFAULT; 1590 return -EFAULT;
1593 } 1591 }
1594 1592
diff --git a/drivers/ata/pata_cmd640.c b/drivers/ata/pata_cmd640.c
index e5f289f59ca3..549d28dbf90d 100644
--- a/drivers/ata/pata_cmd640.c
+++ b/drivers/ata/pata_cmd640.c
@@ -161,6 +161,17 @@ static int cmd640_port_start(struct ata_port *ap)
161 return 0; 161 return 0;
162} 162}
163 163
164static bool cmd640_sff_irq_check(struct ata_port *ap)
165{
166 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
167 int irq_reg = ap->port_no ? ARTIM23 : CFR;
168 u8 irq_stat, irq_mask = ap->port_no ? 0x10 : 0x04;
169
170 pci_read_config_byte(pdev, irq_reg, &irq_stat);
171
172 return irq_stat & irq_mask;
173}
174
164static struct scsi_host_template cmd640_sht = { 175static struct scsi_host_template cmd640_sht = {
165 ATA_PIO_SHT(DRV_NAME), 176 ATA_PIO_SHT(DRV_NAME),
166}; 177};
@@ -169,6 +180,7 @@ static struct ata_port_operations cmd640_port_ops = {
169 .inherits = &ata_sff_port_ops, 180 .inherits = &ata_sff_port_ops,
170 /* In theory xfer_noirq is not needed once we kill the prefetcher */ 181 /* In theory xfer_noirq is not needed once we kill the prefetcher */
171 .sff_data_xfer = ata_sff_data_xfer_noirq, 182 .sff_data_xfer = ata_sff_data_xfer_noirq,
183 .sff_irq_check = cmd640_sff_irq_check,
172 .qc_issue = cmd640_qc_issue, 184 .qc_issue = cmd640_qc_issue,
173 .cable_detect = ata_cable_40wire, 185 .cable_detect = ata_cable_40wire,
174 .set_piomode = cmd640_set_piomode, 186 .set_piomode = cmd640_set_piomode,
diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c
index 905ff76d3cbb..7bafc16cf5e0 100644
--- a/drivers/ata/pata_cmd64x.c
+++ b/drivers/ata/pata_cmd64x.c
@@ -41,6 +41,9 @@
41enum { 41enum {
42 CFR = 0x50, 42 CFR = 0x50,
43 CFR_INTR_CH0 = 0x04, 43 CFR_INTR_CH0 = 0x04,
44 CNTRL = 0x51,
45 CNTRL_CH0 = 0x04,
46 CNTRL_CH1 = 0x08,
44 CMDTIM = 0x52, 47 CMDTIM = 0x52,
45 ARTTIM0 = 0x53, 48 ARTTIM0 = 0x53,
46 DRWTIM0 = 0x54, 49 DRWTIM0 = 0x54,
@@ -328,9 +331,19 @@ static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
328 .port_ops = &cmd648_port_ops 331 .port_ops = &cmd648_port_ops
329 } 332 }
330 }; 333 };
331 const struct ata_port_info *ppi[] = { &cmd_info[id->driver_data], NULL }; 334 const struct ata_port_info *ppi[] = {
332 u8 mrdmode; 335 &cmd_info[id->driver_data],
336 &cmd_info[id->driver_data],
337 NULL
338 };
339 u8 mrdmode, reg;
333 int rc; 340 int rc;
341 struct pci_dev *bridge = pdev->bus->self;
342 /* mobility split bridges don't report enabled ports correctly */
343 int port_ok = !(bridge && bridge->vendor ==
344 PCI_VENDOR_ID_MOBILITY_ELECTRONICS);
345 /* all (with exceptions below) apart from 643 have CNTRL_CH0 bit */
346 int cntrl_ch0_ok = (id->driver_data != 0);
334 347
335 rc = pcim_enable_device(pdev); 348 rc = pcim_enable_device(pdev);
336 if (rc) 349 if (rc)
@@ -341,11 +354,18 @@ static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
341 354
342 if (pdev->device == PCI_DEVICE_ID_CMD_646) { 355 if (pdev->device == PCI_DEVICE_ID_CMD_646) {
343 /* Does UDMA work ? */ 356 /* Does UDMA work ? */
344 if (pdev->revision > 4) 357 if (pdev->revision > 4) {
345 ppi[0] = &cmd_info[2]; 358 ppi[0] = &cmd_info[2];
359 ppi[1] = &cmd_info[2];
360 }
346 /* Early rev with other problems ? */ 361 /* Early rev with other problems ? */
347 else if (pdev->revision == 1) 362 else if (pdev->revision == 1) {
348 ppi[0] = &cmd_info[3]; 363 ppi[0] = &cmd_info[3];
364 ppi[1] = &cmd_info[3];
365 }
366 /* revs 1,2 have no CNTRL_CH0 */
367 if (pdev->revision < 3)
368 cntrl_ch0_ok = 0;
349 } 369 }
350 370
351 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 64); 371 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 64);
@@ -354,6 +374,20 @@ static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
354 mrdmode |= 0x02; /* Memory read line enable */ 374 mrdmode |= 0x02; /* Memory read line enable */
355 pci_write_config_byte(pdev, MRDMODE, mrdmode); 375 pci_write_config_byte(pdev, MRDMODE, mrdmode);
356 376
377 /* check for enabled ports */
378 pci_read_config_byte(pdev, CNTRL, &reg);
379 if (!port_ok)
380 dev_printk(KERN_NOTICE, &pdev->dev, "Mobility Bridge detected, ignoring CNTRL port enable/disable\n");
381 if (port_ok && cntrl_ch0_ok && !(reg & CNTRL_CH0)) {
382 dev_printk(KERN_NOTICE, &pdev->dev, "Primary port is disabled\n");
383 ppi[0] = &ata_dummy_port_info;
384
385 }
386 if (port_ok && !(reg & CNTRL_CH1)) {
387 dev_printk(KERN_NOTICE, &pdev->dev, "Secondary port is disabled\n");
388 ppi[1] = &ata_dummy_port_info;
389 }
390
357 /* Force PIO 0 here.. */ 391 /* Force PIO 0 here.. */
358 392
359 /* PPC specific fixup copied from old driver */ 393 /* PPC specific fixup copied from old driver */
diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c
index 030952f1f97c..e3254fcff0f1 100644
--- a/drivers/ata/pata_cs5520.c
+++ b/drivers/ata/pata_cs5520.c
@@ -29,7 +29,7 @@
29 * General Public License for more details. 29 * General Public License for more details.
30 * 30 *
31 * Documentation: 31 * Documentation:
32 * Not publically available. 32 * Not publicly available.
33 */ 33 */
34#include <linux/kernel.h> 34#include <linux/kernel.h>
35#include <linux/module.h> 35#include <linux/module.h>
diff --git a/drivers/ata/pata_cs5536.c b/drivers/ata/pata_cs5536.c
index 21ee23f89e88..628c8fae5937 100644
--- a/drivers/ata/pata_cs5536.c
+++ b/drivers/ata/pata_cs5536.c
@@ -37,10 +37,22 @@
37#include <linux/delay.h> 37#include <linux/delay.h>
38#include <linux/libata.h> 38#include <linux/libata.h>
39#include <scsi/scsi_host.h> 39#include <scsi/scsi_host.h>
40
41#ifdef CONFIG_X86_32
40#include <asm/msr.h> 42#include <asm/msr.h>
43static int use_msr;
44module_param_named(msr, use_msr, int, 0644);
45MODULE_PARM_DESC(msr, "Force using MSR to configure IDE function (Default: 0)");
46#else
47#undef rdmsr /* avoid accidental MSR usage on, e.g. x86-64 */
48#undef wrmsr
49#define rdmsr(x, y, z) do { } while (0)
50#define wrmsr(x, y, z) do { } while (0)
51#define use_msr 0
52#endif
41 53
42#define DRV_NAME "pata_cs5536" 54#define DRV_NAME "pata_cs5536"
43#define DRV_VERSION "0.0.7" 55#define DRV_VERSION "0.0.8"
44 56
45enum { 57enum {
46 CFG = 0, 58 CFG = 0,
@@ -75,8 +87,6 @@ enum {
75 IDE_ETC_NODMA = 0x03, 87 IDE_ETC_NODMA = 0x03,
76}; 88};
77 89
78static int use_msr;
79
80static const u32 msr_reg[4] = { 90static const u32 msr_reg[4] = {
81 MSR_IDE_CFG, MSR_IDE_DTC, MSR_IDE_CAST, MSR_IDE_ETC, 91 MSR_IDE_CFG, MSR_IDE_DTC, MSR_IDE_CAST, MSR_IDE_ETC,
82}; 92};
@@ -88,7 +98,7 @@ static const u8 pci_reg[4] = {
88static inline int cs5536_read(struct pci_dev *pdev, int reg, u32 *val) 98static inline int cs5536_read(struct pci_dev *pdev, int reg, u32 *val)
89{ 99{
90 if (unlikely(use_msr)) { 100 if (unlikely(use_msr)) {
91 u32 dummy; 101 u32 dummy __maybe_unused;
92 102
93 rdmsr(msr_reg[reg], *val, dummy); 103 rdmsr(msr_reg[reg], *val, dummy);
94 return 0; 104 return 0;
@@ -294,8 +304,6 @@ MODULE_DESCRIPTION("low-level driver for the CS5536 IDE controller");
294MODULE_LICENSE("GPL"); 304MODULE_LICENSE("GPL");
295MODULE_DEVICE_TABLE(pci, cs5536); 305MODULE_DEVICE_TABLE(pci, cs5536);
296MODULE_VERSION(DRV_VERSION); 306MODULE_VERSION(DRV_VERSION);
297module_param_named(msr, use_msr, int, 0644);
298MODULE_PARM_DESC(msr, "Force using MSR to configure IDE function (Default: 0)");
299 307
300module_init(cs5536_init); 308module_init(cs5536_init);
301module_exit(cs5536_exit); 309module_exit(cs5536_exit);
diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
index 7688868557b9..6c77d68dbd05 100644
--- a/drivers/ata/pata_hpt366.c
+++ b/drivers/ata/pata_hpt366.c
@@ -14,6 +14,7 @@
14 * Look into engine reset on timeout errors. Should not be required. 14 * Look into engine reset on timeout errors. Should not be required.
15 */ 15 */
16 16
17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
17 18
18#include <linux/kernel.h> 19#include <linux/kernel.h>
19#include <linux/module.h> 20#include <linux/module.h>
@@ -25,7 +26,7 @@
25#include <linux/libata.h> 26#include <linux/libata.h>
26 27
27#define DRV_NAME "pata_hpt366" 28#define DRV_NAME "pata_hpt366"
28#define DRV_VERSION "0.6.8" 29#define DRV_VERSION "0.6.11"
29 30
30struct hpt_clock { 31struct hpt_clock {
31 u8 xfer_mode; 32 u8 xfer_mode;
@@ -110,18 +111,23 @@ static const struct hpt_clock hpt366_25[] = {
110 { 0, 0x01208585 } 111 { 0, 0x01208585 }
111}; 112};
112 113
113static const char *bad_ata33[] = { 114static const char * const bad_ata33[] = {
114 "Maxtor 92720U8", "Maxtor 92040U6", "Maxtor 91360U4", "Maxtor 91020U3", "Maxtor 90845U3", "Maxtor 90650U2", 115 "Maxtor 92720U8", "Maxtor 92040U6", "Maxtor 91360U4", "Maxtor 91020U3",
115 "Maxtor 91360D8", "Maxtor 91190D7", "Maxtor 91020D6", "Maxtor 90845D5", "Maxtor 90680D4", "Maxtor 90510D3", "Maxtor 90340D2", 116 "Maxtor 90845U3", "Maxtor 90650U2",
116 "Maxtor 91152D8", "Maxtor 91008D7", "Maxtor 90845D6", "Maxtor 90840D6", "Maxtor 90720D5", "Maxtor 90648D5", "Maxtor 90576D4", 117 "Maxtor 91360D8", "Maxtor 91190D7", "Maxtor 91020D6", "Maxtor 90845D5",
118 "Maxtor 90680D4", "Maxtor 90510D3", "Maxtor 90340D2",
119 "Maxtor 91152D8", "Maxtor 91008D7", "Maxtor 90845D6", "Maxtor 90840D6",
120 "Maxtor 90720D5", "Maxtor 90648D5", "Maxtor 90576D4",
117 "Maxtor 90510D4", 121 "Maxtor 90510D4",
118 "Maxtor 90432D3", "Maxtor 90288D2", "Maxtor 90256D2", 122 "Maxtor 90432D3", "Maxtor 90288D2", "Maxtor 90256D2",
119 "Maxtor 91000D8", "Maxtor 90910D8", "Maxtor 90875D7", "Maxtor 90840D7", "Maxtor 90750D6", "Maxtor 90625D5", "Maxtor 90500D4", 123 "Maxtor 91000D8", "Maxtor 90910D8", "Maxtor 90875D7", "Maxtor 90840D7",
120 "Maxtor 91728D8", "Maxtor 91512D7", "Maxtor 91303D6", "Maxtor 91080D5", "Maxtor 90845D4", "Maxtor 90680D4", "Maxtor 90648D3", "Maxtor 90432D2", 124 "Maxtor 90750D6", "Maxtor 90625D5", "Maxtor 90500D4",
125 "Maxtor 91728D8", "Maxtor 91512D7", "Maxtor 91303D6", "Maxtor 91080D5",
126 "Maxtor 90845D4", "Maxtor 90680D4", "Maxtor 90648D3", "Maxtor 90432D2",
121 NULL 127 NULL
122}; 128};
123 129
124static const char *bad_ata66_4[] = { 130static const char * const bad_ata66_4[] = {
125 "IBM-DTLA-307075", 131 "IBM-DTLA-307075",
126 "IBM-DTLA-307060", 132 "IBM-DTLA-307060",
127 "IBM-DTLA-307045", 133 "IBM-DTLA-307045",
@@ -140,12 +146,13 @@ static const char *bad_ata66_4[] = {
140 NULL 146 NULL
141}; 147};
142 148
143static const char *bad_ata66_3[] = { 149static const char * const bad_ata66_3[] = {
144 "WDC AC310200R", 150 "WDC AC310200R",
145 NULL 151 NULL
146}; 152};
147 153
148static int hpt_dma_blacklisted(const struct ata_device *dev, char *modestr, const char *list[]) 154static int hpt_dma_blacklisted(const struct ata_device *dev, char *modestr,
155 const char * const list[])
149{ 156{
150 unsigned char model_num[ATA_ID_PROD_LEN + 1]; 157 unsigned char model_num[ATA_ID_PROD_LEN + 1];
151 int i = 0; 158 int i = 0;
@@ -154,7 +161,7 @@ static int hpt_dma_blacklisted(const struct ata_device *dev, char *modestr, cons
154 161
155 while (list[i] != NULL) { 162 while (list[i] != NULL) {
156 if (!strcmp(list[i], model_num)) { 163 if (!strcmp(list[i], model_num)) {
157 printk(KERN_WARNING DRV_NAME ": %s is not supported for %s.\n", 164 pr_warn("%s is not supported for %s\n",
158 modestr, list[i]); 165 modestr, list[i]);
159 return 1; 166 return 1;
160 } 167 }
@@ -288,6 +295,7 @@ static struct ata_port_operations hpt366_port_ops = {
288static void hpt36x_init_chipset(struct pci_dev *dev) 295static void hpt36x_init_chipset(struct pci_dev *dev)
289{ 296{
290 u8 drive_fast; 297 u8 drive_fast;
298
291 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, (L1_CACHE_BYTES / 4)); 299 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, (L1_CACHE_BYTES / 4));
292 pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x78); 300 pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x78);
293 pci_write_config_byte(dev, PCI_MIN_GNT, 0x08); 301 pci_write_config_byte(dev, PCI_MIN_GNT, 0x08);
@@ -349,16 +357,16 @@ static int hpt36x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
349 357
350 /* PCI clocking determines the ATA timing values to use */ 358 /* PCI clocking determines the ATA timing values to use */
351 /* info_hpt366 is safe against re-entry so we can scribble on it */ 359 /* info_hpt366 is safe against re-entry so we can scribble on it */
352 switch((reg1 & 0x700) >> 8) { 360 switch ((reg1 & 0x700) >> 8) {
353 case 9: 361 case 9:
354 hpriv = &hpt366_40; 362 hpriv = &hpt366_40;
355 break; 363 break;
356 case 5: 364 case 5:
357 hpriv = &hpt366_25; 365 hpriv = &hpt366_25;
358 break; 366 break;
359 default: 367 default:
360 hpriv = &hpt366_33; 368 hpriv = &hpt366_33;
361 break; 369 break;
362 } 370 }
363 /* Now kick off ATA set up */ 371 /* Now kick off ATA set up */
364 return ata_pci_bmdma_init_one(dev, ppi, &hpt36x_sht, hpriv, 0); 372 return ata_pci_bmdma_init_one(dev, ppi, &hpt36x_sht, hpriv, 0);
@@ -385,9 +393,9 @@ static const struct pci_device_id hpt36x[] = {
385}; 393};
386 394
387static struct pci_driver hpt36x_pci_driver = { 395static struct pci_driver hpt36x_pci_driver = {
388 .name = DRV_NAME, 396 .name = DRV_NAME,
389 .id_table = hpt36x, 397 .id_table = hpt36x,
390 .probe = hpt36x_init_one, 398 .probe = hpt36x_init_one,
391 .remove = ata_pci_remove_one, 399 .remove = ata_pci_remove_one,
392#ifdef CONFIG_PM 400#ifdef CONFIG_PM
393 .suspend = ata_pci_device_suspend, 401 .suspend = ata_pci_device_suspend,
diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
index 9ae4c0830577..9620636aa405 100644
--- a/drivers/ata/pata_hpt37x.c
+++ b/drivers/ata/pata_hpt37x.c
@@ -8,12 +8,14 @@
8 * Copyright (C) 1999-2003 Andre Hedrick <andre@linux-ide.org> 8 * Copyright (C) 1999-2003 Andre Hedrick <andre@linux-ide.org>
9 * Portions Copyright (C) 2001 Sun Microsystems, Inc. 9 * Portions Copyright (C) 2001 Sun Microsystems, Inc.
10 * Portions Copyright (C) 2003 Red Hat Inc 10 * Portions Copyright (C) 2003 Red Hat Inc
11 * Portions Copyright (C) 2005-2009 MontaVista Software, Inc. 11 * Portions Copyright (C) 2005-2010 MontaVista Software, Inc.
12 * 12 *
13 * TODO 13 * TODO
14 * Look into engine reset on timeout errors. Should not be required. 14 * Look into engine reset on timeout errors. Should not be required.
15 */ 15 */
16 16
17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18
17#include <linux/kernel.h> 19#include <linux/kernel.h>
18#include <linux/module.h> 20#include <linux/module.h>
19#include <linux/pci.h> 21#include <linux/pci.h>
@@ -24,7 +26,7 @@
24#include <linux/libata.h> 26#include <linux/libata.h>
25 27
26#define DRV_NAME "pata_hpt37x" 28#define DRV_NAME "pata_hpt37x"
27#define DRV_VERSION "0.6.15" 29#define DRV_VERSION "0.6.23"
28 30
29struct hpt_clock { 31struct hpt_clock {
30 u8 xfer_speed; 32 u8 xfer_speed;
@@ -210,7 +212,7 @@ static u32 hpt37x_find_mode(struct ata_port *ap, int speed)
210{ 212{
211 struct hpt_clock *clocks = ap->host->private_data; 213 struct hpt_clock *clocks = ap->host->private_data;
212 214
213 while(clocks->xfer_speed) { 215 while (clocks->xfer_speed) {
214 if (clocks->xfer_speed == speed) 216 if (clocks->xfer_speed == speed)
215 return clocks->timing; 217 return clocks->timing;
216 clocks++; 218 clocks++;
@@ -219,7 +221,8 @@ static u32 hpt37x_find_mode(struct ata_port *ap, int speed)
219 return 0xffffffffU; /* silence compiler warning */ 221 return 0xffffffffU; /* silence compiler warning */
220} 222}
221 223
222static int hpt_dma_blacklisted(const struct ata_device *dev, char *modestr, const char *list[]) 224static int hpt_dma_blacklisted(const struct ata_device *dev, char *modestr,
225 const char * const list[])
223{ 226{
224 unsigned char model_num[ATA_ID_PROD_LEN + 1]; 227 unsigned char model_num[ATA_ID_PROD_LEN + 1];
225 int i = 0; 228 int i = 0;
@@ -228,7 +231,7 @@ static int hpt_dma_blacklisted(const struct ata_device *dev, char *modestr, cons
228 231
229 while (list[i] != NULL) { 232 while (list[i] != NULL) {
230 if (!strcmp(list[i], model_num)) { 233 if (!strcmp(list[i], model_num)) {
231 printk(KERN_WARNING DRV_NAME ": %s is not supported for %s.\n", 234 pr_warn("%s is not supported for %s\n",
232 modestr, list[i]); 235 modestr, list[i]);
233 return 1; 236 return 1;
234 } 237 }
@@ -237,18 +240,23 @@ static int hpt_dma_blacklisted(const struct ata_device *dev, char *modestr, cons
237 return 0; 240 return 0;
238} 241}
239 242
240static const char *bad_ata33[] = { 243static const char * const bad_ata33[] = {
241 "Maxtor 92720U8", "Maxtor 92040U6", "Maxtor 91360U4", "Maxtor 91020U3", "Maxtor 90845U3", "Maxtor 90650U2", 244 "Maxtor 92720U8", "Maxtor 92040U6", "Maxtor 91360U4", "Maxtor 91020U3",
242 "Maxtor 91360D8", "Maxtor 91190D7", "Maxtor 91020D6", "Maxtor 90845D5", "Maxtor 90680D4", "Maxtor 90510D3", "Maxtor 90340D2", 245 "Maxtor 90845U3", "Maxtor 90650U2",
243 "Maxtor 91152D8", "Maxtor 91008D7", "Maxtor 90845D6", "Maxtor 90840D6", "Maxtor 90720D5", "Maxtor 90648D5", "Maxtor 90576D4", 246 "Maxtor 91360D8", "Maxtor 91190D7", "Maxtor 91020D6", "Maxtor 90845D5",
247 "Maxtor 90680D4", "Maxtor 90510D3", "Maxtor 90340D2",
248 "Maxtor 91152D8", "Maxtor 91008D7", "Maxtor 90845D6", "Maxtor 90840D6",
249 "Maxtor 90720D5", "Maxtor 90648D5", "Maxtor 90576D4",
244 "Maxtor 90510D4", 250 "Maxtor 90510D4",
245 "Maxtor 90432D3", "Maxtor 90288D2", "Maxtor 90256D2", 251 "Maxtor 90432D3", "Maxtor 90288D2", "Maxtor 90256D2",
246 "Maxtor 91000D8", "Maxtor 90910D8", "Maxtor 90875D7", "Maxtor 90840D7", "Maxtor 90750D6", "Maxtor 90625D5", "Maxtor 90500D4", 252 "Maxtor 91000D8", "Maxtor 90910D8", "Maxtor 90875D7", "Maxtor 90840D7",
247 "Maxtor 91728D8", "Maxtor 91512D7", "Maxtor 91303D6", "Maxtor 91080D5", "Maxtor 90845D4", "Maxtor 90680D4", "Maxtor 90648D3", "Maxtor 90432D2", 253 "Maxtor 90750D6", "Maxtor 90625D5", "Maxtor 90500D4",
254 "Maxtor 91728D8", "Maxtor 91512D7", "Maxtor 91303D6", "Maxtor 91080D5",
255 "Maxtor 90845D4", "Maxtor 90680D4", "Maxtor 90648D3", "Maxtor 90432D2",
248 NULL 256 NULL
249}; 257};
250 258
251static const char *bad_ata100_5[] = { 259static const char * const bad_ata100_5[] = {
252 "IBM-DTLA-307075", 260 "IBM-DTLA-307075",
253 "IBM-DTLA-307060", 261 "IBM-DTLA-307060",
254 "IBM-DTLA-307045", 262 "IBM-DTLA-307045",
@@ -302,6 +310,22 @@ static unsigned long hpt370a_filter(struct ata_device *adev, unsigned long mask)
302} 310}
303 311
304/** 312/**
313 * hpt372_filter - mode selection filter
314 * @adev: ATA device
315 * @mask: mode mask
316 *
317 * The Marvell bridge chips used on the HighPoint SATA cards do not seem
318 * to support the UltraDMA modes 1, 2, and 3 as well as any MWDMA modes...
319 */
320static unsigned long hpt372_filter(struct ata_device *adev, unsigned long mask)
321{
322 if (ata_id_is_sata(adev->id))
323 mask &= ~((0xE << ATA_SHIFT_UDMA) | ATA_MASK_MWDMA);
324
325 return mask;
326}
327
328/**
305 * hpt37x_cable_detect - Detect the cable type 329 * hpt37x_cable_detect - Detect the cable type
306 * @ap: ATA port to detect on 330 * @ap: ATA port to detect on
307 * 331 *
@@ -373,6 +397,7 @@ static int hpt37x_pre_reset(struct ata_link *link, unsigned long deadline)
373 { 0x50, 1, 0x04, 0x04 }, 397 { 0x50, 1, 0x04, 0x04 },
374 { 0x54, 1, 0x04, 0x04 } 398 { 0x54, 1, 0x04, 0x04 }
375 }; 399 };
400
376 if (!pci_test_config_bits(pdev, &hpt37x_enable_bits[ap->port_no])) 401 if (!pci_test_config_bits(pdev, &hpt37x_enable_bits[ap->port_no]))
377 return -ENOENT; 402 return -ENOENT;
378 403
@@ -586,11 +611,11 @@ static struct ata_port_operations hpt370a_port_ops = {
586}; 611};
587 612
588/* 613/*
589 * Configuration for HPT372, HPT371, HPT302. Slightly different PIO 614 * Configuration for HPT371 and HPT302. Slightly different PIO and DMA
590 * and DMA mode setting functionality. 615 * mode setting functionality.
591 */ 616 */
592 617
593static struct ata_port_operations hpt372_port_ops = { 618static struct ata_port_operations hpt302_port_ops = {
594 .inherits = &ata_bmdma_port_ops, 619 .inherits = &ata_bmdma_port_ops,
595 620
596 .bmdma_stop = hpt37x_bmdma_stop, 621 .bmdma_stop = hpt37x_bmdma_stop,
@@ -602,14 +627,23 @@ static struct ata_port_operations hpt372_port_ops = {
602}; 627};
603 628
604/* 629/*
605 * Configuration for HPT374. Mode setting works like 372 and friends 630 * Configuration for HPT372. Mode setting works like 371 and 302
631 * but we have a mode filter.
632 */
633
634static struct ata_port_operations hpt372_port_ops = {
635 .inherits = &hpt302_port_ops,
636 .mode_filter = hpt372_filter,
637};
638
639/*
640 * Configuration for HPT374. Mode setting and filtering works like 372
606 * but we have a different cable detection procedure for function 1. 641 * but we have a different cable detection procedure for function 1.
607 */ 642 */
608 643
609static struct ata_port_operations hpt374_fn1_port_ops = { 644static struct ata_port_operations hpt374_fn1_port_ops = {
610 .inherits = &hpt372_port_ops, 645 .inherits = &hpt372_port_ops,
611 .cable_detect = hpt374_fn1_cable_detect, 646 .cable_detect = hpt374_fn1_cable_detect,
612 .prereset = hpt37x_pre_reset,
613}; 647};
614 648
615/** 649/**
@@ -647,12 +681,12 @@ static int hpt37x_calibrate_dpll(struct pci_dev *dev)
647 u32 reg5c; 681 u32 reg5c;
648 int tries; 682 int tries;
649 683
650 for(tries = 0; tries < 0x5000; tries++) { 684 for (tries = 0; tries < 0x5000; tries++) {
651 udelay(50); 685 udelay(50);
652 pci_read_config_byte(dev, 0x5b, &reg5b); 686 pci_read_config_byte(dev, 0x5b, &reg5b);
653 if (reg5b & 0x80) { 687 if (reg5b & 0x80) {
654 /* See if it stays set */ 688 /* See if it stays set */
655 for(tries = 0; tries < 0x1000; tries ++) { 689 for (tries = 0; tries < 0x1000; tries++) {
656 pci_read_config_byte(dev, 0x5b, &reg5b); 690 pci_read_config_byte(dev, 0x5b, &reg5b);
657 /* Failed ? */ 691 /* Failed ? */
658 if ((reg5b & 0x80) == 0) 692 if ((reg5b & 0x80) == 0)
@@ -660,7 +694,7 @@ static int hpt37x_calibrate_dpll(struct pci_dev *dev)
660 } 694 }
661 /* Turn off tuning, we have the DPLL set */ 695 /* Turn off tuning, we have the DPLL set */
662 pci_read_config_dword(dev, 0x5c, &reg5c); 696 pci_read_config_dword(dev, 0x5c, &reg5c);
663 pci_write_config_dword(dev, 0x5c, reg5c & ~ 0x100); 697 pci_write_config_dword(dev, 0x5c, reg5c & ~0x100);
664 return 1; 698 return 1;
665 } 699 }
666 } 700 }
@@ -672,6 +706,7 @@ static u32 hpt374_read_freq(struct pci_dev *pdev)
672{ 706{
673 u32 freq; 707 u32 freq;
674 unsigned long io_base = pci_resource_start(pdev, 4); 708 unsigned long io_base = pci_resource_start(pdev, 4);
709
675 if (PCI_FUNC(pdev->devfn) & 1) { 710 if (PCI_FUNC(pdev->devfn) & 1) {
676 struct pci_dev *pdev_0; 711 struct pci_dev *pdev_0;
677 712
@@ -737,23 +772,23 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
737 .udma_mask = ATA_UDMA5, 772 .udma_mask = ATA_UDMA5,
738 .port_ops = &hpt370a_port_ops 773 .port_ops = &hpt370a_port_ops
739 }; 774 };
740 /* HPT370 - UDMA100 */ 775 /* HPT370 - UDMA66 */
741 static const struct ata_port_info info_hpt370_33 = { 776 static const struct ata_port_info info_hpt370_33 = {
742 .flags = ATA_FLAG_SLAVE_POSS, 777 .flags = ATA_FLAG_SLAVE_POSS,
743 .pio_mask = ATA_PIO4, 778 .pio_mask = ATA_PIO4,
744 .mwdma_mask = ATA_MWDMA2, 779 .mwdma_mask = ATA_MWDMA2,
745 .udma_mask = ATA_UDMA5, 780 .udma_mask = ATA_UDMA4,
746 .port_ops = &hpt370_port_ops 781 .port_ops = &hpt370_port_ops
747 }; 782 };
748 /* HPT370A - UDMA100 */ 783 /* HPT370A - UDMA66 */
749 static const struct ata_port_info info_hpt370a_33 = { 784 static const struct ata_port_info info_hpt370a_33 = {
750 .flags = ATA_FLAG_SLAVE_POSS, 785 .flags = ATA_FLAG_SLAVE_POSS,
751 .pio_mask = ATA_PIO4, 786 .pio_mask = ATA_PIO4,
752 .mwdma_mask = ATA_MWDMA2, 787 .mwdma_mask = ATA_MWDMA2,
753 .udma_mask = ATA_UDMA5, 788 .udma_mask = ATA_UDMA4,
754 .port_ops = &hpt370a_port_ops 789 .port_ops = &hpt370a_port_ops
755 }; 790 };
756 /* HPT371, 372 and friends - UDMA133 */ 791 /* HPT372 - UDMA133 */
757 static const struct ata_port_info info_hpt372 = { 792 static const struct ata_port_info info_hpt372 = {
758 .flags = ATA_FLAG_SLAVE_POSS, 793 .flags = ATA_FLAG_SLAVE_POSS,
759 .pio_mask = ATA_PIO4, 794 .pio_mask = ATA_PIO4,
@@ -761,7 +796,15 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
761 .udma_mask = ATA_UDMA6, 796 .udma_mask = ATA_UDMA6,
762 .port_ops = &hpt372_port_ops 797 .port_ops = &hpt372_port_ops
763 }; 798 };
764 /* HPT374 - UDMA100, function 1 uses different prereset method */ 799 /* HPT371, 302 - UDMA133 */
800 static const struct ata_port_info info_hpt302 = {
801 .flags = ATA_FLAG_SLAVE_POSS,
802 .pio_mask = ATA_PIO4,
803 .mwdma_mask = ATA_MWDMA2,
804 .udma_mask = ATA_UDMA6,
805 .port_ops = &hpt302_port_ops
806 };
807 /* HPT374 - UDMA100, function 1 uses different cable_detect method */
765 static const struct ata_port_info info_hpt374_fn0 = { 808 static const struct ata_port_info info_hpt374_fn0 = {
766 .flags = ATA_FLAG_SLAVE_POSS, 809 .flags = ATA_FLAG_SLAVE_POSS,
767 .pio_mask = ATA_PIO4, 810 .pio_mask = ATA_PIO4,
@@ -796,7 +839,8 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
796 if (rc) 839 if (rc)
797 return rc; 840 return rc;
798 841
799 if (dev->device == PCI_DEVICE_ID_TTI_HPT366) { 842 switch (dev->device) {
843 case PCI_DEVICE_ID_TTI_HPT366:
800 /* May be a later chip in disguise. Check */ 844 /* May be a later chip in disguise. Check */
801 /* Older chips are in the HPT366 driver. Ignore them */ 845 /* Older chips are in the HPT366 driver. Ignore them */
802 if (rev < 3) 846 if (rev < 3)
@@ -805,66 +849,65 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
805 if (rev == 6) 849 if (rev == 6)
806 return -ENODEV; 850 return -ENODEV;
807 851
808 switch(rev) { 852 switch (rev) {
809 case 3: 853 case 3:
810 ppi[0] = &info_hpt370; 854 ppi[0] = &info_hpt370;
811 chip_table = &hpt370; 855 chip_table = &hpt370;
812 prefer_dpll = 0; 856 prefer_dpll = 0;
813 break; 857 break;
814 case 4: 858 case 4:
815 ppi[0] = &info_hpt370a; 859 ppi[0] = &info_hpt370a;
816 chip_table = &hpt370a; 860 chip_table = &hpt370a;
817 prefer_dpll = 0; 861 prefer_dpll = 0;
818 break; 862 break;
819 case 5: 863 case 5:
820 ppi[0] = &info_hpt372; 864 ppi[0] = &info_hpt372;
821 chip_table = &hpt372; 865 chip_table = &hpt372;
822 break; 866 break;
823 default: 867 default:
824 printk(KERN_ERR "pata_hpt37x: Unknown HPT366 " 868 pr_err("Unknown HPT366 subtype, please report (%d)\n",
825 "subtype, please report (%d).\n", rev); 869 rev);
826 return -ENODEV; 870 return -ENODEV;
827 }
828 } else {
829 switch(dev->device) {
830 case PCI_DEVICE_ID_TTI_HPT372:
831 /* 372N if rev >= 2*/
832 if (rev >= 2)
833 return -ENODEV;
834 ppi[0] = &info_hpt372;
835 chip_table = &hpt372a;
836 break;
837 case PCI_DEVICE_ID_TTI_HPT302:
838 /* 302N if rev > 1 */
839 if (rev > 1)
840 return -ENODEV;
841 ppi[0] = &info_hpt372;
842 /* Check this */
843 chip_table = &hpt302;
844 break;
845 case PCI_DEVICE_ID_TTI_HPT371:
846 if (rev > 1)
847 return -ENODEV;
848 ppi[0] = &info_hpt372;
849 chip_table = &hpt371;
850 /* Single channel device, master is not present
851 but the BIOS (or us for non x86) must mark it
852 absent */
853 pci_read_config_byte(dev, 0x50, &mcr1);
854 mcr1 &= ~0x04;
855 pci_write_config_byte(dev, 0x50, mcr1);
856 break;
857 case PCI_DEVICE_ID_TTI_HPT374:
858 chip_table = &hpt374;
859 if (!(PCI_FUNC(dev->devfn) & 1))
860 *ppi = &info_hpt374_fn0;
861 else
862 *ppi = &info_hpt374_fn1;
863 break;
864 default:
865 printk(KERN_ERR "pata_hpt37x: PCI table is bogus please report (%d).\n", dev->device);
866 return -ENODEV;
867 } 871 }
872 break;
873 case PCI_DEVICE_ID_TTI_HPT372:
874 /* 372N if rev >= 2 */
875 if (rev >= 2)
876 return -ENODEV;
877 ppi[0] = &info_hpt372;
878 chip_table = &hpt372a;
879 break;
880 case PCI_DEVICE_ID_TTI_HPT302:
881 /* 302N if rev > 1 */
882 if (rev > 1)
883 return -ENODEV;
884 ppi[0] = &info_hpt302;
885 /* Check this */
886 chip_table = &hpt302;
887 break;
888 case PCI_DEVICE_ID_TTI_HPT371:
889 if (rev > 1)
890 return -ENODEV;
891 ppi[0] = &info_hpt302;
892 chip_table = &hpt371;
893 /*
894 * Single channel device, master is not present but the BIOS
895 * (or us for non x86) must mark it absent
896 */
897 pci_read_config_byte(dev, 0x50, &mcr1);
898 mcr1 &= ~0x04;
899 pci_write_config_byte(dev, 0x50, mcr1);
900 break;
901 case PCI_DEVICE_ID_TTI_HPT374:
902 chip_table = &hpt374;
903 if (!(PCI_FUNC(dev->devfn) & 1))
904 *ppi = &info_hpt374_fn0;
905 else
906 *ppi = &info_hpt374_fn1;
907 break;
908 default:
909 pr_err("PCI table is bogus, please report (%d)\n", dev->device);
910 return -ENODEV;
868 } 911 }
869 /* Ok so this is a chip we support */ 912 /* Ok so this is a chip we support */
870 913
@@ -893,9 +936,11 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
893 if (chip_table == &hpt372a) 936 if (chip_table == &hpt372a)
894 outb(0x0e, iobase + 0x9c); 937 outb(0x0e, iobase + 0x9c);
895 938
896 /* Some devices do not let this value be accessed via PCI space 939 /*
897 according to the old driver. In addition we must use the value 940 * Some devices do not let this value be accessed via PCI space
898 from FN 0 on the HPT374 */ 941 * according to the old driver. In addition we must use the value
942 * from FN 0 on the HPT374.
943 */
899 944
900 if (chip_table == &hpt374) { 945 if (chip_table == &hpt374) {
901 freq = hpt374_read_freq(dev); 946 freq = hpt374_read_freq(dev);
@@ -909,10 +954,10 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
909 u8 sr; 954 u8 sr;
910 u32 total = 0; 955 u32 total = 0;
911 956
912 printk(KERN_WARNING "pata_hpt37x: BIOS has not set timing clocks.\n"); 957 pr_warn("BIOS has not set timing clocks\n");
913 958
914 /* This is the process the HPT371 BIOS is reported to use */ 959 /* This is the process the HPT371 BIOS is reported to use */
915 for(i = 0; i < 128; i++) { 960 for (i = 0; i < 128; i++) {
916 pci_read_config_byte(dev, 0x78, &sr); 961 pci_read_config_byte(dev, 0x78, &sr);
917 total += sr & 0x1FF; 962 total += sr & 0x1FF;
918 udelay(15); 963 udelay(15);
@@ -947,20 +992,25 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
947 992
948 /* Select the DPLL clock. */ 993 /* Select the DPLL clock. */
949 pci_write_config_byte(dev, 0x5b, 0x21); 994 pci_write_config_byte(dev, 0x5b, 0x21);
950 pci_write_config_dword(dev, 0x5C, (f_high << 16) | f_low | 0x100); 995 pci_write_config_dword(dev, 0x5C,
996 (f_high << 16) | f_low | 0x100);
951 997
952 for(adjust = 0; adjust < 8; adjust++) { 998 for (adjust = 0; adjust < 8; adjust++) {
953 if (hpt37x_calibrate_dpll(dev)) 999 if (hpt37x_calibrate_dpll(dev))
954 break; 1000 break;
955 /* See if it'll settle at a fractionally different clock */ 1001 /*
1002 * See if it'll settle at a fractionally
1003 * different clock
1004 */
956 if (adjust & 1) 1005 if (adjust & 1)
957 f_low -= adjust >> 1; 1006 f_low -= adjust >> 1;
958 else 1007 else
959 f_high += adjust >> 1; 1008 f_high += adjust >> 1;
960 pci_write_config_dword(dev, 0x5C, (f_high << 16) | f_low | 0x100); 1009 pci_write_config_dword(dev, 0x5C,
1010 (f_high << 16) | f_low | 0x100);
961 } 1011 }
962 if (adjust == 8) { 1012 if (adjust == 8) {
963 printk(KERN_ERR "pata_hpt37x: DPLL did not stabilize!\n"); 1013 pr_err("DPLL did not stabilize!\n");
964 return -ENODEV; 1014 return -ENODEV;
965 } 1015 }
966 if (dpll == 3) 1016 if (dpll == 3)
@@ -968,22 +1018,23 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
968 else 1018 else
969 private_data = (void *)hpt37x_timings_50; 1019 private_data = (void *)hpt37x_timings_50;
970 1020
971 printk(KERN_INFO "pata_hpt37x: bus clock %dMHz, using %dMHz DPLL.\n", 1021 pr_info("bus clock %dMHz, using %dMHz DPLL\n",
972 MHz[clock_slot], MHz[dpll]); 1022 MHz[clock_slot], MHz[dpll]);
973 } else { 1023 } else {
974 private_data = (void *)chip_table->clocks[clock_slot]; 1024 private_data = (void *)chip_table->clocks[clock_slot];
975 /* 1025 /*
976 * Perform a final fixup. Note that we will have used the 1026 * Perform a final fixup. Note that we will have used the
977 * DPLL on the HPT372 which means we don't have to worry 1027 * DPLL on the HPT372 which means we don't have to worry
978 * about lack of UDMA133 support on lower clocks 1028 * about lack of UDMA133 support on lower clocks
979 */ 1029 */
980 1030
981 if (clock_slot < 2 && ppi[0] == &info_hpt370) 1031 if (clock_slot < 2 && ppi[0] == &info_hpt370)
982 ppi[0] = &info_hpt370_33; 1032 ppi[0] = &info_hpt370_33;
983 if (clock_slot < 2 && ppi[0] == &info_hpt370a) 1033 if (clock_slot < 2 && ppi[0] == &info_hpt370a)
984 ppi[0] = &info_hpt370a_33; 1034 ppi[0] = &info_hpt370a_33;
985 printk(KERN_INFO "pata_hpt37x: %s using %dMHz bus clock.\n", 1035
986 chip_table->name, MHz[clock_slot]); 1036 pr_info("%s using %dMHz bus clock\n",
1037 chip_table->name, MHz[clock_slot]);
987 } 1038 }
988 1039
989 /* Now kick off ATA set up */ 1040 /* Now kick off ATA set up */
@@ -1001,9 +1052,9 @@ static const struct pci_device_id hpt37x[] = {
1001}; 1052};
1002 1053
1003static struct pci_driver hpt37x_pci_driver = { 1054static struct pci_driver hpt37x_pci_driver = {
1004 .name = DRV_NAME, 1055 .name = DRV_NAME,
1005 .id_table = hpt37x, 1056 .id_table = hpt37x,
1006 .probe = hpt37x_init_one, 1057 .probe = hpt37x_init_one,
1007 .remove = ata_pci_remove_one 1058 .remove = ata_pci_remove_one
1008}; 1059};
1009 1060
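The DPLL setup converted above retries calibration up to eight times, nudging the divisor pair in alternate directions between attempts. A minimal user-space sketch of that retry pattern, not part of the patch: calibrate_ok() and the seed values are stand-ins for hpt37x_calibrate_dpll() and the real register contents.

#include <stdio.h>

/* Stand-in for hpt37x_calibrate_dpll(): pretend the DPLL only locks
 * once f_high has been nudged up twice.  A real driver polls hardware. */
static int calibrate_ok(unsigned int f_high, unsigned int f_low)
{
        (void)f_low;
        return f_high >= 0x2a;
}

int main(void)
{
        unsigned int f_low = 0x48, f_high = 0x28;   /* made-up seed values */
        int adjust;

        for (adjust = 0; adjust < 8; adjust++) {
                if (calibrate_ok(f_high, f_low))
                        break;
                /* Alternate between lowering f_low and raising f_high, as the
                 * loop in hpt37x_init_one() above does; the first two retries
                 * re-test with unchanged values because adjust >> 1 is 0. */
                if (adjust & 1)
                        f_low -= adjust >> 1;
                else
                        f_high += adjust >> 1;
                printf("retry %d: f_high=0x%x f_low=0x%x\n",
                       adjust, f_high, f_low);
        }
        if (adjust == 8)
                printf("DPLL did not stabilize\n");
        else
                printf("settled after %d adjustment(s)\n", adjust);
        return 0;
}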
diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c
index 32f3463216b8..765f136d8cd3 100644
--- a/drivers/ata/pata_hpt3x2n.c
+++ b/drivers/ata/pata_hpt3x2n.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Libata driver for the highpoint 372N and 302N UDMA66 ATA controllers. 2 * Libata driver for the HighPoint 371N, 372N, and 302N UDMA66 ATA controllers.
3 * 3 *
4 * This driver is heavily based upon: 4 * This driver is heavily based upon:
5 * 5 *
@@ -8,13 +8,15 @@
8 * Copyright (C) 1999-2003 Andre Hedrick <andre@linux-ide.org> 8 * Copyright (C) 1999-2003 Andre Hedrick <andre@linux-ide.org>
9 * Portions Copyright (C) 2001 Sun Microsystems, Inc. 9 * Portions Copyright (C) 2001 Sun Microsystems, Inc.
10 * Portions Copyright (C) 2003 Red Hat Inc 10 * Portions Copyright (C) 2003 Red Hat Inc
11 * Portions Copyright (C) 2005-2009 MontaVista Software, Inc. 11 * Portions Copyright (C) 2005-2010 MontaVista Software, Inc.
12 * 12 *
13 * 13 *
14 * TODO 14 * TODO
15 * Work out best PLL policy 15 * Work out best PLL policy
16 */ 16 */
17 17
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
18#include <linux/kernel.h> 20#include <linux/kernel.h>
19#include <linux/module.h> 21#include <linux/module.h>
20#include <linux/pci.h> 22#include <linux/pci.h>
@@ -25,7 +27,7 @@
25#include <linux/libata.h> 27#include <linux/libata.h>
26 28
27#define DRV_NAME "pata_hpt3x2n" 29#define DRV_NAME "pata_hpt3x2n"
28#define DRV_VERSION "0.3.10" 30#define DRV_VERSION "0.3.15"
29 31
30enum { 32enum {
31 HPT_PCI_FAST = (1 << 31), 33 HPT_PCI_FAST = (1 << 31),
@@ -103,7 +105,7 @@ static u32 hpt3x2n_find_mode(struct ata_port *ap, int speed)
103{ 105{
104 struct hpt_clock *clocks = hpt3x2n_clocks; 106 struct hpt_clock *clocks = hpt3x2n_clocks;
105 107
106 while(clocks->xfer_speed) { 108 while (clocks->xfer_speed) {
107 if (clocks->xfer_speed == speed) 109 if (clocks->xfer_speed == speed)
108 return clocks->timing; 110 return clocks->timing;
109 clocks++; 111 clocks++;
@@ -113,6 +115,22 @@ static u32 hpt3x2n_find_mode(struct ata_port *ap, int speed)
113} 115}
114 116
115/** 117/**
118 * hpt372n_filter - mode selection filter
119 * @adev: ATA device
120 * @mask: mode mask
121 *
122 * The Marvell bridge chips used on the HighPoint SATA cards do not seem
123 * to support the UltraDMA modes 1, 2, and 3 as well as any MWDMA modes...
124 */
125static unsigned long hpt372n_filter(struct ata_device *adev, unsigned long mask)
126{
127 if (ata_id_is_sata(adev->id))
128 mask &= ~((0xE << ATA_SHIFT_UDMA) | ATA_MASK_MWDMA);
129
130 return mask;
131}
132
133/**
116 * hpt3x2n_cable_detect - Detect the cable type 134 * hpt3x2n_cable_detect - Detect the cable type
117 * @ap: ATA port to detect on 135 * @ap: ATA port to detect on
118 * 136 *
@@ -153,6 +171,7 @@ static int hpt3x2n_pre_reset(struct ata_link *link, unsigned long deadline)
153{ 171{
154 struct ata_port *ap = link->ap; 172 struct ata_port *ap = link->ap;
155 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 173 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
174
156 /* Reset the state machine */ 175 /* Reset the state machine */
157 pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37); 176 pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37);
158 udelay(100); 177 udelay(100);
@@ -328,10 +347,10 @@ static struct scsi_host_template hpt3x2n_sht = {
328}; 347};
329 348
330/* 349/*
331 * Configuration for HPT3x2n. 350 * Configuration for HPT302N/371N.
332 */ 351 */
333 352
334static struct ata_port_operations hpt3x2n_port_ops = { 353static struct ata_port_operations hpt3xxn_port_ops = {
335 .inherits = &ata_bmdma_port_ops, 354 .inherits = &ata_bmdma_port_ops,
336 355
337 .bmdma_stop = hpt3x2n_bmdma_stop, 356 .bmdma_stop = hpt3x2n_bmdma_stop,
@@ -345,6 +364,15 @@ static struct ata_port_operations hpt3x2n_port_ops = {
345 .prereset = hpt3x2n_pre_reset, 364 .prereset = hpt3x2n_pre_reset,
346}; 365};
347 366
367/*
368 * Configuration for HPT372N. Same as 302N/371N but we have a mode filter.
369 */
370
371static struct ata_port_operations hpt372n_port_ops = {
372 .inherits = &hpt3xxn_port_ops,
373 .mode_filter = &hpt372n_filter,
374};
375
348/** 376/**
349 * hpt3xn_calibrate_dpll - Calibrate the DPLL loop 377 * hpt3xn_calibrate_dpll - Calibrate the DPLL loop
350 * @dev: PCI device 378 * @dev: PCI device
@@ -359,12 +387,12 @@ static int hpt3xn_calibrate_dpll(struct pci_dev *dev)
359 u32 reg5c; 387 u32 reg5c;
360 int tries; 388 int tries;
361 389
362 for(tries = 0; tries < 0x5000; tries++) { 390 for (tries = 0; tries < 0x5000; tries++) {
363 udelay(50); 391 udelay(50);
364 pci_read_config_byte(dev, 0x5b, &reg5b); 392 pci_read_config_byte(dev, 0x5b, &reg5b);
365 if (reg5b & 0x80) { 393 if (reg5b & 0x80) {
366 /* See if it stays set */ 394 /* See if it stays set */
367 for(tries = 0; tries < 0x1000; tries ++) { 395 for (tries = 0; tries < 0x1000; tries++) {
368 pci_read_config_byte(dev, 0x5b, &reg5b); 396 pci_read_config_byte(dev, 0x5b, &reg5b);
369 /* Failed ? */ 397 /* Failed ? */
370 if ((reg5b & 0x80) == 0) 398 if ((reg5b & 0x80) == 0)
@@ -372,7 +400,7 @@ static int hpt3xn_calibrate_dpll(struct pci_dev *dev)
372 } 400 }
373 /* Turn off tuning, we have the DPLL set */ 401 /* Turn off tuning, we have the DPLL set */
374 pci_read_config_dword(dev, 0x5c, &reg5c); 402 pci_read_config_dword(dev, 0x5c, &reg5c);
375 pci_write_config_dword(dev, 0x5c, reg5c & ~ 0x100); 403 pci_write_config_dword(dev, 0x5c, reg5c & ~0x100);
376 return 1; 404 return 1;
377 } 405 }
378 } 406 }
@@ -388,8 +416,19 @@ static int hpt3x2n_pci_clock(struct pci_dev *pdev)
388 416
389 fcnt = inl(iobase + 0x90); /* Not PCI readable for some chips */ 417 fcnt = inl(iobase + 0x90); /* Not PCI readable for some chips */
390 if ((fcnt >> 12) != 0xABCDE) { 418 if ((fcnt >> 12) != 0xABCDE) {
391 printk(KERN_WARNING "hpt3xn: BIOS clock data not set.\n"); 419 int i;
392 return 33; /* Not BIOS set */ 420 u16 sr;
421 u32 total = 0;
422
423 pr_warn("BIOS clock data not set\n");
424
425 /* This is the process the HPT371 BIOS is reported to use */
426 for (i = 0; i < 128; i++) {
427 pci_read_config_word(pdev, 0x78, &sr);
428 total += sr & 0x1FF;
429 udelay(15);
430 }
431 fcnt = total / 128;
393 } 432 }
394 fcnt &= 0x1FF; 433 fcnt &= 0x1FF;
395 434
@@ -431,21 +470,27 @@ static int hpt3x2n_pci_clock(struct pci_dev *pdev)
431 * HPT372N 9 (HPT372N) * UDMA133 470 * HPT372N 9 (HPT372N) * UDMA133
432 * 471 *
433 * (1) UDMA133 support depends on the bus clock 472 * (1) UDMA133 support depends on the bus clock
434 *
435 * To pin down HPT371N
436 */ 473 */
437 474
438static int hpt3x2n_init_one(struct pci_dev *dev, const struct pci_device_id *id) 475static int hpt3x2n_init_one(struct pci_dev *dev, const struct pci_device_id *id)
439{ 476{
440 /* HPT372N and friends - UDMA133 */ 477 /* HPT372N - UDMA133 */
441 static const struct ata_port_info info = { 478 static const struct ata_port_info info_hpt372n = {
442 .flags = ATA_FLAG_SLAVE_POSS, 479 .flags = ATA_FLAG_SLAVE_POSS,
443 .pio_mask = ATA_PIO4, 480 .pio_mask = ATA_PIO4,
444 .mwdma_mask = ATA_MWDMA2, 481 .mwdma_mask = ATA_MWDMA2,
445 .udma_mask = ATA_UDMA6, 482 .udma_mask = ATA_UDMA6,
446 .port_ops = &hpt3x2n_port_ops 483 .port_ops = &hpt372n_port_ops
447 }; 484 };
448 const struct ata_port_info *ppi[] = { &info, NULL }; 485 /* HPT302N and HPT371N - UDMA133 */
486 static const struct ata_port_info info_hpt3xxn = {
487 .flags = ATA_FLAG_SLAVE_POSS,
488 .pio_mask = ATA_PIO4,
489 .mwdma_mask = ATA_MWDMA2,
490 .udma_mask = ATA_UDMA6,
491 .port_ops = &hpt3xxn_port_ops
492 };
493 const struct ata_port_info *ppi[] = { &info_hpt3xxn, NULL };
449 u8 rev = dev->revision; 494 u8 rev = dev->revision;
450 u8 irqmask; 495 u8 irqmask;
451 unsigned int pci_mhz; 496 unsigned int pci_mhz;
@@ -459,30 +504,34 @@ static int hpt3x2n_init_one(struct pci_dev *dev, const struct pci_device_id *id)
459 if (rc) 504 if (rc)
460 return rc; 505 return rc;
461 506
462 switch(dev->device) { 507 switch (dev->device) {
463 case PCI_DEVICE_ID_TTI_HPT366: 508 case PCI_DEVICE_ID_TTI_HPT366:
464 if (rev < 6) 509 /* 372N if rev >= 6 */
465 return -ENODEV; 510 if (rev < 6)
466 break;
467 case PCI_DEVICE_ID_TTI_HPT371:
468 if (rev < 2)
469 return -ENODEV;
470 /* 371N if rev > 1 */
471 break;
472 case PCI_DEVICE_ID_TTI_HPT372:
473 /* 372N if rev >= 2*/
474 if (rev < 2)
475 return -ENODEV;
476 break;
477 case PCI_DEVICE_ID_TTI_HPT302:
478 if (rev < 2)
479 return -ENODEV;
480 break;
481 case PCI_DEVICE_ID_TTI_HPT372N:
482 break;
483 default:
484 printk(KERN_ERR "pata_hpt3x2n: PCI table is bogus please report (%d).\n", dev->device);
485 return -ENODEV; 511 return -ENODEV;
512 goto hpt372n;
513 case PCI_DEVICE_ID_TTI_HPT371:
514 /* 371N if rev >= 2 */
515 if (rev < 2)
516 return -ENODEV;
517 break;
518 case PCI_DEVICE_ID_TTI_HPT372:
519 /* 372N if rev >= 2 */
520 if (rev < 2)
521 return -ENODEV;
522 goto hpt372n;
523 case PCI_DEVICE_ID_TTI_HPT302:
524 /* 302N if rev >= 2 */
525 if (rev < 2)
526 return -ENODEV;
527 break;
528 case PCI_DEVICE_ID_TTI_HPT372N:
529hpt372n:
530 ppi[0] = &info_hpt372n;
531 break;
532 default:
533 pr_err("PCI table is bogus, please report (%d)\n", dev->device);
534 return -ENODEV;
486 } 535 }
487 536
488 /* Ok so this is a chip we support */ 537 /* Ok so this is a chip we support */
@@ -509,8 +558,10 @@ static int hpt3x2n_init_one(struct pci_dev *dev, const struct pci_device_id *id)
509 pci_write_config_byte(dev, 0x50, mcr1); 558 pci_write_config_byte(dev, 0x50, mcr1);
510 } 559 }
511 560
512 /* Tune the PLL. HPT recommend using 75 for SATA, 66 for UDMA133 or 561 /*
513 50 for UDMA100. Right now we always use 66 */ 562 * Tune the PLL. HPT recommend using 75 for SATA, 66 for UDMA133 or
563 * 50 for UDMA100. Right now we always use 66
564 */
514 565
515 pci_mhz = hpt3x2n_pci_clock(dev); 566 pci_mhz = hpt3x2n_pci_clock(dev);
516 567
@@ -522,20 +573,22 @@ static int hpt3x2n_init_one(struct pci_dev *dev, const struct pci_device_id *id)
522 pci_write_config_byte(dev, 0x5B, 0x21); 573 pci_write_config_byte(dev, 0x5B, 0x21);
523 574
524 /* Unlike the 37x we don't try jiggling the frequency */ 575 /* Unlike the 37x we don't try jiggling the frequency */
525 for(adjust = 0; adjust < 8; adjust++) { 576 for (adjust = 0; adjust < 8; adjust++) {
526 if (hpt3xn_calibrate_dpll(dev)) 577 if (hpt3xn_calibrate_dpll(dev))
527 break; 578 break;
528 pci_write_config_dword(dev, 0x5C, (f_high << 16) | f_low); 579 pci_write_config_dword(dev, 0x5C, (f_high << 16) | f_low);
529 } 580 }
530 if (adjust == 8) { 581 if (adjust == 8) {
531 printk(KERN_ERR "pata_hpt3x2n: DPLL did not stabilize!\n"); 582 pr_err("DPLL did not stabilize!\n");
532 return -ENODEV; 583 return -ENODEV;
533 } 584 }
534 585
535 printk(KERN_INFO "pata_hpt37x: bus clock %dMHz, using 66MHz DPLL.\n", 586 pr_info("bus clock %dMHz, using 66MHz DPLL\n", pci_mhz);
536 pci_mhz); 587
537 /* Set our private data up. We only need a few flags so we use 588 /*
538 it directly */ 589 * Set our private data up. We only need a few flags
590 * so we use it directly.
591 */
539 if (pci_mhz > 60) 592 if (pci_mhz > 60)
540 hpriv = (void *)(PCI66 | USE_DPLL); 593 hpriv = (void *)(PCI66 | USE_DPLL);
541 594
@@ -562,9 +615,9 @@ static const struct pci_device_id hpt3x2n[] = {
562}; 615};
563 616
564static struct pci_driver hpt3x2n_pci_driver = { 617static struct pci_driver hpt3x2n_pci_driver = {
565 .name = DRV_NAME, 618 .name = DRV_NAME,
566 .id_table = hpt3x2n, 619 .id_table = hpt3x2n,
567 .probe = hpt3x2n_init_one, 620 .probe = hpt3x2n_init_one,
568 .remove = ata_pci_remove_one 621 .remove = ata_pci_remove_one
569}; 622};
570 623
@@ -579,7 +632,7 @@ static void __exit hpt3x2n_exit(void)
579} 632}
580 633
581MODULE_AUTHOR("Alan Cox"); 634MODULE_AUTHOR("Alan Cox");
582MODULE_DESCRIPTION("low-level driver for the Highpoint HPT3x2n/30x"); 635MODULE_DESCRIPTION("low-level driver for the Highpoint HPT3xxN");
583MODULE_LICENSE("GPL"); 636MODULE_LICENSE("GPL");
584MODULE_DEVICE_TABLE(pci, hpt3x2n); 637MODULE_DEVICE_TABLE(pci, hpt3x2n);
585MODULE_VERSION(DRV_VERSION); 638MODULE_VERSION(DRV_VERSION);
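The hpt372n_filter() added above trims the transfer-mode mask for SATA devices sitting behind the Marvell bridge. A stand-alone sketch of what the 0xE constant clears; SHIFT_UDMA and MASK_MWDMA below are illustrative stand-ins for the kernel's ATA_SHIFT_UDMA and ATA_MASK_MWDMA, whose exact values are not assumed here.

#include <stdio.h>

#define SHIFT_UDMA  12                    /* assumed field position */
#define MASK_MWDMA  (0x1fUL << 7)         /* assumed MWDMA field */

int main(void)
{
        unsigned long mask = 0x7fUL << SHIFT_UDMA;  /* device claims UDMA0-6 */
        int mode;

        /* 0xE is binary 1110: clear UDMA1, UDMA2 and UDMA3, plus all MWDMA. */
        mask &= ~((0xEUL << SHIFT_UDMA) | MASK_MWDMA);

        for (mode = 0; mode <= 6; mode++)
                if (mask & (1UL << (SHIFT_UDMA + mode)))
                        printf("UDMA%d still allowed\n", mode);
        return 0;
}

Running it shows UDMA0 and UDMA4-6 surviving while UDMA1-3 and every MWDMA mode are dropped.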
diff --git a/drivers/ata/pata_hpt3x3.c b/drivers/ata/pata_hpt3x3.c
index b63d5e2d4628..24d7df81546b 100644
--- a/drivers/ata/pata_hpt3x3.c
+++ b/drivers/ata/pata_hpt3x3.c
@@ -151,7 +151,7 @@ static struct ata_port_operations hpt3x3_port_ops = {
151 .check_atapi_dma= hpt3x3_atapi_dma, 151 .check_atapi_dma= hpt3x3_atapi_dma,
152 .freeze = hpt3x3_freeze, 152 .freeze = hpt3x3_freeze,
153#endif 153#endif
154 154
155}; 155};
156 156
157/** 157/**
diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c
index bf88f71a21f4..2d15f2548a10 100644
--- a/drivers/ata/pata_it821x.c
+++ b/drivers/ata/pata_it821x.c
@@ -15,8 +15,8 @@
15 * May be copied or modified under the terms of the GNU General Public License 15 * May be copied or modified under the terms of the GNU General Public License
16 * Based in part on the ITE vendor provided SCSI driver. 16 * Based in part on the ITE vendor provided SCSI driver.
17 * 17 *
18 * Documentation available from 18 * Documentation available from IT8212F_V04.pdf
19 * http://www.ite.com.tw/pc/IT8212F_V04.pdf 19 * http://www.ite.com.tw/EN/products_more.aspx?CategoryID=3&ID=5,91
20 * Some other documents are NDA. 20 * Some other documents are NDA.
21 * 21 *
22 * The ITE8212 isn't exactly a standard IDE controller. It has two 22 * The ITE8212 isn't exactly a standard IDE controller. It has two
@@ -616,7 +616,7 @@ static void it821x_display_disk(int n, u8 *buf)
616 if (buf[52] > 4) /* No Disk */ 616 if (buf[52] > 4) /* No Disk */
617 return; 617 return;
618 618
619 ata_id_c_string((u16 *)buf, id, 0, 41); 619 ata_id_c_string((u16 *)buf, id, 0, 41);
620 620
621 if (buf[51]) { 621 if (buf[51]) {
622 mode = ffs(buf[51]); 622 mode = ffs(buf[51]);
@@ -910,7 +910,7 @@ static int it821x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
910 rc = pcim_enable_device(pdev); 910 rc = pcim_enable_device(pdev);
911 if (rc) 911 if (rc)
912 return rc; 912 return rc;
913 913
914 if (pdev->vendor == PCI_VENDOR_ID_RDC) { 914 if (pdev->vendor == PCI_VENDOR_ID_RDC) {
915 /* Deal with Vortex86SX */ 915 /* Deal with Vortex86SX */
916 if (pdev->revision == 0x11) 916 if (pdev->revision == 0x11)
diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
index ba54b089f98c..f6b3f995f58a 100644
--- a/drivers/ata/pata_ixp4xx_cf.c
+++ b/drivers/ata/pata_ixp4xx_cf.c
@@ -167,7 +167,7 @@ static __devinit int ixp4xx_pata_probe(struct platform_device *pdev)
167 167
168 irq = platform_get_irq(pdev, 0); 168 irq = platform_get_irq(pdev, 0);
169 if (irq) 169 if (irq)
170 set_irq_type(irq, IRQ_TYPE_EDGE_RISING); 170 irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
171 171
172 /* Setup expansion bus chip selects */ 172 /* Setup expansion bus chip selects */
173 *data->cs0_cfg = data->cs0_bits; 173 *data->cs0_cfg = data->cs0_bits;
@@ -177,7 +177,7 @@ static __devinit int ixp4xx_pata_probe(struct platform_device *pdev)
177 177
178 ap->ops = &ixp4xx_port_ops; 178 ap->ops = &ixp4xx_port_ops;
179 ap->pio_mask = ATA_PIO4; 179 ap->pio_mask = ATA_PIO4;
180 ap->flags |= ATA_FLAG_MMIO | ATA_FLAG_NO_LEGACY | ATA_FLAG_NO_ATAPI; 180 ap->flags |= ATA_FLAG_NO_ATAPI;
181 181
182 ixp4xx_setup_port(ap, data, cs0->start, cs1->start); 182 ixp4xx_setup_port(ap, data, cs0->start, cs1->start);
183 183
diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
index eaf194138f21..6bd9425ba5ab 100644
--- a/drivers/ata/pata_legacy.c
+++ b/drivers/ata/pata_legacy.c
@@ -142,7 +142,7 @@ static int autospeed; /* Chip present which snoops speed changes */
142static int pio_mask = ATA_PIO4; /* PIO range for autospeed devices */ 142static int pio_mask = ATA_PIO4; /* PIO range for autospeed devices */
143static int iordy_mask = 0xFFFFFFFF; /* Use iordy if available */ 143static int iordy_mask = 0xFFFFFFFF; /* Use iordy if available */
144 144
145#ifdef PATA_WINBOND_VLB_MODULE 145#ifdef CONFIG_PATA_WINBOND_VLB_MODULE
146static int winbond = 1; /* Set to probe Winbond controllers, 146static int winbond = 1; /* Set to probe Winbond controllers,
147 give I/O port if non standard */ 147 give I/O port if non standard */
148#else 148#else
diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c
index 75b49d01780b..46f589edccdb 100644
--- a/drivers/ata/pata_macio.c
+++ b/drivers/ata/pata_macio.c
@@ -1053,8 +1053,7 @@ static int __devinit pata_macio_common_init(struct pata_macio_priv *priv,
1053 /* Allocate libata host for 1 port */ 1053 /* Allocate libata host for 1 port */
1054 memset(&pinfo, 0, sizeof(struct ata_port_info)); 1054 memset(&pinfo, 0, sizeof(struct ata_port_info));
1055 pmac_macio_calc_timing_masks(priv, &pinfo); 1055 pmac_macio_calc_timing_masks(priv, &pinfo);
1056 pinfo.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_MMIO | 1056 pinfo.flags = ATA_FLAG_SLAVE_POSS;
1057 ATA_FLAG_NO_LEGACY;
1058 pinfo.port_ops = &pata_macio_ops; 1057 pinfo.port_ops = &pata_macio_ops;
1059 pinfo.private_data = priv; 1058 pinfo.private_data = priv;
1060 1059
diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c
index dd38083dcbeb..5d7f58a7e34d 100644
--- a/drivers/ata/pata_marvell.c
+++ b/drivers/ata/pata_marvell.c
@@ -38,7 +38,7 @@ static int marvell_pata_active(struct pci_dev *pdev)
38 38
39 /* We don't yet know how to do this for other devices */ 39 /* We don't yet know how to do this for other devices */
40 if (pdev->device != 0x6145) 40 if (pdev->device != 0x6145)
41 return 1; 41 return 1;
42 42
43 barp = pci_iomap(pdev, 5, 0x10); 43 barp = pci_iomap(pdev, 5, 0x10);
44 if (barp == NULL) 44 if (barp == NULL)
@@ -161,6 +161,9 @@ static const struct pci_device_id marvell_pci_tbl[] = {
161 { PCI_DEVICE(0x11AB, 0x6121), }, 161 { PCI_DEVICE(0x11AB, 0x6121), },
162 { PCI_DEVICE(0x11AB, 0x6123), }, 162 { PCI_DEVICE(0x11AB, 0x6123), },
163 { PCI_DEVICE(0x11AB, 0x6145), }, 163 { PCI_DEVICE(0x11AB, 0x6145), },
164 { PCI_DEVICE(0x1B4B, 0x91A0), },
165 { PCI_DEVICE(0x1B4B, 0x91A4), },
166
164 { } /* terminate list */ 167 { } /* terminate list */
165}; 168};
166 169
diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
index 8cc536e49a0a..2fcac511d39c 100644
--- a/drivers/ata/pata_mpc52xx.c
+++ b/drivers/ata/pata_mpc52xx.c
@@ -610,7 +610,7 @@ static struct scsi_host_template mpc52xx_ata_sht = {
610}; 610};
611 611
612static struct ata_port_operations mpc52xx_ata_port_ops = { 612static struct ata_port_operations mpc52xx_ata_port_ops = {
613 .inherits = &ata_sff_port_ops, 613 .inherits = &ata_bmdma_port_ops,
614 .sff_dev_select = mpc52xx_ata_dev_select, 614 .sff_dev_select = mpc52xx_ata_dev_select,
615 .set_piomode = mpc52xx_ata_set_piomode, 615 .set_piomode = mpc52xx_ata_set_piomode,
616 .set_dmamode = mpc52xx_ata_set_dmamode, 616 .set_dmamode = mpc52xx_ata_set_dmamode,
@@ -680,7 +680,7 @@ mpc52xx_ata_remove_one(struct device *dev)
680/* ======================================================================== */ 680/* ======================================================================== */
681 681
682static int __devinit 682static int __devinit
683mpc52xx_ata_probe(struct platform_device *op, const struct of_device_id *match) 683mpc52xx_ata_probe(struct platform_device *op)
684{ 684{
685 unsigned int ipb_freq; 685 unsigned int ipb_freq;
686 struct resource res_mem; 686 struct resource res_mem;
@@ -883,7 +883,7 @@ static struct of_device_id mpc52xx_ata_of_match[] = {
883}; 883};
884 884
885 885
886static struct of_platform_driver mpc52xx_ata_of_platform_driver = { 886static struct platform_driver mpc52xx_ata_of_platform_driver = {
887 .probe = mpc52xx_ata_probe, 887 .probe = mpc52xx_ata_probe,
888 .remove = mpc52xx_ata_remove, 888 .remove = mpc52xx_ata_remove,
889#ifdef CONFIG_PM 889#ifdef CONFIG_PM
@@ -906,13 +906,13 @@ static int __init
906mpc52xx_ata_init(void) 906mpc52xx_ata_init(void)
907{ 907{
908 printk(KERN_INFO "ata: MPC52xx IDE/ATA libata driver\n"); 908 printk(KERN_INFO "ata: MPC52xx IDE/ATA libata driver\n");
909 return of_register_platform_driver(&mpc52xx_ata_of_platform_driver); 909 return platform_driver_register(&mpc52xx_ata_of_platform_driver);
910} 910}
911 911
912static void __exit 912static void __exit
913mpc52xx_ata_exit(void) 913mpc52xx_ata_exit(void)
914{ 914{
915 of_unregister_platform_driver(&mpc52xx_ata_of_platform_driver); 915 platform_driver_unregister(&mpc52xx_ata_of_platform_driver);
916} 916}
917 917
918module_init(mpc52xx_ata_init); 918module_init(mpc52xx_ata_init);
diff --git a/drivers/ata/pata_mpiix.c b/drivers/ata/pata_mpiix.c
index b21f0021f54a..d8d9c5807740 100644
--- a/drivers/ata/pata_mpiix.c
+++ b/drivers/ata/pata_mpiix.c
@@ -15,7 +15,7 @@
15 * with PCI IDE and also that we do not disable the device when our driver is 15 * with PCI IDE and also that we do not disable the device when our driver is
16 * unloaded (as it has many other functions). 16 * unloaded (as it has many other functions).
17 * 17 *
18 * The driver conciously keeps this logic internally to avoid pushing quirky 18 * The driver consciously keeps this logic internally to avoid pushing quirky
19 * PATA history into the clean libata layer. 19 * PATA history into the clean libata layer.
20 * 20 *
21 * Thinkpad specific note: If you boot an MPIIX using a thinkpad with a PCMCIA 21 * Thinkpad specific note: If you boot an MPIIX using a thinkpad with a PCMCIA
diff --git a/drivers/ata/pata_ninja32.c b/drivers/ata/pata_ninja32.c
index cc50bd09aa26..e277a142138c 100644
--- a/drivers/ata/pata_ninja32.c
+++ b/drivers/ata/pata_ninja32.c
@@ -165,7 +165,7 @@ static int ninja32_reinit_one(struct pci_dev *pdev)
165 return rc; 165 return rc;
166 ninja32_program(host->iomap[0]); 166 ninja32_program(host->iomap[0]);
167 ata_host_resume(host); 167 ata_host_resume(host);
168 return 0; 168 return 0;
169} 169}
170#endif 170#endif
171 171
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index 06ddd91ffeda..220ddc90608f 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -60,7 +60,7 @@ static unsigned int ns_to_tim_reg(unsigned int tim_mult, unsigned int nsecs)
60 * Compute # of eclock periods to get desired duration in 60 * Compute # of eclock periods to get desired duration in
61 * nanoseconds. 61 * nanoseconds.
62 */ 62 */
63 val = DIV_ROUND_UP(nsecs * (octeon_get_clock_rate() / 1000000), 63 val = DIV_ROUND_UP(nsecs * (octeon_get_io_clock_rate() / 1000000),
64 1000 * tim_mult); 64 1000 * tim_mult);
65 65
66 return val; 66 return val;
@@ -653,8 +653,6 @@ static irqreturn_t octeon_cf_interrupt(int irq, void *dev_instance)
653 653
654 ap = host->ports[i]; 654 ap = host->ports[i];
655 ocd = ap->dev->platform_data; 655 ocd = ap->dev->platform_data;
656
657 ocd = ap->dev->platform_data;
658 cf_port = ap->private_data; 656 cf_port = ap->private_data;
659 dma_int.u64 = 657 dma_int.u64 =
660 cvmx_read_csr(CVMX_MIO_BOOT_DMA_INTX(ocd->dma_engine)); 658 cvmx_read_csr(CVMX_MIO_BOOT_DMA_INTX(ocd->dma_engine));
@@ -850,8 +848,7 @@ static int __devinit octeon_cf_probe(struct platform_device *pdev)
850 cf_port->ap = ap; 848 cf_port->ap = ap;
851 ap->ops = &octeon_cf_ops; 849 ap->ops = &octeon_cf_ops;
852 ap->pio_mask = ATA_PIO6; 850 ap->pio_mask = ATA_PIO6;
853 ap->flags |= ATA_FLAG_MMIO | ATA_FLAG_NO_LEGACY 851 ap->flags |= ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING;
854 | ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING;
855 852
856 base = cs0 + ocd->base_region_bias; 853 base = cs0 + ocd->base_region_bias;
857 if (!ocd->is16bit) { 854 if (!ocd->is16bit) {
diff --git a/drivers/ata/pata_of_platform.c b/drivers/ata/pata_of_platform.c
index 480e043ce6b8..f3054009bd25 100644
--- a/drivers/ata/pata_of_platform.c
+++ b/drivers/ata/pata_of_platform.c
@@ -14,8 +14,7 @@
14#include <linux/of_platform.h> 14#include <linux/of_platform.h>
15#include <linux/ata_platform.h> 15#include <linux/ata_platform.h>
16 16
17static int __devinit pata_of_platform_probe(struct platform_device *ofdev, 17static int __devinit pata_of_platform_probe(struct platform_device *ofdev)
18 const struct of_device_id *match)
19{ 18{
20 int ret; 19 int ret;
21 struct device_node *dn = ofdev->dev.of_node; 20 struct device_node *dn = ofdev->dev.of_node;
@@ -90,7 +89,7 @@ static struct of_device_id pata_of_platform_match[] = {
90}; 89};
91MODULE_DEVICE_TABLE(of, pata_of_platform_match); 90MODULE_DEVICE_TABLE(of, pata_of_platform_match);
92 91
93static struct of_platform_driver pata_of_platform_driver = { 92static struct platform_driver pata_of_platform_driver = {
94 .driver = { 93 .driver = {
95 .name = "pata_of_platform", 94 .name = "pata_of_platform",
96 .owner = THIS_MODULE, 95 .owner = THIS_MODULE,
@@ -102,13 +101,13 @@ static struct of_platform_driver pata_of_platform_driver = {
102 101
103static int __init pata_of_platform_init(void) 102static int __init pata_of_platform_init(void)
104{ 103{
105 return of_register_platform_driver(&pata_of_platform_driver); 104 return platform_driver_register(&pata_of_platform_driver);
106} 105}
107module_init(pata_of_platform_init); 106module_init(pata_of_platform_init);
108 107
109static void __exit pata_of_platform_exit(void) 108static void __exit pata_of_platform_exit(void)
110{ 109{
111 of_unregister_platform_driver(&pata_of_platform_driver); 110 platform_driver_unregister(&pata_of_platform_driver);
112} 111}
113module_exit(pata_of_platform_exit); 112module_exit(pata_of_platform_exit);
114 113
diff --git a/drivers/ata/pata_palmld.c b/drivers/ata/pata_palmld.c
index 11fb4ccc74b4..b86d7e22595e 100644
--- a/drivers/ata/pata_palmld.c
+++ b/drivers/ata/pata_palmld.c
@@ -33,6 +33,11 @@
33 33
34#define DRV_NAME "pata_palmld" 34#define DRV_NAME "pata_palmld"
35 35
36static struct gpio palmld_hdd_gpios[] = {
37 { GPIO_NR_PALMLD_IDE_PWEN, GPIOF_INIT_HIGH, "HDD Power" },
38 { GPIO_NR_PALMLD_IDE_RESET, GPIOF_INIT_LOW, "HDD Reset" },
39};
40
36static struct scsi_host_template palmld_sht = { 41static struct scsi_host_template palmld_sht = {
37 ATA_PIO_SHT(DRV_NAME), 42 ATA_PIO_SHT(DRV_NAME),
38}; 43};
@@ -52,28 +57,23 @@ static __devinit int palmld_pata_probe(struct platform_device *pdev)
52 57
53 /* allocate host */ 58 /* allocate host */
54 host = ata_host_alloc(&pdev->dev, 1); 59 host = ata_host_alloc(&pdev->dev, 1);
55 if (!host) 60 if (!host) {
56 return -ENOMEM; 61 ret = -ENOMEM;
62 goto err1;
63 }
57 64
58 /* remap drive's physical memory address */ 65 /* remap drive's physical memory address */
59 mem = devm_ioremap(&pdev->dev, PALMLD_IDE_PHYS, 0x1000); 66 mem = devm_ioremap(&pdev->dev, PALMLD_IDE_PHYS, 0x1000);
60 if (!mem) 67 if (!mem) {
61 return -ENOMEM; 68 ret = -ENOMEM;
69 goto err1;
70 }
62 71
63 /* request and activate power GPIO, IRQ GPIO */ 72 /* request and activate power GPIO, IRQ GPIO */
64 ret = gpio_request(GPIO_NR_PALMLD_IDE_PWEN, "HDD PWR"); 73 ret = gpio_request_array(palmld_hdd_gpios,
74 ARRAY_SIZE(palmld_hdd_gpios));
65 if (ret) 75 if (ret)
66 goto err1; 76 goto err1;
67 ret = gpio_direction_output(GPIO_NR_PALMLD_IDE_PWEN, 1);
68 if (ret)
69 goto err2;
70
71 ret = gpio_request(GPIO_NR_PALMLD_IDE_RESET, "HDD RST");
72 if (ret)
73 goto err2;
74 ret = gpio_direction_output(GPIO_NR_PALMLD_IDE_RESET, 0);
75 if (ret)
76 goto err3;
77 77
78 /* reset the drive */ 78 /* reset the drive */
79 gpio_set_value(GPIO_NR_PALMLD_IDE_RESET, 0); 79 gpio_set_value(GPIO_NR_PALMLD_IDE_RESET, 0);
@@ -85,7 +85,7 @@ static __devinit int palmld_pata_probe(struct platform_device *pdev)
85 ap = host->ports[0]; 85 ap = host->ports[0];
86 ap->ops = &palmld_port_ops; 86 ap->ops = &palmld_port_ops;
87 ap->pio_mask = ATA_PIO4; 87 ap->pio_mask = ATA_PIO4;
88 ap->flags |= ATA_FLAG_MMIO | ATA_FLAG_NO_LEGACY | ATA_FLAG_PIO_POLLING; 88 ap->flags |= ATA_FLAG_PIO_POLLING;
89 89
90 /* memory mapping voodoo */ 90 /* memory mapping voodoo */
91 ap->ioaddr.cmd_addr = mem + 0x10; 91 ap->ioaddr.cmd_addr = mem + 0x10;
@@ -96,13 +96,15 @@ static __devinit int palmld_pata_probe(struct platform_device *pdev)
96 ata_sff_std_ports(&ap->ioaddr); 96 ata_sff_std_ports(&ap->ioaddr);
97 97
98 /* activate host */ 98 /* activate host */
99 return ata_host_activate(host, 0, NULL, IRQF_TRIGGER_RISING, 99 ret = ata_host_activate(host, 0, NULL, IRQF_TRIGGER_RISING,
100 &palmld_sht); 100 &palmld_sht);
101 if (ret)
102 goto err2;
103
104 return ret;
101 105
102err3:
103 gpio_free(GPIO_NR_PALMLD_IDE_RESET);
104err2: 106err2:
105 gpio_free(GPIO_NR_PALMLD_IDE_PWEN); 107 gpio_free_array(palmld_hdd_gpios, ARRAY_SIZE(palmld_hdd_gpios));
106err1: 108err1:
107 return ret; 109 return ret;
108} 110}
@@ -116,8 +118,7 @@ static __devexit int palmld_pata_remove(struct platform_device *dev)
116 /* power down the HDD */ 118 /* power down the HDD */
117 gpio_set_value(GPIO_NR_PALMLD_IDE_PWEN, 0); 119 gpio_set_value(GPIO_NR_PALMLD_IDE_PWEN, 0);
118 120
119 gpio_free(GPIO_NR_PALMLD_IDE_RESET); 121 gpio_free_array(palmld_hdd_gpios, ARRAY_SIZE(palmld_hdd_gpios));
120 gpio_free(GPIO_NR_PALMLD_IDE_PWEN);
121 122
122 return 0; 123 return 0;
123} 124}
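The palmld conversion above replaces two request/direction call pairs with a single table handed to gpio_request_array() and gpio_free_array(). A rough user-space analogue of that table-driven acquire-with-unwind contract; none of this is the kernel implementation, and the GPIO numbers are invented.

#include <stdio.h>

struct gpio_entry {
        int         gpio;
        int         init_high;    /* initial output level */
        const char *label;
};

/* Stand-ins for the kernel's per-GPIO request/free calls. */
static int fake_request(const struct gpio_entry *e)
{
        printf("request %d (%s), drive %s\n",
               e->gpio, e->label, e->init_high ? "high" : "low");
        return 0;
}

static void fake_free(const struct gpio_entry *e)
{
        printf("free %d (%s)\n", e->gpio, e->label);
}

/* Acquire the whole table, unwinding already-acquired entries on failure --
 * the contract gpio_request_array() gives the driver above. */
static int request_array(const struct gpio_entry *tbl, int n)
{
        int i, err;

        for (i = 0; i < n; i++) {
                err = fake_request(&tbl[i]);
                if (err) {
                        while (--i >= 0)
                                fake_free(&tbl[i]);
                        return err;
                }
        }
        return 0;
}

static void free_array(const struct gpio_entry *tbl, int n)
{
        while (--n >= 0)
                fake_free(&tbl[n]);
}

int main(void)
{
        static const struct gpio_entry hdd_gpios[] = {
                { 10, 1, "HDD Power" },   /* invented numbers */
                { 11, 0, "HDD Reset" },
        };

        if (request_array(hdd_gpios, 2) == 0)
                free_array(hdd_gpios, 2);
        return 0;
}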
diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
index e944aa0c5517..021abe6d8527 100644
--- a/drivers/ata/pata_pcmcia.c
+++ b/drivers/ata/pata_pcmcia.c
@@ -34,7 +34,6 @@
34#include <linux/ata.h> 34#include <linux/ata.h>
35#include <linux/libata.h> 35#include <linux/libata.h>
36 36
37#include <pcmcia/cs.h>
38#include <pcmcia/cistpl.h> 37#include <pcmcia/cistpl.h>
39#include <pcmcia/ds.h> 38#include <pcmcia/ds.h>
40#include <pcmcia/cisreg.h> 39#include <pcmcia/cisreg.h>
@@ -125,7 +124,7 @@ static unsigned int ata_data_xfer_8bit(struct ata_device *dev,
125 * reset will recover the device. 124 * reset will recover the device.
126 * 125 *
127 */ 126 */
128 127
129static void pcmcia_8bit_drain_fifo(struct ata_queued_cmd *qc) 128static void pcmcia_8bit_drain_fifo(struct ata_queued_cmd *qc)
130{ 129{
131 int count; 130 int count;
@@ -168,63 +167,26 @@ static struct ata_port_operations pcmcia_8bit_port_ops = {
168}; 167};
169 168
170 169
171struct pcmcia_config_check { 170static int pcmcia_check_one_config(struct pcmcia_device *pdev, void *priv_data)
172 unsigned long ctl_base;
173 int skip_vcc;
174 int is_kme;
175};
176
177static int pcmcia_check_one_config(struct pcmcia_device *pdev,
178 cistpl_cftable_entry_t *cfg,
179 cistpl_cftable_entry_t *dflt,
180 unsigned int vcc,
181 void *priv_data)
182{ 171{
183 struct pcmcia_config_check *stk = priv_data; 172 int *is_kme = priv_data;
184 173
185 /* Check for matching Vcc, unless we're desperate */ 174 if (!(pdev->resource[0]->flags & IO_DATA_PATH_WIDTH_8)) {
186 if (!stk->skip_vcc) { 175 pdev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
187 if (cfg->vcc.present & (1 << CISTPL_POWER_VNOM)) { 176 pdev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO;
188 if (vcc != cfg->vcc.param[CISTPL_POWER_VNOM] / 10000)
189 return -ENODEV;
190 } else if (dflt->vcc.present & (1 << CISTPL_POWER_VNOM)) {
191 if (vcc != dflt->vcc.param[CISTPL_POWER_VNOM] / 10000)
192 return -ENODEV;
193 }
194 } 177 }
178 pdev->resource[1]->flags &= ~IO_DATA_PATH_WIDTH;
179 pdev->resource[1]->flags |= IO_DATA_PATH_WIDTH_8;
195 180
196 if (cfg->vpp1.present & (1 << CISTPL_POWER_VNOM)) 181 if (pdev->resource[1]->end) {
197 pdev->conf.Vpp = cfg->vpp1.param[CISTPL_POWER_VNOM] / 10000; 182 pdev->resource[0]->end = 8;
198 else if (dflt->vpp1.present & (1 << CISTPL_POWER_VNOM)) 183 pdev->resource[1]->end = (*is_kme) ? 2 : 1;
199 pdev->conf.Vpp = dflt->vpp1.param[CISTPL_POWER_VNOM] / 10000; 184 } else {
200 185 if (pdev->resource[0]->end < 16)
201 if ((cfg->io.nwin > 0) || (dflt->io.nwin > 0)) {
202 cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt->io;
203 pdev->io_lines = io->flags & CISTPL_IO_LINES_MASK;
204 pdev->resource[0]->start = io->win[0].base;
205 if (!(io->flags & CISTPL_IO_16BIT)) {
206 pdev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
207 pdev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
208 }
209 if (io->nwin == 2) {
210 pdev->resource[0]->end = 8;
211 pdev->resource[1]->start = io->win[1].base;
212 pdev->resource[1]->end = (stk->is_kme) ? 2 : 1;
213 if (pcmcia_request_io(pdev) != 0)
214 return -ENODEV;
215 stk->ctl_base = pdev->resource[1]->start;
216 } else if ((io->nwin == 1) && (io->win[0].len >= 16)) {
217 pdev->resource[0]->end = io->win[0].len;
218 pdev->resource[1]->end = 0;
219 if (pcmcia_request_io(pdev) != 0)
220 return -ENODEV;
221 stk->ctl_base = pdev->resource[0]->start + 0x0e;
222 } else
223 return -ENODEV; 186 return -ENODEV;
224 /* If we've got this far, we're done */
225 return 0;
226 } 187 }
227 return -ENODEV; 188
189 return pcmcia_request_io(pdev);
228} 190}
229 191
230/** 192/**
@@ -239,7 +201,6 @@ static int pcmcia_init_one(struct pcmcia_device *pdev)
239{ 201{
240 struct ata_host *host; 202 struct ata_host *host;
241 struct ata_port *ap; 203 struct ata_port *ap;
242 struct pcmcia_config_check *stk = NULL;
243 int is_kme = 0, ret = -ENOMEM, p; 204 int is_kme = 0, ret = -ENOMEM, p;
244 unsigned long io_base, ctl_base; 205 unsigned long io_base, ctl_base;
245 void __iomem *io_addr, *ctl_addr; 206 void __iomem *io_addr, *ctl_addr;
@@ -247,10 +208,8 @@ static int pcmcia_init_one(struct pcmcia_device *pdev)
247 struct ata_port_operations *ops = &pcmcia_port_ops; 208 struct ata_port_operations *ops = &pcmcia_port_ops;
248 209
249 /* Set up attributes in order to probe card and get resources */ 210 /* Set up attributes in order to probe card and get resources */
250 pdev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO; 211 pdev->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO |
251 pdev->resource[1]->flags |= IO_DATA_PATH_WIDTH_8; 212 CONF_AUTO_SET_VPP | CONF_AUTO_CHECK_VCC;
252 pdev->conf.Attributes = CONF_ENABLE_IRQ;
253 pdev->conf.IntType = INT_MEMORY_AND_IO;
254 213
255 /* See if we have a manufacturer identifier. Use it to set is_kme for 214 /* See if we have a manufacturer identifier. Use it to set is_kme for
256 vendor quirks */ 215 vendor quirks */
@@ -258,25 +217,21 @@ static int pcmcia_init_one(struct pcmcia_device *pdev)
258 ((pdev->card_id == PRODID_KME_KXLC005_A) || 217 ((pdev->card_id == PRODID_KME_KXLC005_A) ||
259 (pdev->card_id == PRODID_KME_KXLC005_B))); 218 (pdev->card_id == PRODID_KME_KXLC005_B)));
260 219
261 /* Allocate resoure probing structures */ 220 if (pcmcia_loop_config(pdev, pcmcia_check_one_config, &is_kme)) {
262 221 pdev->config_flags &= ~CONF_AUTO_CHECK_VCC;
263 stk = kzalloc(sizeof(*stk), GFP_KERNEL); 222 if (pcmcia_loop_config(pdev, pcmcia_check_one_config, &is_kme))
264 if (!stk)
265 goto out1;
266 stk->is_kme = is_kme;
267 stk->skip_vcc = io_base = ctl_base = 0;
268
269 if (pcmcia_loop_config(pdev, pcmcia_check_one_config, stk)) {
270 stk->skip_vcc = 1;
271 if (pcmcia_loop_config(pdev, pcmcia_check_one_config, stk))
272 goto failed; /* No suitable config found */ 223 goto failed; /* No suitable config found */
273 } 224 }
274 io_base = pdev->resource[0]->start; 225 io_base = pdev->resource[0]->start;
275 ctl_base = stk->ctl_base; 226 if (pdev->resource[1]->end)
227 ctl_base = pdev->resource[1]->start;
228 else
229 ctl_base = pdev->resource[0]->start + 0x0e;
230
276 if (!pdev->irq) 231 if (!pdev->irq)
277 goto failed; 232 goto failed;
278 233
279 ret = pcmcia_request_configuration(pdev, &pdev->conf); 234 ret = pcmcia_enable_device(pdev);
280 if (ret) 235 if (ret)
281 goto failed; 236 goto failed;
282 237
@@ -329,13 +284,10 @@ static int pcmcia_init_one(struct pcmcia_device *pdev)
329 goto failed; 284 goto failed;
330 285
331 pdev->priv = host; 286 pdev->priv = host;
332 kfree(stk);
333 return 0; 287 return 0;
334 288
335failed: 289failed:
336 kfree(stk);
337 pcmcia_disable_device(pdev); 290 pcmcia_disable_device(pdev);
338out1:
339 return ret; 291 return ret;
340} 292}
341 293
@@ -357,7 +309,7 @@ static void pcmcia_remove_one(struct pcmcia_device *pdev)
357 pcmcia_disable_device(pdev); 309 pcmcia_disable_device(pdev);
358} 310}
359 311
360static struct pcmcia_device_id pcmcia_devices[] = { 312static const struct pcmcia_device_id pcmcia_devices[] = {
361 PCMCIA_DEVICE_FUNC_ID(4), 313 PCMCIA_DEVICE_FUNC_ID(4),
362 PCMCIA_DEVICE_MANF_CARD(0x0000, 0x0000), /* Corsair */ 314 PCMCIA_DEVICE_MANF_CARD(0x0000, 0x0000), /* Corsair */
363 PCMCIA_DEVICE_MANF_CARD(0x0007, 0x0000), /* Hitachi */ 315 PCMCIA_DEVICE_MANF_CARD(0x0007, 0x0000), /* Hitachi */
@@ -430,9 +382,7 @@ MODULE_DEVICE_TABLE(pcmcia, pcmcia_devices);
430 382
431static struct pcmcia_driver pcmcia_driver = { 383static struct pcmcia_driver pcmcia_driver = {
432 .owner = THIS_MODULE, 384 .owner = THIS_MODULE,
433 .drv = { 385 .name = DRV_NAME,
434 .name = DRV_NAME,
435 },
436 .id_table = pcmcia_devices, 386 .id_table = pcmcia_devices,
437 .probe = pcmcia_init_one, 387 .probe = pcmcia_init_one,
438 .remove = pcmcia_remove_one, 388 .remove = pcmcia_remove_one,
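With the scratch pcmcia_config_check structure gone, the probe above derives the control-register base straight from the card's I/O windows: a second window holds it, otherwise it sits at offset 0x0e inside a single window of at least 16 bytes. A tiny sketch of that choice, with invented window layouts.

#include <stdio.h>

struct io_window { unsigned long start; unsigned long len; };

/* Mirror of the ctl_base choice in pcmcia_init_one() above. */
static unsigned long pick_ctl_base(const struct io_window *win)
{
        if (win[1].len)
                return win[1].start;            /* dedicated ctl window */
        return win[0].start + 0x0e;             /* ctl inside the data window */
}

int main(void)
{
        struct io_window two_windows[2] = { { 0x100, 8 }, { 0x10e, 1 } };
        struct io_window one_window[2]  = { { 0x200, 16 }, { 0, 0 } };

        printf("two windows: ctl at 0x%lx\n", pick_ctl_base(two_windows));
        printf("one window:  ctl at 0x%lx\n", pick_ctl_base(one_window));
        return 0;
}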
diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
index b18351122525..9765ace16921 100644
--- a/drivers/ata/pata_pdc2027x.c
+++ b/drivers/ata/pata_pdc2027x.c
@@ -150,8 +150,7 @@ static struct ata_port_operations pdc2027x_pata133_ops = {
150static struct ata_port_info pdc2027x_port_info[] = { 150static struct ata_port_info pdc2027x_port_info[] = {
151 /* PDC_UDMA_100 */ 151 /* PDC_UDMA_100 */
152 { 152 {
153 .flags = ATA_FLAG_NO_LEGACY | ATA_FLAG_SLAVE_POSS | 153 .flags = ATA_FLAG_SLAVE_POSS,
154 ATA_FLAG_MMIO,
155 .pio_mask = ATA_PIO4, 154 .pio_mask = ATA_PIO4,
156 .mwdma_mask = ATA_MWDMA2, 155 .mwdma_mask = ATA_MWDMA2,
157 .udma_mask = ATA_UDMA5, 156 .udma_mask = ATA_UDMA5,
@@ -159,8 +158,7 @@ static struct ata_port_info pdc2027x_port_info[] = {
159 }, 158 },
160 /* PDC_UDMA_133 */ 159 /* PDC_UDMA_133 */
161 { 160 {
162 .flags = ATA_FLAG_NO_LEGACY | ATA_FLAG_SLAVE_POSS | 161 .flags = ATA_FLAG_SLAVE_POSS,
163 ATA_FLAG_MMIO,
164 .pio_mask = ATA_PIO4, 162 .pio_mask = ATA_PIO4,
165 .mwdma_mask = ATA_MWDMA2, 163 .mwdma_mask = ATA_MWDMA2,
166 .udma_mask = ATA_UDMA6, 164 .udma_mask = ATA_UDMA6,
diff --git a/drivers/ata/pata_pdc202xx_old.c b/drivers/ata/pata_pdc202xx_old.c
index c39f213e1bbc..c2ed5868dda6 100644
--- a/drivers/ata/pata_pdc202xx_old.c
+++ b/drivers/ata/pata_pdc202xx_old.c
@@ -44,6 +44,27 @@ static void pdc202xx_exec_command(struct ata_port *ap,
44 ndelay(400); 44 ndelay(400);
45} 45}
46 46
47static bool pdc202xx_irq_check(struct ata_port *ap)
48{
49 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
50 unsigned long master = pci_resource_start(pdev, 4);
51 u8 sc1d = inb(master + 0x1d);
52
53 if (ap->port_no) {
54 /*
55 * bit 7: error, bit 6: interrupting,
56 * bit 5: FIFO full, bit 4: FIFO empty
57 */
58 return sc1d & 0x40;
59 } else {
60 /*
61 * bit 3: error, bit 2: interrupting,
62 * bit 1: FIFO full, bit 0: FIFO empty
63 */
64 return sc1d & 0x04;
65 }
66}
67
47/** 68/**
48 * pdc202xx_configure_piomode - set chip PIO timing 69 * pdc202xx_configure_piomode - set chip PIO timing
49 * @ap: ATA interface 70 * @ap: ATA interface
@@ -282,6 +303,7 @@ static struct ata_port_operations pdc2024x_port_ops = {
282 .set_dmamode = pdc202xx_set_dmamode, 303 .set_dmamode = pdc202xx_set_dmamode,
283 304
284 .sff_exec_command = pdc202xx_exec_command, 305 .sff_exec_command = pdc202xx_exec_command,
306 .sff_irq_check = pdc202xx_irq_check,
285}; 307};
286 308
287static struct ata_port_operations pdc2026x_port_ops = { 309static struct ata_port_operations pdc2026x_port_ops = {
@@ -297,6 +319,7 @@ static struct ata_port_operations pdc2026x_port_ops = {
297 .port_start = pdc2026x_port_start, 319 .port_start = pdc2026x_port_start,
298 320
299 .sff_exec_command = pdc202xx_exec_command, 321 .sff_exec_command = pdc202xx_exec_command,
322 .sff_irq_check = pdc202xx_irq_check,
300}; 323};
301 324
302static int pdc202xx_init_one(struct pci_dev *dev, const struct pci_device_id *id) 325static int pdc202xx_init_one(struct pci_dev *dev, const struct pci_device_id *id)
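The new pdc202xx_irq_check() hook above reads the controller's SC1D byte, which carries one status nibble per channel; the "interrupting" flag is bit 2 for the primary port and bit 6 for the secondary. A worked example of that bit selection, with a made-up register value.

#include <stdio.h>

/* Same masks pdc202xx_irq_check() tests: 0x04 (primary), 0x40 (secondary). */
static int irq_pending(unsigned char sc1d, int port_no)
{
        return sc1d & (port_no ? 0x40 : 0x04);
}

int main(void)
{
        unsigned char sc1d = 0x44;      /* both channels flag an interrupt */

        printf("primary pending:   %d\n", irq_pending(sc1d, 0) ? 1 : 0);
        printf("secondary pending: %d\n", irq_pending(sc1d, 1) ? 1 : 0);
        return 0;
}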
diff --git a/drivers/ata/pata_pxa.c b/drivers/ata/pata_pxa.c
index 1898c6ed4b4e..b4ede40f8ae1 100644
--- a/drivers/ata/pata_pxa.c
+++ b/drivers/ata/pata_pxa.c
@@ -292,7 +292,6 @@ static int __devinit pxa_ata_probe(struct platform_device *pdev)
292 ap->ops = &pxa_ata_port_ops; 292 ap->ops = &pxa_ata_port_ops;
293 ap->pio_mask = ATA_PIO4; 293 ap->pio_mask = ATA_PIO4;
294 ap->mwdma_mask = ATA_MWDMA2; 294 ap->mwdma_mask = ATA_MWDMA2;
295 ap->flags = ATA_FLAG_MMIO;
296 295
297 ap->ioaddr.cmd_addr = devm_ioremap(&pdev->dev, cmd_res->start, 296 ap->ioaddr.cmd_addr = devm_ioremap(&pdev->dev, cmd_res->start,
298 resource_size(cmd_res)); 297 resource_size(cmd_res));
diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c
index 0ffd631000b7..1b9d10d9c5d9 100644
--- a/drivers/ata/pata_rb532_cf.c
+++ b/drivers/ata/pata_rb532_cf.c
@@ -60,10 +60,10 @@ static irqreturn_t rb532_pata_irq_handler(int irq, void *dev_instance)
60 struct rb532_cf_info *info = ah->private_data; 60 struct rb532_cf_info *info = ah->private_data;
61 61
62 if (gpio_get_value(info->gpio_line)) { 62 if (gpio_get_value(info->gpio_line)) {
63 set_irq_type(info->irq, IRQ_TYPE_LEVEL_LOW); 63 irq_set_irq_type(info->irq, IRQ_TYPE_LEVEL_LOW);
64 ata_sff_interrupt(info->irq, dev_instance); 64 ata_sff_interrupt(info->irq, dev_instance);
65 } else { 65 } else {
66 set_irq_type(info->irq, IRQ_TYPE_LEVEL_HIGH); 66 irq_set_irq_type(info->irq, IRQ_TYPE_LEVEL_HIGH);
67 } 67 }
68 68
69 return IRQ_HANDLED; 69 return IRQ_HANDLED;
@@ -91,7 +91,6 @@ static void rb532_pata_setup_ports(struct ata_host *ah)
91 91
92 ap->ops = &rb532_pata_port_ops; 92 ap->ops = &rb532_pata_port_ops;
93 ap->pio_mask = ATA_PIO4; 93 ap->pio_mask = ATA_PIO4;
94 ap->flags = ATA_FLAG_NO_LEGACY | ATA_FLAG_MMIO;
95 94
96 ap->ioaddr.cmd_addr = info->iobase + RB500_CF_REG_BASE; 95 ap->ioaddr.cmd_addr = info->iobase + RB500_CF_REG_BASE;
97 ap->ioaddr.ctl_addr = info->iobase + RB500_CF_REG_CTRL; 96 ap->ioaddr.ctl_addr = info->iobase + RB500_CF_REG_CTRL;
diff --git a/drivers/ata/pata_rz1000.c b/drivers/ata/pata_rz1000.c
index 4a454a88aa9d..4d04471794b6 100644
--- a/drivers/ata/pata_rz1000.c
+++ b/drivers/ata/pata_rz1000.c
@@ -112,7 +112,7 @@ static int rz1000_reinit_one(struct pci_dev *pdev)
112 if (rc) 112 if (rc)
113 return rc; 113 return rc;
114 114
115 /* If this fails on resume (which is a "cant happen" case), we 115 /* If this fails on resume (which is a "can't happen" case), we
116 must stop as any progress risks data loss */ 116 must stop as any progress risks data loss */
117 if (rz1000_fifo_disable(pdev)) 117 if (rz1000_fifo_disable(pdev))
118 panic("rz1000 fifo"); 118 panic("rz1000 fifo");
diff --git a/drivers/ata/pata_samsung_cf.c b/drivers/ata/pata_samsung_cf.c
index 6f9cfb24b751..c446ae6055a3 100644
--- a/drivers/ata/pata_samsung_cf.c
+++ b/drivers/ata/pata_samsung_cf.c
@@ -322,7 +322,7 @@ static int pata_s3c_wait_after_reset(struct ata_link *link,
322{ 322{
323 int rc; 323 int rc;
324 324
325 msleep(ATA_WAIT_AFTER_RESET); 325 ata_msleep(link->ap, ATA_WAIT_AFTER_RESET);
326 326
327 /* always check readiness of the master device */ 327 /* always check readiness of the master device */
328 rc = ata_sff_wait_ready(link, deadline); 328 rc = ata_sff_wait_ready(link, deadline);
@@ -531,7 +531,6 @@ static int __init pata_s3c_probe(struct platform_device *pdev)
531 } 531 }
532 532
533 ap = host->ports[0]; 533 ap = host->ports[0];
534 ap->flags |= ATA_FLAG_MMIO;
535 ap->pio_mask = ATA_PIO4; 534 ap->pio_mask = ATA_PIO4;
536 535
537 if (cpu_type == TYPE_S3C64XX) { 536 if (cpu_type == TYPE_S3C64XX) {
diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c
index fe36966f7e34..88ea9b677b47 100644
--- a/drivers/ata/pata_scc.c
+++ b/drivers/ata/pata_scc.c
@@ -530,7 +530,7 @@ static int scc_wait_after_reset(struct ata_link *link, unsigned int devmask,
530 * 530 *
531 * Old drivers/ide uses the 2mS rule and then waits for ready. 531 * Old drivers/ide uses the 2mS rule and then waits for ready.
532 */ 532 */
533 msleep(150); 533 ata_msleep(ap, 150);
534 534
535 /* always check readiness of the master device */ 535 /* always check readiness of the master device */
536 rc = ata_sff_wait_ready(link, deadline); 536 rc = ata_sff_wait_ready(link, deadline);
@@ -559,7 +559,7 @@ static int scc_wait_after_reset(struct ata_link *link, unsigned int devmask,
559 lbal = in_be32(ioaddr->lbal_addr); 559 lbal = in_be32(ioaddr->lbal_addr);
560 if ((nsect == 1) && (lbal == 1)) 560 if ((nsect == 1) && (lbal == 1))
561 break; 561 break;
562 msleep(50); /* give drive a breather */ 562 ata_msleep(ap, 50); /* give drive a breather */
563 } 563 }
564 564
565 rc = ata_sff_wait_ready(link, deadline); 565 rc = ata_sff_wait_ready(link, deadline);
@@ -959,7 +959,7 @@ static struct ata_port_operations scc_pata_ops = {
959 959
960static struct ata_port_info scc_port_info[] = { 960static struct ata_port_info scc_port_info[] = {
961 { 961 {
962 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_MMIO | ATA_FLAG_NO_LEGACY, 962 .flags = ATA_FLAG_SLAVE_POSS,
963 .pio_mask = ATA_PIO4, 963 .pio_mask = ATA_PIO4,
964 /* No MWDMA */ 964 /* No MWDMA */
965 .udma_mask = ATA_UDMA6, 965 .udma_mask = ATA_UDMA6,
diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c
index d3190d7ec304..118787caa93f 100644
--- a/drivers/ata/pata_sil680.c
+++ b/drivers/ata/pata_sil680.c
@@ -11,7 +11,7 @@
11 * 11 *
12 * May be copied or modified under the terms of the GNU General Public License 12 * May be copied or modified under the terms of the GNU General Public License
13 * 13 *
14 * Documentation publically available. 14 * Documentation publicly available.
15 * 15 *
16 * If you have strange problems with nVidia chipset systems please 16 * If you have strange problems with nVidia chipset systems please
17 * see the SI support documentation and update your system BIOS 17 * see the SI support documentation and update your system BIOS
@@ -43,7 +43,7 @@
43 * 43 *
44 * Turn a config register offset into the right address in either 44 * Turn a config register offset into the right address in either
45 * PCI space or MMIO space to access the control register in question 45 * PCI space or MMIO space to access the control register in question
46 * Thankfully this is a configuration operation so isnt performance 46 * Thankfully this is a configuration operation so isn't performance
47 * critical. 47 * critical.
48 */ 48 */
49 49
@@ -202,14 +202,25 @@ static void sil680_set_dmamode(struct ata_port *ap, struct ata_device *adev)
202 * LOCKING: 202 * LOCKING:
203 * spin_lock_irqsave(host lock) 203 * spin_lock_irqsave(host lock)
204 */ 204 */
205void sil680_sff_exec_command(struct ata_port *ap, 205static void sil680_sff_exec_command(struct ata_port *ap,
206 const struct ata_taskfile *tf) 206 const struct ata_taskfile *tf)
207{ 207{
208 DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command); 208 DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command);
209 iowrite8(tf->command, ap->ioaddr.command_addr); 209 iowrite8(tf->command, ap->ioaddr.command_addr);
210 ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD); 210 ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
211} 211}
212 212
213static bool sil680_sff_irq_check(struct ata_port *ap)
214{
215 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
216 unsigned long addr = sil680_selreg(ap, 1);
217 u8 val;
218
219 pci_read_config_byte(pdev, addr, &val);
220
221 return val & 0x08;
222}
223
213static struct scsi_host_template sil680_sht = { 224static struct scsi_host_template sil680_sht = {
214 ATA_BMDMA_SHT(DRV_NAME), 225 ATA_BMDMA_SHT(DRV_NAME),
215}; 226};
@@ -218,6 +229,7 @@ static struct scsi_host_template sil680_sht = {
218static struct ata_port_operations sil680_port_ops = { 229static struct ata_port_operations sil680_port_ops = {
219 .inherits = &ata_bmdma32_port_ops, 230 .inherits = &ata_bmdma32_port_ops,
220 .sff_exec_command = sil680_sff_exec_command, 231 .sff_exec_command = sil680_sff_exec_command,
232 .sff_irq_check = sil680_sff_irq_check,
221 .cable_detect = sil680_cable_detect, 233 .cable_detect = sil680_cable_detect,
222 .set_piomode = sil680_set_piomode, 234 .set_piomode = sil680_set_piomode,
223 .set_dmamode = sil680_set_dmamode, 235 .set_dmamode = sil680_set_dmamode,
diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c
index 60cea13cccce..be08ff92db17 100644
--- a/drivers/ata/pata_sis.c
+++ b/drivers/ata/pata_sis.c
@@ -331,7 +331,7 @@ static void sis_old_set_dmamode (struct ata_port *ap, struct ata_device *adev)
331 331
332 if (adev->dma_mode < XFER_UDMA_0) { 332 if (adev->dma_mode < XFER_UDMA_0) {
333 /* bits 3-0 hold recovery timing bits 8-10 active timing and 333 /* bits 3-0 hold recovery timing bits 8-10 active timing and
334 the higher bits are dependant on the device */ 334 the higher bits are dependent on the device */
335 timing &= ~0x870F; 335 timing &= ~0x870F;
336 timing |= mwdma_bits[speed]; 336 timing |= mwdma_bits[speed];
337 } else { 337 } else {
@@ -371,7 +371,7 @@ static void sis_66_set_dmamode (struct ata_port *ap, struct ata_device *adev)
371 371
372 if (adev->dma_mode < XFER_UDMA_0) { 372 if (adev->dma_mode < XFER_UDMA_0) {
373 /* bits 3-0 hold recovery timing bits 8-10 active timing and 373 /* bits 3-0 hold recovery timing bits 8-10 active timing and
374 the higher bits are dependant on the device, bit 15 udma */ 374 the higher bits are dependent on the device, bit 15 udma */
375 timing &= ~0x870F; 375 timing &= ~0x870F;
376 timing |= mwdma_bits[speed]; 376 timing |= mwdma_bits[speed];
377 } else { 377 } else {
@@ -593,7 +593,7 @@ static const struct ata_port_info sis_info133 = {
593 .port_ops = &sis_133_ops, 593 .port_ops = &sis_133_ops,
594}; 594};
595const struct ata_port_info sis_info133_for_sata = { 595const struct ata_port_info sis_info133_for_sata = {
596 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, 596 .flags = ATA_FLAG_SLAVE_POSS,
597 .pio_mask = ATA_PIO4, 597 .pio_mask = ATA_PIO4,
598 /* No MWDMA */ 598 /* No MWDMA */
599 .udma_mask = ATA_UDMA6, 599 .udma_mask = ATA_UDMA6,
diff --git a/drivers/ata/pata_sl82c105.c b/drivers/ata/pata_sl82c105.c
index 98548f640c8e..7f5d020ed56c 100644
--- a/drivers/ata/pata_sl82c105.c
+++ b/drivers/ata/pata_sl82c105.c
@@ -227,6 +227,16 @@ static int sl82c105_qc_defer(struct ata_queued_cmd *qc)
227 return 0; 227 return 0;
228} 228}
229 229
230static bool sl82c105_sff_irq_check(struct ata_port *ap)
231{
232 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
233 u32 val, mask = ap->port_no ? CTRL_IDE_IRQB : CTRL_IDE_IRQA;
234
235 pci_read_config_dword(pdev, 0x40, &val);
236
237 return val & mask;
238}
239
230static struct scsi_host_template sl82c105_sht = { 240static struct scsi_host_template sl82c105_sht = {
231 ATA_BMDMA_SHT(DRV_NAME), 241 ATA_BMDMA_SHT(DRV_NAME),
232}; 242};
@@ -239,6 +249,7 @@ static struct ata_port_operations sl82c105_port_ops = {
239 .cable_detect = ata_cable_40wire, 249 .cable_detect = ata_cable_40wire,
240 .set_piomode = sl82c105_set_piomode, 250 .set_piomode = sl82c105_set_piomode,
241 .prereset = sl82c105_pre_reset, 251 .prereset = sl82c105_pre_reset,
252 .sff_irq_check = sl82c105_sff_irq_check,
242}; 253};
243 254
244/** 255/**
diff --git a/drivers/ata/pata_triflex.c b/drivers/ata/pata_triflex.c
index 0d1f89e571dd..b3e0c9432283 100644
--- a/drivers/ata/pata_triflex.c
+++ b/drivers/ata/pata_triflex.c
@@ -30,7 +30,7 @@
30 * Loosely based on the piix & svwks drivers. 30 * Loosely based on the piix & svwks drivers.
31 * 31 *
32 * Documentation: 32 * Documentation:
33 * Not publically available. 33 * Not publicly available.
34 */ 34 */
35 35
36#include <linux/kernel.h> 36#include <linux/kernel.h>
@@ -210,13 +210,34 @@ static const struct pci_device_id triflex[] = {
210 { }, 210 { },
211}; 211};
212 212
213#ifdef CONFIG_PM
214static int triflex_ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
215{
216 struct ata_host *host = dev_get_drvdata(&pdev->dev);
217 int rc = 0;
218
219 rc = ata_host_suspend(host, mesg);
220 if (rc)
221 return rc;
222
223 /*
224 * We must not disable or powerdown the device.
225 * APM bios refuses to suspend if IDE is not accessible.
226 */
227 pci_save_state(pdev);
228
229 return 0;
230}
231
232#endif
233
213static struct pci_driver triflex_pci_driver = { 234static struct pci_driver triflex_pci_driver = {
214 .name = DRV_NAME, 235 .name = DRV_NAME,
215 .id_table = triflex, 236 .id_table = triflex,
216 .probe = triflex_init_one, 237 .probe = triflex_init_one,
217 .remove = ata_pci_remove_one, 238 .remove = ata_pci_remove_one,
218#ifdef CONFIG_PM 239#ifdef CONFIG_PM
219 .suspend = ata_pci_device_suspend, 240 .suspend = triflex_ata_pci_device_suspend,
220 .resume = ata_pci_device_resume, 241 .resume = ata_pci_device_resume,
221#endif 242#endif
222}; 243};
diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c
index adbe0426c8f0..1111712b3d7d 100644
--- a/drivers/ata/pdc_adma.c
+++ b/drivers/ata/pdc_adma.c
@@ -166,9 +166,7 @@ static struct ata_port_operations adma_ata_ops = {
166static struct ata_port_info adma_port_info[] = { 166static struct ata_port_info adma_port_info[] = {
167 /* board_1841_idx */ 167 /* board_1841_idx */
168 { 168 {
169 .flags = ATA_FLAG_SLAVE_POSS | 169 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_POLLING,
170 ATA_FLAG_NO_LEGACY | ATA_FLAG_MMIO |
171 ATA_FLAG_PIO_POLLING,
172 .pio_mask = ATA_PIO4_ONLY, 170 .pio_mask = ATA_PIO4_ONLY,
173 .udma_mask = ATA_UDMA4, 171 .udma_mask = ATA_UDMA4,
174 .port_ops = &adma_ata_ops, 172 .port_ops = &adma_ata_ops,
diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c
index 6cf57c5c2b5f..dc88a39e7db8 100644
--- a/drivers/ata/sata_dwc_460ex.c
+++ b/drivers/ata/sata_dwc_460ex.c
@@ -40,8 +40,11 @@
40#include <scsi/scsi_host.h> 40#include <scsi/scsi_host.h>
41#include <scsi/scsi_cmnd.h> 41#include <scsi/scsi_cmnd.h>
42 42
43/* These two are defined in "libata.h" */
44#undef DRV_NAME
45#undef DRV_VERSION
43#define DRV_NAME "sata-dwc" 46#define DRV_NAME "sata-dwc"
44#define DRV_VERSION "1.0" 47#define DRV_VERSION "1.3"
45 48
46/* SATA DMA driver Globals */ 49/* SATA DMA driver Globals */
47#define DMA_NUM_CHANS 1 50#define DMA_NUM_CHANS 1
@@ -333,11 +336,47 @@ static int dma_dwc_xfer_setup(struct scatterlist *sg, int num_elems,
333 void __iomem *addr, int dir); 336 void __iomem *addr, int dir);
334static void dma_dwc_xfer_start(int dma_ch); 337static void dma_dwc_xfer_start(int dma_ch);
335 338
339static const char *get_prot_descript(u8 protocol)
340{
341 switch ((enum ata_tf_protocols)protocol) {
342 case ATA_PROT_NODATA:
343 return "ATA no data";
344 case ATA_PROT_PIO:
345 return "ATA PIO";
346 case ATA_PROT_DMA:
347 return "ATA DMA";
348 case ATA_PROT_NCQ:
349 return "ATA NCQ";
350 case ATAPI_PROT_NODATA:
351 return "ATAPI no data";
352 case ATAPI_PROT_PIO:
353 return "ATAPI PIO";
354 case ATAPI_PROT_DMA:
355 return "ATAPI DMA";
356 default:
357 return "unknown";
358 }
359}
360
361static const char *get_dma_dir_descript(int dma_dir)
362{
363 switch ((enum dma_data_direction)dma_dir) {
364 case DMA_BIDIRECTIONAL:
365 return "bidirectional";
366 case DMA_TO_DEVICE:
367 return "to device";
368 case DMA_FROM_DEVICE:
369 return "from device";
370 default:
371 return "none";
372 }
373}
374
336static void sata_dwc_tf_dump(struct ata_taskfile *tf) 375static void sata_dwc_tf_dump(struct ata_taskfile *tf)
337{ 376{
338 dev_vdbg(host_pvt.dwc_dev, "taskfile cmd: 0x%02x protocol: %s flags:" 377 dev_vdbg(host_pvt.dwc_dev, "taskfile cmd: 0x%02x protocol: %s flags:"
339 "0x%lx device: %x\n", tf->command, ata_get_cmd_descript\ 378 "0x%lx device: %x\n", tf->command,
340 (tf->protocol), tf->flags, tf->device); 379 get_prot_descript(tf->protocol), tf->flags, tf->device);
341 dev_vdbg(host_pvt.dwc_dev, "feature: 0x%02x nsect: 0x%x lbal: 0x%x " 380 dev_vdbg(host_pvt.dwc_dev, "feature: 0x%02x nsect: 0x%x lbal: 0x%x "
342 "lbam: 0x%x lbah: 0x%x\n", tf->feature, tf->nsect, tf->lbal, 381 "lbam: 0x%x lbah: 0x%x\n", tf->feature, tf->nsect, tf->lbal,
343 tf->lbam, tf->lbah); 382 tf->lbam, tf->lbah);
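get_prot_descript() and get_dma_dir_descript() are plain lookup switches over the taskfile protocol and DMA direction enums; they exist only to make the dev_dbg()/dev_vdbg() traces readable. An illustrative caller (the function name is hypothetical, the helper calls are the ones added above):

        /* Sketch: human-readable trace of a queued command. */
        static void example_trace_qc(struct ata_queued_cmd *qc)
        {
                dev_dbg(qc->ap->dev, "tag %u: proto=%s dir=%s\n",
                        qc->tag,
                        get_prot_descript(qc->tf.protocol),
                        get_dma_dir_descript(qc->dma_dir));
        }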
@@ -350,7 +389,7 @@ static void sata_dwc_tf_dump(struct ata_taskfile *tf)
350/* 389/*
351 * Function: get_burst_length_encode 390 * Function: get_burst_length_encode
352 * arguments: datalength: length in bytes of data 391 * arguments: datalength: length in bytes of data
353 * returns value to be programmed in register corrresponding to data length 392 * returns value to be programmed in register corresponding to data length
354 * This value is effectively the log(base 2) of the length 393 * This value is effectively the log(base 2) of the length
355 */ 394 */
356static int get_burst_length_encode(int datalength) 395static int get_burst_length_encode(int datalength)
@@ -715,7 +754,7 @@ static int dma_dwc_xfer_setup(struct scatterlist *sg, int num_elems,
715 /* Program the CTL register with src enable / dst enable */ 754 /* Program the CTL register with src enable / dst enable */
716 out_le32(&(host_pvt.sata_dma_regs->chan_regs[dma_ch].ctl.low), 755 out_le32(&(host_pvt.sata_dma_regs->chan_regs[dma_ch].ctl.low),
717 DMA_CTL_LLP_SRCEN | DMA_CTL_LLP_DSTEN); 756 DMA_CTL_LLP_SRCEN | DMA_CTL_LLP_DSTEN);
718 return 0; 757 return dma_ch;
719} 758}
720 759
721/* 760/*
@@ -967,7 +1006,7 @@ static irqreturn_t sata_dwc_isr(int irq, void *dev_instance)
967 } 1006 }
968 1007
969 dev_dbg(ap->dev, "%s non-NCQ cmd interrupt, protocol: %s\n", 1008 dev_dbg(ap->dev, "%s non-NCQ cmd interrupt, protocol: %s\n",
970 __func__, ata_get_cmd_descript(qc->tf.protocol)); 1009 __func__, get_prot_descript(qc->tf.protocol));
971DRVSTILLBUSY: 1010DRVSTILLBUSY:
972 if (ata_is_dma(qc->tf.protocol)) { 1011 if (ata_is_dma(qc->tf.protocol)) {
973 /* 1012 /*
@@ -1057,7 +1096,7 @@ DRVSTILLBUSY:
1057 1096
1058 /* Process completed command */ 1097 /* Process completed command */
1059 dev_dbg(ap->dev, "%s NCQ command, protocol: %s\n", __func__, 1098 dev_dbg(ap->dev, "%s NCQ command, protocol: %s\n", __func__,
1060 ata_get_cmd_descript(qc->tf.protocol)); 1099 get_prot_descript(qc->tf.protocol));
1061 if (ata_is_dma(qc->tf.protocol)) { 1100 if (ata_is_dma(qc->tf.protocol)) {
1062 host_pvt.dma_interrupt_count++; 1101 host_pvt.dma_interrupt_count++;
1063 if (hsdevp->dma_pending[tag] == \ 1102 if (hsdevp->dma_pending[tag] == \
@@ -1142,8 +1181,8 @@ static void sata_dwc_dma_xfer_complete(struct ata_port *ap, u32 check_status)
1142 if (tag > 0) { 1181 if (tag > 0) {
1143 dev_info(ap->dev, "%s tag=%u cmd=0x%02x dma dir=%s proto=%s " 1182 dev_info(ap->dev, "%s tag=%u cmd=0x%02x dma dir=%s proto=%s "
1144 "dmacr=0x%08x\n", __func__, qc->tag, qc->tf.command, 1183 "dmacr=0x%08x\n", __func__, qc->tag, qc->tf.command,
1145 ata_get_cmd_descript(qc->dma_dir), 1184 get_dma_dir_descript(qc->dma_dir),
1146 ata_get_cmd_descript(qc->tf.protocol), 1185 get_prot_descript(qc->tf.protocol),
1147 in_le32(&(hsdev->sata_dwc_regs->dmacr))); 1186 in_le32(&(hsdev->sata_dwc_regs->dmacr)));
1148 } 1187 }
1149#endif 1188#endif
@@ -1354,7 +1393,7 @@ static void sata_dwc_exec_command_by_tag(struct ata_port *ap,
1354 struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); 1393 struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
1355 1394
1356 dev_dbg(ap->dev, "%s cmd(0x%02x): %s tag=%d\n", __func__, tf->command, 1395 dev_dbg(ap->dev, "%s cmd(0x%02x): %s tag=%d\n", __func__, tf->command,
1357 ata_get_cmd_descript(tf), tag); 1396 ata_get_cmd_descript(tf->command), tag);
1358 1397
1359 spin_lock_irqsave(&ap->host->lock, flags); 1398 spin_lock_irqsave(&ap->host->lock, flags);
1360 hsdevp->cmd_issued[tag] = cmd_issued; 1399 hsdevp->cmd_issued[tag] = cmd_issued;
@@ -1413,7 +1452,7 @@ static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag)
1413 1452
1414 dev_dbg(ap->dev, "%s qc=%p tag: %x cmd: 0x%02x dma_dir: %s " 1453 dev_dbg(ap->dev, "%s qc=%p tag: %x cmd: 0x%02x dma_dir: %s "
1415 "start_dma? %x\n", __func__, qc, tag, qc->tf.command, 1454 "start_dma? %x\n", __func__, qc, tag, qc->tf.command,
1416 ata_get_cmd_descript(qc->dma_dir), start_dma); 1455 get_dma_dir_descript(qc->dma_dir), start_dma);
1417 sata_dwc_tf_dump(&(qc->tf)); 1456 sata_dwc_tf_dump(&(qc->tf));
1418 1457
1419 if (start_dma) { 1458 if (start_dma) {
@@ -1462,10 +1501,9 @@ static void sata_dwc_qc_prep_by_tag(struct ata_queued_cmd *qc, u8 tag)
1462 int dma_chan; 1501 int dma_chan;
1463 struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap); 1502 struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
1464 struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); 1503 struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
1465 int err;
1466 1504
1467 dev_dbg(ap->dev, "%s: port=%d dma dir=%s n_elem=%d\n", 1505 dev_dbg(ap->dev, "%s: port=%d dma dir=%s n_elem=%d\n",
1468 __func__, ap->port_no, ata_get_cmd_descript(qc->dma_dir), 1506 __func__, ap->port_no, get_dma_dir_descript(qc->dma_dir),
1469 qc->n_elem); 1507 qc->n_elem);
1470 1508
1471 dma_chan = dma_dwc_xfer_setup(sg, qc->n_elem, hsdevp->llit[tag], 1509 dma_chan = dma_dwc_xfer_setup(sg, qc->n_elem, hsdevp->llit[tag],
@@ -1474,7 +1512,7 @@ static void sata_dwc_qc_prep_by_tag(struct ata_queued_cmd *qc, u8 tag)
1474 dmadr), qc->dma_dir); 1512 dmadr), qc->dma_dir);
1475 if (dma_chan < 0) { 1513 if (dma_chan < 0) {
1476 dev_err(ap->dev, "%s: dma_dwc_xfer_setup returns err %d\n", 1514 dev_err(ap->dev, "%s: dma_dwc_xfer_setup returns err %d\n",
1477 __func__, err); 1515 __func__, dma_chan);
1478 return; 1516 return;
1479 } 1517 }
1480 hsdevp->dma_chan[tag] = dma_chan; 1518 hsdevp->dma_chan[tag] = dma_chan;
@@ -1491,8 +1529,8 @@ static unsigned int sata_dwc_qc_issue(struct ata_queued_cmd *qc)
1491 dev_info(ap->dev, "%s ap id=%d cmd(0x%02x)=%s qc tag=%d " 1529 dev_info(ap->dev, "%s ap id=%d cmd(0x%02x)=%s qc tag=%d "
1492 "prot=%s ap active_tag=0x%08x ap sactive=0x%08x\n", 1530 "prot=%s ap active_tag=0x%08x ap sactive=0x%08x\n",
1493 __func__, ap->print_id, qc->tf.command, 1531 __func__, ap->print_id, qc->tf.command,
1494 ata_get_cmd_descript(&qc->tf), 1532 ata_get_cmd_descript(qc->tf.command),
1495 qc->tag, ata_get_cmd_descript(qc->tf.protocol), 1533 qc->tag, get_prot_descript(qc->tf.protocol),
1496 ap->link.active_tag, ap->link.sactive); 1534 ap->link.active_tag, ap->link.sactive);
1497#endif 1535#endif
1498 1536
@@ -1533,7 +1571,7 @@ static void sata_dwc_qc_prep(struct ata_queued_cmd *qc)
1533#ifdef DEBUG_NCQ 1571#ifdef DEBUG_NCQ
1534 if (qc->tag > 0) 1572 if (qc->tag > 0)
1535 dev_info(qc->ap->dev, "%s: qc->tag=%d ap->active_tag=0x%08x\n", 1573 dev_info(qc->ap->dev, "%s: qc->tag=%d ap->active_tag=0x%08x\n",
1536 __func__, tag, qc->ap->link.active_tag); 1574 __func__, qc->tag, qc->ap->link.active_tag);
1537 1575
1538 return ; 1576 return ;
1539#endif 1577#endif
@@ -1580,16 +1618,14 @@ static struct ata_port_operations sata_dwc_ops = {
1580 1618
1581static const struct ata_port_info sata_dwc_port_info[] = { 1619static const struct ata_port_info sata_dwc_port_info[] = {
1582 { 1620 {
1583 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 1621 .flags = ATA_FLAG_SATA | ATA_FLAG_NCQ,
1584 ATA_FLAG_MMIO | ATA_FLAG_NCQ, 1622 .pio_mask = ATA_PIO4,
1585 .pio_mask = 0x1f, /* pio 0-4 */
1586 .udma_mask = ATA_UDMA6, 1623 .udma_mask = ATA_UDMA6,
1587 .port_ops = &sata_dwc_ops, 1624 .port_ops = &sata_dwc_ops,
1588 }, 1625 },
1589}; 1626};
1590 1627
1591static int sata_dwc_probe(struct platform_device *ofdev, 1628static int sata_dwc_probe(struct platform_device *ofdev)
1592 const struct of_device_id *match)
1593{ 1629{
1594 struct sata_dwc_device *hsdev; 1630 struct sata_dwc_device *hsdev;
1595 u32 idr, versionr; 1631 u32 idr, versionr;
@@ -1727,7 +1763,7 @@ static const struct of_device_id sata_dwc_match[] = {
1727}; 1763};
1728MODULE_DEVICE_TABLE(of, sata_dwc_match); 1764MODULE_DEVICE_TABLE(of, sata_dwc_match);
1729 1765
1730static struct of_platform_driver sata_dwc_driver = { 1766static struct platform_driver sata_dwc_driver = {
1731 .driver = { 1767 .driver = {
1732 .name = DRV_NAME, 1768 .name = DRV_NAME,
1733 .owner = THIS_MODULE, 1769 .owner = THIS_MODULE,
@@ -1739,12 +1775,12 @@ static struct of_platform_driver sata_dwc_driver = {
1739 1775
1740static int __init sata_dwc_init(void) 1776static int __init sata_dwc_init(void)
1741{ 1777{
1742 return of_register_platform_driver(&sata_dwc_driver); 1778 return platform_driver_register(&sata_dwc_driver);
1743} 1779}
1744 1780
1745static void __exit sata_dwc_exit(void) 1781static void __exit sata_dwc_exit(void)
1746{ 1782{
1747 of_unregister_platform_driver(&sata_dwc_driver); 1783 platform_driver_unregister(&sata_dwc_driver);
1748} 1784}
1749 1785
1750module_init(sata_dwc_init); 1786module_init(sata_dwc_init);
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index 7325f77480dc..35a71d875d0e 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -6,7 +6,7 @@
6 * Author: Ashish Kalra <ashish.kalra@freescale.com> 6 * Author: Ashish Kalra <ashish.kalra@freescale.com>
7 * Li Yang <leoli@freescale.com> 7 * Li Yang <leoli@freescale.com>
8 * 8 *
9 * Copyright (c) 2006-2007 Freescale Semiconductor, Inc. 9 * Copyright (c) 2006-2007, 2011 Freescale Semiconductor, Inc.
10 * 10 *
11 * This program is free software; you can redistribute it and/or modify it 11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the 12 * under the terms of the GNU General Public License as published by the
@@ -33,8 +33,7 @@ enum {
33 SATA_FSL_MAX_PRD_USABLE = SATA_FSL_MAX_PRD - 1, 33 SATA_FSL_MAX_PRD_USABLE = SATA_FSL_MAX_PRD - 1,
34 SATA_FSL_MAX_PRD_DIRECT = 16, /* Direct PRDT entries */ 34 SATA_FSL_MAX_PRD_DIRECT = 16, /* Direct PRDT entries */
35 35
36 SATA_FSL_HOST_FLAGS = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 36 SATA_FSL_HOST_FLAGS = (ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
37 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
38 ATA_FLAG_PMP | ATA_FLAG_NCQ | ATA_FLAG_AN), 37 ATA_FLAG_PMP | ATA_FLAG_NCQ | ATA_FLAG_AN),
39 38
40 SATA_FSL_MAX_CMDS = SATA_FSL_QUEUE_DEPTH, 39 SATA_FSL_MAX_CMDS = SATA_FSL_QUEUE_DEPTH,
@@ -43,7 +42,7 @@ enum {
43 42
44 /* 43 /*
45 * SATA-FSL host controller supports a max. of (15+1) direct PRDEs, and 44 * SATA-FSL host controller supports a max. of (15+1) direct PRDEs, and
46 * chained indirect PRDEs upto a max count of 63. 45 * chained indirect PRDEs up to a max count of 63.
47 * We are allocating an array of 63 PRDEs contiguously, but PRDE#15 will 46 * We are allocating an array of 63 PRDEs contiguously, but PRDE#15 will
48 * be setup as an indirect descriptor, pointing to it's next 47 * be setup as an indirect descriptor, pointing to it's next
49 * (contiguous) PRDE. Though chained indirect PRDE arrays are 48 * (contiguous) PRDE. Though chained indirect PRDE arrays are
@@ -158,7 +157,8 @@ enum {
158 IE_ON_SINGL_DEVICE_ERR | IE_ON_CMD_COMPLETE, 157 IE_ON_SINGL_DEVICE_ERR | IE_ON_CMD_COMPLETE,
159 158
160 EXT_INDIRECT_SEG_PRD_FLAG = (1 << 31), 159 EXT_INDIRECT_SEG_PRD_FLAG = (1 << 31),
161 DATA_SNOOP_ENABLE = (1 << 22), 160 DATA_SNOOP_ENABLE_V1 = (1 << 22),
161 DATA_SNOOP_ENABLE_V2 = (1 << 28),
162}; 162};
163 163
164/* 164/*
@@ -186,6 +186,11 @@ enum {
186 COMMANDSTAT = 0x20, 186 COMMANDSTAT = 0x20,
187}; 187};
188 188
189/* TRANSCFG (transport-layer) configuration control */
190enum {
191 TRANSCFG_RX_WATER_MARK = (1 << 4),
192};
193
189/* PHY (link-layer) configuration control */ 194/* PHY (link-layer) configuration control */
190enum { 195enum {
191 PHY_BIST_ENABLE = 0x01, 196 PHY_BIST_ENABLE = 0x01,
@@ -256,6 +261,7 @@ struct sata_fsl_host_priv {
256 void __iomem *ssr_base; 261 void __iomem *ssr_base;
257 void __iomem *csr_base; 262 void __iomem *csr_base;
258 int irq; 263 int irq;
264 int data_snoop;
259}; 265};
260 266
261static inline unsigned int sata_fsl_tag(unsigned int tag, 267static inline unsigned int sata_fsl_tag(unsigned int tag,
@@ -308,7 +314,8 @@ static void sata_fsl_setup_cmd_hdr_entry(struct sata_fsl_port_priv *pp,
308} 314}
309 315
310static unsigned int sata_fsl_fill_sg(struct ata_queued_cmd *qc, void *cmd_desc, 316static unsigned int sata_fsl_fill_sg(struct ata_queued_cmd *qc, void *cmd_desc,
311 u32 *ttl, dma_addr_t cmd_desc_paddr) 317 u32 *ttl, dma_addr_t cmd_desc_paddr,
318 int data_snoop)
312{ 319{
313 struct scatterlist *sg; 320 struct scatterlist *sg;
314 unsigned int num_prde = 0; 321 unsigned int num_prde = 0;
@@ -358,8 +365,7 @@ static unsigned int sata_fsl_fill_sg(struct ata_queued_cmd *qc, void *cmd_desc,
358 365
359 ttl_dwords += sg_len; 366 ttl_dwords += sg_len;
360 prd->dba = cpu_to_le32(sg_addr); 367 prd->dba = cpu_to_le32(sg_addr);
361 prd->ddc_and_ext = 368 prd->ddc_and_ext = cpu_to_le32(data_snoop | (sg_len & ~0x03));
362 cpu_to_le32(DATA_SNOOP_ENABLE | (sg_len & ~0x03));
363 369
364 VPRINTK("sg_fill, ttl=%d, dba=0x%x, ddc=0x%x\n", 370 VPRINTK("sg_fill, ttl=%d, dba=0x%x, ddc=0x%x\n",
365 ttl_dwords, prd->dba, prd->ddc_and_ext); 371 ttl_dwords, prd->dba, prd->ddc_and_ext);
@@ -374,7 +380,7 @@ static unsigned int sata_fsl_fill_sg(struct ata_queued_cmd *qc, void *cmd_desc,
374 /* set indirect extension flag along with indirect ext. size */ 380 /* set indirect extension flag along with indirect ext. size */
375 prd_ptr_to_indirect_ext->ddc_and_ext = 381 prd_ptr_to_indirect_ext->ddc_and_ext =
376 cpu_to_le32((EXT_INDIRECT_SEG_PRD_FLAG | 382 cpu_to_le32((EXT_INDIRECT_SEG_PRD_FLAG |
377 DATA_SNOOP_ENABLE | 383 data_snoop |
378 (indirect_ext_segment_sz & ~0x03))); 384 (indirect_ext_segment_sz & ~0x03)));
379 } 385 }
380 386
@@ -417,7 +423,8 @@ static void sata_fsl_qc_prep(struct ata_queued_cmd *qc)
417 423
418 if (qc->flags & ATA_QCFLAG_DMAMAP) 424 if (qc->flags & ATA_QCFLAG_DMAMAP)
419 num_prde = sata_fsl_fill_sg(qc, (void *)cd, 425 num_prde = sata_fsl_fill_sg(qc, (void *)cd,
420 &ttl_dwords, cd_paddr); 426 &ttl_dwords, cd_paddr,
427 host_priv->data_snoop);
421 428
422 if (qc->tf.protocol == ATA_PROT_NCQ) 429 if (qc->tf.protocol == ATA_PROT_NCQ)
423 desc_info |= FPDMA_QUEUED_CMD; 430 desc_info |= FPDMA_QUEUED_CMD;
@@ -678,7 +685,7 @@ static void sata_fsl_port_stop(struct ata_port *ap)
678 iowrite32(temp, hcr_base + HCONTROL); 685 iowrite32(temp, hcr_base + HCONTROL);
679 686
680 /* Poll for controller to go offline - should happen immediately */ 687 /* Poll for controller to go offline - should happen immediately */
681 ata_wait_register(hcr_base + HSTATUS, ONLINE, ONLINE, 1, 1); 688 ata_wait_register(ap, hcr_base + HSTATUS, ONLINE, ONLINE, 1, 1);
682 689
683 ap->private_data = NULL; 690 ap->private_data = NULL;
684 dma_free_coherent(dev, SATA_FSL_PORT_PRIV_DMA_SZ, 691 dma_free_coherent(dev, SATA_FSL_PORT_PRIV_DMA_SZ,
@@ -729,7 +736,8 @@ try_offline_again:
729 iowrite32(temp, hcr_base + HCONTROL); 736 iowrite32(temp, hcr_base + HCONTROL);
730 737
731 /* Poll for controller to go offline */ 738 /* Poll for controller to go offline */
732 temp = ata_wait_register(hcr_base + HSTATUS, ONLINE, ONLINE, 1, 500); 739 temp = ata_wait_register(ap, hcr_base + HSTATUS, ONLINE, ONLINE,
740 1, 500);
733 741
734 if (temp & ONLINE) { 742 if (temp & ONLINE) {
735 ata_port_printk(ap, KERN_ERR, 743 ata_port_printk(ap, KERN_ERR,
@@ -752,7 +760,7 @@ try_offline_again:
752 /* 760 /*
753 * PHY reset should remain asserted for atleast 1ms 761 * PHY reset should remain asserted for atleast 1ms
754 */ 762 */
755 msleep(1); 763 ata_msleep(ap, 1);
756 764
757 /* 765 /*
758 * Now, bring the host controller online again, this can take time 766 * Now, bring the host controller online again, this can take time
@@ -766,7 +774,7 @@ try_offline_again:
766 temp |= HCONTROL_PMP_ATTACHED; 774 temp |= HCONTROL_PMP_ATTACHED;
767 iowrite32(temp, hcr_base + HCONTROL); 775 iowrite32(temp, hcr_base + HCONTROL);
768 776
769 temp = ata_wait_register(hcr_base + HSTATUS, ONLINE, 0, 1, 500); 777 temp = ata_wait_register(ap, hcr_base + HSTATUS, ONLINE, 0, 1, 500);
770 778
771 if (!(temp & ONLINE)) { 779 if (!(temp & ONLINE)) {
772 ata_port_printk(ap, KERN_ERR, 780 ata_port_printk(ap, KERN_ERR,
@@ -784,7 +792,7 @@ try_offline_again:
784 * presence 792 * presence
785 */ 793 */
786 794
787 temp = ata_wait_register(hcr_base + HSTATUS, 0xFF, 0, 1, 500); 795 temp = ata_wait_register(ap, hcr_base + HSTATUS, 0xFF, 0, 1, 500);
788 if ((!(temp & 0x10)) || ata_link_offline(link)) { 796 if ((!(temp & 0x10)) || ata_link_offline(link)) {
789 ata_port_printk(ap, KERN_WARNING, 797 ata_port_printk(ap, KERN_WARNING,
790 "No Device OR PHYRDY change,Hstatus = 0x%x\n", 798 "No Device OR PHYRDY change,Hstatus = 0x%x\n",
@@ -797,7 +805,7 @@ try_offline_again:
797 * Wait for the first D2H from device,i.e,signature update notification 805 * Wait for the first D2H from device,i.e,signature update notification
798 */ 806 */
799 start_jiffies = jiffies; 807 start_jiffies = jiffies;
800 temp = ata_wait_register(hcr_base + HSTATUS, 0xFF, 0x10, 808 temp = ata_wait_register(ap, hcr_base + HSTATUS, 0xFF, 0x10,
801 500, jiffies_to_msecs(deadline - start_jiffies)); 809 500, jiffies_to_msecs(deadline - start_jiffies));
802 810
803 if ((temp & 0xFF) != 0x18) { 811 if ((temp & 0xFF) != 0x18) {
@@ -880,7 +888,7 @@ static int sata_fsl_softreset(struct ata_link *link, unsigned int *class,
880 iowrite32(pmp, CQPMP + hcr_base); 888 iowrite32(pmp, CQPMP + hcr_base);
881 iowrite32(1, CQ + hcr_base); 889 iowrite32(1, CQ + hcr_base);
882 890
883 temp = ata_wait_register(CQ + hcr_base, 0x1, 0x1, 1, 5000); 891 temp = ata_wait_register(ap, CQ + hcr_base, 0x1, 0x1, 1, 5000);
884 if (temp & 0x1) { 892 if (temp & 0x1) {
885 ata_port_printk(ap, KERN_WARNING, "ATA_SRST issue failed\n"); 893 ata_port_printk(ap, KERN_WARNING, "ATA_SRST issue failed\n");
886 894
@@ -896,10 +904,10 @@ static int sata_fsl_softreset(struct ata_link *link, unsigned int *class,
896 goto err; 904 goto err;
897 } 905 }
898 906
899 msleep(1); 907 ata_msleep(ap, 1);
900 908
901 /* 909 /*
902 * SATA device enters reset state after receving a Control register 910 * SATA device enters reset state after receiving a Control register
903 * FIS with SRST bit asserted and it awaits another H2D Control reg. 911 * FIS with SRST bit asserted and it awaits another H2D Control reg.
904 * FIS with SRST bit cleared, then the device does internal diags & 912 * FIS with SRST bit cleared, then the device does internal diags &
905 * initialization, followed by indicating it's initialization status 913 * initialization, followed by indicating it's initialization status
@@ -915,7 +923,7 @@ static int sata_fsl_softreset(struct ata_link *link, unsigned int *class,
915 if (pmp != SATA_PMP_CTRL_PORT) 923 if (pmp != SATA_PMP_CTRL_PORT)
916 iowrite32(pmp, CQPMP + hcr_base); 924 iowrite32(pmp, CQPMP + hcr_base);
917 iowrite32(1, CQ + hcr_base); 925 iowrite32(1, CQ + hcr_base);
918 msleep(150); /* ?? */ 926 ata_msleep(ap, 150); /* ?? */
919 927
920 /* 928 /*
921 * The above command would have signalled an interrupt on command 929 * The above command would have signalled an interrupt on command
@@ -1039,12 +1047,15 @@ static void sata_fsl_error_intr(struct ata_port *ap)
1039 1047
1040 /* find out the offending link and qc */ 1048 /* find out the offending link and qc */
1041 if (ap->nr_pmp_links) { 1049 if (ap->nr_pmp_links) {
1050 unsigned int dev_num;
1051
1042 dereg = ioread32(hcr_base + DE); 1052 dereg = ioread32(hcr_base + DE);
1043 iowrite32(dereg, hcr_base + DE); 1053 iowrite32(dereg, hcr_base + DE);
1044 iowrite32(cereg, hcr_base + CE); 1054 iowrite32(cereg, hcr_base + CE);
1045 1055
1046 if (dereg < ap->nr_pmp_links) { 1056 dev_num = ffs(dereg) - 1;
1047 link = &ap->pmp_link[dereg]; 1057 if (dev_num < ap->nr_pmp_links && dereg != 0) {
1058 link = &ap->pmp_link[dev_num];
1048 ehi = &link->eh_info; 1059 ehi = &link->eh_info;
1049 qc = ata_qc_from_tag(ap, link->active_tag); 1060 qc = ata_qc_from_tag(ap, link->active_tag);
1050 /* 1061 /*
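The device-error (DE) register is treated here as a bitmap with one bit per port-multiplier device, so the raw register value is not a link index; the fix above takes the lowest set bit instead. A worked example of the same ffs() idiom (the value of dereg is illustrative):

        /* Sketch: dereg has bit N set when PMP device N reported an error.
         * ffs() is 1-based and returns 0 for an all-zero word. */
        u32 dereg = 0x04;                       /* e.g. device 2 faulted */
        unsigned int dev_num = ffs(dereg) - 1;  /* -> 2 */

        if (dereg != 0 && dev_num < ap->nr_pmp_links)
                link = &ap->pmp_link[dev_num];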
@@ -1137,17 +1148,13 @@ static void sata_fsl_host_intr(struct ata_port *ap)
1137 ioread32(hcr_base + CE)); 1148 ioread32(hcr_base + CE));
1138 1149
1139 for (i = 0; i < SATA_FSL_QUEUE_DEPTH; i++) { 1150 for (i = 0; i < SATA_FSL_QUEUE_DEPTH; i++) {
1140 if (done_mask & (1 << i)) { 1151 if (done_mask & (1 << i))
1141 qc = ata_qc_from_tag(ap, i);
1142 if (qc) {
1143 ata_qc_complete(qc);
1144 }
1145 DPRINTK 1152 DPRINTK
1146 ("completing ncq cmd,tag=%d,CC=0x%x,CA=0x%x\n", 1153 ("completing ncq cmd,tag=%d,CC=0x%x,CA=0x%x\n",
1147 i, ioread32(hcr_base + CC), 1154 i, ioread32(hcr_base + CC),
1148 ioread32(hcr_base + CA)); 1155 ioread32(hcr_base + CA));
1149 }
1150 } 1156 }
1157 ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
1151 return; 1158 return;
1152 1159
1153 } else if ((ap->qc_active & (1 << ATA_TAG_INTERNAL))) { 1160 } else if ((ap->qc_active & (1 << ATA_TAG_INTERNAL))) {
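The loop above no longer completes each tag with ata_qc_complete(); it only logs, and the finished tags are handed back to libata in one batch. ata_qc_complete_multiple() takes the new qc_active mask, so the completed bits are XORed out of the old one. The idiom in isolation (the helper name is illustrative):

        /* Sketch: batch-complete every tag set in done_mask. */
        static void example_complete_tags(struct ata_port *ap, u32 done_mask)
        {
                /* new active set = old active set minus completed tags */
                ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
        }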
@@ -1296,8 +1303,7 @@ static const struct ata_port_info sata_fsl_port_info[] = {
1296 }, 1303 },
1297}; 1304};
1298 1305
1299static int sata_fsl_probe(struct platform_device *ofdev, 1306static int sata_fsl_probe(struct platform_device *ofdev)
1300 const struct of_device_id *match)
1301{ 1307{
1302 int retval = -ENXIO; 1308 int retval = -ENXIO;
1303 void __iomem *hcr_base = NULL; 1309 void __iomem *hcr_base = NULL;
@@ -1306,6 +1312,7 @@ static int sata_fsl_probe(struct platform_device *ofdev,
1306 struct sata_fsl_host_priv *host_priv = NULL; 1312 struct sata_fsl_host_priv *host_priv = NULL;
1307 int irq; 1313 int irq;
1308 struct ata_host *host; 1314 struct ata_host *host;
1315 u32 temp;
1309 1316
1310 struct ata_port_info pi = sata_fsl_port_info[0]; 1317 struct ata_port_info pi = sata_fsl_port_info[0];
1311 const struct ata_port_info *ppi[] = { &pi, NULL }; 1318 const struct ata_port_info *ppi[] = { &pi, NULL };
@@ -1320,6 +1327,12 @@ static int sata_fsl_probe(struct platform_device *ofdev,
1320 ssr_base = hcr_base + 0x100; 1327 ssr_base = hcr_base + 0x100;
1321 csr_base = hcr_base + 0x140; 1328 csr_base = hcr_base + 0x140;
1322 1329
1330 if (!of_device_is_compatible(ofdev->dev.of_node, "fsl,mpc8315-sata")) {
1331 temp = ioread32(csr_base + TRANSCFG);
1332 temp = temp & 0xffffffe0;
1333 iowrite32(temp | TRANSCFG_RX_WATER_MARK, csr_base + TRANSCFG);
1334 }
1335
1323 DPRINTK("@reset i/o = 0x%x\n", ioread32(csr_base + TRANSCFG)); 1336 DPRINTK("@reset i/o = 0x%x\n", ioread32(csr_base + TRANSCFG));
1324 DPRINTK("sizeof(cmd_desc) = %d\n", sizeof(struct command_desc)); 1337 DPRINTK("sizeof(cmd_desc) = %d\n", sizeof(struct command_desc));
1325 DPRINTK("sizeof(#define cmd_desc) = %d\n", SATA_FSL_CMD_DESC_SIZE); 1338 DPRINTK("sizeof(#define cmd_desc) = %d\n", SATA_FSL_CMD_DESC_SIZE);
@@ -1339,6 +1352,11 @@ static int sata_fsl_probe(struct platform_device *ofdev,
1339 } 1352 }
1340 host_priv->irq = irq; 1353 host_priv->irq = irq;
1341 1354
1355 if (of_device_is_compatible(ofdev->dev.of_node, "fsl,pq-sata-v2"))
1356 host_priv->data_snoop = DATA_SNOOP_ENABLE_V2;
1357 else
1358 host_priv->data_snoop = DATA_SNOOP_ENABLE_V1;
1359
1342 /* allocate host structure */ 1360 /* allocate host structure */
1343 host = ata_host_alloc_pinfo(&ofdev->dev, ppi, SATA_FSL_MAX_PORTS); 1361 host = ata_host_alloc_pinfo(&ofdev->dev, ppi, SATA_FSL_MAX_PORTS);
1344 1362
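Two controller revisions put the DMA-snoop enable bit at different positions, so probe picks the constant from the device-tree compatible string and sata_fsl_fill_sg() ORs it into every PRD control word (see the fill_sg hunk earlier). Condensed sketch, both statements taken from the hunks above:

        /* Sketch: revision-specific snoop bit, chosen once at probe ... */
        host_priv->data_snoop =
                of_device_is_compatible(ofdev->dev.of_node, "fsl,pq-sata-v2") ?
                        DATA_SNOOP_ENABLE_V2 : DATA_SNOOP_ENABLE_V1;
        /* ... and applied to each scatter/gather descriptor: */
        prd->ddc_and_ext = cpu_to_le32(data_snoop | (sg_len & ~0x03));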
@@ -1421,12 +1439,15 @@ static struct of_device_id fsl_sata_match[] = {
1421 { 1439 {
1422 .compatible = "fsl,pq-sata", 1440 .compatible = "fsl,pq-sata",
1423 }, 1441 },
1442 {
1443 .compatible = "fsl,pq-sata-v2",
1444 },
1424 {}, 1445 {},
1425}; 1446};
1426 1447
1427MODULE_DEVICE_TABLE(of, fsl_sata_match); 1448MODULE_DEVICE_TABLE(of, fsl_sata_match);
1428 1449
1429static struct of_platform_driver fsl_sata_driver = { 1450static struct platform_driver fsl_sata_driver = {
1430 .driver = { 1451 .driver = {
1431 .name = "fsl-sata", 1452 .name = "fsl-sata",
1432 .owner = THIS_MODULE, 1453 .owner = THIS_MODULE,
@@ -1442,13 +1463,13 @@ static struct of_platform_driver fsl_sata_driver = {
1442 1463
1443static int __init sata_fsl_init(void) 1464static int __init sata_fsl_init(void)
1444{ 1465{
1445 of_register_platform_driver(&fsl_sata_driver); 1466 platform_driver_register(&fsl_sata_driver);
1446 return 0; 1467 return 0;
1447} 1468}
1448 1469
1449static void __exit sata_fsl_exit(void) 1470static void __exit sata_fsl_exit(void)
1450{ 1471{
1451 of_unregister_platform_driver(&fsl_sata_driver); 1472 platform_driver_unregister(&fsl_sata_driver);
1452} 1473}
1453 1474
1454MODULE_LICENSE("GPL"); 1475MODULE_LICENSE("GPL");
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
index a36149ebf4a2..83a44471b189 100644
--- a/drivers/ata/sata_inic162x.c
+++ b/drivers/ata/sata_inic162x.c
@@ -614,7 +614,7 @@ static int inic_hardreset(struct ata_link *link, unsigned int *class,
614 614
615 writew(IDMA_CTL_RST_ATA, idma_ctl); 615 writew(IDMA_CTL_RST_ATA, idma_ctl);
616 readw(idma_ctl); /* flush */ 616 readw(idma_ctl); /* flush */
617 msleep(1); 617 ata_msleep(ap, 1);
618 writew(0, idma_ctl); 618 writew(0, idma_ctl);
619 619
620 rc = sata_link_resume(link, timing, deadline); 620 rc = sata_link_resume(link, timing, deadline);
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index a9fd9709c262..b52c0519ad0b 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -160,8 +160,7 @@ enum {
160 /* Host Flags */ 160 /* Host Flags */
161 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */ 161 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
162 162
163 MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 163 MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_PIO_POLLING,
164 ATA_FLAG_MMIO | ATA_FLAG_PIO_POLLING,
165 164
166 MV_GEN_I_FLAGS = MV_COMMON_FLAGS | ATA_FLAG_NO_ATAPI, 165 MV_GEN_I_FLAGS = MV_COMMON_FLAGS | ATA_FLAG_NO_ATAPI,
167 166
@@ -1353,7 +1352,7 @@ static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
1353 /* 1352 /*
1354 * Workaround for 88SX60x1 FEr SATA#26: 1353 * Workaround for 88SX60x1 FEr SATA#26:
1355 * 1354 *
1356 * COMRESETs have to take care not to accidently 1355 * COMRESETs have to take care not to accidentally
1357 * put the drive to sleep when writing SCR_CONTROL. 1356 * put the drive to sleep when writing SCR_CONTROL.
1358 * Setting bits 12..15 prevents this problem. 1357 * Setting bits 12..15 prevents this problem.
1359 * 1358 *
@@ -2045,7 +2044,7 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
2045 2044
2046 cw = &pp->crqb[in_index].ata_cmd[0]; 2045 cw = &pp->crqb[in_index].ata_cmd[0];
2047 2046
2048 /* Sadly, the CRQB cannot accomodate all registers--there are 2047 /* Sadly, the CRQB cannot accommodate all registers--there are
2049 * only 11 bytes...so we must pick and choose required 2048 * only 11 bytes...so we must pick and choose required
2050 * registers based on the command. So, we drop feature and 2049 * registers based on the command. So, we drop feature and
2051 * hob_feature for [RW] DMA commands, but they are needed for 2050 * hob_feature for [RW] DMA commands, but they are needed for
@@ -2743,18 +2742,11 @@ static void mv_err_intr(struct ata_port *ap)
2743 } 2742 }
2744} 2743}
2745 2744
2746static void mv_process_crpb_response(struct ata_port *ap, 2745static bool mv_process_crpb_response(struct ata_port *ap,
2747 struct mv_crpb *response, unsigned int tag, int ncq_enabled) 2746 struct mv_crpb *response, unsigned int tag, int ncq_enabled)
2748{ 2747{
2749 u8 ata_status; 2748 u8 ata_status;
2750 u16 edma_status = le16_to_cpu(response->flags); 2749 u16 edma_status = le16_to_cpu(response->flags);
2751 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
2752
2753 if (unlikely(!qc)) {
2754 ata_port_printk(ap, KERN_ERR, "%s: no qc for tag=%d\n",
2755 __func__, tag);
2756 return;
2757 }
2758 2750
2759 /* 2751 /*
2760 * edma_status from a response queue entry: 2752 * edma_status from a response queue entry:
@@ -2768,13 +2760,14 @@ static void mv_process_crpb_response(struct ata_port *ap,
2768 * Error will be seen/handled by 2760 * Error will be seen/handled by
2769 * mv_err_intr(). So do nothing at all here. 2761 * mv_err_intr(). So do nothing at all here.
2770 */ 2762 */
2771 return; 2763 return false;
2772 } 2764 }
2773 } 2765 }
2774 ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT; 2766 ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT;
2775 if (!ac_err_mask(ata_status)) 2767 if (!ac_err_mask(ata_status))
2776 ata_qc_complete(qc); 2768 return true;
2777 /* else: leave it for mv_err_intr() */ 2769 /* else: leave it for mv_err_intr() */
2770 return false;
2778} 2771}
2779 2772
2780static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp) 2773static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp)
@@ -2783,6 +2776,7 @@ static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp
2783 struct mv_host_priv *hpriv = ap->host->private_data; 2776 struct mv_host_priv *hpriv = ap->host->private_data;
2784 u32 in_index; 2777 u32 in_index;
2785 bool work_done = false; 2778 bool work_done = false;
2779 u32 done_mask = 0;
2786 int ncq_enabled = (pp->pp_flags & MV_PP_FLAG_NCQ_EN); 2780 int ncq_enabled = (pp->pp_flags & MV_PP_FLAG_NCQ_EN);
2787 2781
2788 /* Get the hardware queue position index */ 2782 /* Get the hardware queue position index */
@@ -2803,15 +2797,19 @@ static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp
2803 /* Gen II/IIE: get command tag from CRPB entry */ 2797 /* Gen II/IIE: get command tag from CRPB entry */
2804 tag = le16_to_cpu(response->id) & 0x1f; 2798 tag = le16_to_cpu(response->id) & 0x1f;
2805 } 2799 }
2806 mv_process_crpb_response(ap, response, tag, ncq_enabled); 2800 if (mv_process_crpb_response(ap, response, tag, ncq_enabled))
2801 done_mask |= 1 << tag;
2807 work_done = true; 2802 work_done = true;
2808 } 2803 }
2809 2804
2810 /* Update the software queue position index in hardware */ 2805 if (work_done) {
2811 if (work_done) 2806 ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
2807
2808 /* Update the software queue position index in hardware */
2812 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | 2809 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
2813 (pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT), 2810 (pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT),
2814 port_mmio + EDMA_RSP_Q_OUT_PTR); 2811 port_mmio + EDMA_RSP_Q_OUT_PTR);
2812 }
2815} 2813}
2816 2814
2817static void mv_port_intr(struct ata_port *ap, u32 port_cause) 2815static void mv_port_intr(struct ata_port *ap, u32 port_cause)
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index cb89ef8d99d9..f173ef3bfc10 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -539,7 +539,7 @@ struct nv_pi_priv {
539static const struct ata_port_info nv_port_info[] = { 539static const struct ata_port_info nv_port_info[] = {
540 /* generic */ 540 /* generic */
541 { 541 {
542 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY, 542 .flags = ATA_FLAG_SATA,
543 .pio_mask = NV_PIO_MASK, 543 .pio_mask = NV_PIO_MASK,
544 .mwdma_mask = NV_MWDMA_MASK, 544 .mwdma_mask = NV_MWDMA_MASK,
545 .udma_mask = NV_UDMA_MASK, 545 .udma_mask = NV_UDMA_MASK,
@@ -548,7 +548,7 @@ static const struct ata_port_info nv_port_info[] = {
548 }, 548 },
549 /* nforce2/3 */ 549 /* nforce2/3 */
550 { 550 {
551 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY, 551 .flags = ATA_FLAG_SATA,
552 .pio_mask = NV_PIO_MASK, 552 .pio_mask = NV_PIO_MASK,
553 .mwdma_mask = NV_MWDMA_MASK, 553 .mwdma_mask = NV_MWDMA_MASK,
554 .udma_mask = NV_UDMA_MASK, 554 .udma_mask = NV_UDMA_MASK,
@@ -557,7 +557,7 @@ static const struct ata_port_info nv_port_info[] = {
557 }, 557 },
558 /* ck804 */ 558 /* ck804 */
559 { 559 {
560 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY, 560 .flags = ATA_FLAG_SATA,
561 .pio_mask = NV_PIO_MASK, 561 .pio_mask = NV_PIO_MASK,
562 .mwdma_mask = NV_MWDMA_MASK, 562 .mwdma_mask = NV_MWDMA_MASK,
563 .udma_mask = NV_UDMA_MASK, 563 .udma_mask = NV_UDMA_MASK,
@@ -566,8 +566,7 @@ static const struct ata_port_info nv_port_info[] = {
566 }, 566 },
567 /* ADMA */ 567 /* ADMA */
568 { 568 {
569 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 569 .flags = ATA_FLAG_SATA | ATA_FLAG_NCQ,
570 ATA_FLAG_MMIO | ATA_FLAG_NCQ,
571 .pio_mask = NV_PIO_MASK, 570 .pio_mask = NV_PIO_MASK,
572 .mwdma_mask = NV_MWDMA_MASK, 571 .mwdma_mask = NV_MWDMA_MASK,
573 .udma_mask = NV_UDMA_MASK, 572 .udma_mask = NV_UDMA_MASK,
@@ -576,7 +575,7 @@ static const struct ata_port_info nv_port_info[] = {
576 }, 575 },
577 /* MCP5x */ 576 /* MCP5x */
578 { 577 {
579 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY, 578 .flags = ATA_FLAG_SATA,
580 .pio_mask = NV_PIO_MASK, 579 .pio_mask = NV_PIO_MASK,
581 .mwdma_mask = NV_MWDMA_MASK, 580 .mwdma_mask = NV_MWDMA_MASK,
582 .udma_mask = NV_UDMA_MASK, 581 .udma_mask = NV_UDMA_MASK,
@@ -585,8 +584,7 @@ static const struct ata_port_info nv_port_info[] = {
585 }, 584 },
586 /* SWNCQ */ 585 /* SWNCQ */
587 { 586 {
588 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 587 .flags = ATA_FLAG_SATA | ATA_FLAG_NCQ,
589 ATA_FLAG_NCQ,
590 .pio_mask = NV_PIO_MASK, 588 .pio_mask = NV_PIO_MASK,
591 .mwdma_mask = NV_MWDMA_MASK, 589 .mwdma_mask = NV_MWDMA_MASK,
592 .udma_mask = NV_UDMA_MASK, 590 .udma_mask = NV_UDMA_MASK,
@@ -873,29 +871,11 @@ static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
873 ata_port_freeze(ap); 871 ata_port_freeze(ap);
874 else 872 else
875 ata_port_abort(ap); 873 ata_port_abort(ap);
876 return 1; 874 return -1;
877 } 875 }
878 876
879 if (likely(flags & NV_CPB_RESP_DONE)) { 877 if (likely(flags & NV_CPB_RESP_DONE))
880 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num); 878 return 1;
881 VPRINTK("CPB flags done, flags=0x%x\n", flags);
882 if (likely(qc)) {
883 DPRINTK("Completing qc from tag %d\n", cpb_num);
884 ata_qc_complete(qc);
885 } else {
886 struct ata_eh_info *ehi = &ap->link.eh_info;
887 /* Notifier bits set without a command may indicate the drive
888 is misbehaving. Raise host state machine violation on this
889 condition. */
890 ata_port_printk(ap, KERN_ERR,
891 "notifier for tag %d with no cmd?\n",
892 cpb_num);
893 ehi->err_mask |= AC_ERR_HSM;
894 ehi->action |= ATA_EH_RESET;
895 ata_port_freeze(ap);
896 return 1;
897 }
898 }
899 return 0; 879 return 0;
900} 880}
901 881
@@ -1018,6 +998,7 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
1018 NV_ADMA_STAT_CPBERR | 998 NV_ADMA_STAT_CPBERR |
1019 NV_ADMA_STAT_CMD_COMPLETE)) { 999 NV_ADMA_STAT_CMD_COMPLETE)) {
1020 u32 check_commands = notifier_clears[i]; 1000 u32 check_commands = notifier_clears[i];
1001 u32 done_mask = 0;
1021 int pos, rc; 1002 int pos, rc;
1022 1003
1023 if (status & NV_ADMA_STAT_CPBERR) { 1004 if (status & NV_ADMA_STAT_CPBERR) {
@@ -1034,10 +1015,13 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
1034 pos--; 1015 pos--;
1035 rc = nv_adma_check_cpb(ap, pos, 1016 rc = nv_adma_check_cpb(ap, pos,
1036 notifier_error & (1 << pos)); 1017 notifier_error & (1 << pos));
1037 if (unlikely(rc)) 1018 if (rc > 0)
1019 done_mask |= 1 << pos;
1020 else if (unlikely(rc < 0))
1038 check_commands = 0; 1021 check_commands = 0;
1039 check_commands &= ~(1 << pos); 1022 check_commands &= ~(1 << pos);
1040 } 1023 }
1024 ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
1041 } 1025 }
1042 } 1026 }
1043 1027
@@ -2132,13 +2116,12 @@ static int nv_swncq_sdbfis(struct ata_port *ap)
2132 struct ata_eh_info *ehi = &ap->link.eh_info; 2116 struct ata_eh_info *ehi = &ap->link.eh_info;
2133 u32 sactive; 2117 u32 sactive;
2134 u32 done_mask; 2118 u32 done_mask;
2135 int i;
2136 u8 host_stat; 2119 u8 host_stat;
2137 u8 lack_dhfis = 0; 2120 u8 lack_dhfis = 0;
2138 2121
2139 host_stat = ap->ops->bmdma_status(ap); 2122 host_stat = ap->ops->bmdma_status(ap);
2140 if (unlikely(host_stat & ATA_DMA_ERR)) { 2123 if (unlikely(host_stat & ATA_DMA_ERR)) {
2141 /* error when transfering data to/from memory */ 2124 /* error when transferring data to/from memory */
2142 ata_ehi_clear_desc(ehi); 2125 ata_ehi_clear_desc(ehi);
2143 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat); 2126 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
2144 ehi->err_mask |= AC_ERR_HOST_BUS; 2127 ehi->err_mask |= AC_ERR_HOST_BUS;
@@ -2152,27 +2135,11 @@ static int nv_swncq_sdbfis(struct ata_port *ap)
2152 sactive = readl(pp->sactive_block); 2135 sactive = readl(pp->sactive_block);
2153 done_mask = pp->qc_active ^ sactive; 2136 done_mask = pp->qc_active ^ sactive;
2154 2137
2155 if (unlikely(done_mask & sactive)) { 2138 pp->qc_active &= ~done_mask;
2156 ata_ehi_clear_desc(ehi); 2139 pp->dhfis_bits &= ~done_mask;
2157 ata_ehi_push_desc(ehi, "illegal SWNCQ:qc_active transition" 2140 pp->dmafis_bits &= ~done_mask;
2158 "(%08x->%08x)", pp->qc_active, sactive); 2141 pp->sdbfis_bits |= done_mask;
2159 ehi->err_mask |= AC_ERR_HSM; 2142 ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
2160 ehi->action |= ATA_EH_RESET;
2161 return -EINVAL;
2162 }
2163 for (i = 0; i < ATA_MAX_QUEUE; i++) {
2164 if (!(done_mask & (1 << i)))
2165 continue;
2166
2167 qc = ata_qc_from_tag(ap, i);
2168 if (qc) {
2169 ata_qc_complete(qc);
2170 pp->qc_active &= ~(1 << i);
2171 pp->dhfis_bits &= ~(1 << i);
2172 pp->dmafis_bits &= ~(1 << i);
2173 pp->sdbfis_bits |= (1 << i);
2174 }
2175 }
2176 2143
2177 if (!ap->qc_active) { 2144 if (!ap->qc_active) {
2178 DPRINTK("over\n"); 2145 DPRINTK("over\n");
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
index f03ad48273ff..a004b1e0ea6d 100644
--- a/drivers/ata/sata_promise.c
+++ b/drivers/ata/sata_promise.c
@@ -134,9 +134,7 @@ enum {
134 PDC_IRQ_DISABLE = (1 << 10), 134 PDC_IRQ_DISABLE = (1 << 10),
135 PDC_RESET = (1 << 11), /* HDMA reset */ 135 PDC_RESET = (1 << 11), /* HDMA reset */
136 136
137 PDC_COMMON_FLAGS = ATA_FLAG_NO_LEGACY | 137 PDC_COMMON_FLAGS = ATA_FLAG_PIO_POLLING,
138 ATA_FLAG_MMIO |
139 ATA_FLAG_PIO_POLLING,
140 138
141 /* ap->flags bits */ 139 /* ap->flags bits */
142 PDC_FLAG_GEN_II = (1 << 24), 140 PDC_FLAG_GEN_II = (1 << 24),
diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c
index daeebf19a6a9..c5603265fa58 100644
--- a/drivers/ata/sata_qstor.c
+++ b/drivers/ata/sata_qstor.c
@@ -155,8 +155,7 @@ static struct ata_port_operations qs_ata_ops = {
155static const struct ata_port_info qs_port_info[] = { 155static const struct ata_port_info qs_port_info[] = {
156 /* board_2068_idx */ 156 /* board_2068_idx */
157 { 157 {
158 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 158 .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_POLLING,
159 ATA_FLAG_MMIO | ATA_FLAG_PIO_POLLING,
160 .pio_mask = ATA_PIO4_ONLY, 159 .pio_mask = ATA_PIO4_ONLY,
161 .udma_mask = ATA_UDMA6, 160 .udma_mask = ATA_UDMA6,
162 .port_ops = &qs_ata_ops, 161 .port_ops = &qs_ata_ops,
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
index 3a4f84219719..b42edaaf3a53 100644
--- a/drivers/ata/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -61,8 +61,7 @@ enum {
61 SIL_FLAG_RERR_ON_DMA_ACT = (1 << 29), 61 SIL_FLAG_RERR_ON_DMA_ACT = (1 << 29),
62 SIL_FLAG_MOD15WRITE = (1 << 30), 62 SIL_FLAG_MOD15WRITE = (1 << 30),
63 63
64 SIL_DFL_PORT_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 64 SIL_DFL_PORT_FLAGS = ATA_FLAG_SATA,
65 ATA_FLAG_MMIO,
66 65
67 /* 66 /*
68 * Controller IDs 67 * Controller IDs
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index be7726d7686d..06c564e55051 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -244,8 +244,7 @@ enum {
244 BID_SIL3131 = 2, 244 BID_SIL3131 = 2,
245 245
246 /* host flags */ 246 /* host flags */
247 SIL24_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 247 SIL24_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
248 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
249 ATA_FLAG_NCQ | ATA_FLAG_ACPI_SATA | 248 ATA_FLAG_NCQ | ATA_FLAG_ACPI_SATA |
250 ATA_FLAG_AN | ATA_FLAG_PMP, 249 ATA_FLAG_AN | ATA_FLAG_PMP,
251 SIL24_FLAG_PCIX_IRQ_WOC = (1 << 24), /* IRQ loss errata on PCI-X */ 250 SIL24_FLAG_PCIX_IRQ_WOC = (1 << 24), /* IRQ loss errata on PCI-X */
@@ -589,9 +588,9 @@ static int sil24_init_port(struct ata_port *ap)
589 sil24_clear_pmp(ap); 588 sil24_clear_pmp(ap);
590 589
591 writel(PORT_CS_INIT, port + PORT_CTRL_STAT); 590 writel(PORT_CS_INIT, port + PORT_CTRL_STAT);
592 ata_wait_register(port + PORT_CTRL_STAT, 591 ata_wait_register(ap, port + PORT_CTRL_STAT,
593 PORT_CS_INIT, PORT_CS_INIT, 10, 100); 592 PORT_CS_INIT, PORT_CS_INIT, 10, 100);
594 tmp = ata_wait_register(port + PORT_CTRL_STAT, 593 tmp = ata_wait_register(ap, port + PORT_CTRL_STAT,
595 PORT_CS_RDY, 0, 10, 100); 594 PORT_CS_RDY, 0, 10, 100);
596 595
597 if ((tmp & (PORT_CS_INIT | PORT_CS_RDY)) != PORT_CS_RDY) { 596 if ((tmp & (PORT_CS_INIT | PORT_CS_RDY)) != PORT_CS_RDY) {
@@ -631,7 +630,7 @@ static int sil24_exec_polled_cmd(struct ata_port *ap, int pmp,
631 writel((u64)paddr >> 32, port + PORT_CMD_ACTIVATE + 4); 630 writel((u64)paddr >> 32, port + PORT_CMD_ACTIVATE + 4);
632 631
633 irq_mask = (PORT_IRQ_COMPLETE | PORT_IRQ_ERROR) << PORT_IRQ_RAW_SHIFT; 632 irq_mask = (PORT_IRQ_COMPLETE | PORT_IRQ_ERROR) << PORT_IRQ_RAW_SHIFT;
634 irq_stat = ata_wait_register(port + PORT_IRQ_STAT, irq_mask, 0x0, 633 irq_stat = ata_wait_register(ap, port + PORT_IRQ_STAT, irq_mask, 0x0,
635 10, timeout_msec); 634 10, timeout_msec);
636 635
637 writel(irq_mask, port + PORT_IRQ_STAT); /* clear IRQs */ 636 writel(irq_mask, port + PORT_IRQ_STAT); /* clear IRQs */
@@ -719,9 +718,9 @@ static int sil24_hardreset(struct ata_link *link, unsigned int *class,
719 "state, performing PORT_RST\n"); 718 "state, performing PORT_RST\n");
720 719
721 writel(PORT_CS_PORT_RST, port + PORT_CTRL_STAT); 720 writel(PORT_CS_PORT_RST, port + PORT_CTRL_STAT);
722 msleep(10); 721 ata_msleep(ap, 10);
723 writel(PORT_CS_PORT_RST, port + PORT_CTRL_CLR); 722 writel(PORT_CS_PORT_RST, port + PORT_CTRL_CLR);
724 ata_wait_register(port + PORT_CTRL_STAT, PORT_CS_RDY, 0, 723 ata_wait_register(ap, port + PORT_CTRL_STAT, PORT_CS_RDY, 0,
725 10, 5000); 724 10, 5000);
726 725
727 /* restore port configuration */ 726 /* restore port configuration */
@@ -740,7 +739,7 @@ static int sil24_hardreset(struct ata_link *link, unsigned int *class,
740 tout_msec = 5000; 739 tout_msec = 5000;
741 740
742 writel(PORT_CS_DEV_RST, port + PORT_CTRL_STAT); 741 writel(PORT_CS_DEV_RST, port + PORT_CTRL_STAT);
743 tmp = ata_wait_register(port + PORT_CTRL_STAT, 742 tmp = ata_wait_register(ap, port + PORT_CTRL_STAT,
744 PORT_CS_DEV_RST, PORT_CS_DEV_RST, 10, 743 PORT_CS_DEV_RST, PORT_CS_DEV_RST, 10,
745 tout_msec); 744 tout_msec);
746 745
@@ -1253,7 +1252,7 @@ static void sil24_init_controller(struct ata_host *host)
1253 tmp = readl(port + PORT_CTRL_STAT); 1252 tmp = readl(port + PORT_CTRL_STAT);
1254 if (tmp & PORT_CS_PORT_RST) { 1253 if (tmp & PORT_CS_PORT_RST) {
1255 writel(PORT_CS_PORT_RST, port + PORT_CTRL_CLR); 1254 writel(PORT_CS_PORT_RST, port + PORT_CTRL_CLR);
1256 tmp = ata_wait_register(port + PORT_CTRL_STAT, 1255 tmp = ata_wait_register(NULL, port + PORT_CTRL_STAT,
1257 PORT_CS_PORT_RST, 1256 PORT_CS_PORT_RST,
1258 PORT_CS_PORT_RST, 10, 100); 1257 PORT_CS_PORT_RST, 10, 100);
1259 if (tmp & PORT_CS_PORT_RST) 1258 if (tmp & PORT_CS_PORT_RST)
diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c
index 2bfe3ae03976..cdcc13e9cf51 100644
--- a/drivers/ata/sata_sis.c
+++ b/drivers/ata/sata_sis.c
@@ -96,7 +96,7 @@ static struct ata_port_operations sis_ops = {
96}; 96};
97 97
98static const struct ata_port_info sis_port_info = { 98static const struct ata_port_info sis_port_info = {
99 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY, 99 .flags = ATA_FLAG_SATA,
100 .pio_mask = ATA_PIO4, 100 .pio_mask = ATA_PIO4,
101 .mwdma_mask = ATA_MWDMA2, 101 .mwdma_mask = ATA_MWDMA2,
102 .udma_mask = ATA_UDMA6, 102 .udma_mask = ATA_UDMA6,
diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c
index 7d9db4aaf07e..35eabcf34568 100644
--- a/drivers/ata/sata_svw.c
+++ b/drivers/ata/sata_svw.c
@@ -359,8 +359,7 @@ static struct ata_port_operations k2_sata_ops = {
359static const struct ata_port_info k2_port_info[] = { 359static const struct ata_port_info k2_port_info[] = {
360 /* chip_svw4 */ 360 /* chip_svw4 */
361 { 361 {
362 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 362 .flags = ATA_FLAG_SATA | K2_FLAG_NO_ATAPI_DMA,
363 ATA_FLAG_MMIO | K2_FLAG_NO_ATAPI_DMA,
364 .pio_mask = ATA_PIO4, 363 .pio_mask = ATA_PIO4,
365 .mwdma_mask = ATA_MWDMA2, 364 .mwdma_mask = ATA_MWDMA2,
366 .udma_mask = ATA_UDMA6, 365 .udma_mask = ATA_UDMA6,
@@ -368,8 +367,7 @@ static const struct ata_port_info k2_port_info[] = {
368 }, 367 },
369 /* chip_svw8 */ 368 /* chip_svw8 */
370 { 369 {
371 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 370 .flags = ATA_FLAG_SATA | K2_FLAG_NO_ATAPI_DMA |
372 ATA_FLAG_MMIO | K2_FLAG_NO_ATAPI_DMA |
373 K2_FLAG_SATA_8_PORTS, 371 K2_FLAG_SATA_8_PORTS,
374 .pio_mask = ATA_PIO4, 372 .pio_mask = ATA_PIO4,
375 .mwdma_mask = ATA_MWDMA2, 373 .mwdma_mask = ATA_MWDMA2,
@@ -378,8 +376,7 @@ static const struct ata_port_info k2_port_info[] = {
378 }, 376 },
379 /* chip_svw42 */ 377 /* chip_svw42 */
380 { 378 {
381 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 379 .flags = ATA_FLAG_SATA | K2_FLAG_BAR_POS_3,
382 ATA_FLAG_MMIO | K2_FLAG_BAR_POS_3,
383 .pio_mask = ATA_PIO4, 380 .pio_mask = ATA_PIO4,
384 .mwdma_mask = ATA_MWDMA2, 381 .mwdma_mask = ATA_MWDMA2,
385 .udma_mask = ATA_UDMA6, 382 .udma_mask = ATA_UDMA6,
@@ -387,8 +384,7 @@ static const struct ata_port_info k2_port_info[] = {
387 }, 384 },
388 /* chip_svw43 */ 385 /* chip_svw43 */
389 { 386 {
390 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 387 .flags = ATA_FLAG_SATA,
391 ATA_FLAG_MMIO,
392 .pio_mask = ATA_PIO4, 388 .pio_mask = ATA_PIO4,
393 .mwdma_mask = ATA_MWDMA2, 389 .mwdma_mask = ATA_MWDMA2,
394 .udma_mask = ATA_UDMA6, 390 .udma_mask = ATA_UDMA6,
diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
index bedd5188e5b0..8fd3b7252bda 100644
--- a/drivers/ata/sata_sx4.c
+++ b/drivers/ata/sata_sx4.c
@@ -273,9 +273,8 @@ static struct ata_port_operations pdc_20621_ops = {
273static const struct ata_port_info pdc_port_info[] = { 273static const struct ata_port_info pdc_port_info[] = {
274 /* board_20621 */ 274 /* board_20621 */
275 { 275 {
276 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 276 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_ATAPI |
277 ATA_FLAG_SRST | ATA_FLAG_MMIO | 277 ATA_FLAG_PIO_POLLING,
278 ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING,
279 .pio_mask = ATA_PIO4, 278 .pio_mask = ATA_PIO4,
280 .mwdma_mask = ATA_MWDMA2, 279 .mwdma_mask = ATA_MWDMA2,
281 .udma_mask = ATA_UDMA6, 280 .udma_mask = ATA_UDMA6,
diff --git a/drivers/ata/sata_uli.c b/drivers/ata/sata_uli.c
index b8578c32d344..235be717a713 100644
--- a/drivers/ata/sata_uli.c
+++ b/drivers/ata/sata_uli.c
@@ -88,8 +88,7 @@ static struct ata_port_operations uli_ops = {
88}; 88};
89 89
90static const struct ata_port_info uli_port_info = { 90static const struct ata_port_info uli_port_info = {
91 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 91 .flags = ATA_FLAG_SATA | ATA_FLAG_IGN_SIMPLEX,
92 ATA_FLAG_IGN_SIMPLEX,
93 .pio_mask = ATA_PIO4, 92 .pio_mask = ATA_PIO4,
94 .udma_mask = ATA_UDMA6, 93 .udma_mask = ATA_UDMA6,
95 .port_ops = &uli_ops, 94 .port_ops = &uli_ops,
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
index 4730c42a5ee5..54434db15b12 100644
--- a/drivers/ata/sata_via.c
+++ b/drivers/ata/sata_via.c
@@ -148,7 +148,7 @@ static struct ata_port_operations vt8251_ops = {
148}; 148};
149 149
150static const struct ata_port_info vt6420_port_info = { 150static const struct ata_port_info vt6420_port_info = {
151 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY, 151 .flags = ATA_FLAG_SATA,
152 .pio_mask = ATA_PIO4, 152 .pio_mask = ATA_PIO4,
153 .mwdma_mask = ATA_MWDMA2, 153 .mwdma_mask = ATA_MWDMA2,
154 .udma_mask = ATA_UDMA6, 154 .udma_mask = ATA_UDMA6,
@@ -156,7 +156,7 @@ static const struct ata_port_info vt6420_port_info = {
156}; 156};
157 157
158static struct ata_port_info vt6421_sport_info = { 158static struct ata_port_info vt6421_sport_info = {
159 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY, 159 .flags = ATA_FLAG_SATA,
160 .pio_mask = ATA_PIO4, 160 .pio_mask = ATA_PIO4,
161 .mwdma_mask = ATA_MWDMA2, 161 .mwdma_mask = ATA_MWDMA2,
162 .udma_mask = ATA_UDMA6, 162 .udma_mask = ATA_UDMA6,
@@ -164,7 +164,7 @@ static struct ata_port_info vt6421_sport_info = {
164}; 164};
165 165
166static struct ata_port_info vt6421_pport_info = { 166static struct ata_port_info vt6421_pport_info = {
167 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_NO_LEGACY, 167 .flags = ATA_FLAG_SLAVE_POSS,
168 .pio_mask = ATA_PIO4, 168 .pio_mask = ATA_PIO4,
169 /* No MWDMA */ 169 /* No MWDMA */
170 .udma_mask = ATA_UDMA6, 170 .udma_mask = ATA_UDMA6,
@@ -172,8 +172,7 @@ static struct ata_port_info vt6421_pport_info = {
172}; 172};
173 173
174static struct ata_port_info vt8251_port_info = { 174static struct ata_port_info vt8251_port_info = {
175 .flags = ATA_FLAG_SATA | ATA_FLAG_SLAVE_POSS | 175 .flags = ATA_FLAG_SATA | ATA_FLAG_SLAVE_POSS,
176 ATA_FLAG_NO_LEGACY,
177 .pio_mask = ATA_PIO4, 176 .pio_mask = ATA_PIO4,
178 .mwdma_mask = ATA_MWDMA2, 177 .mwdma_mask = ATA_MWDMA2,
179 .udma_mask = ATA_UDMA6, 178 .udma_mask = ATA_UDMA6,
@@ -349,7 +348,7 @@ static int vt6420_prereset(struct ata_link *link, unsigned long deadline)
349 348
350 /* wait for phy to become ready, if necessary */ 349 /* wait for phy to become ready, if necessary */
351 do { 350 do {
352 msleep(200); 351 ata_msleep(link->ap, 200);
353 svia_scr_read(link, SCR_STATUS, &sstatus); 352 svia_scr_read(link, SCR_STATUS, &sstatus);
354 if ((sstatus & 0xf) != 1) 353 if ((sstatus & 0xf) != 1)
355 break; 354 break;
@@ -538,7 +537,7 @@ static int vt8251_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
538 return 0; 537 return 0;
539} 538}
540 539
541static void svia_configure(struct pci_dev *pdev) 540static void svia_configure(struct pci_dev *pdev, int board_id)
542{ 541{
543 u8 tmp8; 542 u8 tmp8;
544 543
@@ -577,13 +576,13 @@ static void svia_configure(struct pci_dev *pdev)
577 } 576 }
578 577
579 /* 578 /*
580 * vt6421 has problems talking to some drives. The following 579 * vt6420/1 has problems talking to some drives. The following
581 * is the fix from Joseph Chan <JosephChan@via.com.tw>. 580 * is the fix from Joseph Chan <JosephChan@via.com.tw>.
582 * 581 *
583 * When host issues HOLD, device may send up to 20DW of data 582 * When host issues HOLD, device may send up to 20DW of data
584 * before acknowledging it with HOLDA and the host should be 583 * before acknowledging it with HOLDA and the host should be
585 * able to buffer them in FIFO. Unfortunately, some WD drives 584 * able to buffer them in FIFO. Unfortunately, some WD drives
586 * send upto 40DW before acknowledging HOLD and, in the 585 * send up to 40DW before acknowledging HOLD and, in the
587 * default configuration, this ends up overflowing vt6421's 586 * default configuration, this ends up overflowing vt6421's
588 * FIFO, making the controller abort the transaction with 587 * FIFO, making the controller abort the transaction with
589 * R_ERR. 588 * R_ERR.
@@ -596,8 +595,9 @@ static void svia_configure(struct pci_dev *pdev)
596 * 595 *
597 * https://bugzilla.kernel.org/show_bug.cgi?id=15173 596 * https://bugzilla.kernel.org/show_bug.cgi?id=15173
598 * http://article.gmane.org/gmane.linux.ide/46352 597 * http://article.gmane.org/gmane.linux.ide/46352
598 * http://thread.gmane.org/gmane.linux.kernel/1062139
599 */ 599 */
600 if (pdev->device == 0x3249) { 600 if (board_id == vt6420 || board_id == vt6421) {
601 pci_read_config_byte(pdev, 0x52, &tmp8); 601 pci_read_config_byte(pdev, 0x52, &tmp8);
602 tmp8 |= 1 << 2; 602 tmp8 |= 1 << 2;
603 pci_write_config_byte(pdev, 0x52, tmp8); 603 pci_write_config_byte(pdev, 0x52, tmp8);
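The workaround described in the comment above (setting bit 2 of PCI config register 0x52) was previously keyed on the raw PCI device ID 0x3249 and therefore only reached the vt6421; passing the board ID into svia_configure() lets the same read-modify-write cover the vt6420 as well. Sketch of the gated register poke, as in the hunk above:

        /* Sketch: same config-space fix, now gated on board type. */
        if (board_id == vt6420 || board_id == vt6421) {
                u8 tmp8;

                pci_read_config_byte(pdev, 0x52, &tmp8);
                tmp8 |= 1 << 2;         /* apply the fix described above */
                pci_write_config_byte(pdev, 0x52, tmp8);
        }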
@@ -652,7 +652,7 @@ static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
652 if (rc) 652 if (rc)
653 return rc; 653 return rc;
654 654
655 svia_configure(pdev); 655 svia_configure(pdev, board_id);
656 656
657 pci_set_master(pdev); 657 pci_set_master(pdev);
658 return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt, 658 return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c
index b777176ff494..7c987371136e 100644
--- a/drivers/ata/sata_vsc.c
+++ b/drivers/ata/sata_vsc.c
@@ -340,8 +340,7 @@ static int __devinit vsc_sata_init_one(struct pci_dev *pdev,
340 const struct pci_device_id *ent) 340 const struct pci_device_id *ent)
341{ 341{
342 static const struct ata_port_info pi = { 342 static const struct ata_port_info pi = {
343 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 343 .flags = ATA_FLAG_SATA,
344 ATA_FLAG_MMIO,
345 .pio_mask = ATA_PIO4, 344 .pio_mask = ATA_PIO4,
346 .mwdma_mask = ATA_MWDMA2, 345 .mwdma_mask = ATA_MWDMA2,
347 .udma_mask = ATA_UDMA6, 346 .udma_mask = ATA_UDMA6,
@@ -370,7 +369,7 @@ static int __devinit vsc_sata_init_one(struct pci_dev *pdev,
370 if (pci_resource_len(pdev, 0) == 0) 369 if (pci_resource_len(pdev, 0) == 0)
371 return -ENODEV; 370 return -ENODEV;
372 371
373 /* map IO regions and intialize host accordingly */ 372 /* map IO regions and initialize host accordingly */
374 rc = pcim_iomap_regions(pdev, 1 << VSC_MMIO_BAR, DRV_NAME); 373 rc = pcim_iomap_regions(pdev, 1 << VSC_MMIO_BAR, DRV_NAME);
375 if (rc == -EBUSY) 374 if (rc == -EBUSY)
376 pcim_pin_device(pdev); 375 pcim_pin_device(pdev);