author		Linus Torvalds <torvalds@g5.osdl.org>	2006-06-23 18:58:44 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-06-23 18:58:44 -0400
commit		6edad161cd4dfe1df772e7a74ab63cab53b5e8c1 (patch)
tree		389d6daa728b2ba1bd8c2180cab705706449f62a
parent		236ee8c33277ab48671995f26dc68a4639936418 (diff)
parent		0dd4b21f517e138ea113db255645fbae1bf5eef3 (diff)
Merge branch 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/libata-dev
* 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/libata-dev: (258 commits)
  [libata] conversion to new debug scheme, part 1 of $N
  [PATCH] libata: Add ata_scsi_dev_disabled
  [libata] Add host lock to struct ata_port
  [PATCH] libata: implement per-dev EH action mask eh_info->dev_action[]
  [PATCH] libata-dev: move the CDB-intr DMA blacklisting
  [PATCH] ahci: disable NCQ support on vt8251
  [libata] ahci: add JMicron PCI IDs
  [libata] sata_nv: add PCI IDs
  [libata] ahci: Add NVIDIA PCI IDs.
  [PATCH] libata: convert several bmdma-style controllers to new EH, take #3
  [PATCH] sata_via: convert to new EH, take #3
  [libata] sata_nv: s/spin_lock_irqsave/spin_lock/ in irq handler
  [PATCH] sata_nv: add hotplug support
  [PATCH] sata_nv: convert to new EH
  [PATCH] sata_nv: better irq handlers
  [PATCH] sata_nv: simplify constants
  [PATCH] sata_nv: kill struct nv_host_desc and nv_host
  [PATCH] sata_nv: kill not-working hotplug code
  [libata] Update docs to reflect current driver API
  [PATCH] libata: add host_set->next for legacy two host_sets case, take #3
  ...
-rw-r--r--	Documentation/DocBook/libata.tmpl	104
-rw-r--r--	drivers/ide/pci/amd74xx.c	7
-rw-r--r--	drivers/scsi/Makefile	2
-rw-r--r--	drivers/scsi/ahci.c	503
-rw-r--r--	drivers/scsi/ata_piix.c	112
-rw-r--r--	drivers/scsi/libata-bmdma.c	160
-rw-r--r--	drivers/scsi/libata-core.c	3042
-rw-r--r--	drivers/scsi/libata-eh.c	1907
-rw-r--r--	drivers/scsi/libata-scsi.c	754
-rw-r--r--	drivers/scsi/libata.h	31
-rw-r--r--	drivers/scsi/pdc_adma.c	12
-rw-r--r--	drivers/scsi/sata_mv.c	71
-rw-r--r--	drivers/scsi/sata_nv.c	535
-rw-r--r--	drivers/scsi/sata_promise.c	40
-rw-r--r--	drivers/scsi/sata_qstor.c	15
-rw-r--r--	drivers/scsi/sata_sil.c	221
-rw-r--r--	drivers/scsi/sata_sil24.c	646
-rw-r--r--	drivers/scsi/sata_sis.c	13
-rw-r--r--	drivers/scsi/sata_svw.c	16
-rw-r--r--	drivers/scsi/sata_sx4.c	21
-rw-r--r--	drivers/scsi/sata_uli.c	14
-rw-r--r--	drivers/scsi/sata_via.c	16
-rw-r--r--	drivers/scsi/sata_vsc.c	25
-rw-r--r--	drivers/scsi/scsi.c	18
-rw-r--r--	drivers/scsi/scsi_error.c	24
-rw-r--r--	drivers/scsi/scsi_lib.c	2
-rw-r--r--	drivers/scsi/scsi_transport_api.h	6
-rw-r--r--	include/linux/ata.h	37
-rw-r--r--	include/linux/libata.h	473
-rw-r--r--	include/linux/pci_ids.h	9
-rw-r--r--	include/scsi/scsi_cmnd.h	1
-rw-r--r--	include/scsi/scsi_host.h	1
32 files changed, 6505 insertions, 2333 deletions
diff --git a/Documentation/DocBook/libata.tmpl b/Documentation/DocBook/libata.tmpl
index f869b03929db..e97c32314541 100644
--- a/Documentation/DocBook/libata.tmpl
+++ b/Documentation/DocBook/libata.tmpl
@@ -169,6 +169,22 @@ void (*tf_read) (struct ata_port *ap, struct ata_taskfile *tf);
 
 	</sect2>
 
+	<sect2><title>PIO data read/write</title>
+	<programlisting>
+void (*data_xfer) (struct ata_device *, unsigned char *, unsigned int, int);
+	</programlisting>
+
+	<para>
+All bmdma-style drivers must implement this hook.  This is the low-level
+operation that actually copies the data bytes during a PIO data
+transfer.
+Typically the driver
+will choose one of ata_pio_data_xfer_noirq(), ata_pio_data_xfer(), or
+ata_mmio_data_xfer().
+	</para>
+
+	</sect2>
+
 	<sect2><title>ATA command execute</title>
 	<programlisting>
 void (*exec_command)(struct ata_port *ap, struct ata_taskfile *tf);
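
The ->data_xfer hook documented above is normally just pointed at one of the three stock helpers named in the text. A minimal sketch of the wiring, assuming a hypothetical bmdma-style driver (example_ops is illustrative, not part of this patch):

static const struct ata_port_operations example_ops = {
	/* ... taskfile, bmdma and qc hooks elided ... */

	/* low-level PIO copy routine; pick the variant that matches
	 * the hardware */
	.data_xfer	= ata_pio_data_xfer,		/* port I/O */
	/* .data_xfer	= ata_mmio_data_xfer,		MMIO taskfiles */
	/* .data_xfer	= ata_pio_data_xfer_noirq,	irq-masked port I/O */
};

The ata_piix conversion later in this diff selects ata_pio_data_xfer in exactly this way.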
@@ -204,11 +220,10 @@ command.
 	<programlisting>
 u8 (*check_status)(struct ata_port *ap);
 u8 (*check_altstatus)(struct ata_port *ap);
-u8 (*check_err)(struct ata_port *ap);
 	</programlisting>
 
 	<para>
-	Reads the Status/AltStatus/Error ATA shadow register from
+	Reads the Status/AltStatus ATA shadow register from
 	hardware.  On some hardware, reading the Status register has
 	the side effect of clearing the interrupt condition.
 	Most drivers for taskfile-based hardware use
@@ -269,23 +284,6 @@ void (*set_mode) (struct ata_port *ap);
 
 	</sect2>
 
-	<sect2><title>Reset ATA bus</title>
-	<programlisting>
-void (*phy_reset) (struct ata_port *ap);
-	</programlisting>
-
-	<para>
-	The very first step in the probe phase.  Actions vary depending
-	on the bus type, typically.  After waking up the device and probing
-	for device presence (PATA and SATA), typically a soft reset
-	(SRST) will be performed.  Drivers typically use the helper
-	functions ata_bus_reset() or sata_phy_reset() for this hook.
-	Many SATA drivers use sata_phy_reset() or call it from within
-	their own phy_reset() functions.
-	</para>
-
-	</sect2>
-
 	<sect2><title>Control PCI IDE BMDMA engine</title>
 	<programlisting>
 void (*bmdma_setup) (struct ata_queued_cmd *qc);
@@ -354,16 +352,74 @@ int (*qc_issue) (struct ata_queued_cmd *qc);
 
 	</sect2>
 
-	<sect2><title>Timeout (error) handling</title>
+	<sect2><title>Exception and probe handling (EH)</title>
 	<programlisting>
 void (*eng_timeout) (struct ata_port *ap);
+void (*phy_reset) (struct ata_port *ap);
+	</programlisting>
+
+	<para>
+Deprecated.  Use ->error_handler() instead.
+	</para>
+
+	<programlisting>
+void (*freeze) (struct ata_port *ap);
+void (*thaw) (struct ata_port *ap);
+	</programlisting>
+
+	<para>
+ata_port_freeze() is called when HSM violations or some other
+condition disrupts normal operation of the port.  A frozen port
+is not allowed to perform any operation until the port is
+thawed, which usually follows a successful reset.
+	</para>
+
+	<para>
+The optional ->freeze() callback can be used for freezing the port
+hardware-wise (e.g. mask interrupt and stop DMA engine).  If a
+port cannot be frozen hardware-wise, the interrupt handler
+must ack and clear interrupts unconditionally while the port
+is frozen.
+	</para>
+
+	<para>
+The optional ->thaw() callback is called to perform the opposite of ->freeze():
+prepare the port for normal operation once again.  Unmask interrupts,
+start DMA engine, etc.
+	</para>
+
+	<programlisting>
+void (*error_handler) (struct ata_port *ap);
+	</programlisting>
+
+	<para>
+->error_handler() is a driver's hook into probe, hotplug, recovery,
+and other exceptional conditions.  The primary responsibility of an
+implementation is to call ata_do_eh() or ata_bmdma_drive_eh() with a set
+of EH hooks as arguments:
+	</para>
+
+	<para>
+The 'prereset' hook (may be NULL) is called during an EH reset, before
+any other actions are taken.
+	</para>
+
+	<para>
+The 'postreset' hook (may be NULL) is called after the EH reset is
+performed.
+	</para>
+
+	<para>
+Based on existing conditions, severity of the problem, and hardware
+capabilities, either the 'softreset' (may be NULL) or the 'hardreset'
+(may be NULL) hook will be called to perform the low-level EH reset.
+	</para>
+
+	<programlisting>
+void (*post_internal_cmd) (struct ata_queued_cmd *qc);
 	</programlisting>
 
 	<para>
-This is a high level error handling function, called from the
-error handling thread, when a command times out.  Most newer
-hardware will implement its own error handling code here.  IDE BMDMA
-drivers may use the helper function ata_eng_timeout().
+Perform any hardware-specific actions necessary to finish processing
+after executing a probe-time or EH-time command via ata_exec_internal().
 	</para>
 
 	</sect2>
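
Taken together, the hooks documented above usually amount to very little driver code. A minimal sketch of the ->error_handler() pattern for a hypothetical bmdma-style driver (example_* names are illustrative; the ata_piix conversion later in this diff follows the same shape):

static int example_prereset(struct ata_port *ap)
{
	/* quiesce the port, detect cables, etc.; returning 0 tells
	 * EH to proceed with the reset methods */
	return ata_std_prereset(ap);
}

static void example_error_handler(struct ata_port *ap)
{
	/* hand the EH hooks to the bmdma helper; a driver without a
	 * hardreset method simply passes NULL for that slot */
	ata_bmdma_drive_eh(ap, example_prereset, ata_std_softreset,
			   NULL, ata_std_postreset);
}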
diff --git a/drivers/ide/pci/amd74xx.c b/drivers/ide/pci/amd74xx.c
index b22ee5462318..6e9dbf4d8077 100644
--- a/drivers/ide/pci/amd74xx.c
+++ b/drivers/ide/pci/amd74xx.c
@@ -74,6 +74,7 @@ static struct amd_ide_chip {
 	{ PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_IDE,	0x50, AMD_UDMA_133 },
 	{ PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_IDE,	0x50, AMD_UDMA_133 },
 	{ PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE,	0x50, AMD_UDMA_133 },
+	{ PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_IDE,	0x50, AMD_UDMA_133 },
 	{ PCI_DEVICE_ID_AMD_CS5536_IDE,			0x40, AMD_UDMA_100 },
 	{ 0 }
 };
@@ -488,7 +489,8 @@ static ide_pci_device_t amd74xx_chipsets[] __devinitdata = {
 	/* 14 */ DECLARE_NV_DEV("NFORCE-MCP04"),
 	/* 15 */ DECLARE_NV_DEV("NFORCE-MCP51"),
 	/* 16 */ DECLARE_NV_DEV("NFORCE-MCP55"),
-	/* 17 */ DECLARE_AMD_DEV("AMD5536"),
+	/* 17 */ DECLARE_NV_DEV("NFORCE-MCP61"),
+	/* 18 */ DECLARE_AMD_DEV("AMD5536"),
 };
 
 static int __devinit amd74xx_probe(struct pci_dev *dev, const struct pci_device_id *id)
@@ -525,7 +527,8 @@ static struct pci_device_id amd74xx_pci_tbl[] = {
 	{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 14 },
 	{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 15 },
 	{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 16 },
-	{ PCI_VENDOR_ID_AMD,    PCI_DEVICE_ID_AMD_CS5536_IDE,          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 17 },
+	{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 17 },
+	{ PCI_VENDOR_ID_AMD,    PCI_DEVICE_ID_AMD_CS5536_IDE,          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 18 },
 	{ 0, },
 };
 MODULE_DEVICE_TABLE(pci, amd74xx_pci_tbl);
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 9ae4361e352c..84d546323dc7 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -165,7 +165,7 @@ ncr53c8xx-flags-$(CONFIG_SCSI_ZALON) \
 CFLAGS_ncr53c8xx.o	:= $(ncr53c8xx-flags-y) $(ncr53c8xx-flags-m)
 zalon7xx-objs		:= zalon.o ncr53c8xx.o
 NCR_Q720_mod-objs	:= NCR_Q720.o ncr53c8xx.o
-libata-objs		:= libata-core.o libata-scsi.o libata-bmdma.o
+libata-objs		:= libata-core.o libata-scsi.o libata-bmdma.o libata-eh.o
 oktagon_esp_mod-objs	:= oktagon_esp.o oktagon_io.o
 
 # Files generated that shall be removed upon make clean
diff --git a/drivers/scsi/ahci.c b/drivers/scsi/ahci.c
index b4f8fb1d628b..4bb77f62b3b9 100644
--- a/drivers/scsi/ahci.c
+++ b/drivers/scsi/ahci.c
@@ -48,7 +48,7 @@
 #include <asm/io.h>
 
 #define DRV_NAME	"ahci"
-#define DRV_VERSION	"1.2"
+#define DRV_VERSION	"1.3"
 
 
 enum {
@@ -56,12 +56,15 @@ enum {
 	AHCI_MAX_SG		= 168, /* hardware max is 64K */
 	AHCI_DMA_BOUNDARY	= 0xffffffff,
 	AHCI_USE_CLUSTERING	= 0,
-	AHCI_CMD_SLOT_SZ	= 32 * 32,
+	AHCI_MAX_CMDS		= 32,
+	AHCI_CMD_SZ		= 32,
+	AHCI_CMD_SLOT_SZ	= AHCI_MAX_CMDS * AHCI_CMD_SZ,
 	AHCI_RX_FIS_SZ		= 256,
-	AHCI_CMD_TBL_HDR	= 0x80,
 	AHCI_CMD_TBL_CDB	= 0x40,
-	AHCI_CMD_TBL_SZ		= AHCI_CMD_TBL_HDR + (AHCI_MAX_SG * 16),
-	AHCI_PORT_PRIV_DMA_SZ	= AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_SZ +
+	AHCI_CMD_TBL_HDR_SZ	= 0x80,
+	AHCI_CMD_TBL_SZ		= AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16),
+	AHCI_CMD_TBL_AR_SZ	= AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS,
+	AHCI_PORT_PRIV_DMA_SZ	= AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ +
 				  AHCI_RX_FIS_SZ,
 	AHCI_IRQ_ON_SG		= (1 << 31),
 	AHCI_CMD_ATAPI		= (1 << 5),
@@ -71,8 +74,10 @@ enum {
 	AHCI_CMD_CLR_BUSY	= (1 << 10),
 
 	RX_FIS_D2H_REG		= 0x40,	/* offset of D2H Register FIS data */
+	RX_FIS_UNK		= 0x60,	/* offset of Unknown FIS data */
 
 	board_ahci		= 0,
+	board_ahci_vt8251	= 1,
 
 	/* global controller registers */
 	HOST_CAP		= 0x00, /* host capabilities */
@@ -87,8 +92,9 @@ enum {
 	HOST_AHCI_EN		= (1 << 31), /* AHCI enabled */
 
 	/* HOST_CAP bits */
-	HOST_CAP_64		= (1 << 31), /* PCI DAC (64-bit DMA) support */
 	HOST_CAP_CLO		= (1 << 24), /* Command List Override support */
+	HOST_CAP_NCQ		= (1 << 30), /* Native Command Queueing */
+	HOST_CAP_64		= (1 << 31), /* PCI DAC (64-bit DMA) support */
 
 	/* registers for each SATA port */
 	PORT_LST_ADDR		= 0x00, /* command list DMA addr */
@@ -127,15 +133,17 @@ enum {
 	PORT_IRQ_PIOS_FIS	= (1 << 1), /* PIO Setup FIS rx'd */
 	PORT_IRQ_D2H_REG_FIS	= (1 << 0), /* D2H Register FIS rx'd */
 
-	PORT_IRQ_FATAL		= PORT_IRQ_TF_ERR |
-				  PORT_IRQ_HBUS_ERR |
-				  PORT_IRQ_HBUS_DATA_ERR |
-				  PORT_IRQ_IF_ERR,
-	DEF_PORT_IRQ		= PORT_IRQ_FATAL | PORT_IRQ_PHYRDY |
-				  PORT_IRQ_CONNECT | PORT_IRQ_SG_DONE |
-				  PORT_IRQ_UNK_FIS | PORT_IRQ_SDB_FIS |
-				  PORT_IRQ_DMAS_FIS | PORT_IRQ_PIOS_FIS |
-				  PORT_IRQ_D2H_REG_FIS,
+	PORT_IRQ_FREEZE		= PORT_IRQ_HBUS_ERR |
+				  PORT_IRQ_IF_ERR |
+				  PORT_IRQ_CONNECT |
+				  PORT_IRQ_PHYRDY |
+				  PORT_IRQ_UNK_FIS,
+	PORT_IRQ_ERROR		= PORT_IRQ_FREEZE |
+				  PORT_IRQ_TF_ERR |
+				  PORT_IRQ_HBUS_DATA_ERR,
+	DEF_PORT_IRQ		= PORT_IRQ_ERROR | PORT_IRQ_SG_DONE |
+				  PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS |
+				  PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS,
 
 	/* PORT_CMD bits */
 	PORT_CMD_ATAPI		= (1 << 24), /* Device is ATAPI */
@@ -153,6 +161,10 @@ enum {
 
 	/* hpriv->flags bits */
 	AHCI_FLAG_MSI		= (1 << 0),
+
+	/* ap->flags bits */
+	AHCI_FLAG_RESET_NEEDS_CLO	= (1 << 24),
+	AHCI_FLAG_NO_NCQ		= (1 << 25),
 };
 
 struct ahci_cmd_hdr {
@@ -181,7 +193,6 @@ struct ahci_port_priv {
 	dma_addr_t		cmd_slot_dma;
 	void			*cmd_tbl;
 	dma_addr_t		cmd_tbl_dma;
-	struct ahci_sg		*cmd_tbl_sg;
 	void			*rx_fis;
 	dma_addr_t		rx_fis_dma;
 };
@@ -191,15 +202,16 @@ static void ahci_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
 static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
 static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
 static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *regs);
-static int ahci_probe_reset(struct ata_port *ap, unsigned int *classes);
 static void ahci_irq_clear(struct ata_port *ap);
-static void ahci_eng_timeout(struct ata_port *ap);
 static int ahci_port_start(struct ata_port *ap);
 static void ahci_port_stop(struct ata_port *ap);
 static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
 static void ahci_qc_prep(struct ata_queued_cmd *qc);
 static u8 ahci_check_status(struct ata_port *ap);
-static inline int ahci_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc);
+static void ahci_freeze(struct ata_port *ap);
+static void ahci_thaw(struct ata_port *ap);
+static void ahci_error_handler(struct ata_port *ap);
+static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
 static void ahci_remove_one (struct pci_dev *pdev);
 
 static struct scsi_host_template ahci_sht = {
@@ -207,7 +219,8 @@ static struct scsi_host_template ahci_sht = {
 	.name			= DRV_NAME,
 	.ioctl			= ata_scsi_ioctl,
 	.queuecommand		= ata_scsi_queuecmd,
-	.can_queue		= ATA_DEF_QUEUE,
+	.change_queue_depth	= ata_scsi_change_queue_depth,
+	.can_queue		= AHCI_MAX_CMDS - 1,
 	.this_id		= ATA_SHT_THIS_ID,
 	.sg_tablesize		= AHCI_MAX_SG,
 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
@@ -216,6 +229,7 @@ static struct scsi_host_template ahci_sht = {
 	.proc_name		= DRV_NAME,
 	.dma_boundary		= AHCI_DMA_BOUNDARY,
 	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
 	.bios_param		= ata_std_bios_param,
 };
 
@@ -228,19 +242,21 @@ static const struct ata_port_operations ahci_ops = {
 
 	.tf_read		= ahci_tf_read,
 
-	.probe_reset		= ahci_probe_reset,
-
 	.qc_prep		= ahci_qc_prep,
 	.qc_issue		= ahci_qc_issue,
 
-	.eng_timeout		= ahci_eng_timeout,
-
 	.irq_handler		= ahci_interrupt,
 	.irq_clear		= ahci_irq_clear,
 
 	.scr_read		= ahci_scr_read,
 	.scr_write		= ahci_scr_write,
 
+	.freeze			= ahci_freeze,
+	.thaw			= ahci_thaw,
+
+	.error_handler		= ahci_error_handler,
+	.post_internal_cmd	= ahci_post_internal_cmd,
+
 	.port_start		= ahci_port_start,
 	.port_stop		= ahci_port_stop,
 };
@@ -250,7 +266,19 @@ static const struct ata_port_info ahci_port_info[] = {
 	{
 		.sht		= &ahci_sht,
 		.host_flags	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
-				  ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
+				  ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
+				  ATA_FLAG_SKIP_D2H_BSY,
+		.pio_mask	= 0x1f, /* pio0-4 */
+		.udma_mask	= 0x7f, /* udma0-6 ; FIXME */
+		.port_ops	= &ahci_ops,
+	},
+	/* board_ahci_vt8251 */
+	{
+		.sht		= &ahci_sht,
+		.host_flags	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+				  ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
+				  ATA_FLAG_SKIP_D2H_BSY |
+				  AHCI_FLAG_RESET_NEEDS_CLO | AHCI_FLAG_NO_NCQ,
 		.pio_mask	= 0x1f, /* pio0-4 */
 		.udma_mask	= 0x7f, /* udma0-6 ; FIXME */
 		.port_ops	= &ahci_ops,
@@ -258,6 +286,7 @@ static const struct ata_port_info ahci_port_info[] = {
 };
 
 static const struct pci_device_id ahci_pci_tbl[] = {
+	/* Intel */
 	{ PCI_VENDOR_ID_INTEL, 0x2652, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
 	  board_ahci }, /* ICH6 */
 	{ PCI_VENDOR_ID_INTEL, 0x2653, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
@@ -288,14 +317,39 @@ static const struct pci_device_id ahci_pci_tbl[] = {
 	  board_ahci }, /* ICH8M */
 	{ PCI_VENDOR_ID_INTEL, 0x282a, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
 	  board_ahci }, /* ICH8M */
+
+	/* JMicron */
 	{ 0x197b, 0x2360, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
 	  board_ahci }, /* JMicron JMB360 */
+	{ 0x197b, 0x2361, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+	  board_ahci }, /* JMicron JMB361 */
 	{ 0x197b, 0x2363, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
 	  board_ahci }, /* JMicron JMB363 */
+	{ 0x197b, 0x2365, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+	  board_ahci }, /* JMicron JMB365 */
+	{ 0x197b, 0x2366, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+	  board_ahci }, /* JMicron JMB366 */
+
+	/* ATI */
 	{ PCI_VENDOR_ID_ATI, 0x4380, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
 	  board_ahci }, /* ATI SB600 non-raid */
 	{ PCI_VENDOR_ID_ATI, 0x4381, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
 	  board_ahci }, /* ATI SB600 raid */
+
+	/* VIA */
+	{ PCI_VENDOR_ID_VIA, 0x3349, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+	  board_ahci_vt8251 }, /* VIA VT8251 */
+
+	/* NVIDIA */
+	{ PCI_VENDOR_ID_NVIDIA, 0x044c, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+	  board_ahci }, /* MCP65 */
+	{ PCI_VENDOR_ID_NVIDIA, 0x044d, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+	  board_ahci }, /* MCP65 */
+	{ PCI_VENDOR_ID_NVIDIA, 0x044e, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+	  board_ahci }, /* MCP65 */
+	{ PCI_VENDOR_ID_NVIDIA, 0x044f, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+	  board_ahci }, /* MCP65 */
+
 	{ }	/* terminate list */
 };
 
@@ -374,8 +428,6 @@ static int ahci_port_start(struct ata_port *ap)
 	pp->cmd_tbl = mem;
 	pp->cmd_tbl_dma = mem_dma;
 
-	pp->cmd_tbl_sg = mem + AHCI_CMD_TBL_HDR;
-
 	ap->private_data = pp;
 
 	if (hpriv->cap & HOST_CAP_64)
@@ -508,46 +560,71 @@ static unsigned int ahci_dev_classify(struct ata_port *ap)
 	return ata_dev_classify(&tf);
 }
 
-static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, u32 opts)
+static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
+			       u32 opts)
 {
-	pp->cmd_slot[0].opts = cpu_to_le32(opts);
-	pp->cmd_slot[0].status = 0;
-	pp->cmd_slot[0].tbl_addr = cpu_to_le32(pp->cmd_tbl_dma & 0xffffffff);
-	pp->cmd_slot[0].tbl_addr_hi = cpu_to_le32((pp->cmd_tbl_dma >> 16) >> 16);
+	dma_addr_t cmd_tbl_dma;
+
+	cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ;
+
+	pp->cmd_slot[tag].opts = cpu_to_le32(opts);
+	pp->cmd_slot[tag].status = 0;
+	pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff);
+	pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
 }
 
-static int ahci_poll_register(void __iomem *reg, u32 mask, u32 val,
-			      unsigned long interval_msec,
-			      unsigned long timeout_msec)
+static int ahci_clo(struct ata_port *ap)
 {
-	unsigned long timeout;
+	void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
+	struct ahci_host_priv *hpriv = ap->host_set->private_data;
 	u32 tmp;
 
-	timeout = jiffies + (timeout_msec * HZ) / 1000;
-	do {
-		tmp = readl(reg);
-		if ((tmp & mask) == val)
-			return 0;
-		msleep(interval_msec);
-	} while (time_before(jiffies, timeout));
+	if (!(hpriv->cap & HOST_CAP_CLO))
+		return -EOPNOTSUPP;
 
-	return -1;
+	tmp = readl(port_mmio + PORT_CMD);
+	tmp |= PORT_CMD_CLO;
+	writel(tmp, port_mmio + PORT_CMD);
+
+	tmp = ata_wait_register(port_mmio + PORT_CMD,
+				PORT_CMD_CLO, PORT_CMD_CLO, 1, 500);
+	if (tmp & PORT_CMD_CLO)
+		return -EIO;
+
+	return 0;
 }
 
-static int ahci_softreset(struct ata_port *ap, int verbose, unsigned int *class)
+static int ahci_prereset(struct ata_port *ap)
+{
+	if ((ap->flags & AHCI_FLAG_RESET_NEEDS_CLO) &&
+	    (ata_busy_wait(ap, ATA_BUSY, 1000) & ATA_BUSY)) {
+		/* ATA_BUSY hasn't cleared, so send a CLO */
+		ahci_clo(ap);
+	}
+
+	return ata_std_prereset(ap);
+}
+
+static int ahci_softreset(struct ata_port *ap, unsigned int *class)
 {
-	struct ahci_host_priv *hpriv = ap->host_set->private_data;
 	struct ahci_port_priv *pp = ap->private_data;
 	void __iomem *mmio = ap->host_set->mmio_base;
 	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
 	const u32 cmd_fis_len = 5; /* five dwords */
 	const char *reason = NULL;
 	struct ata_taskfile tf;
+	u32 tmp;
 	u8 *fis;
 	int rc;
 
 	DPRINTK("ENTER\n");
 
+	if (ata_port_offline(ap)) {
+		DPRINTK("PHY reports no device\n");
+		*class = ATA_DEV_NONE;
+		return 0;
+	}
+
 	/* prepare for SRST (AHCI-1.1 10.4.1) */
 	rc = ahci_stop_engine(ap);
 	if (rc) {
@@ -558,23 +635,13 @@ static int ahci_softreset(struct ata_port *ap, int verbose, unsigned int *class)
 	/* check BUSY/DRQ, perform Command List Override if necessary */
 	ahci_tf_read(ap, &tf);
 	if (tf.command & (ATA_BUSY | ATA_DRQ)) {
-		u32 tmp;
+		rc = ahci_clo(ap);
 
-		if (!(hpriv->cap & HOST_CAP_CLO)) {
-			rc = -EIO;
-			reason = "port busy but no CLO";
+		if (rc == -EOPNOTSUPP) {
+			reason = "port busy but CLO unavailable";
 			goto fail_restart;
-		}
-
-		tmp = readl(port_mmio + PORT_CMD);
-		tmp |= PORT_CMD_CLO;
-		writel(tmp, port_mmio + PORT_CMD);
-		readl(port_mmio + PORT_CMD); /* flush */
-
-		if (ahci_poll_register(port_mmio + PORT_CMD, PORT_CMD_CLO, 0x0,
-				       1, 500)) {
-			rc = -EIO;
-			reason = "CLO failed";
+		} else if (rc) {
+			reason = "port busy but CLO failed";
 			goto fail_restart;
 		}
 	}
@@ -582,20 +649,21 @@ static int ahci_softreset(struct ata_port *ap, int verbose, unsigned int *class)
 	/* restart engine */
 	ahci_start_engine(ap);
 
-	ata_tf_init(ap, &tf, 0);
+	ata_tf_init(ap->device, &tf);
 	fis = pp->cmd_tbl;
 
 	/* issue the first D2H Register FIS */
-	ahci_fill_cmd_slot(pp, cmd_fis_len | AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY);
+	ahci_fill_cmd_slot(pp, 0,
+			   cmd_fis_len | AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY);
 
 	tf.ctl |= ATA_SRST;
 	ata_tf_to_fis(&tf, fis, 0);
 	fis[1] &= ~(1 << 7);	/* turn off Command FIS bit */
 
 	writel(1, port_mmio + PORT_CMD_ISSUE);
-	readl(port_mmio + PORT_CMD_ISSUE);	/* flush */
 
-	if (ahci_poll_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x0, 1, 500)) {
+	tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1, 1, 500);
+	if (tmp & 0x1) {
 		rc = -EIO;
 		reason = "1st FIS failed";
 		goto fail;
@@ -605,7 +673,7 @@ static int ahci_softreset(struct ata_port *ap, int verbose, unsigned int *class)
 	msleep(1);
 
 	/* issue the second D2H Register FIS */
-	ahci_fill_cmd_slot(pp, cmd_fis_len);
+	ahci_fill_cmd_slot(pp, 0, cmd_fis_len);
 
 	tf.ctl &= ~ATA_SRST;
 	ata_tf_to_fis(&tf, fis, 0);
@@ -625,7 +693,7 @@ static int ahci_softreset(struct ata_port *ap, int verbose, unsigned int *class)
 	msleep(150);
 
 	*class = ATA_DEV_NONE;
-	if (sata_dev_present(ap)) {
+	if (ata_port_online(ap)) {
 		if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
 			rc = -EIO;
 			reason = "device not ready";
@@ -640,25 +708,31 @@ static int ahci_softreset(struct ata_port *ap, int verbose, unsigned int *class)
  fail_restart:
 	ahci_start_engine(ap);
  fail:
-	if (verbose)
-		printk(KERN_ERR "ata%u: softreset failed (%s)\n",
-		       ap->id, reason);
-	else
-		DPRINTK("EXIT, rc=%d reason=\"%s\"\n", rc, reason);
+	ata_port_printk(ap, KERN_ERR, "softreset failed (%s)\n", reason);
 	return rc;
 }
 
-static int ahci_hardreset(struct ata_port *ap, int verbose, unsigned int *class)
+static int ahci_hardreset(struct ata_port *ap, unsigned int *class)
 {
+	struct ahci_port_priv *pp = ap->private_data;
+	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
+	struct ata_taskfile tf;
 	int rc;
 
 	DPRINTK("ENTER\n");
 
 	ahci_stop_engine(ap);
-	rc = sata_std_hardreset(ap, verbose, class);
+
+	/* clear D2H reception area to properly wait for D2H FIS */
+	ata_tf_init(ap->device, &tf);
+	tf.command = 0xff;
+	ata_tf_to_fis(&tf, d2h_fis, 0);
+
+	rc = sata_std_hardreset(ap, class);
+
 	ahci_start_engine(ap);
 
-	if (rc == 0)
+	if (rc == 0 && ata_port_online(ap))
 		*class = ahci_dev_classify(ap);
 	if (*class == ATA_DEV_UNKNOWN)
 		*class = ATA_DEV_NONE;
@@ -686,13 +760,6 @@ static void ahci_postreset(struct ata_port *ap, unsigned int *class)
 	}
 }
 
-static int ahci_probe_reset(struct ata_port *ap, unsigned int *classes)
-{
-	return ata_drive_probe_reset(ap, ata_std_probeinit,
-				     ahci_softreset, ahci_hardreset,
-				     ahci_postreset, classes);
-}
-
 static u8 ahci_check_status(struct ata_port *ap)
 {
 	void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr;
@@ -708,9 +775,8 @@ static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
 	ata_tf_from_fis(d2h_fis, tf);
 }
 
-static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc)
+static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
 {
-	struct ahci_port_priv *pp = qc->ap->private_data;
 	struct scatterlist *sg;
 	struct ahci_sg *ahci_sg;
 	unsigned int n_sg = 0;
@@ -720,7 +786,7 @@ static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc)
 	/*
 	 * Next, the S/G list.
 	 */
-	ahci_sg = pp->cmd_tbl_sg;
+	ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
 	ata_for_each_sg(sg, qc) {
 		dma_addr_t addr = sg_dma_address(sg);
 		u32 sg_len = sg_dma_len(sg);
@@ -741,6 +807,7 @@ static void ahci_qc_prep(struct ata_queued_cmd *qc)
 	struct ata_port *ap = qc->ap;
 	struct ahci_port_priv *pp = ap->private_data;
 	int is_atapi = is_atapi_taskfile(&qc->tf);
+	void *cmd_tbl;
 	u32 opts;
 	const u32 cmd_fis_len = 5; /* five dwords */
 	unsigned int n_elem;
@@ -749,16 +816,17 @@ static void ahci_qc_prep(struct ata_queued_cmd *qc)
 	 * Fill in command table information.  First, the header,
 	 * a SATA Register - Host to Device command FIS.
 	 */
-	ata_tf_to_fis(&qc->tf, pp->cmd_tbl, 0);
+	cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;
+
+	ata_tf_to_fis(&qc->tf, cmd_tbl, 0);
 	if (is_atapi) {
-		memset(pp->cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
-		memcpy(pp->cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb,
-		       qc->dev->cdb_len);
+		memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
+		memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
 	}
 
 	n_elem = 0;
 	if (qc->flags & ATA_QCFLAG_DMAMAP)
-		n_elem = ahci_fill_sg(qc);
+		n_elem = ahci_fill_sg(qc, cmd_tbl);
 
 	/*
 	 * Fill in command slot information.
@@ -769,112 +837,122 @@ static void ahci_qc_prep(struct ata_queued_cmd *qc)
 	if (is_atapi)
 		opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
 
-	ahci_fill_cmd_slot(pp, opts);
+	ahci_fill_cmd_slot(pp, qc->tag, opts);
 }
 
-static void ahci_restart_port(struct ata_port *ap, u32 irq_stat)
+static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
 {
-	void __iomem *mmio = ap->host_set->mmio_base;
-	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
-	u32 tmp;
+	struct ahci_port_priv *pp = ap->private_data;
+	struct ata_eh_info *ehi = &ap->eh_info;
+	unsigned int err_mask = 0, action = 0;
+	struct ata_queued_cmd *qc;
+	u32 serror;
 
-	if ((ap->device[0].class != ATA_DEV_ATAPI) ||
-	    ((irq_stat & PORT_IRQ_TF_ERR) == 0))
-		printk(KERN_WARNING "ata%u: port reset, "
-		       "p_is %x is %x pis %x cmd %x tf %x ss %x se %x\n",
-		       ap->id,
-		       irq_stat,
-		       readl(mmio + HOST_IRQ_STAT),
-		       readl(port_mmio + PORT_IRQ_STAT),
-		       readl(port_mmio + PORT_CMD),
-		       readl(port_mmio + PORT_TFDATA),
-		       readl(port_mmio + PORT_SCR_STAT),
-		       readl(port_mmio + PORT_SCR_ERR));
-
-	/* stop DMA */
-	ahci_stop_engine(ap);
+	ata_ehi_clear_desc(ehi);
 
-	/* clear SATA phy error, if any */
-	tmp = readl(port_mmio + PORT_SCR_ERR);
-	writel(tmp, port_mmio + PORT_SCR_ERR);
+	/* AHCI needs SError cleared; otherwise, it might lock up */
+	serror = ahci_scr_read(ap, SCR_ERROR);
+	ahci_scr_write(ap, SCR_ERROR, serror);
 
-	/* if DRQ/BSY is set, device needs to be reset.
-	 * if so, issue COMRESET
-	 */
-	tmp = readl(port_mmio + PORT_TFDATA);
-	if (tmp & (ATA_BUSY | ATA_DRQ)) {
-		writel(0x301, port_mmio + PORT_SCR_CTL);
-		readl(port_mmio + PORT_SCR_CTL); /* flush */
-		udelay(10);
-		writel(0x300, port_mmio + PORT_SCR_CTL);
-		readl(port_mmio + PORT_SCR_CTL); /* flush */
+	/* analyze @irq_stat */
+	ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);
+
+	if (irq_stat & PORT_IRQ_TF_ERR)
+		err_mask |= AC_ERR_DEV;
+
+	if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) {
+		err_mask |= AC_ERR_HOST_BUS;
+		action |= ATA_EH_SOFTRESET;
 	}
 
-	/* re-start DMA */
-	ahci_start_engine(ap);
-}
+	if (irq_stat & PORT_IRQ_IF_ERR) {
+		err_mask |= AC_ERR_ATA_BUS;
+		action |= ATA_EH_SOFTRESET;
+		ata_ehi_push_desc(ehi, ", interface fatal error");
+	}
 
-static void ahci_eng_timeout(struct ata_port *ap)
-{
-	struct ata_host_set *host_set = ap->host_set;
-	void __iomem *mmio = host_set->mmio_base;
-	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
-	struct ata_queued_cmd *qc;
-	unsigned long flags;
+	if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) {
+		ata_ehi_hotplugged(ehi);
+		ata_ehi_push_desc(ehi, ", %s", irq_stat & PORT_IRQ_CONNECT ?
+				  "connection status changed" : "PHY RDY changed");
+	}
 
-	printk(KERN_WARNING "ata%u: handling error/timeout\n", ap->id);
+	if (irq_stat & PORT_IRQ_UNK_FIS) {
+		u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK);
+
+		err_mask |= AC_ERR_HSM;
+		action |= ATA_EH_SOFTRESET;
+		ata_ehi_push_desc(ehi, ", unknown FIS %08x %08x %08x %08x",
+				  unk[0], unk[1], unk[2], unk[3]);
+	}
 
-	spin_lock_irqsave(&host_set->lock, flags);
+	/* okay, let's hand over to EH */
+	ehi->serror |= serror;
+	ehi->action |= action;
 
-	ahci_restart_port(ap, readl(port_mmio + PORT_IRQ_STAT));
 	qc = ata_qc_from_tag(ap, ap->active_tag);
-	qc->err_mask |= AC_ERR_TIMEOUT;
-
-	spin_unlock_irqrestore(&host_set->lock, flags);
+	if (qc)
+		qc->err_mask |= err_mask;
+	else
+		ehi->err_mask |= err_mask;
 
-	ata_eh_qc_complete(qc);
+	if (irq_stat & PORT_IRQ_FREEZE)
+		ata_port_freeze(ap);
+	else
+		ata_port_abort(ap);
 }
 
-static inline int ahci_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
+static void ahci_host_intr(struct ata_port *ap)
 {
 	void __iomem *mmio = ap->host_set->mmio_base;
 	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
-	u32 status, serr, ci;
-
-	serr = readl(port_mmio + PORT_SCR_ERR);
-	writel(serr, port_mmio + PORT_SCR_ERR);
+	struct ata_eh_info *ehi = &ap->eh_info;
+	u32 status, qc_active;
+	int rc;
 
 	status = readl(port_mmio + PORT_IRQ_STAT);
 	writel(status, port_mmio + PORT_IRQ_STAT);
 
-	ci = readl(port_mmio + PORT_CMD_ISSUE);
-	if (likely((ci & 0x1) == 0)) {
-		if (qc) {
-			WARN_ON(qc->err_mask);
-			ata_qc_complete(qc);
-			qc = NULL;
-		}
+	if (unlikely(status & PORT_IRQ_ERROR)) {
+		ahci_error_intr(ap, status);
+		return;
 	}
 
-	if (status & PORT_IRQ_FATAL) {
-		unsigned int err_mask;
-		if (status & PORT_IRQ_TF_ERR)
-			err_mask = AC_ERR_DEV;
-		else if (status & PORT_IRQ_IF_ERR)
-			err_mask = AC_ERR_ATA_BUS;
-		else
-			err_mask = AC_ERR_HOST_BUS;
-
-		/* command processing has stopped due to error; restart */
-		ahci_restart_port(ap, status);
-
-		if (qc) {
-			qc->err_mask |= err_mask;
-			ata_qc_complete(qc);
-		}
+	if (ap->sactive)
+		qc_active = readl(port_mmio + PORT_SCR_ACT);
+	else
+		qc_active = readl(port_mmio + PORT_CMD_ISSUE);
+
+	rc = ata_qc_complete_multiple(ap, qc_active, NULL);
+	if (rc > 0)
+		return;
+	if (rc < 0) {
+		ehi->err_mask |= AC_ERR_HSM;
+		ehi->action |= ATA_EH_SOFTRESET;
+		ata_port_freeze(ap);
+		return;
+	}
+
+	/* hmmm... a spurious interrupt */
+
+	/* some devices send D2H reg with I bit set during NCQ command phase */
+	if (ap->sactive && status & PORT_IRQ_D2H_REG_FIS)
+		return;
+
+	/* ignore interim PIO setup fis interrupts */
+	if (ata_tag_valid(ap->active_tag)) {
+		struct ata_queued_cmd *qc =
+			ata_qc_from_tag(ap, ap->active_tag);
+
+		if (qc && qc->tf.protocol == ATA_PROT_PIO &&
+		    (status & PORT_IRQ_PIOS_FIS))
+			return;
 	}
 
-	return 1;
+	if (ata_ratelimit())
+		ata_port_printk(ap, KERN_INFO, "spurious interrupt "
+				"(irq_stat 0x%x active_tag %d sactive 0x%x)\n",
+				status, ap->active_tag, ap->sactive);
 }
 
 static void ahci_irq_clear(struct ata_port *ap)
@@ -882,7 +960,7 @@ static void ahci_irq_clear(struct ata_port *ap)
 	/* TODO */
 }
 
-static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
+static irqreturn_t ahci_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
 {
 	struct ata_host_set *host_set = dev_instance;
 	struct ahci_host_priv *hpriv;
@@ -911,14 +989,7 @@ static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *
 
 		ap = host_set->ports[i];
 		if (ap) {
-			struct ata_queued_cmd *qc;
-			qc = ata_qc_from_tag(ap, ap->active_tag);
-			if (!ahci_host_intr(ap, qc))
-				if (ata_ratelimit())
-					dev_printk(KERN_WARNING, host_set->dev,
-					  "unhandled interrupt on port %u\n",
-					  i);
-
+			ahci_host_intr(ap);
 			VPRINTK("port %u\n", i);
 		} else {
 			VPRINTK("port %u (no irq)\n", i);
@@ -935,7 +1006,7 @@ static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *
 		handled = 1;
 	}
 
 	spin_unlock(&host_set->lock);
 
 	VPRINTK("EXIT\n");
 
@@ -947,12 +1018,65 @@ static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
 	struct ata_port *ap = qc->ap;
 	void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
 
-	writel(1, port_mmio + PORT_CMD_ISSUE);
+	if (qc->tf.protocol == ATA_PROT_NCQ)
+		writel(1 << qc->tag, port_mmio + PORT_SCR_ACT);
+	writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE);
 	readl(port_mmio + PORT_CMD_ISSUE);	/* flush */
 
 	return 0;
 }
 
+static void ahci_freeze(struct ata_port *ap)
+{
+	void __iomem *mmio = ap->host_set->mmio_base;
+	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
+
+	/* turn IRQ off */
+	writel(0, port_mmio + PORT_IRQ_MASK);
+}
+
+static void ahci_thaw(struct ata_port *ap)
+{
+	void __iomem *mmio = ap->host_set->mmio_base;
+	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
+	u32 tmp;
+
+	/* clear IRQ */
+	tmp = readl(port_mmio + PORT_IRQ_STAT);
+	writel(tmp, port_mmio + PORT_IRQ_STAT);
+	writel(1 << ap->id, mmio + HOST_IRQ_STAT);
+
+	/* turn IRQ back on */
+	writel(DEF_PORT_IRQ, port_mmio + PORT_IRQ_MASK);
+}
+
+static void ahci_error_handler(struct ata_port *ap)
+{
+	if (!(ap->flags & ATA_FLAG_FROZEN)) {
+		/* restart engine */
+		ahci_stop_engine(ap);
+		ahci_start_engine(ap);
+	}
+
+	/* perform recovery */
+	ata_do_eh(ap, ahci_prereset, ahci_softreset, ahci_hardreset,
+		  ahci_postreset);
+}
+
+static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+
+	if (qc->flags & ATA_QCFLAG_FAILED)
+		qc->err_mask |= AC_ERR_OTHER;
+
+	if (qc->err_mask) {
+		/* make DMA engine forget about the failed command */
+		ahci_stop_engine(ap);
+		ahci_start_engine(ap);
+	}
+}
+
 static void ahci_setup_port(struct ata_ioports *port, unsigned long base,
 			    unsigned int port_idx)
 {
@@ -1097,9 +1221,6 @@ static int ahci_host_init(struct ata_probe_ent *probe_ent)
 		writel(tmp, port_mmio + PORT_IRQ_STAT);
 
 		writel(1 << i, mmio + HOST_IRQ_STAT);
-
-		/* set irq mask (enables interrupts) */
-		writel(DEF_PORT_IRQ, port_mmio + PORT_IRQ_MASK);
 	}
 
 	tmp = readl(mmio + HOST_CTL);
@@ -1197,6 +1318,8 @@ static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	VPRINTK("ENTER\n");
 
+	WARN_ON(ATA_MAX_QUEUE > AHCI_MAX_CMDS);
+
 	if (!printed_version++)
 		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
 
@@ -1264,6 +1387,10 @@ static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (rc)
 		goto err_out_hpriv;
 
+	if (!(probe_ent->host_flags & AHCI_FLAG_NO_NCQ) &&
+	    (hpriv->cap & HOST_CAP_NCQ))
+		probe_ent->host_flags |= ATA_FLAG_NCQ;
+
 	ahci_print_info(probe_ent);
 
 	/* FIXME: check ata_device_add return value */
@@ -1295,21 +1422,17 @@ static void ahci_remove_one (struct pci_dev *pdev)
 	struct device *dev = pci_dev_to_dev(pdev);
 	struct ata_host_set *host_set = dev_get_drvdata(dev);
 	struct ahci_host_priv *hpriv = host_set->private_data;
-	struct ata_port *ap;
 	unsigned int i;
 	int have_msi;
 
-	for (i = 0; i < host_set->n_ports; i++) {
-		ap = host_set->ports[i];
-
-		scsi_remove_host(ap->host);
-	}
+	for (i = 0; i < host_set->n_ports; i++)
+		ata_port_detach(host_set->ports[i]);
 
 	have_msi = hpriv->flags & AHCI_FLAG_MSI;
 	free_irq(host_set->irq, host_set);
 
 	for (i = 0; i < host_set->n_ports; i++) {
-		ap = host_set->ports[i];
+		struct ata_port *ap = host_set->ports[i];
 
 		ata_scsi_release(ap->host);
 		scsi_host_put(ap->host);
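
The ahci.c changes above replace the single command slot/table pair with one command table per NCQ tag, so per-command addressing reduces to a fixed-stride offset from the port's DMA area. A sketch of that arithmetic (example_* helpers are illustrative; ahci_fill_cmd_slot() and ahci_qc_prep() now do this inline):

static void *example_cmd_tbl(struct ahci_port_priv *pp, unsigned int tag)
{
	/* command tables are laid out back to back, AHCI_CMD_TBL_SZ
	 * bytes apart, starting at pp->cmd_tbl */
	return pp->cmd_tbl + tag * AHCI_CMD_TBL_SZ;
}

static dma_addr_t example_cmd_tbl_dma(struct ahci_port_priv *pp,
				      unsigned int tag)
{
	/* same stride on the bus-address side */
	return pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ;
}

This layout is what lets ahci_qc_issue() post tag N independently by writing 1 << N to PORT_SCR_ACT (for NCQ) and PORT_CMD_ISSUE.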
diff --git a/drivers/scsi/ata_piix.c b/drivers/scsi/ata_piix.c
index 6dc88149f9f1..521b718763f6 100644
--- a/drivers/scsi/ata_piix.c
+++ b/drivers/scsi/ata_piix.c
@@ -93,7 +93,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME	"ata_piix"
-#define DRV_VERSION	"1.05"
+#define DRV_VERSION	"1.10"
 
 enum {
 	PIIX_IOCFG		= 0x54, /* IDE I/O configuration register */
@@ -146,11 +146,10 @@ struct piix_map_db {
 
 static int piix_init_one (struct pci_dev *pdev,
 			  const struct pci_device_id *ent);
-
-static int piix_pata_probe_reset(struct ata_port *ap, unsigned int *classes);
-static int piix_sata_probe_reset(struct ata_port *ap, unsigned int *classes);
 static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev);
 static void piix_set_dmamode (struct ata_port *ap, struct ata_device *adev);
+static void piix_pata_error_handler(struct ata_port *ap);
+static void piix_sata_error_handler(struct ata_port *ap);
 
 static unsigned int in_module_init = 1;
 
@@ -159,6 +158,7 @@ static const struct pci_device_id piix_pci_tbl[] = {
 	{ 0x8086, 0x7111, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix4_pata },
 	{ 0x8086, 0x24db, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_pata },
 	{ 0x8086, 0x25a2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_pata },
+	{ 0x8086, 0x27df, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_pata },
 #endif
 
 	/* NOTE: The following PCI ids must be kept in sync with the
@@ -218,6 +218,7 @@ static struct scsi_host_template piix_sht = {
 	.proc_name		= DRV_NAME,
 	.dma_boundary		= ATA_DMA_BOUNDARY,
 	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
 	.bios_param		= ata_std_bios_param,
 	.resume			= ata_scsi_device_resume,
 	.suspend		= ata_scsi_device_suspend,
@@ -227,6 +228,7 @@ static const struct ata_port_operations piix_pata_ops = {
 	.port_disable		= ata_port_disable,
 	.set_piomode		= piix_set_piomode,
 	.set_dmamode		= piix_set_dmamode,
+	.mode_filter		= ata_pci_default_filter,
 
 	.tf_load		= ata_tf_load,
 	.tf_read		= ata_tf_read,
@@ -234,16 +236,18 @@ static const struct ata_port_operations piix_pata_ops = {
 	.exec_command		= ata_exec_command,
 	.dev_select		= ata_std_dev_select,
 
-	.probe_reset		= piix_pata_probe_reset,
-
 	.bmdma_setup		= ata_bmdma_setup,
 	.bmdma_start		= ata_bmdma_start,
 	.bmdma_stop		= ata_bmdma_stop,
 	.bmdma_status		= ata_bmdma_status,
 	.qc_prep		= ata_qc_prep,
 	.qc_issue		= ata_qc_issue_prot,
+	.data_xfer		= ata_pio_data_xfer,
 
-	.eng_timeout		= ata_eng_timeout,
+	.freeze			= ata_bmdma_freeze,
+	.thaw			= ata_bmdma_thaw,
+	.error_handler		= piix_pata_error_handler,
+	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
 
 	.irq_handler		= ata_interrupt,
 	.irq_clear		= ata_bmdma_irq_clear,
@@ -262,16 +266,18 @@ static const struct ata_port_operations piix_sata_ops = {
 	.exec_command		= ata_exec_command,
 	.dev_select		= ata_std_dev_select,
 
-	.probe_reset		= piix_sata_probe_reset,
-
 	.bmdma_setup		= ata_bmdma_setup,
 	.bmdma_start		= ata_bmdma_start,
 	.bmdma_stop		= ata_bmdma_stop,
 	.bmdma_status		= ata_bmdma_status,
 	.qc_prep		= ata_qc_prep,
 	.qc_issue		= ata_qc_issue_prot,
+	.data_xfer		= ata_pio_data_xfer,
 
-	.eng_timeout		= ata_eng_timeout,
+	.freeze			= ata_bmdma_freeze,
+	.thaw			= ata_bmdma_thaw,
+	.error_handler		= piix_sata_error_handler,
+	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
 
 	.irq_handler		= ata_interrupt,
 	.irq_clear		= ata_bmdma_irq_clear,
@@ -455,59 +461,51 @@ cbl40:
 }
 
 /**
- *	piix_pata_probeinit - probeinit for PATA host controller
+ *	piix_pata_prereset - prereset for PATA host controller
  *	@ap: Target port
  *
- *	Probeinit including cable detection.
- *
- *	LOCKING:
- *	None (inherited from caller).
- */
-static void piix_pata_probeinit(struct ata_port *ap)
-{
-	piix_pata_cbl_detect(ap);
-	ata_std_probeinit(ap);
-}
-
-/**
- *	piix_pata_probe_reset - Perform reset on PATA port and classify
- *	@ap: Port to reset
- *	@classes: Resulting classes of attached devices
- *
- *	Reset PATA phy and classify attached devices.
+ *	Prereset including cable detection.
  *
  *	LOCKING:
  *	None (inherited from caller).
  */
-static int piix_pata_probe_reset(struct ata_port *ap, unsigned int *classes)
+static int piix_pata_prereset(struct ata_port *ap)
 {
 	struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
 
 	if (!pci_test_config_bits(pdev, &piix_enable_bits[ap->hard_port_no])) {
-		printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id);
+		ata_port_printk(ap, KERN_INFO, "port disabled. ignoring.\n");
+		ap->eh_context.i.action &= ~ATA_EH_RESET_MASK;
 		return 0;
 	}
 
-	return ata_drive_probe_reset(ap, piix_pata_probeinit,
-				     ata_std_softreset, NULL,
-				     ata_std_postreset, classes);
+	piix_pata_cbl_detect(ap);
+
+	return ata_std_prereset(ap);
+}
+
+static void piix_pata_error_handler(struct ata_port *ap)
+{
+	ata_bmdma_drive_eh(ap, piix_pata_prereset, ata_std_softreset, NULL,
+			   ata_std_postreset);
 }
 
 /**
- *	piix_sata_probe - Probe PCI device for present SATA devices
- *	@ap: Port associated with the PCI device we wish to probe
+ *	piix_sata_prereset - prereset for SATA host controller
+ *	@ap: Target port
  *
  *	Reads and configures SATA PCI device's PCI config register
  *	Port Configuration and Status (PCS) to determine port and
- *	device availability.
+ *	device availability.  Return -ENODEV to skip reset if no
+ *	device is present.
  *
  *	LOCKING:
  *	None (inherited from caller).
  *
  *	RETURNS:
- *	Mask of avaliable devices on the port.
+ *	0 if device is present, -ENODEV otherwise.
  */
-static unsigned int piix_sata_probe (struct ata_port *ap)
+static int piix_sata_prereset(struct ata_port *ap)
 {
 	struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
 	const unsigned int *map = ap->host_set->private_data;
@@ -549,29 +547,19 @@ static unsigned int piix_sata_probe (struct ata_port *ap)
549 DPRINTK("ata%u: LEAVE, pcs=0x%x present_mask=0x%x\n", 547 DPRINTK("ata%u: LEAVE, pcs=0x%x present_mask=0x%x\n",
550 ap->id, pcs, present_mask); 548 ap->id, pcs, present_mask);
551 549
552 return present_mask; 550 if (!present_mask) {
553} 551 ata_port_printk(ap, KERN_INFO, "SATA port has no device.\n");
554 552 ap->eh_context.i.action &= ~ATA_EH_RESET_MASK;
555/**
556 * piix_sata_probe_reset - Perform reset on SATA port and classify
557 * @ap: Port to reset
558 * @classes: Resulting classes of attached devices
559 *
560 * Reset SATA phy and classify attached devices.
561 *
562 * LOCKING:
563 * None (inherited from caller).
564 */
565static int piix_sata_probe_reset(struct ata_port *ap, unsigned int *classes)
566{
567 if (!piix_sata_probe(ap)) {
568 printk(KERN_INFO "ata%u: SATA port has no device.\n", ap->id);
569 return 0; 553 return 0;
570 } 554 }
571 555
572 return ata_drive_probe_reset(ap, ata_std_probeinit, 556 return ata_std_prereset(ap);
573 ata_std_softreset, NULL, 557}
574 ata_std_postreset, classes); 558
559static void piix_sata_error_handler(struct ata_port *ap)
560{
561 ata_bmdma_drive_eh(ap, piix_sata_prereset, ata_std_softreset, NULL,
562 ata_std_postreset);
575} 563}
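
Both converted preresets follow the same contract: when nothing is attached,
clear ATA_EH_RESET_MASK and return 0 so EH skips the reset; otherwise fall
through to ata_std_prereset(). A generic skeleton distilled from the two
examples above (my_prereset and my_port_has_device are hypothetical names):

        static int my_prereset(struct ata_port *ap)
        {
                if (!my_port_has_device(ap)) {  /* hypothetical check */
                        /* nothing attached, tell EH to skip the reset */
                        ap->eh_context.i.action &= ~ATA_EH_RESET_MASK;
                        return 0;
                }

                /* controller-specific preparation, e.g. cable detection */
                return ata_std_prereset(ap);
        }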
576 564
577/** 565/**
@@ -760,15 +748,15 @@ static int __devinit piix_check_450nx_errata(struct pci_dev *ata_dev)
760 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev); 748 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
761 pci_read_config_word(pdev, 0x41, &cfg); 749 pci_read_config_word(pdev, 0x41, &cfg);
762 /* Only on the original revision: IDE DMA can hang */ 750 /* Only on the original revision: IDE DMA can hang */
763 if(rev == 0x00) 751 if (rev == 0x00)
764 no_piix_dma = 1; 752 no_piix_dma = 1;
765 /* On all revisions below 5 PXB bus lock must be disabled for IDE */ 753 /* On all revisions below 5 PXB bus lock must be disabled for IDE */
766 else if(cfg & (1<<14) && rev < 5) 754 else if (cfg & (1<<14) && rev < 5)
767 no_piix_dma = 2; 755 no_piix_dma = 2;
768 } 756 }
769 if(no_piix_dma) 757 if (no_piix_dma)
770 dev_printk(KERN_WARNING, &ata_dev->dev, "450NX errata present, disabling IDE DMA.\n"); 758 dev_printk(KERN_WARNING, &ata_dev->dev, "450NX errata present, disabling IDE DMA.\n");
771 if(no_piix_dma == 2) 759 if (no_piix_dma == 2)
772 dev_printk(KERN_WARNING, &ata_dev->dev, "A BIOS update may resolve this.\n"); 760 dev_printk(KERN_WARNING, &ata_dev->dev, "A BIOS update may resolve this.\n");
773 return no_piix_dma; 761 return no_piix_dma;
774} 762}
diff --git a/drivers/scsi/libata-bmdma.c b/drivers/scsi/libata-bmdma.c
index 835dff0bafdc..004e1a0d8b71 100644
--- a/drivers/scsi/libata-bmdma.c
+++ b/drivers/scsi/libata-bmdma.c
@@ -652,6 +652,151 @@ void ata_bmdma_stop(struct ata_queued_cmd *qc)
652 ata_altstatus(ap); /* dummy read */ 652 ata_altstatus(ap); /* dummy read */
653} 653}
654 654
655/**
656 * ata_bmdma_freeze - Freeze BMDMA controller port
657 * @ap: port to freeze
658 *
659 * Freeze BMDMA controller port.
660 *
661 * LOCKING:
662 * Inherited from caller.
663 */
664void ata_bmdma_freeze(struct ata_port *ap)
665{
666 struct ata_ioports *ioaddr = &ap->ioaddr;
667
668 ap->ctl |= ATA_NIEN;
669 ap->last_ctl = ap->ctl;
670
671 if (ap->flags & ATA_FLAG_MMIO)
672 writeb(ap->ctl, (void __iomem *)ioaddr->ctl_addr);
673 else
674 outb(ap->ctl, ioaddr->ctl_addr);
675}
676
677/**
678 * ata_bmdma_thaw - Thaw BMDMA controller port
679 * @ap: port to thaw
680 *
681 * Thaw BMDMA controller port.
682 *
683 * LOCKING:
684 * Inherited from caller.
685 */
686void ata_bmdma_thaw(struct ata_port *ap)
687{
688 /* clear & re-enable interrupts */
689 ata_chk_status(ap);
690 ap->ops->irq_clear(ap);
691 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
692 ata_irq_on(ap);
693}
694
695/**
696 * ata_bmdma_drive_eh - Perform EH with given methods for BMDMA controller
697 * @ap: port to handle error for
698 * @prereset: prereset method (can be NULL)
699 * @softreset: softreset method (can be NULL)
700 * @hardreset: hardreset method (can be NULL)
701 * @postreset: postreset method (can be NULL)
702 *
703 * Handle error for ATA BMDMA controller. It can handle both
704 * PATA and SATA controllers. Many controllers should be able to
705 * use this EH as-is or with some added handling before and
706 * after.
707 *
708 * This function is intended to be used for constructing
709 * ->error_handler callback by low level drivers.
710 *
711 * LOCKING:
712 * Kernel thread context (may sleep)
713 */
714void ata_bmdma_drive_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
715 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
716 ata_postreset_fn_t postreset)
717{
718 struct ata_eh_context *ehc = &ap->eh_context;
719 struct ata_queued_cmd *qc;
720 unsigned long flags;
721 int thaw = 0;
722
723 qc = __ata_qc_from_tag(ap, ap->active_tag);
724 if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
725 qc = NULL;
726
727 /* reset PIO HSM and stop DMA engine */
728 spin_lock_irqsave(ap->lock, flags);
729
730 ap->hsm_task_state = HSM_ST_IDLE;
731
732 if (qc && (qc->tf.protocol == ATA_PROT_DMA ||
733 qc->tf.protocol == ATA_PROT_ATAPI_DMA)) {
734 u8 host_stat;
735
736 host_stat = ata_bmdma_status(ap);
737
738 ata_ehi_push_desc(&ehc->i, "BMDMA stat 0x%x", host_stat);
739
740 /* BMDMA controllers indicate host bus error by
741 * setting DMA_ERR bit and timing out. As it wasn't
742 * really a timeout event, adjust error mask and
743 * cancel frozen state.
744 */
745 if (qc->err_mask == AC_ERR_TIMEOUT && host_stat & ATA_DMA_ERR) {
746 qc->err_mask = AC_ERR_HOST_BUS;
747 thaw = 1;
748 }
749
750 ap->ops->bmdma_stop(qc);
751 }
752
753 ata_altstatus(ap);
754 ata_chk_status(ap);
755 ap->ops->irq_clear(ap);
756
757 spin_unlock_irqrestore(ap->lock, flags);
758
759 if (thaw)
760 ata_eh_thaw_port(ap);
761
762 /* PIO and DMA engines have been stopped, perform recovery */
763 ata_do_eh(ap, prereset, softreset, hardreset, postreset);
764}
765
766/**
767 * ata_bmdma_error_handler - Stock error handler for BMDMA controller
768 * @ap: port to handle error for
769 *
770 * Stock error handler for BMDMA controller.
771 *
772 * LOCKING:
773 * Kernel thread context (may sleep)
774 */
775void ata_bmdma_error_handler(struct ata_port *ap)
776{
777 ata_reset_fn_t hardreset;
778
779 hardreset = NULL;
780 if (sata_scr_valid(ap))
781 hardreset = sata_std_hardreset;
782
783 ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset, hardreset,
784 ata_std_postreset);
785}
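
Note the sata_scr_valid() test above: hardreset is offered only when the port
actually exposes SCR registers, which is why the piix conversions earlier in
this diff pass NULL for hardreset explicitly.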
786
787/**
788 * ata_bmdma_post_internal_cmd - Stock post_internal_cmd for
789 * BMDMA controller
790 * @qc: internal command to clean up
791 *
792 * LOCKING:
793 * Kernel thread context (may sleep)
794 */
795void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc)
796{
797 ata_bmdma_stop(qc);
798}
799
655#ifdef CONFIG_PCI 800#ifdef CONFIG_PCI
656static struct ata_probe_ent * 801static struct ata_probe_ent *
657ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port) 802ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
@@ -930,10 +1075,21 @@ int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
930 1075
931 /* FIXME: check ata_device_add return */ 1076 /* FIXME: check ata_device_add return */
932 if (legacy_mode) { 1077 if (legacy_mode) {
933 if (legacy_mode & (1 << 0)) 1078 struct device *dev = &pdev->dev;
1079 struct ata_host_set *host_set = NULL;
1080
1081 if (legacy_mode & (1 << 0)) {
934 ata_device_add(probe_ent); 1082 ata_device_add(probe_ent);
935 if (legacy_mode & (1 << 1)) 1083 host_set = dev_get_drvdata(dev);
1084 }
1085
1086 if (legacy_mode & (1 << 1)) {
936 ata_device_add(probe_ent2); 1087 ata_device_add(probe_ent2);
1088 if (host_set) {
1089 host_set->next = dev_get_drvdata(dev);
1090 dev_set_drvdata(dev, host_set);
1091 }
1092 }
937 } else 1093 } else
938 ata_device_add(probe_ent); 1094 ata_device_add(probe_ent);
939 1095
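
With the chaining above, dev_get_drvdata() keeps returning the primary
host_set, whose ->next points at the secondary one. A hedged sketch of how a
consumer might walk the chain (the loop is illustrative, assuming ->next is
NULL-terminated):

        struct ata_host_set *host_set = dev_get_drvdata(dev);

        do {
                /* operate on this host_set's ports */
        } while ((host_set = host_set->next) != NULL);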
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index de9ba7890b5a..6c66877be2bf 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -61,22 +61,29 @@
61 61
62#include "libata.h" 62#include "libata.h"
63 63
64static unsigned int ata_dev_init_params(struct ata_port *ap, 64/* debounce timing parameters in msecs { interval, duration, timeout } */
65 struct ata_device *dev, 65const unsigned long sata_deb_timing_boot[] = { 5, 100, 2000 };
66 u16 heads, 66const unsigned long sata_deb_timing_eh[] = { 25, 500, 2000 };
67 u16 sectors); 67const unsigned long sata_deb_timing_before_fsrst[] = { 100, 2000, 5000 };
68static void ata_set_mode(struct ata_port *ap); 68
69static unsigned int ata_dev_set_xfermode(struct ata_port *ap, 69static unsigned int ata_dev_init_params(struct ata_device *dev,
70 struct ata_device *dev); 70 u16 heads, u16 sectors);
71static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev); 71static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
72static void ata_dev_xfermask(struct ata_device *dev);
72 73
73static unsigned int ata_unique_id = 1; 74static unsigned int ata_unique_id = 1;
74static struct workqueue_struct *ata_wq; 75static struct workqueue_struct *ata_wq;
75 76
77struct workqueue_struct *ata_aux_wq;
78
76int atapi_enabled = 1; 79int atapi_enabled = 1;
77module_param(atapi_enabled, int, 0444); 80module_param(atapi_enabled, int, 0444);
78MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)"); 81MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
79 82
83int atapi_dmadir = 0;
84module_param(atapi_dmadir, int, 0444);
85MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
86
80int libata_fua = 0; 87int libata_fua = 0;
81module_param_named(fua, libata_fua, int, 0444); 88module_param_named(fua, libata_fua, int, 0444);
82MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)"); 89MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
@@ -397,11 +404,22 @@ static const char *ata_mode_string(unsigned int xfer_mask)
397 return "<n/a>"; 404 return "<n/a>";
398} 405}
399 406
400static void ata_dev_disable(struct ata_port *ap, struct ata_device *dev) 407static const char *sata_spd_string(unsigned int spd)
401{ 408{
402 if (ata_dev_present(dev)) { 409 static const char * const spd_str[] = {
403 printk(KERN_WARNING "ata%u: dev %u disabled\n", 410 "1.5 Gbps",
404 ap->id, dev->devno); 411 "3.0 Gbps",
412 };
413
414 if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
415 return "<unknown>";
416 return spd_str[spd - 1];
417}
418
419void ata_dev_disable(struct ata_device *dev)
420{
421 if (ata_dev_enabled(dev) && ata_msg_drv(dev->ap)) {
422 ata_dev_printk(dev, KERN_WARNING, "disabled\n");
405 dev->class++; 423 dev->class++;
406 } 424 }
407} 425}
@@ -759,8 +777,11 @@ void ata_std_dev_select (struct ata_port *ap, unsigned int device)
759void ata_dev_select(struct ata_port *ap, unsigned int device, 777void ata_dev_select(struct ata_port *ap, unsigned int device,
760 unsigned int wait, unsigned int can_sleep) 778 unsigned int wait, unsigned int can_sleep)
761{ 779{
762 VPRINTK("ENTER, ata%u: device %u, wait %u\n", 780 if (ata_msg_probe(ap)) {
763 ap->id, device, wait); 781 ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, ata%u: "
782 "device %u, wait %u\n",
783 ap->id, device, wait);
784 }
764 785
765 if (wait) 786 if (wait)
766 ata_wait_idle(ap); 787 ata_wait_idle(ap);
@@ -915,9 +936,9 @@ void ata_port_flush_task(struct ata_port *ap)
915 936
916 DPRINTK("ENTER\n"); 937 DPRINTK("ENTER\n");
917 938
918 spin_lock_irqsave(&ap->host_set->lock, flags); 939 spin_lock_irqsave(ap->lock, flags);
919 ap->flags |= ATA_FLAG_FLUSH_PORT_TASK; 940 ap->flags |= ATA_FLAG_FLUSH_PORT_TASK;
920 spin_unlock_irqrestore(&ap->host_set->lock, flags); 941 spin_unlock_irqrestore(ap->lock, flags);
921 942
922 DPRINTK("flush #1\n"); 943 DPRINTK("flush #1\n");
923 flush_workqueue(ata_wq); 944 flush_workqueue(ata_wq);
@@ -928,30 +949,31 @@ void ata_port_flush_task(struct ata_port *ap)
928 * Cancel and flush. 949 * Cancel and flush.
929 */ 950 */
930 if (!cancel_delayed_work(&ap->port_task)) { 951 if (!cancel_delayed_work(&ap->port_task)) {
931 DPRINTK("flush #2\n"); 952 if (ata_msg_ctl(ap))
953 ata_port_printk(ap, KERN_DEBUG, "%s: flush #2\n", __FUNCTION__);
932 flush_workqueue(ata_wq); 954 flush_workqueue(ata_wq);
933 } 955 }
934 956
935 spin_lock_irqsave(&ap->host_set->lock, flags); 957 spin_lock_irqsave(ap->lock, flags);
936 ap->flags &= ~ATA_FLAG_FLUSH_PORT_TASK; 958 ap->flags &= ~ATA_FLAG_FLUSH_PORT_TASK;
937 spin_unlock_irqrestore(&ap->host_set->lock, flags); 959 spin_unlock_irqrestore(ap->lock, flags);
938 960
939 DPRINTK("EXIT\n"); 961 if (ata_msg_ctl(ap))
962 ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
940} 963}
941 964
942void ata_qc_complete_internal(struct ata_queued_cmd *qc) 965void ata_qc_complete_internal(struct ata_queued_cmd *qc)
943{ 966{
944 struct completion *waiting = qc->private_data; 967 struct completion *waiting = qc->private_data;
945 968
946 qc->ap->ops->tf_read(qc->ap, &qc->tf);
947 complete(waiting); 969 complete(waiting);
948} 970}
949 971
950/** 972/**
951 * ata_exec_internal - execute libata internal command 973 * ata_exec_internal - execute libata internal command
952 * @ap: Port to which the command is sent
953 * @dev: Device to which the command is sent 974 * @dev: Device to which the command is sent
954 * @tf: Taskfile registers for the command and the result 975 * @tf: Taskfile registers for the command and the result
976 * @cdb: CDB for packet command
955 * @dma_dir: Data transfer direction of the command 977 * @dma_dir: Data transfer direction of the command
956 * @buf: Data buffer of the command 978 * @buf: Data buffer of the command
957 * @buflen: Length of data buffer 979 * @buflen: Length of data buffer
@@ -964,25 +986,66 @@ void ata_qc_complete_internal(struct ata_queued_cmd *qc)
964 * 986 *
965 * LOCKING: 987 * LOCKING:
966 * None. Should be called with kernel context, might sleep. 988 * None. Should be called with kernel context, might sleep.
989 *
990 * RETURNS:
991 * Zero on success, AC_ERR_* mask on failure
967 */ 992 */
968 993unsigned ata_exec_internal(struct ata_device *dev,
969static unsigned 994 struct ata_taskfile *tf, const u8 *cdb,
970ata_exec_internal(struct ata_port *ap, struct ata_device *dev, 995 int dma_dir, void *buf, unsigned int buflen)
971 struct ata_taskfile *tf,
972 int dma_dir, void *buf, unsigned int buflen)
973{ 996{
997 struct ata_port *ap = dev->ap;
974 u8 command = tf->command; 998 u8 command = tf->command;
975 struct ata_queued_cmd *qc; 999 struct ata_queued_cmd *qc;
1000 unsigned int tag, preempted_tag;
1001 u32 preempted_sactive, preempted_qc_active;
976 DECLARE_COMPLETION(wait); 1002 DECLARE_COMPLETION(wait);
977 unsigned long flags; 1003 unsigned long flags;
978 unsigned int err_mask; 1004 unsigned int err_mask;
1005 int rc;
1006
1007 spin_lock_irqsave(ap->lock, flags);
1008
1009 /* no internal command while frozen */
1010 if (ap->flags & ATA_FLAG_FROZEN) {
1011 spin_unlock_irqrestore(ap->lock, flags);
1012 return AC_ERR_SYSTEM;
1013 }
979 1014
980 spin_lock_irqsave(&ap->host_set->lock, flags); 1015 /* initialize internal qc */
981 1016
982 qc = ata_qc_new_init(ap, dev); 1017 /* XXX: Tag 0 is used for drivers with legacy EH as some
983 BUG_ON(qc == NULL); 1018 * drivers choke if any other tag is given. This breaks
1019 * ata_tag_internal() test for those drivers. Don't use new
1020 * EH stuff without converting to it.
1021 */
1022 if (ap->ops->error_handler)
1023 tag = ATA_TAG_INTERNAL;
1024 else
1025 tag = 0;
1026
1027 if (test_and_set_bit(tag, &ap->qc_allocated))
1028 BUG();
1029 qc = __ata_qc_from_tag(ap, tag);
984 1030
1031 qc->tag = tag;
1032 qc->scsicmd = NULL;
1033 qc->ap = ap;
1034 qc->dev = dev;
1035 ata_qc_reinit(qc);
1036
1037 preempted_tag = ap->active_tag;
1038 preempted_sactive = ap->sactive;
1039 preempted_qc_active = ap->qc_active;
1040 ap->active_tag = ATA_TAG_POISON;
1041 ap->sactive = 0;
1042 ap->qc_active = 0;
1043
1044 /* prepare & issue qc */
985 qc->tf = *tf; 1045 qc->tf = *tf;
1046 if (cdb)
1047 memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1048 qc->flags |= ATA_QCFLAG_RESULT_TF;
986 qc->dma_dir = dma_dir; 1049 qc->dma_dir = dma_dir;
987 if (dma_dir != DMA_NONE) { 1050 if (dma_dir != DMA_NONE) {
988 ata_sg_init_one(qc, buf, buflen); 1051 ata_sg_init_one(qc, buf, buflen);
@@ -994,33 +1057,58 @@ ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
994 1057
995 ata_qc_issue(qc); 1058 ata_qc_issue(qc);
996 1059
997 spin_unlock_irqrestore(&ap->host_set->lock, flags); 1060 spin_unlock_irqrestore(ap->lock, flags);
1061
1062 rc = wait_for_completion_timeout(&wait, ATA_TMOUT_INTERNAL);
998 1063
999 if (!wait_for_completion_timeout(&wait, ATA_TMOUT_INTERNAL)) { 1064 ata_port_flush_task(ap);
1000 ata_port_flush_task(ap);
1001 1065
1002 spin_lock_irqsave(&ap->host_set->lock, flags); 1066 if (!rc) {
1067 spin_lock_irqsave(ap->lock, flags);
1003 1068
1004 /* We're racing with irq here. If we lose, the 1069 /* We're racing with irq here. If we lose, the
1005 * following test prevents us from completing the qc 1070 * following test prevents us from completing the qc
1006 * again. If completion irq occurs after here but 1071 * twice. If we win, the port is frozen and will be
1007 * before the caller cleans up, it will result in a 1072 * cleaned up by ->post_internal_cmd().
1008 * spurious interrupt. We can live with that.
1009 */ 1073 */
1010 if (qc->flags & ATA_QCFLAG_ACTIVE) { 1074 if (qc->flags & ATA_QCFLAG_ACTIVE) {
1011 qc->err_mask = AC_ERR_TIMEOUT; 1075 qc->err_mask |= AC_ERR_TIMEOUT;
1012 ata_qc_complete(qc); 1076
1013 printk(KERN_WARNING "ata%u: qc timeout (cmd 0x%x)\n", 1077 if (ap->ops->error_handler)
1014 ap->id, command); 1078 ata_port_freeze(ap);
1079 else
1080 ata_qc_complete(qc);
1081
1082 if (ata_msg_warn(ap))
1083 ata_dev_printk(dev, KERN_WARNING,
1084 "qc timeout (cmd 0x%x)\n", command);
1015 } 1085 }
1016 1086
1017 spin_unlock_irqrestore(&ap->host_set->lock, flags); 1087 spin_unlock_irqrestore(ap->lock, flags);
1018 } 1088 }
1019 1089
1020 *tf = qc->tf; 1090 /* do post_internal_cmd */
1091 if (ap->ops->post_internal_cmd)
1092 ap->ops->post_internal_cmd(qc);
1093
1094 if (qc->flags & ATA_QCFLAG_FAILED && !qc->err_mask) {
1095 if (ata_msg_warn(ap))
1096 ata_dev_printk(dev, KERN_WARNING,
1097 "zero err_mask for failed "
1098 "internal command, assuming AC_ERR_OTHER\n");
1099 qc->err_mask |= AC_ERR_OTHER;
1100 }
1101
1102 /* finish up */
1103 spin_lock_irqsave(ap->lock, flags);
1104
1105 *tf = qc->result_tf;
1021 err_mask = qc->err_mask; 1106 err_mask = qc->err_mask;
1022 1107
1023 ata_qc_free(qc); 1108 ata_qc_free(qc);
1109 ap->active_tag = preempted_tag;
1110 ap->sactive = preempted_sactive;
1111 ap->qc_active = preempted_qc_active;
1024 1112
1025 /* XXX - Some LLDDs (sata_mv) disable port on command failure. 1113 /* XXX - Some LLDDs (sata_mv) disable port on command failure.
1026 * Until those drivers are fixed, we detect the condition 1114 * Until those drivers are fixed, we detect the condition
@@ -1033,11 +1121,13 @@ ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
1033 * 1121 *
1034 * Kill the following code as soon as those drivers are fixed. 1122 * Kill the following code as soon as those drivers are fixed.
1035 */ 1123 */
1036 if (ap->flags & ATA_FLAG_PORT_DISABLED) { 1124 if (ap->flags & ATA_FLAG_DISABLED) {
1037 err_mask |= AC_ERR_SYSTEM; 1125 err_mask |= AC_ERR_SYSTEM;
1038 ata_port_probe(ap); 1126 ata_port_probe(ap);
1039 } 1127 }
1040 1128
1129 spin_unlock_irqrestore(ap->lock, flags);
1130
1041 return err_mask; 1131 return err_mask;
1042} 1132}
1043 1133
@@ -1076,11 +1166,10 @@ unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1076 1166
1077/** 1167/**
1078 * ata_dev_read_id - Read ID data from the specified device 1168 * ata_dev_read_id - Read ID data from the specified device
1079 * @ap: port on which target device resides
1080 * @dev: target device 1169 * @dev: target device
1081 * @p_class: pointer to class of the target device (may be changed) 1170 * @p_class: pointer to class of the target device (may be changed)
1082 * @post_reset: is this read ID post-reset? 1171 * @post_reset: is this read ID post-reset?
1083 * @p_id: read IDENTIFY page (newly allocated) 1172 * @id: buffer to read IDENTIFY data into
1084 * 1173 *
1085 * Read ID data from the specified device. ATA_CMD_ID_ATA is 1174 * Read ID data from the specified device. ATA_CMD_ID_ATA is
1086 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI 1175 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
@@ -1093,29 +1182,24 @@ unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1093 * RETURNS: 1182 * RETURNS:
1094 * 0 on success, -errno otherwise. 1183 * 0 on success, -errno otherwise.
1095 */ 1184 */
1096static int ata_dev_read_id(struct ata_port *ap, struct ata_device *dev, 1185int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1097 unsigned int *p_class, int post_reset, u16 **p_id) 1186 int post_reset, u16 *id)
1098{ 1187{
1188 struct ata_port *ap = dev->ap;
1099 unsigned int class = *p_class; 1189 unsigned int class = *p_class;
1100 struct ata_taskfile tf; 1190 struct ata_taskfile tf;
1101 unsigned int err_mask = 0; 1191 unsigned int err_mask = 0;
1102 u16 *id;
1103 const char *reason; 1192 const char *reason;
1104 int rc; 1193 int rc;
1105 1194
1106 DPRINTK("ENTER, host %u, dev %u\n", ap->id, dev->devno); 1195 if (ata_msg_ctl(ap))
1196 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n",
1197 __FUNCTION__, ap->id, dev->devno);
1107 1198
1108 ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */ 1199 ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
1109 1200
1110 id = kmalloc(sizeof(id[0]) * ATA_ID_WORDS, GFP_KERNEL);
1111 if (id == NULL) {
1112 rc = -ENOMEM;
1113 reason = "out of memory";
1114 goto err_out;
1115 }
1116
1117 retry: 1201 retry:
1118 ata_tf_init(ap, &tf, dev->devno); 1202 ata_tf_init(dev, &tf);
1119 1203
1120 switch (class) { 1204 switch (class) {
1121 case ATA_DEV_ATA: 1205 case ATA_DEV_ATA:
@@ -1132,7 +1216,7 @@ static int ata_dev_read_id(struct ata_port *ap, struct ata_device *dev,
1132 1216
1133 tf.protocol = ATA_PROT_PIO; 1217 tf.protocol = ATA_PROT_PIO;
1134 1218
1135 err_mask = ata_exec_internal(ap, dev, &tf, DMA_FROM_DEVICE, 1219 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
1136 id, sizeof(id[0]) * ATA_ID_WORDS); 1220 id, sizeof(id[0]) * ATA_ID_WORDS);
1137 if (err_mask) { 1221 if (err_mask) {
1138 rc = -EIO; 1222 rc = -EIO;
@@ -1159,7 +1243,7 @@ static int ata_dev_read_id(struct ata_port *ap, struct ata_device *dev,
1159 * Some drives were very specific about that exact sequence. 1243 * Some drives were very specific about that exact sequence.
1160 */ 1244 */
1161 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) { 1245 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
1162 err_mask = ata_dev_init_params(ap, dev, id[3], id[6]); 1246 err_mask = ata_dev_init_params(dev, id[3], id[6]);
1163 if (err_mask) { 1247 if (err_mask) {
1164 rc = -EIO; 1248 rc = -EIO;
1165 reason = "INIT_DEV_PARAMS failed"; 1249 reason = "INIT_DEV_PARAMS failed";
@@ -1175,25 +1259,45 @@ static int ata_dev_read_id(struct ata_port *ap, struct ata_device *dev,
1175 } 1259 }
1176 1260
1177 *p_class = class; 1261 *p_class = class;
1178 *p_id = id; 1262
1179 return 0; 1263 return 0;
1180 1264
1181 err_out: 1265 err_out:
1182 printk(KERN_WARNING "ata%u: dev %u failed to IDENTIFY (%s)\n", 1266 if (ata_msg_warn(ap))
1183 ap->id, dev->devno, reason); 1267 ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
1184 kfree(id); 1268 "(%s, err_mask=0x%x)\n", reason, err_mask);
1185 return rc; 1269 return rc;
1186} 1270}
1187 1271
1188static inline u8 ata_dev_knobble(const struct ata_port *ap, 1272static inline u8 ata_dev_knobble(struct ata_device *dev)
1189 struct ata_device *dev)
1190{ 1273{
1191 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id))); 1274 return ((dev->ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
1275}
1276
1277static void ata_dev_config_ncq(struct ata_device *dev,
1278 char *desc, size_t desc_sz)
1279{
1280 struct ata_port *ap = dev->ap;
1281 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
1282
1283 if (!ata_id_has_ncq(dev->id)) {
1284 desc[0] = '\0';
1285 return;
1286 }
1287
1288 if (ap->flags & ATA_FLAG_NCQ) {
1289 hdepth = min(ap->host->can_queue, ATA_MAX_QUEUE - 1);
1290 dev->flags |= ATA_DFLAG_NCQ;
1291 }
1292
1293 if (hdepth >= ddepth)
1294 snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
1295 else
1296 snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
1192} 1297}
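
As a concrete reading of the arithmetic above: assuming ATA_MAX_QUEUE is 32, a
host advertising can_queue of 31 or more paired with a drive reporting queue
depth 32 yields "NCQ (depth 31/32)", while a drive reporting depth 16 under
the same host limit is described simply as "NCQ (depth 16)".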
1193 1298
1194/** 1299/**
1195 * ata_dev_configure - Configure the specified ATA/ATAPI device 1300 * ata_dev_configure - Configure the specified ATA/ATAPI device
1196 * @ap: Port on which target device resides
1197 * @dev: Target device to configure 1301 * @dev: Target device to configure
1198 * @print_info: Enable device info printout 1302 * @print_info: Enable device info printout
1199 * 1303 *
@@ -1206,30 +1310,33 @@ static inline u8 ata_dev_knobble(const struct ata_port *ap,
1206 * RETURNS: 1310 * RETURNS:
1207 * 0 on success, -errno otherwise 1311 * 0 on success, -errno otherwise
1208 */ 1312 */
1209static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev, 1313int ata_dev_configure(struct ata_device *dev, int print_info)
1210 int print_info)
1211{ 1314{
1315 struct ata_port *ap = dev->ap;
1212 const u16 *id = dev->id; 1316 const u16 *id = dev->id;
1213 unsigned int xfer_mask; 1317 unsigned int xfer_mask;
1214 int i, rc; 1318 int i, rc;
1215 1319
1216 if (!ata_dev_present(dev)) { 1320 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
1217 DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n", 1321 ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT (host %u, dev %u) -- nodev\n",
1218 ap->id, dev->devno); 1322 __FUNCTION__, ap->id, dev->devno);
1219 return 0; 1323 return 0;
1220 } 1324 }
1221 1325
1222 DPRINTK("ENTER, host %u, dev %u\n", ap->id, dev->devno); 1326 if (ata_msg_probe(ap))
1327 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n",
1328 __FUNCTION__, ap->id, dev->devno);
1223 1329
1224 /* print device capabilities */ 1330 /* print device capabilities */
1225 if (print_info) 1331 if (ata_msg_probe(ap))
1226 printk(KERN_DEBUG "ata%u: dev %u cfg 49:%04x 82:%04x 83:%04x " 1332 ata_dev_printk(dev, KERN_DEBUG, "%s: cfg 49:%04x 82:%04x 83:%04x "
1227 "84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n", 1333 "84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n",
1228 ap->id, dev->devno, id[49], id[82], id[83], 1334 __FUNCTION__,
1229 id[84], id[85], id[86], id[87], id[88]); 1335 id[49], id[82], id[83], id[84],
1336 id[85], id[86], id[87], id[88]);
1230 1337
1231 /* initialize to-be-configured parameters */ 1338 /* initialize to-be-configured parameters */
1232 dev->flags = 0; 1339 dev->flags &= ~ATA_DFLAG_CFG_MASK;
1233 dev->max_sectors = 0; 1340 dev->max_sectors = 0;
1234 dev->cdb_len = 0; 1341 dev->cdb_len = 0;
1235 dev->n_sectors = 0; 1342 dev->n_sectors = 0;
@@ -1244,7 +1351,8 @@ static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
1244 /* find max transfer mode; for printk only */ 1351 /* find max transfer mode; for printk only */
1245 xfer_mask = ata_id_xfermask(id); 1352 xfer_mask = ata_id_xfermask(id);
1246 1353
1247 ata_dump_id(id); 1354 if (ata_msg_probe(ap))
1355 ata_dump_id(id);
1248 1356
1249 /* ATA-specific feature tests */ 1357 /* ATA-specific feature tests */
1250 if (dev->class == ATA_DEV_ATA) { 1358 if (dev->class == ATA_DEV_ATA) {
@@ -1252,6 +1360,7 @@ static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
1252 1360
1253 if (ata_id_has_lba(id)) { 1361 if (ata_id_has_lba(id)) {
1254 const char *lba_desc; 1362 const char *lba_desc;
1363 char ncq_desc[20];
1255 1364
1256 lba_desc = "LBA"; 1365 lba_desc = "LBA";
1257 dev->flags |= ATA_DFLAG_LBA; 1366 dev->flags |= ATA_DFLAG_LBA;
@@ -1260,15 +1369,17 @@ static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
1260 lba_desc = "LBA48"; 1369 lba_desc = "LBA48";
1261 } 1370 }
1262 1371
1372 /* config NCQ */
1373 ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
1374
1263 /* print device info to dmesg */ 1375 /* print device info to dmesg */
1264 if (print_info) 1376 if (ata_msg_info(ap))
1265 printk(KERN_INFO "ata%u: dev %u ATA-%d, " 1377 ata_dev_printk(dev, KERN_INFO, "ATA-%d, "
1266 "max %s, %Lu sectors: %s\n", 1378 "max %s, %Lu sectors: %s %s\n",
1267 ap->id, dev->devno, 1379 ata_id_major_version(id),
1268 ata_id_major_version(id), 1380 ata_mode_string(xfer_mask),
1269 ata_mode_string(xfer_mask), 1381 (unsigned long long)dev->n_sectors,
1270 (unsigned long long)dev->n_sectors, 1382 lba_desc, ncq_desc);
1271 lba_desc);
1272 } else { 1383 } else {
1273 /* CHS */ 1384 /* CHS */
1274 1385
@@ -1285,14 +1396,20 @@ static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
1285 } 1396 }
1286 1397
1287 /* print device info to dmesg */ 1398 /* print device info to dmesg */
1288 if (print_info) 1399 if (ata_msg_info(ap))
1289 printk(KERN_INFO "ata%u: dev %u ATA-%d, " 1400 ata_dev_printk(dev, KERN_INFO, "ATA-%d, "
1290 "max %s, %Lu sectors: CHS %u/%u/%u\n", 1401 "max %s, %Lu sectors: CHS %u/%u/%u\n",
1291 ap->id, dev->devno, 1402 ata_id_major_version(id),
1292 ata_id_major_version(id), 1403 ata_mode_string(xfer_mask),
1293 ata_mode_string(xfer_mask), 1404 (unsigned long long)dev->n_sectors,
1294 (unsigned long long)dev->n_sectors, 1405 dev->cylinders, dev->heads, dev->sectors);
1295 dev->cylinders, dev->heads, dev->sectors); 1406 }
1407
1408 if (dev->id[59] & 0x100) {
1409 dev->multi_count = dev->id[59] & 0xff;
1410 if (ata_msg_info(ap))
1411 ata_dev_printk(dev, KERN_INFO, "ata%u: dev %u multi count %u\n",
1412 ap->id, dev->devno, dev->multi_count);
1296 } 1413 }
1297 1414
1298 dev->cdb_len = 16; 1415 dev->cdb_len = 16;
@@ -1300,18 +1417,28 @@ static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
1300 1417
1301 /* ATAPI-specific feature tests */ 1418 /* ATAPI-specific feature tests */
1302 else if (dev->class == ATA_DEV_ATAPI) { 1419 else if (dev->class == ATA_DEV_ATAPI) {
1420 char *cdb_intr_string = "";
1421
1303 rc = atapi_cdb_len(id); 1422 rc = atapi_cdb_len(id);
1304 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) { 1423 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
1305 printk(KERN_WARNING "ata%u: unsupported CDB len\n", ap->id); 1424 if (ata_msg_warn(ap))
1425 ata_dev_printk(dev, KERN_WARNING,
1426 "unsupported CDB len\n");
1306 rc = -EINVAL; 1427 rc = -EINVAL;
1307 goto err_out_nosup; 1428 goto err_out_nosup;
1308 } 1429 }
1309 dev->cdb_len = (unsigned int) rc; 1430 dev->cdb_len = (unsigned int) rc;
1310 1431
1432 if (ata_id_cdb_intr(dev->id)) {
1433 dev->flags |= ATA_DFLAG_CDB_INTR;
1434 cdb_intr_string = ", CDB intr";
1435 }
1436
1311 /* print device info to dmesg */ 1437 /* print device info to dmesg */
1312 if (print_info) 1438 if (ata_msg_info(ap))
1313 printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n", 1439 ata_dev_printk(dev, KERN_INFO, "ATAPI, max %s%s\n",
1314 ap->id, dev->devno, ata_mode_string(xfer_mask)); 1440 ata_mode_string(xfer_mask),
1441 cdb_intr_string);
1315 } 1442 }
1316 1443
1317 ap->host->max_cmd_len = 0; 1444 ap->host->max_cmd_len = 0;
@@ -1321,10 +1448,10 @@ static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
1321 ap->device[i].cdb_len); 1448 ap->device[i].cdb_len);
1322 1449
1323 /* limit bridge transfers to udma5, 200 sectors */ 1450 /* limit bridge transfers to udma5, 200 sectors */
1324 if (ata_dev_knobble(ap, dev)) { 1451 if (ata_dev_knobble(dev)) {
1325 if (print_info) 1452 if (ata_msg_info(ap))
1326 printk(KERN_INFO "ata%u(%u): applying bridge limits\n", 1453 ata_dev_printk(dev, KERN_INFO,
1327 ap->id, dev->devno); 1454 "applying bridge limits\n");
1328 dev->udma_mask &= ATA_UDMA5; 1455 dev->udma_mask &= ATA_UDMA5;
1329 dev->max_sectors = ATA_MAX_SECTORS; 1456 dev->max_sectors = ATA_MAX_SECTORS;
1330 } 1457 }
@@ -1332,11 +1459,15 @@ static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
1332 if (ap->ops->dev_config) 1459 if (ap->ops->dev_config)
1333 ap->ops->dev_config(ap, dev); 1460 ap->ops->dev_config(ap, dev);
1334 1461
1335 DPRINTK("EXIT, drv_stat = 0x%x\n", ata_chk_status(ap)); 1462 if (ata_msg_probe(ap))
1463 ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
1464 __FUNCTION__, ata_chk_status(ap));
1336 return 0; 1465 return 0;
1337 1466
1338err_out_nosup: 1467err_out_nosup:
1339 DPRINTK("EXIT, err\n"); 1468 if (ata_msg_probe(ap))
1469 ata_dev_printk(dev, KERN_DEBUG,
1470 "%s: EXIT, err\n", __FUNCTION__);
1340 return rc; 1471 return rc;
1341} 1472}
1342 1473
@@ -1352,79 +1483,104 @@ err_out_nosup:
1352 * PCI/etc. bus probe sem. 1483 * PCI/etc. bus probe sem.
1353 * 1484 *
1354 * RETURNS: 1485 * RETURNS:
1355 * Zero on success, non-zero on error. 1486 * Zero on success, negative errno otherwise.
1356 */ 1487 */
1357 1488
1358static int ata_bus_probe(struct ata_port *ap) 1489static int ata_bus_probe(struct ata_port *ap)
1359{ 1490{
1360 unsigned int classes[ATA_MAX_DEVICES]; 1491 unsigned int classes[ATA_MAX_DEVICES];
1361 unsigned int i, rc, found = 0; 1492 int tries[ATA_MAX_DEVICES];
1493 int i, rc, down_xfermask;
1494 struct ata_device *dev;
1362 1495
1363 ata_port_probe(ap); 1496 ata_port_probe(ap);
1364 1497
1365 /* reset and determine device classes */
1366 for (i = 0; i < ATA_MAX_DEVICES; i++) 1498 for (i = 0; i < ATA_MAX_DEVICES; i++)
1367 classes[i] = ATA_DEV_UNKNOWN; 1499 tries[i] = ATA_PROBE_MAX_TRIES;
1368 1500
1369 if (ap->ops->probe_reset) { 1501 retry:
1370 rc = ap->ops->probe_reset(ap, classes); 1502 down_xfermask = 0;
1371 if (rc) {
1372 printk("ata%u: reset failed (errno=%d)\n", ap->id, rc);
1373 return rc;
1374 }
1375 } else {
1376 ap->ops->phy_reset(ap);
1377 1503
1378 if (!(ap->flags & ATA_FLAG_PORT_DISABLED)) 1504 /* reset and determine device classes */
1379 for (i = 0; i < ATA_MAX_DEVICES; i++) 1505 ap->ops->phy_reset(ap);
1380 classes[i] = ap->device[i].class;
1381 1506
1382 ata_port_probe(ap); 1507 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1508 dev = &ap->device[i];
1509
1510 if (!(ap->flags & ATA_FLAG_DISABLED) &&
1511 dev->class != ATA_DEV_UNKNOWN)
1512 classes[dev->devno] = dev->class;
1513 else
1514 classes[dev->devno] = ATA_DEV_NONE;
1515
1516 dev->class = ATA_DEV_UNKNOWN;
1383 } 1517 }
1384 1518
1519 ata_port_probe(ap);
1520
1521 /* after the reset the device state is PIO 0 and the controller
1522 state is undefined. Record the mode */
1523
1385 for (i = 0; i < ATA_MAX_DEVICES; i++) 1524 for (i = 0; i < ATA_MAX_DEVICES; i++)
1386 if (classes[i] == ATA_DEV_UNKNOWN) 1525 ap->device[i].pio_mode = XFER_PIO_0;
1387 classes[i] = ATA_DEV_NONE;
1388 1526
1389 /* read IDENTIFY page and configure devices */ 1527 /* read IDENTIFY page and configure devices */
1390 for (i = 0; i < ATA_MAX_DEVICES; i++) { 1528 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1391 struct ata_device *dev = &ap->device[i]; 1529 dev = &ap->device[i];
1392 1530
1393 dev->class = classes[i]; 1531 if (tries[i])
1532 dev->class = classes[i];
1394 1533
1395 if (!ata_dev_present(dev)) 1534 if (!ata_dev_enabled(dev))
1396 continue; 1535 continue;
1397 1536
1398 WARN_ON(dev->id != NULL); 1537 rc = ata_dev_read_id(dev, &dev->class, 1, dev->id);
1399 if (ata_dev_read_id(ap, dev, &dev->class, 1, &dev->id)) { 1538 if (rc)
1400 dev->class = ATA_DEV_NONE; 1539 goto fail;
1401 continue;
1402 }
1403 1540
1404 if (ata_dev_configure(ap, dev, 1)) { 1541 rc = ata_dev_configure(dev, 1);
1405 ata_dev_disable(ap, dev); 1542 if (rc)
1406 continue; 1543 goto fail;
1407 } 1544 }
1408 1545
1409 found = 1; 1546 /* configure transfer mode */
1547 rc = ata_set_mode(ap, &dev);
1548 if (rc) {
1549 down_xfermask = 1;
1550 goto fail;
1410 } 1551 }
1411 1552
1412 if (!found) 1553 for (i = 0; i < ATA_MAX_DEVICES; i++)
1413 goto err_out_disable; 1554 if (ata_dev_enabled(&ap->device[i]))
1555 return 0;
1414 1556
1415 if (ap->ops->set_mode) 1557 /* no device present, disable port */
1416 ap->ops->set_mode(ap); 1558 ata_port_disable(ap);
1417 else 1559 ap->ops->port_disable(ap);
1418 ata_set_mode(ap); 1560 return -ENODEV;
1419 1561
1420 if (ap->flags & ATA_FLAG_PORT_DISABLED) 1562 fail:
1421 goto err_out_disable; 1563 switch (rc) {
1564 case -EINVAL:
1565 case -ENODEV:
1566 tries[dev->devno] = 0;
1567 break;
1568 case -EIO:
1569 sata_down_spd_limit(ap);
1570 /* fall through */
1571 default:
1572 tries[dev->devno]--;
1573 if (down_xfermask &&
1574 ata_down_xfermask_limit(dev, tries[dev->devno] == 1))
1575 tries[dev->devno] = 0;
1576 }
1422 1577
1423 return 0; 1578 if (!tries[dev->devno]) {
1579 ata_down_xfermask_limit(dev, 1);
1580 ata_dev_disable(dev);
1581 }
1424 1582
1425err_out_disable: 1583 goto retry;
1426 ap->ops->port_disable(ap);
1427 return -1;
1428} 1584}
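
The retry ladder in the fail path above works as follows: -EINVAL and -ENODEV
give up on the device immediately; -EIO first lowers the SATA link speed and
then falls through to the generic accounting, which burns one of the
ATA_PROBE_MAX_TRIES attempts and, when the failure came from setting the
transfer mode, also steps the transfer mask down, forcing PIO0 once only one
try remains; a device that runs out of tries is limited to PIO0 and disabled.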
1429 1585
1430/** 1586/**
@@ -1440,7 +1596,7 @@ err_out_disable:
1440 1596
1441void ata_port_probe(struct ata_port *ap) 1597void ata_port_probe(struct ata_port *ap)
1442{ 1598{
1443 ap->flags &= ~ATA_FLAG_PORT_DISABLED; 1599 ap->flags &= ~ATA_FLAG_DISABLED;
1444} 1600}
1445 1601
1446/** 1602/**
@@ -1454,27 +1610,21 @@ void ata_port_probe(struct ata_port *ap)
1454 */ 1610 */
1455static void sata_print_link_status(struct ata_port *ap) 1611static void sata_print_link_status(struct ata_port *ap)
1456{ 1612{
1457 u32 sstatus, tmp; 1613 u32 sstatus, scontrol, tmp;
1458 const char *speed;
1459 1614
1460 if (!ap->ops->scr_read) 1615 if (sata_scr_read(ap, SCR_STATUS, &sstatus))
1461 return; 1616 return;
1617 sata_scr_read(ap, SCR_CONTROL, &scontrol);
1462 1618
1463 sstatus = scr_read(ap, SCR_STATUS); 1619 if (ata_port_online(ap)) {
1464
1465 if (sata_dev_present(ap)) {
1466 tmp = (sstatus >> 4) & 0xf; 1620 tmp = (sstatus >> 4) & 0xf;
1467 if (tmp & (1 << 0)) 1621 ata_port_printk(ap, KERN_INFO,
1468 speed = "1.5"; 1622 "SATA link up %s (SStatus %X SControl %X)\n",
1469 else if (tmp & (1 << 1)) 1623 sata_spd_string(tmp), sstatus, scontrol);
1470 speed = "3.0";
1471 else
1472 speed = "<unknown>";
1473 printk(KERN_INFO "ata%u: SATA link up %s Gbps (SStatus %X)\n",
1474 ap->id, speed, sstatus);
1475 } else { 1624 } else {
1476 printk(KERN_INFO "ata%u: SATA link down (SStatus %X)\n", 1625 ata_port_printk(ap, KERN_INFO,
1477 ap->id, sstatus); 1626 "SATA link down (SStatus %X SControl %X)\n",
1627 sstatus, scontrol);
1478 } 1628 }
1479} 1629}
1480 1630
@@ -1497,17 +1647,18 @@ void __sata_phy_reset(struct ata_port *ap)
1497 1647
1498 if (ap->flags & ATA_FLAG_SATA_RESET) { 1648 if (ap->flags & ATA_FLAG_SATA_RESET) {
1499 /* issue phy wake/reset */ 1649 /* issue phy wake/reset */
1500 scr_write_flush(ap, SCR_CONTROL, 0x301); 1650 sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
1501 /* Couldn't find anything in SATA I/II specs, but 1651 /* Couldn't find anything in SATA I/II specs, but
1502 * AHCI-1.1 10.4.2 says at least 1 ms. */ 1652 * AHCI-1.1 10.4.2 says at least 1 ms. */
1503 mdelay(1); 1653 mdelay(1);
1504 } 1654 }
1505 scr_write_flush(ap, SCR_CONTROL, 0x300); /* phy wake/clear reset */ 1655 /* phy wake/clear reset */
1656 sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
1506 1657
1507 /* wait for phy to become ready, if necessary */ 1658 /* wait for phy to become ready, if necessary */
1508 do { 1659 do {
1509 msleep(200); 1660 msleep(200);
1510 sstatus = scr_read(ap, SCR_STATUS); 1661 sata_scr_read(ap, SCR_STATUS, &sstatus);
1511 if ((sstatus & 0xf) != 1) 1662 if ((sstatus & 0xf) != 1)
1512 break; 1663 break;
1513 } while (time_before(jiffies, timeout)); 1664 } while (time_before(jiffies, timeout));
@@ -1516,12 +1667,12 @@ void __sata_phy_reset(struct ata_port *ap)
1516 sata_print_link_status(ap); 1667 sata_print_link_status(ap);
1517 1668
1518 /* TODO: phy layer with polling, timeouts, etc. */ 1669 /* TODO: phy layer with polling, timeouts, etc. */
1519 if (sata_dev_present(ap)) 1670 if (!ata_port_offline(ap))
1520 ata_port_probe(ap); 1671 ata_port_probe(ap);
1521 else 1672 else
1522 ata_port_disable(ap); 1673 ata_port_disable(ap);
1523 1674
1524 if (ap->flags & ATA_FLAG_PORT_DISABLED) 1675 if (ap->flags & ATA_FLAG_DISABLED)
1525 return; 1676 return;
1526 1677
1527 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) { 1678 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
@@ -1546,24 +1697,24 @@ void __sata_phy_reset(struct ata_port *ap)
1546void sata_phy_reset(struct ata_port *ap) 1697void sata_phy_reset(struct ata_port *ap)
1547{ 1698{
1548 __sata_phy_reset(ap); 1699 __sata_phy_reset(ap);
1549 if (ap->flags & ATA_FLAG_PORT_DISABLED) 1700 if (ap->flags & ATA_FLAG_DISABLED)
1550 return; 1701 return;
1551 ata_bus_reset(ap); 1702 ata_bus_reset(ap);
1552} 1703}
1553 1704
1554/** 1705/**
1555 * ata_dev_pair - return other device on cable 1706 * ata_dev_pair - return other device on cable
1556 * @ap: port
1557 * @adev: device 1707 * @adev: device
1558 * 1708 *
1559 * Obtain the other device on the same cable, or if none is 1709 * Obtain the other device on the same cable, or if none is
1560 * present NULL is returned 1710 * present NULL is returned
1561 */ 1711 */
1562 1712
1563struct ata_device *ata_dev_pair(struct ata_port *ap, struct ata_device *adev) 1713struct ata_device *ata_dev_pair(struct ata_device *adev)
1564{ 1714{
1715 struct ata_port *ap = adev->ap;
1565 struct ata_device *pair = &ap->device[1 - adev->devno]; 1716 struct ata_device *pair = &ap->device[1 - adev->devno];
1566 if (!ata_dev_present(pair)) 1717 if (!ata_dev_enabled(pair))
1567 return NULL; 1718 return NULL;
1568 return pair; 1719 return pair;
1569} 1720}
@@ -1585,7 +1736,122 @@ void ata_port_disable(struct ata_port *ap)
1585{ 1736{
1586 ap->device[0].class = ATA_DEV_NONE; 1737 ap->device[0].class = ATA_DEV_NONE;
1587 ap->device[1].class = ATA_DEV_NONE; 1738 ap->device[1].class = ATA_DEV_NONE;
1588 ap->flags |= ATA_FLAG_PORT_DISABLED; 1739 ap->flags |= ATA_FLAG_DISABLED;
1740}
1741
1742/**
1743 * sata_down_spd_limit - adjust SATA spd limit downward
1744 * @ap: Port to adjust SATA spd limit for
1745 *
1746 * Adjust SATA spd limit of @ap downward. Note that this
1747 * function only adjusts the limit. The change must be applied
1748 * using sata_set_spd().
1749 *
1750 * LOCKING:
1751 * Inherited from caller.
1752 *
1753 * RETURNS:
1754 * 0 on success, negative errno on failure
1755 */
1756int sata_down_spd_limit(struct ata_port *ap)
1757{
1758 u32 sstatus, spd, mask;
1759 int rc, highbit;
1760
1761 rc = sata_scr_read(ap, SCR_STATUS, &sstatus);
1762 if (rc)
1763 return rc;
1764
1765 mask = ap->sata_spd_limit;
1766 if (mask <= 1)
1767 return -EINVAL;
1768 highbit = fls(mask) - 1;
1769 mask &= ~(1 << highbit);
1770
1771 spd = (sstatus >> 4) & 0xf;
1772 if (spd <= 1)
1773 return -EINVAL;
1774 spd--;
1775 mask &= (1 << spd) - 1;
1776 if (!mask)
1777 return -EINVAL;
1778
1779 ap->sata_spd_limit = mask;
1780
1781 ata_port_printk(ap, KERN_WARNING, "limiting SATA link speed to %s\n",
1782 sata_spd_string(fls(mask)));
1783
1784 return 0;
1785}
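
As the comment notes, this helper only lowers the recorded limit; a hedged
sketch of the intended pairing with sata_set_spd(), defined just below (the
hardreset reaction paraphrases how EH consumes these helpers, it is not
quoted from the commit):

        if (sata_down_spd_limit(ap) == 0 && sata_set_spd(ap) == 1) {
                /* SControl was changed; hardreset the port so the
                 * link renegotiates at the lowered speed */
        }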
1786
1787static int __sata_set_spd_needed(struct ata_port *ap, u32 *scontrol)
1788{
1789 u32 spd, limit;
1790
1791 if (ap->sata_spd_limit == UINT_MAX)
1792 limit = 0;
1793 else
1794 limit = fls(ap->sata_spd_limit);
1795
1796 spd = (*scontrol >> 4) & 0xf;
1797 *scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);
1798
1799 return spd != limit;
1800}
1801
1802/**
1803 * sata_set_spd_needed - is SATA spd configuration needed
1804 * @ap: Port in question
1805 *
1806 * Test whether the spd limit in SControl matches
1807 * @ap->sata_spd_limit. This function is used to determine
1808 * whether hardreset is necessary to apply SATA spd
1809 * configuration.
1810 *
1811 * LOCKING:
1812 * Inherited from caller.
1813 *
1814 * RETURNS:
1815 * 1 if SATA spd configuration is needed, 0 otherwise.
1816 */
1817int sata_set_spd_needed(struct ata_port *ap)
1818{
1819 u32 scontrol;
1820
1821 if (sata_scr_read(ap, SCR_CONTROL, &scontrol))
1822 return 0;
1823
1824 return __sata_set_spd_needed(ap, &scontrol);
1825}
1826
1827/**
1828 * sata_set_spd - set SATA spd according to spd limit
1829 * @ap: Port to set SATA spd for
1830 *
1831 * Set SATA spd of @ap according to sata_spd_limit.
1832 *
1833 * LOCKING:
1834 * Inherited from caller.
1835 *
1836 * RETURNS:
1837 * 0 if spd doesn't need to be changed, 1 if spd has been
1838 * changed. Negative errno if SCR registers are inaccessible.
1839 */
1840int sata_set_spd(struct ata_port *ap)
1841{
1842 u32 scontrol;
1843 int rc;
1844
1845 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
1846 return rc;
1847
1848 if (!__sata_set_spd_needed(ap, &scontrol))
1849 return 0;
1850
1851 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
1852 return rc;
1853
1854 return 1;
1589} 1855}
1590 1856
1591/* 1857/*
@@ -1736,151 +2002,196 @@ int ata_timing_compute(struct ata_device *adev, unsigned short speed,
1736 return 0; 2002 return 0;
1737} 2003}
1738 2004
1739static int ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev) 2005/**
2006 * ata_down_xfermask_limit - adjust dev xfer masks downward
2007 * @dev: Device to adjust xfer masks
2008 * @force_pio0: Force PIO0
2009 *
2010 * Adjust xfer masks of @dev downward. Note that this function
2011 * does not apply the change. Invoking ata_set_mode() afterwards
2012 * will apply the limit.
2013 *
2014 * LOCKING:
2015 * Inherited from caller.
2016 *
2017 * RETURNS:
2018 * 0 on success, negative errno on failure
2019 */
2020int ata_down_xfermask_limit(struct ata_device *dev, int force_pio0)
2021{
2022 unsigned long xfer_mask;
2023 int highbit;
2024
2025 xfer_mask = ata_pack_xfermask(dev->pio_mask, dev->mwdma_mask,
2026 dev->udma_mask);
2027
2028 if (!xfer_mask)
2029 goto fail;
2030 /* don't gear down to MWDMA from UDMA, go directly to PIO */
2031 if (xfer_mask & ATA_MASK_UDMA)
2032 xfer_mask &= ~ATA_MASK_MWDMA;
2033
2034 highbit = fls(xfer_mask) - 1;
2035 xfer_mask &= ~(1 << highbit);
2036 if (force_pio0)
2037 xfer_mask &= 1 << ATA_SHIFT_PIO;
2038 if (!xfer_mask)
2039 goto fail;
2040
2041 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
2042 &dev->udma_mask);
2043
2044 ata_dev_printk(dev, KERN_WARNING, "limiting speed to %s\n",
2045 ata_mode_string(xfer_mask));
2046
2047 return 0;
2048
2049 fail:
2050 return -EINVAL;
2051}
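
A worked example of the gearing above: a device currently allowed up to
UDMA/100 first has its MWDMA bits cleared, then loses its top UDMA bit, so the
next ata_set_mode() tops out at UDMA/66 instead of detouring through MWDMA;
with @force_pio0 everything except the PIO0 bit is cleared in one step.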
2052
2053static int ata_dev_set_mode(struct ata_device *dev)
1740{ 2054{
1741 unsigned int err_mask; 2055 unsigned int err_mask;
1742 int rc; 2056 int rc;
1743 2057
2058 dev->flags &= ~ATA_DFLAG_PIO;
1744 if (dev->xfer_shift == ATA_SHIFT_PIO) 2059 if (dev->xfer_shift == ATA_SHIFT_PIO)
1745 dev->flags |= ATA_DFLAG_PIO; 2060 dev->flags |= ATA_DFLAG_PIO;
1746 2061
1747 err_mask = ata_dev_set_xfermode(ap, dev); 2062 err_mask = ata_dev_set_xfermode(dev);
1748 if (err_mask) { 2063 if (err_mask) {
1749 printk(KERN_ERR 2064 ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
1750 "ata%u: failed to set xfermode (err_mask=0x%x)\n", 2065 "(err_mask=0x%x)\n", err_mask);
1751 ap->id, err_mask);
1752 return -EIO; 2066 return -EIO;
1753 } 2067 }
1754 2068
1755 rc = ata_dev_revalidate(ap, dev, 0); 2069 rc = ata_dev_revalidate(dev, 0);
1756 if (rc) { 2070 if (rc)
1757 printk(KERN_ERR
1758 "ata%u: failed to revalidate after set xfermode\n",
1759 ap->id);
1760 return rc; 2071 return rc;
1761 }
1762 2072
1763 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n", 2073 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
1764 dev->xfer_shift, (int)dev->xfer_mode); 2074 dev->xfer_shift, (int)dev->xfer_mode);
1765 2075
1766 printk(KERN_INFO "ata%u: dev %u configured for %s\n", 2076 ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
1767 ap->id, dev->devno, 2077 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
1768 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
1769 return 0;
1770}
1771
1772static int ata_host_set_pio(struct ata_port *ap)
1773{
1774 int i;
1775
1776 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1777 struct ata_device *dev = &ap->device[i];
1778
1779 if (!ata_dev_present(dev))
1780 continue;
1781
1782 if (!dev->pio_mode) {
1783 printk(KERN_WARNING "ata%u: no PIO support for device %d.\n", ap->id, i);
1784 return -1;
1785 }
1786
1787 dev->xfer_mode = dev->pio_mode;
1788 dev->xfer_shift = ATA_SHIFT_PIO;
1789 if (ap->ops->set_piomode)
1790 ap->ops->set_piomode(ap, dev);
1791 }
1792
1793 return 0; 2078 return 0;
1794} 2079}
1795 2080
1796static void ata_host_set_dma(struct ata_port *ap)
1797{
1798 int i;
1799
1800 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1801 struct ata_device *dev = &ap->device[i];
1802
1803 if (!ata_dev_present(dev) || !dev->dma_mode)
1804 continue;
1805
1806 dev->xfer_mode = dev->dma_mode;
1807 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
1808 if (ap->ops->set_dmamode)
1809 ap->ops->set_dmamode(ap, dev);
1810 }
1811}
1812
1813/** 2081/**
1814 * ata_set_mode - Program timings and issue SET FEATURES - XFER 2082 * ata_set_mode - Program timings and issue SET FEATURES - XFER
1815 * @ap: port on which timings will be programmed 2083 * @ap: port on which timings will be programmed
2084 * @r_failed_dev: out parameter for failed device
1816 * 2085 *
1817 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). 2086 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
2087 * ata_set_mode() fails, pointer to the failing device is
2088 * returned in @r_failed_dev.
1818 * 2089 *
1819 * LOCKING: 2090 * LOCKING:
1820 * PCI/etc. bus probe sem. 2091 * PCI/etc. bus probe sem.
2092 *
2093 * RETURNS:
2094 * 0 on success, negative errno otherwise
1821 */ 2095 */
1822static void ata_set_mode(struct ata_port *ap) 2096int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
1823{ 2097{
1824 int i, rc, used_dma = 0; 2098 struct ata_device *dev;
2099 int i, rc = 0, used_dma = 0, found = 0;
2100
2101 /* has private set_mode? */
2102 if (ap->ops->set_mode) {
2103 /* FIXME: make ->set_mode handle no device case and
2104 * return error code and failing device on failure.
2105 */
2106 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2107 if (ata_dev_enabled(&ap->device[i])) {
2108 ap->ops->set_mode(ap);
2109 break;
2110 }
2111 }
2112 return 0;
2113 }
1825 2114
1826 /* step 1: calculate xfer_mask */ 2115 /* step 1: calculate xfer_mask */
1827 for (i = 0; i < ATA_MAX_DEVICES; i++) { 2116 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1828 struct ata_device *dev = &ap->device[i];
1829 unsigned int pio_mask, dma_mask; 2117 unsigned int pio_mask, dma_mask;
1830 2118
1831 if (!ata_dev_present(dev)) 2119 dev = &ap->device[i];
1832 continue;
1833 2120
1834 ata_dev_xfermask(ap, dev); 2121 if (!ata_dev_enabled(dev))
2122 continue;
1835 2123
1836 /* TODO: let LLDD filter dev->*_mask here */ 2124 ata_dev_xfermask(dev);
1837 2125
1838 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0); 2126 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
1839 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask); 2127 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
1840 dev->pio_mode = ata_xfer_mask2mode(pio_mask); 2128 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
1841 dev->dma_mode = ata_xfer_mask2mode(dma_mask); 2129 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
1842 2130
2131 found = 1;
1843 if (dev->dma_mode) 2132 if (dev->dma_mode)
1844 used_dma = 1; 2133 used_dma = 1;
1845 } 2134 }
2135 if (!found)
2136 goto out;
1846 2137
1847 /* step 2: always set host PIO timings */ 2138 /* step 2: always set host PIO timings */
1848 rc = ata_host_set_pio(ap); 2139 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1849 if (rc) 2140 dev = &ap->device[i];
1850 goto err_out; 2141 if (!ata_dev_enabled(dev))
2142 continue;
2143
2144 if (!dev->pio_mode) {
2145 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
2146 rc = -EINVAL;
2147 goto out;
2148 }
2149
2150 dev->xfer_mode = dev->pio_mode;
2151 dev->xfer_shift = ATA_SHIFT_PIO;
2152 if (ap->ops->set_piomode)
2153 ap->ops->set_piomode(ap, dev);
2154 }
1851 2155
1852 /* step 3: set host DMA timings */ 2156 /* step 3: set host DMA timings */
1853 ata_host_set_dma(ap); 2157 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2158 dev = &ap->device[i];
2159
2160 if (!ata_dev_enabled(dev) || !dev->dma_mode)
2161 continue;
2162
2163 dev->xfer_mode = dev->dma_mode;
2164 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
2165 if (ap->ops->set_dmamode)
2166 ap->ops->set_dmamode(ap, dev);
2167 }
1854 2168
1855 /* step 4: update devices' xfer mode */ 2169 /* step 4: update devices' xfer mode */
1856 for (i = 0; i < ATA_MAX_DEVICES; i++) { 2170 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1857 struct ata_device *dev = &ap->device[i]; 2171 dev = &ap->device[i];
1858 2172
1859 if (!ata_dev_present(dev)) 2173 if (!ata_dev_enabled(dev))
1860 continue; 2174 continue;
1861 2175
1862 if (ata_dev_set_mode(ap, dev)) 2176 rc = ata_dev_set_mode(dev);
1863 goto err_out; 2177 if (rc)
2178 goto out;
1864 } 2179 }
1865 2180
1866 /* 2181 /* Record simplex status. If we selected DMA then the other
1867 * Record simplex status. If we selected DMA then the other 2182 * host channels are not permitted to do so.
1868 * host channels are not permitted to do so.
1869 */ 2183 */
1870
1871 if (used_dma && (ap->host_set->flags & ATA_HOST_SIMPLEX)) 2184 if (used_dma && (ap->host_set->flags & ATA_HOST_SIMPLEX))
1872 ap->host_set->simplex_claimed = 1; 2185 ap->host_set->simplex_claimed = 1;
1873 2186
1874 /* 2187 /* step 5: chip specific finalisation */
1875 * Chip specific finalisation
1876 */
1877 if (ap->ops->post_set_mode) 2188 if (ap->ops->post_set_mode)
1878 ap->ops->post_set_mode(ap); 2189 ap->ops->post_set_mode(ap);
1879 2190
1880 return; 2191 out:
1881 2192 if (rc)
1882err_out: 2193 *r_failed_dev = dev;
1883 ata_port_disable(ap); 2194 return rc;
1884} 2195}
1885 2196
1886/** 2197/**
@@ -1930,8 +2241,8 @@ unsigned int ata_busy_sleep (struct ata_port *ap,
1930 } 2241 }
1931 2242
1932 if (status & ATA_BUSY) 2243 if (status & ATA_BUSY)
1933 printk(KERN_WARNING "ata%u is slow to respond, " 2244 ata_port_printk(ap, KERN_WARNING,
1934 "please be patient\n", ap->id); 2245 "port is slow to respond, please be patient\n");
1935 2246
1936 timeout = timer_start + tmout; 2247 timeout = timer_start + tmout;
1937 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) { 2248 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
@@ -1940,8 +2251,8 @@ unsigned int ata_busy_sleep (struct ata_port *ap,
1940 } 2251 }
1941 2252
1942 if (status & ATA_BUSY) { 2253 if (status & ATA_BUSY) {
1943 printk(KERN_ERR "ata%u failed to respond (%lu secs)\n", 2254 ata_port_printk(ap, KERN_ERR, "port failed to respond "
1944 ap->id, tmout / HZ); 2255 "(%lu secs)\n", tmout / HZ);
1945 return 1; 2256 return 1;
1946 } 2257 }
1947 2258
@@ -2033,8 +2344,10 @@ static unsigned int ata_bus_softreset(struct ata_port *ap,
2033 * the bus shows 0xFF because the odd clown forgets the D7 2344 * the bus shows 0xFF because the odd clown forgets the D7
2034 * pulldown resistor. 2345 * pulldown resistor.
2035 */ 2346 */
2036 if (ata_check_status(ap) == 0xFF) 2347 if (ata_check_status(ap) == 0xFF) {
2348 ata_port_printk(ap, KERN_ERR, "SRST failed (status 0xFF)\n");
2037 return AC_ERR_OTHER; 2349 return AC_ERR_OTHER;
2350 }
2038 2351
2039 ata_bus_post_reset(ap, devmask); 2352 ata_bus_post_reset(ap, devmask);
2040 2353
@@ -2058,7 +2371,7 @@ static unsigned int ata_bus_softreset(struct ata_port *ap,
2058 * Obtains host_set lock. 2371 * Obtains host_set lock.
2059 * 2372 *
2060 * SIDE EFFECTS: 2373 * SIDE EFFECTS:
2061 * Sets ATA_FLAG_PORT_DISABLED if bus reset fails. 2374 * Sets ATA_FLAG_DISABLED if bus reset fails.
2062 */ 2375 */
2063 2376
2064void ata_bus_reset(struct ata_port *ap) 2377void ata_bus_reset(struct ata_port *ap)
@@ -2126,60 +2439,195 @@ void ata_bus_reset(struct ata_port *ap)
2126 return; 2439 return;
2127 2440
2128err_out: 2441err_out:
2129 printk(KERN_ERR "ata%u: disabling port\n", ap->id); 2442 ata_port_printk(ap, KERN_ERR, "disabling port\n");
2130 ap->ops->port_disable(ap); 2443 ap->ops->port_disable(ap);
2131 2444
2132 DPRINTK("EXIT\n"); 2445 DPRINTK("EXIT\n");
2133} 2446}
2134 2447
2135static int sata_phy_resume(struct ata_port *ap) 2448/**
2449 * sata_phy_debounce - debounce SATA phy status
2450 * @ap: ATA port to debounce SATA phy status for
2451 * @params: timing parameters { interval, duration, timeout } in msec
2452 *
2453 * Make sure SStatus of @ap reaches a stable state, determined by
2454 * holding the same value where DET is not 1 for @duration polled
2455 * every @interval, before @timeout. Timeout constrains the
2456 * beginning of the stable state. Because, after hot unplugging,
2457 * DET gets stuck at 1 on some controllers, this function waits
2458 * until the timeout and then returns 0 if DET is stable at 1.
2459 *
2460 * LOCKING:
2461 * Kernel thread context (may sleep)
2462 *
2463 * RETURNS:
2464 * 0 on success, -errno on failure.
2465 */
2466int sata_phy_debounce(struct ata_port *ap, const unsigned long *params)
2136{ 2467{
2137 unsigned long timeout = jiffies + (HZ * 5); 2468 unsigned long interval_msec = params[0];
2138 u32 sstatus; 2469 unsigned long duration = params[1] * HZ / 1000;
2470 unsigned long timeout = jiffies + params[2] * HZ / 1000;
2471 unsigned long last_jiffies;
2472 u32 last, cur;
2473 int rc;
2139 2474
2140 scr_write_flush(ap, SCR_CONTROL, 0x300); 2475 if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
2476 return rc;
2477 cur &= 0xf;
2141 2478
2142 /* Wait for phy to become ready, if necessary. */ 2479 last = cur;
2143 do { 2480 last_jiffies = jiffies;
2144 msleep(200);
2145 sstatus = scr_read(ap, SCR_STATUS);
2146 if ((sstatus & 0xf) != 1)
2147 return 0;
2148 } while (time_before(jiffies, timeout));
2149 2481
2150 return -1; 2482 while (1) {
2483 msleep(interval_msec);
2484 if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
2485 return rc;
2486 cur &= 0xf;
2487
2488 /* DET stable? */
2489 if (cur == last) {
2490 if (cur == 1 && time_before(jiffies, timeout))
2491 continue;
2492 if (time_after(jiffies, last_jiffies + duration))
2493 return 0;
2494 continue;
2495 }
2496
2497 /* unstable, start over */
2498 last = cur;
2499 last_jiffies = jiffies;
2500
2501 /* check timeout */
2502 if (time_after(jiffies, timeout))
2503 return -EBUSY;
2504 }
2151} 2505}
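For reference, a caller passes the { interval, duration, timeout } triple as a
three-element array; a minimal sketch follows (the helper name and timing
values are illustrative assumptions, not this tree's sata_deb_timing_* tables):

	/* poll SStatus every 5 ms, require 100 ms of stability,
	 * give up after 2000 ms -- assumed values
	 */
	static const unsigned long my_deb_timing[] = { 5, 100, 2000 };

	static int my_wait_for_stable_link(struct ata_port *ap)
	{
		return sata_phy_debounce(ap, my_deb_timing);
	}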
2152 2506
2153/** 2507/**
2154 * ata_std_probeinit - initialize probing 2508 * sata_phy_resume - resume SATA phy
2155 * @ap: port to be probed 2509 * @ap: ATA port to resume SATA phy for
 2510 * @params: timing parameters { interval, duration, timeout } in msec
2511 *
2512 * Resume SATA phy of @ap and debounce it.
2156 * 2513 *
2157 * @ap is about to be probed. Initialize it. This function is 2514 * LOCKING:
2158 * to be used as standard callback for ata_drive_probe_reset(). 2515 * Kernel thread context (may sleep)
2159 * 2516 *
2160 * NOTE!!! Do not use this function as probeinit if a low level 2517 * RETURNS:
2161 * driver implements only hardreset. Just pass NULL as probeinit 2518 * 0 on success, -errno on failure.
2162 * in that case. Using this function is probably okay but doing
2163 * so makes reset sequence different from the original
2164 * ->phy_reset implementation and Jeff nervous. :-P
2165 */ 2519 */
2166void ata_std_probeinit(struct ata_port *ap) 2520int sata_phy_resume(struct ata_port *ap, const unsigned long *params)
2167{ 2521{
2168 if ((ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read) { 2522 u32 scontrol;
2169 sata_phy_resume(ap); 2523 int rc;
2170 if (sata_dev_present(ap)) 2524
2171 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT); 2525 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2526 return rc;
2527
2528 scontrol = (scontrol & 0x0f0) | 0x300;
2529
2530 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2531 return rc;
2532
2533 /* Some PHYs react badly if SStatus is pounded immediately
2534 * after resuming. Delay 200ms before debouncing.
2535 */
2536 msleep(200);
2537
2538 return sata_phy_debounce(ap, params);
2539}
2540
2541static void ata_wait_spinup(struct ata_port *ap)
2542{
2543 struct ata_eh_context *ehc = &ap->eh_context;
2544 unsigned long end, secs;
2545 int rc;
2546
2547 /* first, debounce phy if SATA */
2548 if (ap->cbl == ATA_CBL_SATA) {
2549 rc = sata_phy_debounce(ap, sata_deb_timing_eh);
2550
2551 /* if debounced successfully and offline, no need to wait */
2552 if ((rc == 0 || rc == -EOPNOTSUPP) && ata_port_offline(ap))
2553 return;
2172 } 2554 }
2555
2556 /* okay, let's give the drive time to spin up */
2557 end = ehc->i.hotplug_timestamp + ATA_SPINUP_WAIT * HZ / 1000;
2558 secs = ((end - jiffies) + HZ - 1) / HZ;
2559
2560 if (time_after(jiffies, end))
2561 return;
2562
2563 if (secs > 5)
2564 ata_port_printk(ap, KERN_INFO, "waiting for device to spin up "
2565 "(%lu secs)\n", secs);
2566
2567 schedule_timeout_uninterruptible(end - jiffies);
2568}
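The secs value above is a ceiling division of the remaining jiffies: with
HZ = 250 and 300 jiffies (1.2 s) left until end, (300 + 249) / 250 rounds up
to 2, so the printed wait time is never understated.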
2569
2570/**
2571 * ata_std_prereset - prepare for reset
2572 * @ap: ATA port to be reset
2573 *
2574 * @ap is about to be reset. Initialize it.
2575 *
2576 * LOCKING:
2577 * Kernel thread context (may sleep)
2578 *
2579 * RETURNS:
2580 * 0 on success, -errno otherwise.
2581 */
2582int ata_std_prereset(struct ata_port *ap)
2583{
2584 struct ata_eh_context *ehc = &ap->eh_context;
2585 const unsigned long *timing;
2586 int rc;
2587
2588 /* hotplug? */
2589 if (ehc->i.flags & ATA_EHI_HOTPLUGGED) {
2590 if (ap->flags & ATA_FLAG_HRST_TO_RESUME)
2591 ehc->i.action |= ATA_EH_HARDRESET;
2592 if (ap->flags & ATA_FLAG_SKIP_D2H_BSY)
2593 ata_wait_spinup(ap);
2594 }
2595
2596 /* if we're about to do hardreset, nothing more to do */
2597 if (ehc->i.action & ATA_EH_HARDRESET)
2598 return 0;
2599
2600 /* if SATA, resume phy */
2601 if (ap->cbl == ATA_CBL_SATA) {
2602 if (ap->flags & ATA_FLAG_LOADING)
2603 timing = sata_deb_timing_boot;
2604 else
2605 timing = sata_deb_timing_eh;
2606
2607 rc = sata_phy_resume(ap, timing);
2608 if (rc && rc != -EOPNOTSUPP) {
2609 /* phy resume failed */
2610 ata_port_printk(ap, KERN_WARNING, "failed to resume "
2611 "link for reset (errno=%d)\n", rc);
2612 return rc;
2613 }
2614 }
2615
2616 /* Wait for !BSY if the controller can wait for the first D2H
2617 * Reg FIS and we don't know that no device is attached.
2618 */
2619 if (!(ap->flags & ATA_FLAG_SKIP_D2H_BSY) && !ata_port_offline(ap))
2620 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2621
2622 return 0;
2173} 2623}
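Under the new EH scheme these standard callbacks are handed to the EH core
rather than called directly; a hedged sketch of a driver's ->error_handler
(assuming the ata_do_eh() entry point from libata-eh):

	static void my_error_handler(struct ata_port *ap)
	{
		/* let the EH core sequence prereset/softreset/hardreset/postreset */
		ata_do_eh(ap, ata_std_prereset, ata_std_softreset,
			  sata_std_hardreset, ata_std_postreset);
	}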
2174 2624
2175/** 2625/**
2176 * ata_std_softreset - reset host port via ATA SRST 2626 * ata_std_softreset - reset host port via ATA SRST
2177 * @ap: port to reset 2627 * @ap: port to reset
2178 * @verbose: fail verbosely
2179 * @classes: resulting classes of attached devices 2628 * @classes: resulting classes of attached devices
2180 * 2629 *
2181 * Reset host port using ATA SRST. This function is to be used 2630 * Reset host port using ATA SRST.
2182 * as standard callback for ata_drive_*_reset() functions.
2183 * 2631 *
2184 * LOCKING: 2632 * LOCKING:
2185 * Kernel thread context (may sleep) 2633 * Kernel thread context (may sleep)
@@ -2187,7 +2635,7 @@ void ata_std_probeinit(struct ata_port *ap)
2187 * RETURNS: 2635 * RETURNS:
2188 * 0 on success, -errno otherwise. 2636 * 0 on success, -errno otherwise.
2189 */ 2637 */
2190int ata_std_softreset(struct ata_port *ap, int verbose, unsigned int *classes) 2638int ata_std_softreset(struct ata_port *ap, unsigned int *classes)
2191{ 2639{
2192 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS; 2640 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2193 unsigned int devmask = 0, err_mask; 2641 unsigned int devmask = 0, err_mask;
@@ -2195,7 +2643,7 @@ int ata_std_softreset(struct ata_port *ap, int verbose, unsigned int *classes)
2195 2643
2196 DPRINTK("ENTER\n"); 2644 DPRINTK("ENTER\n");
2197 2645
2198 if (ap->ops->scr_read && !sata_dev_present(ap)) { 2646 if (ata_port_offline(ap)) {
2199 classes[0] = ATA_DEV_NONE; 2647 classes[0] = ATA_DEV_NONE;
2200 goto out; 2648 goto out;
2201 } 2649 }
@@ -2213,11 +2661,7 @@ int ata_std_softreset(struct ata_port *ap, int verbose, unsigned int *classes)
2213 DPRINTK("about to softreset, devmask=%x\n", devmask); 2661 DPRINTK("about to softreset, devmask=%x\n", devmask);
2214 err_mask = ata_bus_softreset(ap, devmask); 2662 err_mask = ata_bus_softreset(ap, devmask);
2215 if (err_mask) { 2663 if (err_mask) {
2216 if (verbose) 2664 ata_port_printk(ap, KERN_ERR, "SRST failed (err_mask=0x%x)\n",
2217 printk(KERN_ERR "ata%u: SRST failed (err_mask=0x%x)\n",
2218 ap->id, err_mask);
2219 else
2220 DPRINTK("EXIT, softreset failed (err_mask=0x%x)\n",
2221 err_mask); 2665 err_mask);
2222 return -EIO; 2666 return -EIO;
2223 } 2667 }
@@ -2235,12 +2679,9 @@ int ata_std_softreset(struct ata_port *ap, int verbose, unsigned int *classes)
2235/** 2679/**
2236 * sata_std_hardreset - reset host port via SATA phy reset 2680 * sata_std_hardreset - reset host port via SATA phy reset
2237 * @ap: port to reset 2681 * @ap: port to reset
2238 * @verbose: fail verbosely
2239 * @class: resulting class of attached device 2682 * @class: resulting class of attached device
2240 * 2683 *
2241 * SATA phy-reset host port using DET bits of SControl register. 2684 * SATA phy-reset host port using DET bits of SControl register.
2242 * This function is to be used as standard callback for
2243 * ata_drive_*_reset().
2244 * 2685 *
2245 * LOCKING: 2686 * LOCKING:
2246 * Kernel thread context (may sleep) 2687 * Kernel thread context (may sleep)
@@ -2248,35 +2689,57 @@ int ata_std_softreset(struct ata_port *ap, int verbose, unsigned int *classes)
2248 * RETURNS: 2689 * RETURNS:
2249 * 0 on success, -errno otherwise. 2690 * 0 on success, -errno otherwise.
2250 */ 2691 */
2251int sata_std_hardreset(struct ata_port *ap, int verbose, unsigned int *class) 2692int sata_std_hardreset(struct ata_port *ap, unsigned int *class)
2252{ 2693{
2694 u32 scontrol;
2695 int rc;
2696
2253 DPRINTK("ENTER\n"); 2697 DPRINTK("ENTER\n");
2254 2698
2255 /* Issue phy wake/reset */ 2699 if (sata_set_spd_needed(ap)) {
2256 scr_write_flush(ap, SCR_CONTROL, 0x301); 2700 /* SATA spec says nothing about how to reconfigure
2701 * spd. To be on the safe side, turn off phy during
2702 * reconfiguration. This works for at least ICH7 AHCI
2703 * and Sil3124.
2704 */
2705 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2706 return rc;
2257 2707
2258 /* 2708 scontrol = (scontrol & 0x0f0) | 0x302;
2259 * Couldn't find anything in SATA I/II specs, but AHCI-1.1 2709
2710 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2711 return rc;
2712
2713 sata_set_spd(ap);
2714 }
2715
2716 /* issue phy wake/reset */
2717 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2718 return rc;
2719
2720 scontrol = (scontrol & 0x0f0) | 0x301;
2721
2722 if ((rc = sata_scr_write_flush(ap, SCR_CONTROL, scontrol)))
2723 return rc;
2724
2725 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
2260 * 10.4.2 says at least 1 ms. 2726 * 10.4.2 says at least 1 ms.
2261 */ 2727 */
2262 msleep(1); 2728 msleep(1);
2263 2729
2264 /* Bring phy back */ 2730 /* bring phy back */
2265 sata_phy_resume(ap); 2731 sata_phy_resume(ap, sata_deb_timing_eh);
2266 2732
2267 /* TODO: phy layer with polling, timeouts, etc. */ 2733 /* TODO: phy layer with polling, timeouts, etc. */
2268 if (!sata_dev_present(ap)) { 2734 if (ata_port_offline(ap)) {
2269 *class = ATA_DEV_NONE; 2735 *class = ATA_DEV_NONE;
2270 DPRINTK("EXIT, link offline\n"); 2736 DPRINTK("EXIT, link offline\n");
2271 return 0; 2737 return 0;
2272 } 2738 }
2273 2739
2274 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) { 2740 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
2275 if (verbose) 2741 ata_port_printk(ap, KERN_ERR,
2276 printk(KERN_ERR "ata%u: COMRESET failed " 2742 "COMRESET failed (device not ready)\n");
2277 "(device not ready)\n", ap->id);
2278 else
2279 DPRINTK("EXIT, device not ready\n");
2280 return -EIO; 2743 return -EIO;
2281 } 2744 }
2282 2745
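The (scontrol & 0x0f0) read-modify-write idiom above preserves the SPD (speed
limit) field while rewriting DET and IPM; the SControl layout it relies on,
restated as a sketch (macro names are illustrative):

	#define MY_SCONTROL_DET	0x00f	/* detection: 1 = perform COMRESET */
	#define MY_SCONTROL_SPD	0x0f0	/* speed limit, kept by & 0x0f0 */
	#define MY_SCONTROL_IPM	0xf00	/* power mgmt: 3 = no partial/slumber */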
@@ -2297,27 +2760,28 @@ int sata_std_hardreset(struct ata_port *ap, int verbose, unsigned int *class)
2297 * the device might have been reset more than once using 2760 * the device might have been reset more than once using
2298 * different reset methods before postreset is invoked. 2761 * different reset methods before postreset is invoked.
2299 * 2762 *
2300 * This function is to be used as standard callback for
2301 * ata_drive_*_reset().
2302 *
2303 * LOCKING: 2763 * LOCKING:
2304 * Kernel thread context (may sleep) 2764 * Kernel thread context (may sleep)
2305 */ 2765 */
2306void ata_std_postreset(struct ata_port *ap, unsigned int *classes) 2766void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
2307{ 2767{
2308 DPRINTK("ENTER\n"); 2768 u32 serror;
2309 2769
2310 /* set cable type if it isn't already set */ 2770 DPRINTK("ENTER\n");
2311 if (ap->cbl == ATA_CBL_NONE && ap->flags & ATA_FLAG_SATA)
2312 ap->cbl = ATA_CBL_SATA;
2313 2771
2314 /* print link status */ 2772 /* print link status */
2315 if (ap->cbl == ATA_CBL_SATA) 2773 sata_print_link_status(ap);
2316 sata_print_link_status(ap); 2774
2775 /* clear SError */
2776 if (sata_scr_read(ap, SCR_ERROR, &serror) == 0)
2777 sata_scr_write(ap, SCR_ERROR, serror);
2317 2778
2318 /* re-enable interrupts */ 2779 /* re-enable interrupts */
2319 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */ 2780 if (!ap->ops->error_handler) {
2320 ata_irq_on(ap); 2781 /* FIXME: hack. create a hook instead */
2782 if (ap->ioaddr.ctl_addr)
2783 ata_irq_on(ap);
2784 }
2321 2785
2322 /* is double-select really necessary? */ 2786 /* is double-select really necessary? */
2323 if (classes[0] != ATA_DEV_NONE) 2787 if (classes[0] != ATA_DEV_NONE)
@@ -2343,126 +2807,7 @@ void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
2343} 2807}
2344 2808
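A driver wanting extra diagnostics can wrap the standard postreset; a sketch
(the wrapper is hypothetical, the sata_scr_* calls are as above):

	static void my_postreset(struct ata_port *ap, unsigned int *classes)
	{
		u32 serror;

		ata_std_postreset(ap, classes);

		/* report anything SError accumulated after the clear above */
		if (sata_scr_read(ap, SCR_ERROR, &serror) == 0 && serror)
			ata_port_printk(ap, KERN_INFO,
					"SError=0x%x after reset\n", serror);
	}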
2345/** 2809/**
2346 * ata_std_probe_reset - standard probe reset method
2347 * @ap: port to perform probe-reset
2348 * @classes: resulting classes of attached devices
2349 *
2350 * The stock off-the-shelf ->probe_reset method.
2351 *
2352 * LOCKING:
2353 * Kernel thread context (may sleep)
2354 *
2355 * RETURNS:
2356 * 0 on success, -errno otherwise.
2357 */
2358int ata_std_probe_reset(struct ata_port *ap, unsigned int *classes)
2359{
2360 ata_reset_fn_t hardreset;
2361
2362 hardreset = NULL;
2363 if (ap->flags & ATA_FLAG_SATA && ap->ops->scr_read)
2364 hardreset = sata_std_hardreset;
2365
2366 return ata_drive_probe_reset(ap, ata_std_probeinit,
2367 ata_std_softreset, hardreset,
2368 ata_std_postreset, classes);
2369}
2370
2371static int do_probe_reset(struct ata_port *ap, ata_reset_fn_t reset,
2372 ata_postreset_fn_t postreset,
2373 unsigned int *classes)
2374{
2375 int i, rc;
2376
2377 for (i = 0; i < ATA_MAX_DEVICES; i++)
2378 classes[i] = ATA_DEV_UNKNOWN;
2379
2380 rc = reset(ap, 0, classes);
2381 if (rc)
2382 return rc;
2383
2384 /* If any class isn't ATA_DEV_UNKNOWN, consider classification
2385 * is complete and convert all ATA_DEV_UNKNOWN to
2386 * ATA_DEV_NONE.
2387 */
2388 for (i = 0; i < ATA_MAX_DEVICES; i++)
2389 if (classes[i] != ATA_DEV_UNKNOWN)
2390 break;
2391
2392 if (i < ATA_MAX_DEVICES)
2393 for (i = 0; i < ATA_MAX_DEVICES; i++)
2394 if (classes[i] == ATA_DEV_UNKNOWN)
2395 classes[i] = ATA_DEV_NONE;
2396
2397 if (postreset)
2398 postreset(ap, classes);
2399
2400 return classes[0] != ATA_DEV_UNKNOWN ? 0 : -ENODEV;
2401}
2402
2403/**
2404 * ata_drive_probe_reset - Perform probe reset with given methods
2405 * @ap: port to reset
2406 * @probeinit: probeinit method (can be NULL)
2407 * @softreset: softreset method (can be NULL)
2408 * @hardreset: hardreset method (can be NULL)
2409 * @postreset: postreset method (can be NULL)
2410 * @classes: resulting classes of attached devices
2411 *
2412 * Reset the specified port and classify attached devices using
2413 * given methods. This function prefers softreset but tries all
2414 * possible reset sequences to reset and classify devices. This
2415 * function is intended to be used for constructing ->probe_reset
2416 * callback by low level drivers.
2417 *
2418 * Reset methods should follow the following rules.
2419 *
2420 * - Return 0 on success, -errno on failure.
2421 * - If classification is supported, fill classes[] with
2422 * recognized class codes.
2423 * - If classification is not supported, leave classes[] alone.
2424 * - If verbose is non-zero, print error message on failure;
2425 * otherwise, shut up.
2426 *
2427 * LOCKING:
2428 * Kernel thread context (may sleep)
2429 *
2430 * RETURNS:
2431 * 0 on success, -EINVAL if no reset method is available, -ENODEV
2432 * if classification fails, and any error code from reset
2433 * methods.
2434 */
2435int ata_drive_probe_reset(struct ata_port *ap, ata_probeinit_fn_t probeinit,
2436 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
2437 ata_postreset_fn_t postreset, unsigned int *classes)
2438{
2439 int rc = -EINVAL;
2440
2441 if (probeinit)
2442 probeinit(ap);
2443
2444 if (softreset) {
2445 rc = do_probe_reset(ap, softreset, postreset, classes);
2446 if (rc == 0)
2447 return 0;
2448 }
2449
2450 if (!hardreset)
2451 return rc;
2452
2453 rc = do_probe_reset(ap, hardreset, postreset, classes);
2454 if (rc == 0 || rc != -ENODEV)
2455 return rc;
2456
2457 if (softreset)
2458 rc = do_probe_reset(ap, softreset, postreset, classes);
2459
2460 return rc;
2461}
2462
2463/**
2464 * ata_dev_same_device - Determine whether new ID matches configured device 2810 * ata_dev_same_device - Determine whether new ID matches configured device
2465 * @ap: port on which the device to compare against resides
2466 * @dev: device to compare against 2811 * @dev: device to compare against
2467 * @new_class: class of the new device 2812 * @new_class: class of the new device
2468 * @new_id: IDENTIFY page of the new device 2813 * @new_id: IDENTIFY page of the new device
@@ -2477,17 +2822,16 @@ int ata_drive_probe_reset(struct ata_port *ap, ata_probeinit_fn_t probeinit,
2477 * RETURNS: 2822 * RETURNS:
2478 * 1 if @dev matches @new_class and @new_id, 0 otherwise. 2823 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
2479 */ 2824 */
2480static int ata_dev_same_device(struct ata_port *ap, struct ata_device *dev, 2825static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
2481 unsigned int new_class, const u16 *new_id) 2826 const u16 *new_id)
2482{ 2827{
2483 const u16 *old_id = dev->id; 2828 const u16 *old_id = dev->id;
2484 unsigned char model[2][41], serial[2][21]; 2829 unsigned char model[2][41], serial[2][21];
2485 u64 new_n_sectors; 2830 u64 new_n_sectors;
2486 2831
2487 if (dev->class != new_class) { 2832 if (dev->class != new_class) {
2488 printk(KERN_INFO 2833 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
2489 "ata%u: dev %u class mismatch %d != %d\n", 2834 dev->class, new_class);
2490 ap->id, dev->devno, dev->class, new_class);
2491 return 0; 2835 return 0;
2492 } 2836 }
2493 2837
@@ -2498,24 +2842,22 @@ static int ata_dev_same_device(struct ata_port *ap, struct ata_device *dev,
2498 new_n_sectors = ata_id_n_sectors(new_id); 2842 new_n_sectors = ata_id_n_sectors(new_id);
2499 2843
2500 if (strcmp(model[0], model[1])) { 2844 if (strcmp(model[0], model[1])) {
2501 printk(KERN_INFO 2845 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
2502 "ata%u: dev %u model number mismatch '%s' != '%s'\n", 2846 "'%s' != '%s'\n", model[0], model[1]);
2503 ap->id, dev->devno, model[0], model[1]);
2504 return 0; 2847 return 0;
2505 } 2848 }
2506 2849
2507 if (strcmp(serial[0], serial[1])) { 2850 if (strcmp(serial[0], serial[1])) {
2508 printk(KERN_INFO 2851 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
2509 "ata%u: dev %u serial number mismatch '%s' != '%s'\n", 2852 "'%s' != '%s'\n", serial[0], serial[1]);
2510 ap->id, dev->devno, serial[0], serial[1]);
2511 return 0; 2853 return 0;
2512 } 2854 }
2513 2855
2514 if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) { 2856 if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) {
2515 printk(KERN_INFO 2857 ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
2516 "ata%u: dev %u n_sectors mismatch %llu != %llu\n", 2858 "%llu != %llu\n",
2517 ap->id, dev->devno, (unsigned long long)dev->n_sectors, 2859 (unsigned long long)dev->n_sectors,
2518 (unsigned long long)new_n_sectors); 2860 (unsigned long long)new_n_sectors);
2519 return 0; 2861 return 0;
2520 } 2862 }
2521 2863
@@ -2524,7 +2866,6 @@ static int ata_dev_same_device(struct ata_port *ap, struct ata_device *dev,
2524 2866
2525/** 2867/**
2526 * ata_dev_revalidate - Revalidate ATA device 2868 * ata_dev_revalidate - Revalidate ATA device
2527 * @ap: port on which the device to revalidate resides
2528 * @dev: device to revalidate 2869 * @dev: device to revalidate
2529 * @post_reset: is this revalidation after reset? 2870 * @post_reset: is this revalidation after reset?
2530 * 2871 *
@@ -2537,40 +2878,37 @@ static int ata_dev_same_device(struct ata_port *ap, struct ata_device *dev,
2537 * RETURNS: 2878 * RETURNS:
2538 * 0 on success, negative errno otherwise 2879 * 0 on success, negative errno otherwise
2539 */ 2880 */
2540int ata_dev_revalidate(struct ata_port *ap, struct ata_device *dev, 2881int ata_dev_revalidate(struct ata_device *dev, int post_reset)
2541 int post_reset)
2542{ 2882{
2543 unsigned int class; 2883 unsigned int class = dev->class;
2544 u16 *id; 2884 u16 *id = (void *)dev->ap->sector_buf;
2545 int rc; 2885 int rc;
2546 2886
2547 if (!ata_dev_present(dev)) 2887 if (!ata_dev_enabled(dev)) {
2548 return -ENODEV; 2888 rc = -ENODEV;
2549 2889 goto fail;
2550 class = dev->class; 2890 }
2551 id = NULL;
2552 2891
2553 /* allocate & read ID data */ 2892 /* read ID data */
2554 rc = ata_dev_read_id(ap, dev, &class, post_reset, &id); 2893 rc = ata_dev_read_id(dev, &class, post_reset, id);
2555 if (rc) 2894 if (rc)
2556 goto fail; 2895 goto fail;
2557 2896
2558 /* is the device still there? */ 2897 /* is the device still there? */
2559 if (!ata_dev_same_device(ap, dev, class, id)) { 2898 if (!ata_dev_same_device(dev, class, id)) {
2560 rc = -ENODEV; 2899 rc = -ENODEV;
2561 goto fail; 2900 goto fail;
2562 } 2901 }
2563 2902
2564 kfree(dev->id); 2903 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
2565 dev->id = id;
2566 2904
2567 /* configure device according to the new ID */ 2905 /* configure device according to the new ID */
2568 return ata_dev_configure(ap, dev, 0); 2906 rc = ata_dev_configure(dev, 0);
2907 if (rc == 0)
2908 return 0;
2569 2909
2570 fail: 2910 fail:
2571 printk(KERN_ERR "ata%u: dev %u revalidation failed (errno=%d)\n", 2911 ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
2572 ap->id, dev->devno, rc);
2573 kfree(id);
2574 return rc; 2912 return rc;
2575} 2913}
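A typical caller revalidates after a successful reset and drops the device on
failure; a sketch (the error handling is illustrative and ata_dev_disable()
is an assumed helper):

	static void my_revalidate(struct ata_device *dev)
	{
		/* post_reset = 1: tells ata_dev_read_id() this follows a reset */
		if (ata_dev_revalidate(dev, 1))
			ata_dev_disable(dev);
	}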
2576 2914
@@ -2626,6 +2964,14 @@ static int ata_dma_blacklisted(const struct ata_device *dev)
2626 unsigned int nlen, rlen; 2964 unsigned int nlen, rlen;
2627 int i; 2965 int i;
2628 2966
 2967 /* We don't support polling DMA.
 2968 * Blacklist DMA for ATAPI devices with CDB-intr (they fall back to
 2969 * PIO) if the LLDD handles interrupts only in the HSM_ST_LAST state.
2970 */
2971 if ((dev->ap->flags & ATA_FLAG_PIO_POLLING) &&
2972 (dev->flags & ATA_DFLAG_CDB_INTR))
2973 return 1;
2974
2629 ata_id_string(dev->id, model_num, ATA_ID_PROD_OFS, 2975 ata_id_string(dev->id, model_num, ATA_ID_PROD_OFS,
2630 sizeof(model_num)); 2976 sizeof(model_num));
2631 ata_id_string(dev->id, model_rev, ATA_ID_FW_REV_OFS, 2977 ata_id_string(dev->id, model_rev, ATA_ID_FW_REV_OFS,
@@ -2646,7 +2992,6 @@ static int ata_dma_blacklisted(const struct ata_device *dev)
2646 2992
2647/** 2993/**
2648 * ata_dev_xfermask - Compute supported xfermask of the given device 2994 * ata_dev_xfermask - Compute supported xfermask of the given device
2649 * @ap: Port on which the device to compute xfermask for resides
2650 * @dev: Device to compute xfermask for 2995 * @dev: Device to compute xfermask for
2651 * 2996 *
2652 * Compute supported xfermask of @dev and store it in 2997 * Compute supported xfermask of @dev and store it in
@@ -2661,49 +3006,61 @@ static int ata_dma_blacklisted(const struct ata_device *dev)
2661 * LOCKING: 3006 * LOCKING:
2662 * None. 3007 * None.
2663 */ 3008 */
2664static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev) 3009static void ata_dev_xfermask(struct ata_device *dev)
2665{ 3010{
3011 struct ata_port *ap = dev->ap;
2666 struct ata_host_set *hs = ap->host_set; 3012 struct ata_host_set *hs = ap->host_set;
2667 unsigned long xfer_mask; 3013 unsigned long xfer_mask;
2668 int i; 3014 int i;
2669 3015
2670 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask, 3016 xfer_mask = ata_pack_xfermask(ap->pio_mask,
2671 ap->udma_mask); 3017 ap->mwdma_mask, ap->udma_mask);
3018
3019 /* Apply cable rule here. Don't apply it early because when
3020 * we handle hot plug the cable type can itself change.
3021 */
3022 if (ap->cbl == ATA_CBL_PATA40)
3023 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
2672 3024
2673 /* FIXME: Use port-wide xfermask for now */ 3025 /* FIXME: Use port-wide xfermask for now */
2674 for (i = 0; i < ATA_MAX_DEVICES; i++) { 3026 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2675 struct ata_device *d = &ap->device[i]; 3027 struct ata_device *d = &ap->device[i];
2676 if (!ata_dev_present(d)) 3028
3029 if (ata_dev_absent(d))
2677 continue; 3030 continue;
2678 xfer_mask &= ata_pack_xfermask(d->pio_mask, d->mwdma_mask, 3031
2679 d->udma_mask); 3032 if (ata_dev_disabled(d)) {
3033 /* to avoid violating device selection timing */
3034 xfer_mask &= ata_pack_xfermask(d->pio_mask,
3035 UINT_MAX, UINT_MAX);
3036 continue;
3037 }
3038
3039 xfer_mask &= ata_pack_xfermask(d->pio_mask,
3040 d->mwdma_mask, d->udma_mask);
2680 xfer_mask &= ata_id_xfermask(d->id); 3041 xfer_mask &= ata_id_xfermask(d->id);
2681 if (ata_dma_blacklisted(d)) 3042 if (ata_dma_blacklisted(d))
2682 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); 3043 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
2683 /* Apply cable rule here. Don't apply it early because when
2684 we handle hot plug the cable type can itself change */
2685 if (ap->cbl == ATA_CBL_PATA40)
2686 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
2687 } 3044 }
2688 3045
2689 if (ata_dma_blacklisted(dev)) 3046 if (ata_dma_blacklisted(dev))
2690 printk(KERN_WARNING "ata%u: dev %u is on DMA blacklist, " 3047 ata_dev_printk(dev, KERN_WARNING,
2691 "disabling DMA\n", ap->id, dev->devno); 3048 "device is on DMA blacklist, disabling DMA\n");
2692 3049
2693 if (hs->flags & ATA_HOST_SIMPLEX) { 3050 if (hs->flags & ATA_HOST_SIMPLEX) {
2694 if (hs->simplex_claimed) 3051 if (hs->simplex_claimed)
2695 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); 3052 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
2696 } 3053 }
3054
2697 if (ap->ops->mode_filter) 3055 if (ap->ops->mode_filter)
2698 xfer_mask = ap->ops->mode_filter(ap, dev, xfer_mask); 3056 xfer_mask = ap->ops->mode_filter(ap, dev, xfer_mask);
2699 3057
2700 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask, 3058 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
2701 &dev->udma_mask); 3059 &dev->mwdma_mask, &dev->udma_mask);
2702} 3060}
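The 0xF8 mask in the cable rule above covers UDMA modes 3-7, so a 40-wire
(PATA40) cable is limited to UDMA/33 and below: only UDMA0-2 survive the
& ~(0xF8 << ATA_SHIFT_UDMA).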
2703 3061
2704/** 3062/**
2705 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command 3063 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
2706 * @ap: Port associated with device @dev
2707 * @dev: Device to which command will be sent 3064 * @dev: Device to which command will be sent
2708 * 3065 *
2709 * Issue SET FEATURES - XFER MODE command to device @dev 3066 * Issue SET FEATURES - XFER MODE command to device @dev
@@ -2716,8 +3073,7 @@ static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev)
2716 * 0 on success, AC_ERR_* mask otherwise. 3073 * 0 on success, AC_ERR_* mask otherwise.
2717 */ 3074 */
2718 3075
2719static unsigned int ata_dev_set_xfermode(struct ata_port *ap, 3076static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
2720 struct ata_device *dev)
2721{ 3077{
2722 struct ata_taskfile tf; 3078 struct ata_taskfile tf;
2723 unsigned int err_mask; 3079 unsigned int err_mask;
@@ -2725,14 +3081,14 @@ static unsigned int ata_dev_set_xfermode(struct ata_port *ap,
2725 /* set up set-features taskfile */ 3081 /* set up set-features taskfile */
2726 DPRINTK("set features - xfer mode\n"); 3082 DPRINTK("set features - xfer mode\n");
2727 3083
2728 ata_tf_init(ap, &tf, dev->devno); 3084 ata_tf_init(dev, &tf);
2729 tf.command = ATA_CMD_SET_FEATURES; 3085 tf.command = ATA_CMD_SET_FEATURES;
2730 tf.feature = SETFEATURES_XFER; 3086 tf.feature = SETFEATURES_XFER;
2731 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 3087 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2732 tf.protocol = ATA_PROT_NODATA; 3088 tf.protocol = ATA_PROT_NODATA;
2733 tf.nsect = dev->xfer_mode; 3089 tf.nsect = dev->xfer_mode;
2734 3090
2735 err_mask = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0); 3091 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
2736 3092
2737 DPRINTK("EXIT, err_mask=%x\n", err_mask); 3093 DPRINTK("EXIT, err_mask=%x\n", err_mask);
2738 return err_mask; 3094 return err_mask;
@@ -2740,7 +3096,6 @@ static unsigned int ata_dev_set_xfermode(struct ata_port *ap,
2740 3096
2741/** 3097/**
2742 * ata_dev_init_params - Issue INIT DEV PARAMS command 3098 * ata_dev_init_params - Issue INIT DEV PARAMS command
2743 * @ap: Port associated with device @dev
2744 * @dev: Device to which command will be sent 3099 * @dev: Device to which command will be sent
2745 * @heads: Number of heads (taskfile parameter) 3100 * @heads: Number of heads (taskfile parameter)
2746 * @sectors: Number of sectors (taskfile parameter) 3101 * @sectors: Number of sectors (taskfile parameter)
@@ -2751,11 +3106,8 @@ static unsigned int ata_dev_set_xfermode(struct ata_port *ap,
2751 * RETURNS: 3106 * RETURNS:
2752 * 0 on success, AC_ERR_* mask otherwise. 3107 * 0 on success, AC_ERR_* mask otherwise.
2753 */ 3108 */
2754 3109static unsigned int ata_dev_init_params(struct ata_device *dev,
2755static unsigned int ata_dev_init_params(struct ata_port *ap, 3110 u16 heads, u16 sectors)
2756 struct ata_device *dev,
2757 u16 heads,
2758 u16 sectors)
2759{ 3111{
2760 struct ata_taskfile tf; 3112 struct ata_taskfile tf;
2761 unsigned int err_mask; 3113 unsigned int err_mask;
@@ -2767,14 +3119,14 @@ static unsigned int ata_dev_init_params(struct ata_port *ap,
2767 /* set up init dev params taskfile */ 3119 /* set up init dev params taskfile */
2768 DPRINTK("init dev params \n"); 3120 DPRINTK("init dev params \n");
2769 3121
2770 ata_tf_init(ap, &tf, dev->devno); 3122 ata_tf_init(dev, &tf);
2771 tf.command = ATA_CMD_INIT_DEV_PARAMS; 3123 tf.command = ATA_CMD_INIT_DEV_PARAMS;
2772 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 3124 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2773 tf.protocol = ATA_PROT_NODATA; 3125 tf.protocol = ATA_PROT_NODATA;
2774 tf.nsect = sectors; 3126 tf.nsect = sectors;
2775 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */ 3127 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
2776 3128
2777 err_mask = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0); 3129 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
2778 3130
2779 DPRINTK("EXIT, err_mask=%x\n", err_mask); 3131 DPRINTK("EXIT, err_mask=%x\n", err_mask);
2780 return err_mask; 3132 return err_mask;
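The ata_tf_init()/ata_exec_internal() pairing above works for any internal
no-data command; a hedged sketch issuing a cache flush (ATA_CMD_FLUSH is
assumed from <linux/ata.h>, the helper itself is hypothetical):

	static unsigned int my_dev_flush(struct ata_device *dev)
	{
		struct ata_taskfile tf;

		ata_tf_init(dev, &tf);
		tf.command = ATA_CMD_FLUSH;
		tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
		tf.protocol = ATA_PROT_NODATA;

		return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
	}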
@@ -2957,6 +3309,7 @@ void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
2957 qc->n_elem = 1; 3309 qc->n_elem = 1;
2958 qc->orig_n_elem = 1; 3310 qc->orig_n_elem = 1;
2959 qc->buf_virt = buf; 3311 qc->buf_virt = buf;
3312 qc->nbytes = buflen;
2960 3313
2961 sg = qc->__sg; 3314 sg = qc->__sg;
2962 sg_init_one(sg, buf, buflen); 3315 sg_init_one(sg, buf, buflen);
@@ -3140,134 +3493,6 @@ skip_map:
3140} 3493}
3141 3494
3142/** 3495/**
3143 * ata_poll_qc_complete - turn irq back on and finish qc
3144 * @qc: Command to complete
3145 * @err_mask: ATA status register content
3146 *
3147 * LOCKING:
3148 * None. (grabs host lock)
3149 */
3150
3151void ata_poll_qc_complete(struct ata_queued_cmd *qc)
3152{
3153 struct ata_port *ap = qc->ap;
3154 unsigned long flags;
3155
3156 spin_lock_irqsave(&ap->host_set->lock, flags);
3157 ap->flags &= ~ATA_FLAG_NOINTR;
3158 ata_irq_on(ap);
3159 ata_qc_complete(qc);
3160 spin_unlock_irqrestore(&ap->host_set->lock, flags);
3161}
3162
3163/**
3164 * ata_pio_poll - poll using PIO, depending on current state
3165 * @ap: the target ata_port
3166 *
3167 * LOCKING:
3168 * None. (executing in kernel thread context)
3169 *
3170 * RETURNS:
3171 * timeout value to use
3172 */
3173
3174static unsigned long ata_pio_poll(struct ata_port *ap)
3175{
3176 struct ata_queued_cmd *qc;
3177 u8 status;
3178 unsigned int poll_state = HSM_ST_UNKNOWN;
3179 unsigned int reg_state = HSM_ST_UNKNOWN;
3180
3181 qc = ata_qc_from_tag(ap, ap->active_tag);
3182 WARN_ON(qc == NULL);
3183
3184 switch (ap->hsm_task_state) {
3185 case HSM_ST:
3186 case HSM_ST_POLL:
3187 poll_state = HSM_ST_POLL;
3188 reg_state = HSM_ST;
3189 break;
3190 case HSM_ST_LAST:
3191 case HSM_ST_LAST_POLL:
3192 poll_state = HSM_ST_LAST_POLL;
3193 reg_state = HSM_ST_LAST;
3194 break;
3195 default:
3196 BUG();
3197 break;
3198 }
3199
3200 status = ata_chk_status(ap);
3201 if (status & ATA_BUSY) {
3202 if (time_after(jiffies, ap->pio_task_timeout)) {
3203 qc->err_mask |= AC_ERR_TIMEOUT;
3204 ap->hsm_task_state = HSM_ST_TMOUT;
3205 return 0;
3206 }
3207 ap->hsm_task_state = poll_state;
3208 return ATA_SHORT_PAUSE;
3209 }
3210
3211 ap->hsm_task_state = reg_state;
3212 return 0;
3213}
3214
3215/**
3216 * ata_pio_complete - check if drive is busy or idle
3217 * @ap: the target ata_port
3218 *
3219 * LOCKING:
3220 * None. (executing in kernel thread context)
3221 *
3222 * RETURNS:
3223 * Non-zero if qc completed, zero otherwise.
3224 */
3225
3226static int ata_pio_complete (struct ata_port *ap)
3227{
3228 struct ata_queued_cmd *qc;
3229 u8 drv_stat;
3230
3231 /*
3232 * This is purely heuristic. This is a fast path. Sometimes when
3233 * we enter, BSY will be cleared in a chk-status or two. If not,
3234 * the drive is probably seeking or something. Snooze for a couple
3235 * msecs, then chk-status again. If still busy, fall back to
3236 * HSM_ST_POLL state.
3237 */
3238 drv_stat = ata_busy_wait(ap, ATA_BUSY, 10);
3239 if (drv_stat & ATA_BUSY) {
3240 msleep(2);
3241 drv_stat = ata_busy_wait(ap, ATA_BUSY, 10);
3242 if (drv_stat & ATA_BUSY) {
3243 ap->hsm_task_state = HSM_ST_LAST_POLL;
3244 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
3245 return 0;
3246 }
3247 }
3248
3249 qc = ata_qc_from_tag(ap, ap->active_tag);
3250 WARN_ON(qc == NULL);
3251
3252 drv_stat = ata_wait_idle(ap);
3253 if (!ata_ok(drv_stat)) {
3254 qc->err_mask |= __ac_err_mask(drv_stat);
3255 ap->hsm_task_state = HSM_ST_ERR;
3256 return 0;
3257 }
3258
3259 ap->hsm_task_state = HSM_ST_IDLE;
3260
3261 WARN_ON(qc->err_mask);
3262 ata_poll_qc_complete(qc);
3263
3264 /* another command may start at this point */
3265
3266 return 1;
3267}
3268
3269
3270/**
3271 * swap_buf_le16 - swap halves of 16-bit words in place 3496 * swap_buf_le16 - swap halves of 16-bit words in place
3272 * @buf: Buffer to swap 3497 * @buf: Buffer to swap
3273 * @buf_words: Number of 16-bit words in buffer. 3498 * @buf_words: Number of 16-bit words in buffer.
@@ -3291,7 +3516,7 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
3291 3516
3292/** 3517/**
3293 * ata_mmio_data_xfer - Transfer data by MMIO 3518 * ata_mmio_data_xfer - Transfer data by MMIO
3294 * @ap: port to read/write 3519 * @adev: device for this I/O
3295 * @buf: data buffer 3520 * @buf: data buffer
3296 * @buflen: buffer length 3521 * @buflen: buffer length
3297 * @write_data: read/write 3522 * @write_data: read/write
@@ -3302,9 +3527,10 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
3302 * Inherited from caller. 3527 * Inherited from caller.
3303 */ 3528 */
3304 3529
3305static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf, 3530void ata_mmio_data_xfer(struct ata_device *adev, unsigned char *buf,
3306 unsigned int buflen, int write_data) 3531 unsigned int buflen, int write_data)
3307{ 3532{
3533 struct ata_port *ap = adev->ap;
3308 unsigned int i; 3534 unsigned int i;
3309 unsigned int words = buflen >> 1; 3535 unsigned int words = buflen >> 1;
3310 u16 *buf16 = (u16 *) buf; 3536 u16 *buf16 = (u16 *) buf;
@@ -3336,7 +3562,7 @@ static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
3336 3562
3337/** 3563/**
3338 * ata_pio_data_xfer - Transfer data by PIO 3564 * ata_pio_data_xfer - Transfer data by PIO
3339 * @ap: port to read/write 3565 * @adev: device to target
3340 * @buf: data buffer 3566 * @buf: data buffer
3341 * @buflen: buffer length 3567 * @buflen: buffer length
3342 * @write_data: read/write 3568 * @write_data: read/write
@@ -3347,9 +3573,10 @@ static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
3347 * Inherited from caller. 3573 * Inherited from caller.
3348 */ 3574 */
3349 3575
3350static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf, 3576void ata_pio_data_xfer(struct ata_device *adev, unsigned char *buf,
3351 unsigned int buflen, int write_data) 3577 unsigned int buflen, int write_data)
3352{ 3578{
3579 struct ata_port *ap = adev->ap;
3353 unsigned int words = buflen >> 1; 3580 unsigned int words = buflen >> 1;
3354 3581
3355 /* Transfer multiple of 2 bytes */ 3582 /* Transfer multiple of 2 bytes */
@@ -3374,38 +3601,29 @@ static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf,
3374} 3601}
3375 3602
3376/** 3603/**
3377 * ata_data_xfer - Transfer data from/to the data register. 3604 * ata_pio_data_xfer_noirq - Transfer data by PIO
3378 * @ap: port to read/write 3605 * @adev: device to target
3379 * @buf: data buffer 3606 * @buf: data buffer
3380 * @buflen: buffer length 3607 * @buflen: buffer length
3381 * @do_write: read/write 3608 * @write_data: read/write
3382 * 3609 *
3383 * Transfer data from/to the device data register. 3610 * Transfer data from/to the device data register by PIO. Do the
3611 * transfer with interrupts disabled.
3384 * 3612 *
3385 * LOCKING: 3613 * LOCKING:
3386 * Inherited from caller. 3614 * Inherited from caller.
3387 */ 3615 */
3388 3616
3389static void ata_data_xfer(struct ata_port *ap, unsigned char *buf, 3617void ata_pio_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
3390 unsigned int buflen, int do_write) 3618 unsigned int buflen, int write_data)
3391{ 3619{
3392 /* Make the crap hardware pay the costs not the good stuff */ 3620 unsigned long flags;
3393 if (unlikely(ap->flags & ATA_FLAG_IRQ_MASK)) { 3621 local_irq_save(flags);
3394 unsigned long flags; 3622 ata_pio_data_xfer(adev, buf, buflen, write_data);
3395 local_irq_save(flags); 3623 local_irq_restore(flags);
3396 if (ap->flags & ATA_FLAG_MMIO)
3397 ata_mmio_data_xfer(ap, buf, buflen, do_write);
3398 else
3399 ata_pio_data_xfer(ap, buf, buflen, do_write);
3400 local_irq_restore(flags);
3401 } else {
3402 if (ap->flags & ATA_FLAG_MMIO)
3403 ata_mmio_data_xfer(ap, buf, buflen, do_write);
3404 else
3405 ata_pio_data_xfer(ap, buf, buflen, do_write);
3406 }
3407} 3624}
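With the hook now taking a struct ata_device, a driver selects its PIO
transfer routine in its port operations; a sketch (other hooks elided; the
choice depends on whether the port is MMIO and whether IRQ masking is needed):

	static const struct ata_port_operations my_port_ops = {
		/* ... */
		.data_xfer	= ata_pio_data_xfer_noirq,
		/* or ata_pio_data_xfer / ata_mmio_data_xfer */
	};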
3408 3625
3626
3409/** 3627/**
3410 * ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data. 3628 * ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
3411 * @qc: Command on going 3629 * @qc: Command on going
@@ -3435,7 +3653,24 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
3435 page = nth_page(page, (offset >> PAGE_SHIFT)); 3653 page = nth_page(page, (offset >> PAGE_SHIFT));
3436 offset %= PAGE_SIZE; 3654 offset %= PAGE_SIZE;
3437 3655
3438 buf = kmap(page) + offset; 3656 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3657
3658 if (PageHighMem(page)) {
3659 unsigned long flags;
3660
3661 /* FIXME: use a bounce buffer */
3662 local_irq_save(flags);
3663 buf = kmap_atomic(page, KM_IRQ0);
3664
3665 /* do the actual data transfer */
3666 ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);
3667
3668 kunmap_atomic(buf, KM_IRQ0);
3669 local_irq_restore(flags);
3670 } else {
3671 buf = page_address(page);
3672 ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);
3673 }
3439 3674
3440 qc->cursect++; 3675 qc->cursect++;
3441 qc->cursg_ofs++; 3676 qc->cursg_ofs++;
@@ -3444,14 +3679,68 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
3444 qc->cursg++; 3679 qc->cursg++;
3445 qc->cursg_ofs = 0; 3680 qc->cursg_ofs = 0;
3446 } 3681 }
3682}
3447 3683
3448 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read"); 3684/**
3685 * ata_pio_sectors - Transfer one or many 512-byte sectors.
3686 * @qc: Command on going
3687 *
3688 * Transfer one or many ATA_SECT_SIZE of data from/to the
3689 * ATA device for the DRQ request.
3690 *
3691 * LOCKING:
3692 * Inherited from caller.
3693 */
3694
3695static void ata_pio_sectors(struct ata_queued_cmd *qc)
3696{
3697 if (is_multi_taskfile(&qc->tf)) {
3698 /* READ/WRITE MULTIPLE */
3699 unsigned int nsect;
3449 3700
3450 /* do the actual data transfer */ 3701 WARN_ON(qc->dev->multi_count == 0);
3451 do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3452 ata_data_xfer(ap, buf, ATA_SECT_SIZE, do_write);
3453 3702
3454 kunmap(page); 3703 nsect = min(qc->nsect - qc->cursect, qc->dev->multi_count);
3704 while (nsect--)
3705 ata_pio_sector(qc);
3706 } else
3707 ata_pio_sector(qc);
3708}
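For example, with multi_count = 8 and 11 sectors still to move, the min()
above transfers 8 sectors in this DRQ block and leaves 3 for the next.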
3709
3710/**
3711 * atapi_send_cdb - Write CDB bytes to hardware
3712 * @ap: Port to which ATAPI device is attached.
3713 * @qc: Taskfile currently active
3714 *
3715 * When device has indicated its readiness to accept
3716 * a CDB, this function is called. Send the CDB.
3717 *
3718 * LOCKING:
 3719 * Inherited from caller.
3720 */
3721
3722static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
3723{
3724 /* send SCSI cdb */
3725 DPRINTK("send cdb\n");
3726 WARN_ON(qc->dev->cdb_len < 12);
3727
3728 ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
3729 ata_altstatus(ap); /* flush */
3730
3731 switch (qc->tf.protocol) {
3732 case ATA_PROT_ATAPI:
3733 ap->hsm_task_state = HSM_ST;
3734 break;
3735 case ATA_PROT_ATAPI_NODATA:
3736 ap->hsm_task_state = HSM_ST_LAST;
3737 break;
3738 case ATA_PROT_ATAPI_DMA:
3739 ap->hsm_task_state = HSM_ST_LAST;
3740 /* initiate bmdma */
3741 ap->ops->bmdma_start(qc);
3742 break;
3743 }
3455} 3744}
3456 3745
3457/** 3746/**
@@ -3492,11 +3781,11 @@ next_sg:
3492 unsigned int i; 3781 unsigned int i;
3493 3782
3494 if (words) /* warning if bytes > 1 */ 3783 if (words) /* warning if bytes > 1 */
3495 printk(KERN_WARNING "ata%u: %u bytes trailing data\n", 3784 ata_dev_printk(qc->dev, KERN_WARNING,
3496 ap->id, bytes); 3785 "%u bytes trailing data\n", bytes);
3497 3786
3498 for (i = 0; i < words; i++) 3787 for (i = 0; i < words; i++)
3499 ata_data_xfer(ap, (unsigned char*)pad_buf, 2, do_write); 3788 ap->ops->data_xfer(qc->dev, (unsigned char*)pad_buf, 2, do_write);
3500 3789
3501 ap->hsm_task_state = HSM_ST_LAST; 3790 ap->hsm_task_state = HSM_ST_LAST;
3502 return; 3791 return;
@@ -3517,7 +3806,24 @@ next_sg:
3517 /* don't cross page boundaries */ 3806 /* don't cross page boundaries */
3518 count = min(count, (unsigned int)PAGE_SIZE - offset); 3807 count = min(count, (unsigned int)PAGE_SIZE - offset);
3519 3808
3520 buf = kmap(page) + offset; 3809 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3810
3811 if (PageHighMem(page)) {
3812 unsigned long flags;
3813
3814 /* FIXME: use bounce buffer */
3815 local_irq_save(flags);
3816 buf = kmap_atomic(page, KM_IRQ0);
3817
3818 /* do the actual data transfer */
3819 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
3820
3821 kunmap_atomic(buf, KM_IRQ0);
3822 local_irq_restore(flags);
3823 } else {
3824 buf = page_address(page);
3825 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
3826 }
3521 3827
3522 bytes -= count; 3828 bytes -= count;
3523 qc->curbytes += count; 3829 qc->curbytes += count;
@@ -3528,13 +3834,6 @@ next_sg:
3528 qc->cursg_ofs = 0; 3834 qc->cursg_ofs = 0;
3529 } 3835 }
3530 3836
3531 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3532
3533 /* do the actual data transfer */
3534 ata_data_xfer(ap, buf, count, do_write);
3535
3536 kunmap(page);
3537
3538 if (bytes) 3837 if (bytes)
3539 goto next_sg; 3838 goto next_sg;
3540} 3839}
@@ -3556,10 +3855,16 @@ static void atapi_pio_bytes(struct ata_queued_cmd *qc)
3556 unsigned int ireason, bc_lo, bc_hi, bytes; 3855 unsigned int ireason, bc_lo, bc_hi, bytes;
3557 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0; 3856 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
3558 3857
3559 ap->ops->tf_read(ap, &qc->tf); 3858 /* Abuse qc->result_tf for temp storage of intermediate TF
3560 ireason = qc->tf.nsect; 3859 * here to save some kernel stack usage.
3561 bc_lo = qc->tf.lbam; 3860 * For normal completion, qc->result_tf is not relevant. For
3562 bc_hi = qc->tf.lbah; 3861 * error, qc->result_tf is later overwritten by ata_qc_complete().
3862 * So, the correctness of qc->result_tf is not affected.
3863 */
3864 ap->ops->tf_read(ap, &qc->result_tf);
3865 ireason = qc->result_tf.nsect;
3866 bc_lo = qc->result_tf.lbam;
3867 bc_hi = qc->result_tf.lbah;
3563 bytes = (bc_hi << 8) | bc_lo; 3868 bytes = (bc_hi << 8) | bc_lo;
3564 3869
3565 /* shall be cleared to zero, indicating xfer of data */ 3870 /* shall be cleared to zero, indicating xfer of data */
@@ -3571,307 +3876,365 @@ static void atapi_pio_bytes(struct ata_queued_cmd *qc)
3571 if (do_write != i_write) 3876 if (do_write != i_write)
3572 goto err_out; 3877 goto err_out;
3573 3878
3879 VPRINTK("ata%u: xfering %d bytes\n", ap->id, bytes);
3880
3574 __atapi_pio_bytes(qc, bytes); 3881 __atapi_pio_bytes(qc, bytes);
3575 3882
3576 return; 3883 return;
3577 3884
3578err_out: 3885err_out:
3579 printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n", 3886 ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
3580 ap->id, dev->devno);
3581 qc->err_mask |= AC_ERR_HSM; 3887 qc->err_mask |= AC_ERR_HSM;
3582 ap->hsm_task_state = HSM_ST_ERR; 3888 ap->hsm_task_state = HSM_ST_ERR;
3583} 3889}
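The ireason checks above decode the standard ATAPI interrupt reason bits; a
restatement as a sketch (macro names are illustrative):

	#define MY_ATAPI_COD	(1 << 0)	/* 1 = CDB transfer, 0 = data */
	#define MY_ATAPI_IO	(1 << 1)	/* 1 = device-to-host (read) */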
3584 3890
3585/** 3891/**
3586 * ata_pio_block - start PIO on a block 3892 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
3587 * @ap: the target ata_port 3893 * @ap: the target ata_port
3894 * @qc: qc on going
3588 * 3895 *
3589 * LOCKING: 3896 * RETURNS:
3590 * None. (executing in kernel thread context) 3897 * 1 if ok in workqueue, 0 otherwise.
3591 */ 3898 */
3592 3899
3593static void ata_pio_block(struct ata_port *ap) 3900static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
3594{ 3901{
3595 struct ata_queued_cmd *qc; 3902 if (qc->tf.flags & ATA_TFLAG_POLLING)
3596 u8 status; 3903 return 1;
3597 3904
3598 /* 3905 if (ap->hsm_task_state == HSM_ST_FIRST) {
3599 * This is purely heuristic. This is a fast path. 3906 if (qc->tf.protocol == ATA_PROT_PIO &&
3600 * Sometimes when we enter, BSY will be cleared in 3907 (qc->tf.flags & ATA_TFLAG_WRITE))
3601 * a chk-status or two. If not, the drive is probably seeking 3908 return 1;
3602 * or something. Snooze for a couple msecs, then 3909
3603 * chk-status again. If still busy, fall back to 3910 if (is_atapi_taskfile(&qc->tf) &&
3604 * HSM_ST_POLL state. 3911 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
3605 */ 3912 return 1;
3606 status = ata_busy_wait(ap, ATA_BUSY, 5);
3607 if (status & ATA_BUSY) {
3608 msleep(2);
3609 status = ata_busy_wait(ap, ATA_BUSY, 10);
3610 if (status & ATA_BUSY) {
3611 ap->hsm_task_state = HSM_ST_POLL;
3612 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
3613 return;
3614 }
3615 } 3913 }
3616 3914
3617 qc = ata_qc_from_tag(ap, ap->active_tag); 3915 return 0;
3618 WARN_ON(qc == NULL); 3916}
3619 3917
3620 /* check error */ 3918/**
3621 if (status & (ATA_ERR | ATA_DF)) { 3919 * ata_hsm_qc_complete - finish a qc running on standard HSM
3622 qc->err_mask |= AC_ERR_DEV; 3920 * @qc: Command to complete
3623 ap->hsm_task_state = HSM_ST_ERR; 3921 * @in_wq: 1 if called from workqueue, 0 otherwise
3624 return; 3922 *
3625 } 3923 * Finish @qc which is running on standard HSM.
3924 *
3925 * LOCKING:
3926 * If @in_wq is zero, spin_lock_irqsave(host_set lock).
3927 * Otherwise, none on entry and grabs host lock.
3928 */
3929static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
3930{
3931 struct ata_port *ap = qc->ap;
3932 unsigned long flags;
3626 3933
3627 /* transfer data if any */ 3934 if (ap->ops->error_handler) {
3628 if (is_atapi_taskfile(&qc->tf)) { 3935 if (in_wq) {
3629 /* DRQ=0 means no more data to transfer */ 3936 spin_lock_irqsave(ap->lock, flags);
3630 if ((status & ATA_DRQ) == 0) {
3631 ap->hsm_task_state = HSM_ST_LAST;
3632 return;
3633 }
3634 3937
3635 atapi_pio_bytes(qc); 3938 /* EH might have kicked in while host_set lock
3636 } else { 3939 * is released.
3637 /* handle BSY=0, DRQ=0 as error */ 3940 */
3638 if ((status & ATA_DRQ) == 0) { 3941 qc = ata_qc_from_tag(ap, qc->tag);
3639 qc->err_mask |= AC_ERR_HSM; 3942 if (qc) {
3640 ap->hsm_task_state = HSM_ST_ERR; 3943 if (likely(!(qc->err_mask & AC_ERR_HSM))) {
3641 return; 3944 ata_irq_on(ap);
3642 } 3945 ata_qc_complete(qc);
3946 } else
3947 ata_port_freeze(ap);
3948 }
3643 3949
3644 ata_pio_sector(qc); 3950 spin_unlock_irqrestore(ap->lock, flags);
3951 } else {
3952 if (likely(!(qc->err_mask & AC_ERR_HSM)))
3953 ata_qc_complete(qc);
3954 else
3955 ata_port_freeze(ap);
3956 }
3957 } else {
3958 if (in_wq) {
3959 spin_lock_irqsave(ap->lock, flags);
3960 ata_irq_on(ap);
3961 ata_qc_complete(qc);
3962 spin_unlock_irqrestore(ap->lock, flags);
3963 } else
3964 ata_qc_complete(qc);
3645 } 3965 }
3646 3966
3647 ata_altstatus(ap); /* flush */ 3967 ata_altstatus(ap); /* flush */
3648} 3968}
3649 3969
3650static void ata_pio_error(struct ata_port *ap) 3970/**
3971 * ata_hsm_move - move the HSM to the next state.
3972 * @ap: the target ata_port
3973 * @qc: qc on going
3974 * @status: current device status
3975 * @in_wq: 1 if called from workqueue, 0 otherwise
3976 *
3977 * RETURNS:
3978 * 1 when poll next status needed, 0 otherwise.
3979 */
3980int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
3981 u8 status, int in_wq)
3651{ 3982{
3652 struct ata_queued_cmd *qc; 3983 unsigned long flags = 0;
3653 3984 int poll_next;
3654 qc = ata_qc_from_tag(ap, ap->active_tag);
3655 WARN_ON(qc == NULL);
3656 3985
3657 if (qc->tf.command != ATA_CMD_PACKET) 3986 WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
3658 printk(KERN_WARNING "ata%u: PIO error\n", ap->id);
3659 3987
3660 /* make sure qc->err_mask is available to 3988 /* Make sure ata_qc_issue_prot() does not throw things
3661 * know what's wrong and recover 3989 * like DMA polling into the workqueue. Notice that
3990 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
3662 */ 3991 */
3663 WARN_ON(qc->err_mask == 0); 3992 WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
3664
3665 ap->hsm_task_state = HSM_ST_IDLE;
3666
3667 ata_poll_qc_complete(qc);
3668}
3669
3670static void ata_pio_task(void *_data)
3671{
3672 struct ata_port *ap = _data;
3673 unsigned long timeout;
3674 int qc_completed;
3675 3993
3676fsm_start: 3994fsm_start:
3677 timeout = 0; 3995 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
3678 qc_completed = 0; 3996 ap->id, qc->tf.protocol, ap->hsm_task_state, status);
3679 3997
3680 switch (ap->hsm_task_state) { 3998 switch (ap->hsm_task_state) {
3681 case HSM_ST_IDLE: 3999 case HSM_ST_FIRST:
3682 return; 4000 /* Send first data block or PACKET CDB */
3683 4001
3684 case HSM_ST: 4002 /* If polling, we will stay in the work queue after
3685 ata_pio_block(ap); 4003 * sending the data. Otherwise, interrupt handler
3686 break; 4004 * takes over after sending the data.
3687 4005 */
3688 case HSM_ST_LAST: 4006 poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
3689 qc_completed = ata_pio_complete(ap); 4007
3690 break; 4008 /* check device status */
3691 4009 if (unlikely((status & ATA_DRQ) == 0)) {
3692 case HSM_ST_POLL: 4010 /* handle BSY=0, DRQ=0 as error */
3693 case HSM_ST_LAST_POLL: 4011 if (likely(status & (ATA_ERR | ATA_DF)))
3694 timeout = ata_pio_poll(ap); 4012 /* device stops HSM for abort/error */
3695 break; 4013 qc->err_mask |= AC_ERR_DEV;
3696 4014 else
3697 case HSM_ST_TMOUT: 4015 /* HSM violation. Let EH handle this */
3698 case HSM_ST_ERR: 4016 qc->err_mask |= AC_ERR_HSM;
3699 ata_pio_error(ap);
3700 return;
3701 }
3702
3703 if (timeout)
3704 ata_port_queue_task(ap, ata_pio_task, ap, timeout);
3705 else if (!qc_completed)
3706 goto fsm_start;
3707}
3708
3709/**
3710 * atapi_packet_task - Write CDB bytes to hardware
3711 * @_data: Port to which ATAPI device is attached.
3712 *
3713 * When device has indicated its readiness to accept
3714 * a CDB, this function is called. Send the CDB.
3715 * If DMA is to be performed, exit immediately.
3716 * Otherwise, we are in polling mode, so poll
3717 * status until the operation succeeds or fails.
3718 *
3719 * LOCKING:
3720 * Kernel thread context (may sleep)
3721 */
3722
3723static void atapi_packet_task(void *_data)
3724{
3725 struct ata_port *ap = _data;
3726 struct ata_queued_cmd *qc;
3727 u8 status;
3728
3729 qc = ata_qc_from_tag(ap, ap->active_tag);
3730 WARN_ON(qc == NULL);
3731 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
3732 4017
3733 /* sleep-wait for BSY to clear */ 4018 ap->hsm_task_state = HSM_ST_ERR;
3734 DPRINTK("busy wait\n"); 4019 goto fsm_start;
3735 if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB)) { 4020 }
3736 qc->err_mask |= AC_ERR_TIMEOUT;
3737 goto err_out;
3738 }
3739 4021
3740 /* make sure DRQ is set */ 4022 /* Device should not ask for data transfer (DRQ=1)
3741 status = ata_chk_status(ap); 4023 * when it finds something wrong.
3742 if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ) { 4024 * We ignore DRQ here and stop the HSM by
3743 qc->err_mask |= AC_ERR_HSM; 4025 * changing hsm_task_state to HSM_ST_ERR and
3744 goto err_out; 4026 * let the EH abort the command or reset the device.
3745 } 4027 */
4028 if (unlikely(status & (ATA_ERR | ATA_DF))) {
 4029 printk(KERN_WARNING "ata%u: DRQ=1 with device error, dev_stat 0x%X\n",
4030 ap->id, status);
4031 qc->err_mask |= AC_ERR_HSM;
4032 ap->hsm_task_state = HSM_ST_ERR;
4033 goto fsm_start;
4034 }
3746 4035
3747 /* send SCSI cdb */ 4036 /* Send the CDB (atapi) or the first data block (ata pio out).
3748 DPRINTK("send cdb\n"); 4037 * During the state transition, interrupt handler shouldn't
3749 WARN_ON(qc->dev->cdb_len < 12); 4038 * be invoked before the data transfer is complete and
4039 * hsm_task_state is changed. Hence, the following locking.
4040 */
4041 if (in_wq)
4042 spin_lock_irqsave(ap->lock, flags);
3750 4043
3751 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA || 4044 if (qc->tf.protocol == ATA_PROT_PIO) {
3752 qc->tf.protocol == ATA_PROT_ATAPI_NODATA) { 4045 /* PIO data out protocol.
3753 unsigned long flags; 4046 * send first data block.
4047 */
3754 4048
3755 /* Once we're done issuing command and kicking bmdma, 4049 /* ata_pio_sectors() might change the state
3756 * irq handler takes over. To not lose irq, we need 4050 * to HSM_ST_LAST. so, the state is changed here
3757 * to clear NOINTR flag before sending cdb, but 4051 * before ata_pio_sectors().
3758 * interrupt handler shouldn't be invoked before we're 4052 */
3759 * finished. Hence, the following locking. 4053 ap->hsm_task_state = HSM_ST;
4054 ata_pio_sectors(qc);
4055 ata_altstatus(ap); /* flush */
4056 } else
4057 /* send CDB */
4058 atapi_send_cdb(ap, qc);
4059
4060 if (in_wq)
4061 spin_unlock_irqrestore(ap->lock, flags);
4062
4063 /* if polling, ata_pio_task() handles the rest.
4064 * otherwise, interrupt handler takes over from here.
3760 */ 4065 */
3761 spin_lock_irqsave(&ap->host_set->lock, flags); 4066 break;
3762 ap->flags &= ~ATA_FLAG_NOINTR;
3763 ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);
3764 ata_altstatus(ap); /* flush */
3765 4067
3766 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA) 4068 case HSM_ST:
3767 ap->ops->bmdma_start(qc); /* initiate bmdma */ 4069 /* complete command or read/write the data register */
3768 spin_unlock_irqrestore(&ap->host_set->lock, flags); 4070 if (qc->tf.protocol == ATA_PROT_ATAPI) {
3769 } else { 4071 /* ATAPI PIO protocol */
3770 ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1); 4072 if ((status & ATA_DRQ) == 0) {
3771 ata_altstatus(ap); /* flush */ 4073 /* No more data to transfer or device error.
4074 * Device error will be tagged in HSM_ST_LAST.
4075 */
4076 ap->hsm_task_state = HSM_ST_LAST;
4077 goto fsm_start;
4078 }
3772 4079
3773 /* PIO commands are handled by polling */ 4080 /* Device should not ask for data transfer (DRQ=1)
3774 ap->hsm_task_state = HSM_ST; 4081 * when it finds something wrong.
3775 ata_port_queue_task(ap, ata_pio_task, ap, 0); 4082 * We ignore DRQ here and stop the HSM by
3776 } 4083 * changing hsm_task_state to HSM_ST_ERR and
4084 * let the EH abort the command or reset the device.
4085 */
4086 if (unlikely(status & (ATA_ERR | ATA_DF))) {
 4087 printk(KERN_WARNING "ata%u: DRQ=1 with device error, dev_stat 0x%X\n",
4088 ap->id, status);
4089 qc->err_mask |= AC_ERR_HSM;
4090 ap->hsm_task_state = HSM_ST_ERR;
4091 goto fsm_start;
4092 }
3777 4093
3778 return; 4094 atapi_pio_bytes(qc);
3779 4095
3780err_out: 4096 if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
3781 ata_poll_qc_complete(qc); 4097 /* bad ireason reported by device */
3782} 4098 goto fsm_start;
3783 4099
3784/** 4100 } else {
3785 * ata_qc_timeout - Handle timeout of queued command 4101 /* ATA PIO protocol */
3786 * @qc: Command that timed out 4102 if (unlikely((status & ATA_DRQ) == 0)) {
3787 * 4103 /* handle BSY=0, DRQ=0 as error */
3788 * Some part of the kernel (currently, only the SCSI layer) 4104 if (likely(status & (ATA_ERR | ATA_DF)))
3789 * has noticed that the active command on port @ap has not 4105 /* device stops HSM for abort/error */
3790 * completed after a specified length of time. Handle this 4106 qc->err_mask |= AC_ERR_DEV;
3791 * condition by disabling DMA (if necessary) and completing 4107 else
3792 * transactions, with error if necessary. 4108 /* HSM violation. Let EH handle this */
3793 * 4109 qc->err_mask |= AC_ERR_HSM;
3794 * This also handles the case of the "lost interrupt", where 4110
3795 * for some reason (possibly hardware bug, possibly driver bug) 4111 ap->hsm_task_state = HSM_ST_ERR;
3796 * an interrupt was not delivered to the driver, even though the 4112 goto fsm_start;
3797 * transaction completed successfully. 4113 }
3798 *
3799 * LOCKING:
3800 * Inherited from SCSI layer (none, can sleep)
3801 */
3802 4114
3803static void ata_qc_timeout(struct ata_queued_cmd *qc) 4115 /* For PIO reads, some devices may ask for
3804{ 4116 * data transfer (DRQ=1) along with ERR=1.
3805 struct ata_port *ap = qc->ap; 4117 * We respect DRQ here and transfer one
3806 struct ata_host_set *host_set = ap->host_set; 4118 * block of junk data before changing the
3807 u8 host_stat = 0, drv_stat; 4119 * hsm_task_state to HSM_ST_ERR.
3808 unsigned long flags; 4120 *
4121 * For PIO writes, ERR=1 DRQ=1 doesn't make
4122 * sense since the data block has been
4123 * transferred to the device.
4124 */
4125 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4126 /* data might be corrupted */
4127 qc->err_mask |= AC_ERR_DEV;
4128
4129 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
4130 ata_pio_sectors(qc);
4131 ata_altstatus(ap);
4132 status = ata_wait_idle(ap);
4133 }
4134
4135 if (status & (ATA_BUSY | ATA_DRQ))
4136 qc->err_mask |= AC_ERR_HSM;
4137
4138 /* ata_pio_sectors() might change the
4139 * state to HSM_ST_LAST, so the state
4140 * is changed after ata_pio_sectors().
4141 */
4142 ap->hsm_task_state = HSM_ST_ERR;
4143 goto fsm_start;
4144 }
3809 4145
3810 DPRINTK("ENTER\n"); 4146 ata_pio_sectors(qc);
3811 4147
3812 ap->hsm_task_state = HSM_ST_IDLE; 4148 if (ap->hsm_task_state == HSM_ST_LAST &&
4149 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
4150 /* all data read */
4151 ata_altstatus(ap);
4152 status = ata_wait_idle(ap);
4153 goto fsm_start;
4154 }
4155 }
3813 4156
3814 spin_lock_irqsave(&host_set->lock, flags); 4157 ata_altstatus(ap); /* flush */
4158 poll_next = 1;
4159 break;
3815 4160
3816 switch (qc->tf.protocol) { 4161 case HSM_ST_LAST:
4162 if (unlikely(!ata_ok(status))) {
4163 qc->err_mask |= __ac_err_mask(status);
4164 ap->hsm_task_state = HSM_ST_ERR;
4165 goto fsm_start;
4166 }
3817 4167
3818 case ATA_PROT_DMA: 4168 /* no more data to transfer */
3819 case ATA_PROT_ATAPI_DMA: 4169 DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
3820 host_stat = ap->ops->bmdma_status(ap); 4170 ap->id, qc->dev->devno, status);
3821 4171
3822 /* before we do anything else, clear DMA-Start bit */ 4172 WARN_ON(qc->err_mask);
3823 ap->ops->bmdma_stop(qc);
3824 4173
3825 /* fall through */ 4174 ap->hsm_task_state = HSM_ST_IDLE;
3826 4175
3827 default: 4176 /* complete taskfile transaction */
3828 ata_altstatus(ap); 4177 ata_hsm_qc_complete(qc, in_wq);
3829 drv_stat = ata_chk_status(ap); 4178
4179 poll_next = 0;
4180 break;
3830 4181
3831 /* ack bmdma irq events */ 4182 case HSM_ST_ERR:
3832 ap->ops->irq_clear(ap); 4183 /* make sure qc->err_mask is available to
4184 * know what's wrong and recover
4185 */
4186 WARN_ON(qc->err_mask == 0);
3833 4187
3834 printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x host_stat 0x%x\n", 4188 ap->hsm_task_state = HSM_ST_IDLE;
3835 ap->id, qc->tf.command, drv_stat, host_stat);
3836 4189
3837 /* complete taskfile transaction */ 4190 /* complete taskfile transaction */
3838 qc->err_mask |= ac_err_mask(drv_stat); 4191 ata_hsm_qc_complete(qc, in_wq);
4192
4193 poll_next = 0;
3839 break; 4194 break;
4195 default:
4196 poll_next = 0;
4197 BUG();
3840 } 4198 }
3841 4199
3842 spin_unlock_irqrestore(&host_set->lock, flags); 4200 return poll_next;
3843
3844 ata_eh_qc_complete(qc);
3845
3846 DPRINTK("EXIT\n");
3847} 4201}
3848 4202
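The ata_hsm_move() switch above is a classic one-event-at-a-time state machine: handle the current state, possibly chain straight into the next state (the fsm_start goto), and tell the caller whether it should keep polling. Here is a minimal userspace model of that dispatch pattern; hsm_state, hsm_move() and the stub transitions are invented for the sketch and are not libata API.

    #include <stdio.h>

    enum hsm_state { HSM_FIRST, HSM_DATA, HSM_LAST, HSM_IDLE, HSM_ERR };

    /* Process one event; returns nonzero if the caller should poll again. */
    static int hsm_move(enum hsm_state *st, int status_ok)
    {
        for (;;) {                      /* mirrors the fsm_start goto loop */
            switch (*st) {
            case HSM_FIRST:             /* e.g. send CDB / first data block */
                *st = HSM_DATA;
                return 1;               /* more to do: keep polling */
            case HSM_DATA:              /* transfer one block */
                *st = status_ok ? HSM_LAST : HSM_ERR;
                continue;               /* re-dispatch in the new state */
            case HSM_LAST:              /* check final status */
                *st = HSM_IDLE;
                return 0;               /* command complete */
            case HSM_ERR:               /* hand off to error handling */
                *st = HSM_IDLE;
                return 0;
            default:
                return 0;
            }
        }
    }

    int main(void)
    {
        enum hsm_state st = HSM_FIRST;
        while (hsm_move(&st, 1))
            ;                           /* poll until the HSM goes idle */
        printf("state machine idle\n");
        return 0;
    }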
3849/** 4203static void ata_pio_task(void *_data)
3850 * ata_eng_timeout - Handle timeout of queued command
3851 * @ap: Port on which timed-out command is active
3852 *
3853 * Some part of the kernel (currently, only the SCSI layer)
3854 * has noticed that the active command on port @ap has not
3855 * completed after a specified length of time. Handle this
3856 * condition by disabling DMA (if necessary) and completing
3857 * transactions, with error if necessary.
3858 *
3859 * This also handles the case of the "lost interrupt", where
3860 * for some reason (possibly hardware bug, possibly driver bug)
3861 * an interrupt was not delivered to the driver, even though the
3862 * transaction completed successfully.
3863 *
3864 * LOCKING:
3865 * Inherited from SCSI layer (none, can sleep)
3866 */
3867
3868void ata_eng_timeout(struct ata_port *ap)
3869{ 4204{
3870 DPRINTK("ENTER\n"); 4205 struct ata_queued_cmd *qc = _data;
4206 struct ata_port *ap = qc->ap;
4207 u8 status;
4208 int poll_next;
3871 4209
3872 ata_qc_timeout(ata_qc_from_tag(ap, ap->active_tag)); 4210fsm_start:
4211 WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
3873 4212
3874 DPRINTK("EXIT\n"); 4213 /*
4214 * This is purely heuristic. This is a fast path.
4215 * Sometimes when we enter, BSY will be cleared in
4216 * a chk-status or two. If not, the drive is probably seeking
4217 * or something. Snooze for a couple msecs, then
4218 * chk-status again. If still busy, queue delayed work.
4219 */
4220 status = ata_busy_wait(ap, ATA_BUSY, 5);
4221 if (status & ATA_BUSY) {
4222 msleep(2);
4223 status = ata_busy_wait(ap, ATA_BUSY, 10);
4224 if (status & ATA_BUSY) {
4225 ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
4226 return;
4227 }
4228 }
4229
4230 /* move the HSM */
4231 poll_next = ata_hsm_move(ap, qc, status, 1);
4232
4233 /* another command or interrupt handler
4234 * may be running at this point.
4235 */
4236 if (poll_next)
4237 goto fsm_start;
3875} 4238}
3876 4239
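The polling heuristic in ata_pio_task() above (quick status check, a 2 ms snooze, one more check, then requeue as delayed work) can be modelled in plain C. This is illustrative only; read_status(), BUSY and requeue_later() are hypothetical placeholders for the controller status read and the workqueue requeue.

    #include <stdio.h>
    #include <time.h>

    #define BUSY 0x80

    /* Hypothetical status source: pretend the device stays busy twice. */
    static int busy_polls_left = 2;
    static unsigned char read_status(void)
    {
        return busy_polls_left-- > 0 ? BUSY : 0x50;
    }

    static void msleep_ms(long ms)
    {
        struct timespec ts = { ms / 1000, (ms % 1000) * 1000000L };
        nanosleep(&ts, NULL);
    }

    static void requeue_later(void)
    {
        printf("still busy: requeue as delayed work\n");
    }

    int main(void)
    {
        unsigned char status = read_status();    /* fast path: often ready */
        if (status & BUSY) {
            msleep_ms(2);                        /* brief snooze */
            status = read_status();              /* second chance */
            if (status & BUSY) {
                requeue_later();                 /* give up, poll later */
                return 0;
            }
        }
        printf("device ready, status 0x%02x\n", status);
        return 0;
    }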
3877/** 4240/**
@@ -3888,9 +4251,14 @@ static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
3888 struct ata_queued_cmd *qc = NULL; 4251 struct ata_queued_cmd *qc = NULL;
3889 unsigned int i; 4252 unsigned int i;
3890 4253
3891 for (i = 0; i < ATA_MAX_QUEUE; i++) 4254 /* no command while frozen */
3892 if (!test_and_set_bit(i, &ap->qactive)) { 4255 if (unlikely(ap->flags & ATA_FLAG_FROZEN))
3893 qc = ata_qc_from_tag(ap, i); 4256 return NULL;
4257
4258 /* the last tag is reserved for internal command. */
4259 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
4260 if (!test_and_set_bit(i, &ap->qc_allocated)) {
4261 qc = __ata_qc_from_tag(ap, i);
3894 break; 4262 break;
3895 } 4263 }
3896 4264
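The allocation loop above hands out command tags by atomically testing and setting bits in ap->qc_allocated, keeping the last tag reserved for the internal command. A rough userspace analogue using the GCC/Clang atomic builtins; alloc_tag(), free_tag() and MAX_QUEUE are names invented for the sketch.

    #include <stdio.h>

    #define MAX_QUEUE 32                 /* tag 31 reserved for internal cmd */

    static unsigned long qc_allocated;   /* one bit per tag */

    /* Returns a free tag, or -1 if all regular tags are in use. */
    static int alloc_tag(void)
    {
        for (int i = 0; i < MAX_QUEUE - 1; i++)
            /* atomically set bit i; the old value tells us if it was free */
            if (!(__atomic_fetch_or(&qc_allocated, 1UL << i,
                                    __ATOMIC_ACQ_REL) & (1UL << i)))
                return i;
        return -1;
    }

    static void free_tag(int tag)
    {
        __atomic_fetch_and(&qc_allocated, ~(1UL << tag), __ATOMIC_ACQ_REL);
    }

    int main(void)
    {
        int a = alloc_tag(), b = alloc_tag();
        printf("got tags %d and %d\n", a, b);    /* 0 and 1 */
        free_tag(a);
        printf("reused tag %d\n", alloc_tag());  /* 0 again */
        return 0;
    }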
@@ -3902,16 +4270,15 @@ static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
3902 4270
3903/** 4271/**
3904 * ata_qc_new_init - Request an available ATA command, and initialize it 4272 * ata_qc_new_init - Request an available ATA command, and initialize it
3905 * @ap: Port associated with device @dev
3906 * @dev: Device from which we request an available command structure 4273 * @dev: Device from which we request an available command structure
3907 * 4274 *
3908 * LOCKING: 4275 * LOCKING:
3909 * None. 4276 * None.
3910 */ 4277 */
3911 4278
3912struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap, 4279struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
3913 struct ata_device *dev)
3914{ 4280{
4281 struct ata_port *ap = dev->ap;
3915 struct ata_queued_cmd *qc; 4282 struct ata_queued_cmd *qc;
3916 4283
3917 qc = ata_qc_new(ap); 4284 qc = ata_qc_new(ap);
@@ -3946,36 +4313,153 @@ void ata_qc_free(struct ata_queued_cmd *qc)
3946 qc->flags = 0; 4313 qc->flags = 0;
3947 tag = qc->tag; 4314 tag = qc->tag;
3948 if (likely(ata_tag_valid(tag))) { 4315 if (likely(ata_tag_valid(tag))) {
3949 if (tag == ap->active_tag)
3950 ap->active_tag = ATA_TAG_POISON;
3951 qc->tag = ATA_TAG_POISON; 4316 qc->tag = ATA_TAG_POISON;
3952 clear_bit(tag, &ap->qactive); 4317 clear_bit(tag, &ap->qc_allocated);
3953 } 4318 }
3954} 4319}
3955 4320
3956void __ata_qc_complete(struct ata_queued_cmd *qc) 4321void __ata_qc_complete(struct ata_queued_cmd *qc)
3957{ 4322{
4323 struct ata_port *ap = qc->ap;
4324
3958 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ 4325 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
3959 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE)); 4326 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
3960 4327
3961 if (likely(qc->flags & ATA_QCFLAG_DMAMAP)) 4328 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
3962 ata_sg_clean(qc); 4329 ata_sg_clean(qc);
3963 4330
4331 /* command should be marked inactive atomically with qc completion */
4332 if (qc->tf.protocol == ATA_PROT_NCQ)
4333 ap->sactive &= ~(1 << qc->tag);
4334 else
4335 ap->active_tag = ATA_TAG_POISON;
4336
3964 /* atapi: mark qc as inactive to prevent the interrupt handler 4337 /* atapi: mark qc as inactive to prevent the interrupt handler
3965 * from completing the command twice later, before the error handler 4338 * from completing the command twice later, before the error handler
3966 * is called. (when rc != 0 and atapi request sense is needed) 4339 * is called. (when rc != 0 and atapi request sense is needed)
3967 */ 4340 */
3968 qc->flags &= ~ATA_QCFLAG_ACTIVE; 4341 qc->flags &= ~ATA_QCFLAG_ACTIVE;
4342 ap->qc_active &= ~(1 << qc->tag);
3969 4343
3970 /* call completion callback */ 4344 /* call completion callback */
3971 qc->complete_fn(qc); 4345 qc->complete_fn(qc);
3972} 4346}
3973 4347
4348/**
4349 * ata_qc_complete - Complete an active ATA command
4350 * @qc: Command to complete
4352 *
4353 * Indicate to the mid and upper layers that an ATA
4354 * command has completed, with either an ok or not-ok status.
4355 *
4356 * LOCKING:
4357 * spin_lock_irqsave(host_set lock)
4358 */
4359void ata_qc_complete(struct ata_queued_cmd *qc)
4360{
4361 struct ata_port *ap = qc->ap;
4362
4363 /* XXX: New EH and old EH use different mechanisms to
4364 * synchronize EH with regular execution path.
4365 *
4366 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
4367 * Normal execution path is responsible for not accessing a
4368 * failed qc. libata core enforces the rule by returning NULL
4369 * from ata_qc_from_tag() for failed qcs.
4370 *
4371 * Old EH depends on ata_qc_complete() nullifying completion
4372 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
4373 * not synchronize with interrupt handler. Only PIO task is
4374 * taken care of.
4375 */
4376 if (ap->ops->error_handler) {
4377 WARN_ON(ap->flags & ATA_FLAG_FROZEN);
4378
4379 if (unlikely(qc->err_mask))
4380 qc->flags |= ATA_QCFLAG_FAILED;
4381
4382 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
4383 if (!ata_tag_internal(qc->tag)) {
4384 /* always fill result TF for failed qc */
4385 ap->ops->tf_read(ap, &qc->result_tf);
4386 ata_qc_schedule_eh(qc);
4387 return;
4388 }
4389 }
4390
4391 /* read result TF if requested */
4392 if (qc->flags & ATA_QCFLAG_RESULT_TF)
4393 ap->ops->tf_read(ap, &qc->result_tf);
4394
4395 __ata_qc_complete(qc);
4396 } else {
4397 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
4398 return;
4399
4400 /* read result TF if failed or requested */
4401 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
4402 ap->ops->tf_read(ap, &qc->result_tf);
4403
4404 __ata_qc_complete(qc);
4405 }
4406}
4407
4408/**
4409 * ata_qc_complete_multiple - Complete multiple qcs successfully
4410 * @ap: port in question
4411 * @qc_active: new qc_active mask
4412 * @finish_qc: LLDD callback invoked before completing a qc
4413 *
4414 * Complete in-flight commands. This function is meant to be
4415 * called from low-level driver's interrupt routine to complete
4416 * requests normally. ap->qc_active and @qc_active are compared
4417 * and commands are completed accordingly.
4418 *
4419 * LOCKING:
4420 * spin_lock_irqsave(host_set lock)
4421 *
4422 * RETURNS:
4423 * Number of completed commands on success, -errno otherwise.
4424 */
4425int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
4426 void (*finish_qc)(struct ata_queued_cmd *))
4427{
4428 int nr_done = 0;
4429 u32 done_mask;
4430 int i;
4431
4432 done_mask = ap->qc_active ^ qc_active;
4433
4434 if (unlikely(done_mask & qc_active)) {
4435 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
4436 "(%08x->%08x)\n", ap->qc_active, qc_active);
4437 return -EINVAL;
4438 }
4439
4440 for (i = 0; i < ATA_MAX_QUEUE; i++) {
4441 struct ata_queued_cmd *qc;
4442
4443 if (!(done_mask & (1 << i)))
4444 continue;
4445
4446 if ((qc = ata_qc_from_tag(ap, i))) {
4447 if (finish_qc)
4448 finish_qc(qc);
4449 ata_qc_complete(qc);
4450 nr_done++;
4451 }
4452 }
4453
4454 return nr_done;
4455}
4456
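The mask arithmetic in ata_qc_complete_multiple() is worth spelling out: XORing the old and new qc_active masks yields the set of tags that changed, and any changed bit that is still set in the new mask would mean a command appeared out of nowhere, hence the -EINVAL check. A small standalone demonstration of the same logic; complete_tag() is a stub standing in for the finish_qc/ata_qc_complete calls.

    #include <stdio.h>
    #include <stdint.h>

    static void complete_tag(int tag)
    {
        printf("completing tag %d\n", tag);
    }

    /* Returns number of completed tags, or -1 on an illegal transition. */
    static int complete_multiple(uint32_t old_active, uint32_t new_active)
    {
        uint32_t done_mask = old_active ^ new_active;
        int nr_done = 0;

        /* a "changed" bit may only go 1 -> 0 (active -> done) */
        if (done_mask & new_active)
            return -1;

        for (int i = 0; i < 32; i++)
            if (done_mask & (1u << i)) {
                complete_tag(i);
                nr_done++;
            }
        return nr_done;
    }

    int main(void)
    {
        /* tags 0, 2, 5 were active; the controller now reports only tag 5 */
        printf("nr_done = %d\n", complete_multiple(0x25, 0x20));
        return 0;
    }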
3974static inline int ata_should_dma_map(struct ata_queued_cmd *qc) 4457static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
3975{ 4458{
3976 struct ata_port *ap = qc->ap; 4459 struct ata_port *ap = qc->ap;
3977 4460
3978 switch (qc->tf.protocol) { 4461 switch (qc->tf.protocol) {
4462 case ATA_PROT_NCQ:
3979 case ATA_PROT_DMA: 4463 case ATA_PROT_DMA:
3980 case ATA_PROT_ATAPI_DMA: 4464 case ATA_PROT_ATAPI_DMA:
3981 return 1; 4465 return 1;
@@ -4010,8 +4494,22 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
4010{ 4494{
4011 struct ata_port *ap = qc->ap; 4495 struct ata_port *ap = qc->ap;
4012 4496
4013 qc->ap->active_tag = qc->tag; 4497 /* Make sure only one non-NCQ command is outstanding. The
4498 * check is skipped for old EH because it reuses active qc to
4499 * request ATAPI sense.
4500 */
4501 WARN_ON(ap->ops->error_handler && ata_tag_valid(ap->active_tag));
4502
4503 if (qc->tf.protocol == ATA_PROT_NCQ) {
4504 WARN_ON(ap->sactive & (1 << qc->tag));
4505 ap->sactive |= 1 << qc->tag;
4506 } else {
4507 WARN_ON(ap->sactive);
4508 ap->active_tag = qc->tag;
4509 }
4510
4014 qc->flags |= ATA_QCFLAG_ACTIVE; 4511 qc->flags |= ATA_QCFLAG_ACTIVE;
4512 ap->qc_active |= 1 << qc->tag;
4015 4513
4016 if (ata_should_dma_map(qc)) { 4514 if (ata_should_dma_map(qc)) {
4017 if (qc->flags & ATA_QCFLAG_SG) { 4515 if (qc->flags & ATA_QCFLAG_SG) {
@@ -4061,43 +4559,105 @@ unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
4061{ 4559{
4062 struct ata_port *ap = qc->ap; 4560 struct ata_port *ap = qc->ap;
4063 4561
4562 /* Use polling pio if the LLD doesn't handle
4563 * interrupt driven pio and atapi CDB interrupt.
4564 */
4565 if (ap->flags & ATA_FLAG_PIO_POLLING) {
4566 switch (qc->tf.protocol) {
4567 case ATA_PROT_PIO:
4568 case ATA_PROT_ATAPI:
4569 case ATA_PROT_ATAPI_NODATA:
4570 qc->tf.flags |= ATA_TFLAG_POLLING;
4571 break;
4572 case ATA_PROT_ATAPI_DMA:
4573 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
4574 /* see ata_dma_blacklisted() */
4575 BUG();
4576 break;
4577 default:
4578 break;
4579 }
4580 }
4581
4582 /* select the device */
4064 ata_dev_select(ap, qc->dev->devno, 1, 0); 4583 ata_dev_select(ap, qc->dev->devno, 1, 0);
4065 4584
4585 /* start the command */
4066 switch (qc->tf.protocol) { 4586 switch (qc->tf.protocol) {
4067 case ATA_PROT_NODATA: 4587 case ATA_PROT_NODATA:
4588 if (qc->tf.flags & ATA_TFLAG_POLLING)
4589 ata_qc_set_polling(qc);
4590
4068 ata_tf_to_host(ap, &qc->tf); 4591 ata_tf_to_host(ap, &qc->tf);
4592 ap->hsm_task_state = HSM_ST_LAST;
4593
4594 if (qc->tf.flags & ATA_TFLAG_POLLING)
4595 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4596
4069 break; 4597 break;
4070 4598
4071 case ATA_PROT_DMA: 4599 case ATA_PROT_DMA:
4600 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
4601
4072 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */ 4602 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4073 ap->ops->bmdma_setup(qc); /* set up bmdma */ 4603 ap->ops->bmdma_setup(qc); /* set up bmdma */
4074 ap->ops->bmdma_start(qc); /* initiate bmdma */ 4604 ap->ops->bmdma_start(qc); /* initiate bmdma */
4605 ap->hsm_task_state = HSM_ST_LAST;
4075 break; 4606 break;
4076 4607
4077 case ATA_PROT_PIO: /* load tf registers, initiate polling pio */ 4608 case ATA_PROT_PIO:
4078 ata_qc_set_polling(qc); 4609 if (qc->tf.flags & ATA_TFLAG_POLLING)
4079 ata_tf_to_host(ap, &qc->tf); 4610 ata_qc_set_polling(qc);
4080 ap->hsm_task_state = HSM_ST;
4081 ata_port_queue_task(ap, ata_pio_task, ap, 0);
4082 break;
4083 4611
4084 case ATA_PROT_ATAPI:
4085 ata_qc_set_polling(qc);
4086 ata_tf_to_host(ap, &qc->tf); 4612 ata_tf_to_host(ap, &qc->tf);
4087 ata_port_queue_task(ap, atapi_packet_task, ap, 0); 4613
4614 if (qc->tf.flags & ATA_TFLAG_WRITE) {
4615 /* PIO data out protocol */
4616 ap->hsm_task_state = HSM_ST_FIRST;
4617 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4618
4619 /* always send first data block using
4620 * the ata_pio_task() codepath.
4621 */
4622 } else {
4623 /* PIO data in protocol */
4624 ap->hsm_task_state = HSM_ST;
4625
4626 if (qc->tf.flags & ATA_TFLAG_POLLING)
4627 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4628
4629 /* if polling, ata_pio_task() handles the rest.
4630 * otherwise, interrupt handler takes over from here.
4631 */
4632 }
4633
4088 break; 4634 break;
4089 4635
4636 case ATA_PROT_ATAPI:
4090 case ATA_PROT_ATAPI_NODATA: 4637 case ATA_PROT_ATAPI_NODATA:
4091 ap->flags |= ATA_FLAG_NOINTR; 4638 if (qc->tf.flags & ATA_TFLAG_POLLING)
4639 ata_qc_set_polling(qc);
4640
4092 ata_tf_to_host(ap, &qc->tf); 4641 ata_tf_to_host(ap, &qc->tf);
4093 ata_port_queue_task(ap, atapi_packet_task, ap, 0); 4642
4643 ap->hsm_task_state = HSM_ST_FIRST;
4644
4645 /* send cdb by polling if no cdb interrupt */
4646 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
4647 (qc->tf.flags & ATA_TFLAG_POLLING))
4648 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4094 break; 4649 break;
4095 4650
4096 case ATA_PROT_ATAPI_DMA: 4651 case ATA_PROT_ATAPI_DMA:
4097 ap->flags |= ATA_FLAG_NOINTR; 4652 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
4653
4098 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */ 4654 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4099 ap->ops->bmdma_setup(qc); /* set up bmdma */ 4655 ap->ops->bmdma_setup(qc); /* set up bmdma */
4100 ata_port_queue_task(ap, atapi_packet_task, ap, 0); 4656 ap->hsm_task_state = HSM_ST_FIRST;
4657
4658 /* send cdb by polling if no cdb interrupt */
4659 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4660 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4101 break; 4661 break;
4102 4662
4103 default: 4663 default:
@@ -4127,52 +4687,66 @@ unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
4127inline unsigned int ata_host_intr (struct ata_port *ap, 4687inline unsigned int ata_host_intr (struct ata_port *ap,
4128 struct ata_queued_cmd *qc) 4688 struct ata_queued_cmd *qc)
4129{ 4689{
4130 u8 status, host_stat; 4690 u8 status, host_stat = 0;
4131
4132 switch (qc->tf.protocol) {
4133
4134 case ATA_PROT_DMA:
4135 case ATA_PROT_ATAPI_DMA:
4136 case ATA_PROT_ATAPI:
4137 /* check status of DMA engine */
4138 host_stat = ap->ops->bmdma_status(ap);
4139 VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
4140
4141 /* if it's not our irq... */
4142 if (!(host_stat & ATA_DMA_INTR))
4143 goto idle_irq;
4144 4691
4145 /* before we do anything else, clear DMA-Start bit */ 4692 VPRINTK("ata%u: protocol %d task_state %d\n",
4146 ap->ops->bmdma_stop(qc); 4693 ap->id, qc->tf.protocol, ap->hsm_task_state);
4147 4694
4148 /* fall through */ 4695 /* Check whether we are expecting interrupt in this state */
4149 4696 switch (ap->hsm_task_state) {
4150 case ATA_PROT_ATAPI_NODATA: 4697 case HSM_ST_FIRST:
4151 case ATA_PROT_NODATA: 4698 /* Some pre-ATAPI-4 devices assert INTRQ
4152 /* check altstatus */ 4699 * at this state when ready to receive CDB.
4153 status = ata_altstatus(ap); 4700 */
4154 if (status & ATA_BUSY)
4155 goto idle_irq;
4156 4701
4157 /* check main status, clearing INTRQ */ 4702 * Checking the ATA_DFLAG_CDB_INTR flag is enough here.
4158 status = ata_chk_status(ap); 4703 * The flag is set only for ATAPI devices.
4159 if (unlikely(status & ATA_BUSY)) 4704 * No need to check is_atapi_taskfile(&qc->tf) again.
4705 */
4706 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4160 goto idle_irq; 4707 goto idle_irq;
4161 DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
4162 ap->id, qc->tf.protocol, status);
4163
4164 /* ack bmdma irq events */
4165 ap->ops->irq_clear(ap);
4166
4167 /* complete taskfile transaction */
4168 qc->err_mask |= ac_err_mask(status);
4169 ata_qc_complete(qc);
4170 break; 4708 break;
4171 4709 case HSM_ST_LAST:
4710 if (qc->tf.protocol == ATA_PROT_DMA ||
4711 qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
4712 /* check status of DMA engine */
4713 host_stat = ap->ops->bmdma_status(ap);
4714 VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
4715
4716 /* if it's not our irq... */
4717 if (!(host_stat & ATA_DMA_INTR))
4718 goto idle_irq;
4719
4720 /* before we do anything else, clear DMA-Start bit */
4721 ap->ops->bmdma_stop(qc);
4722
4723 if (unlikely(host_stat & ATA_DMA_ERR)) {
4724 /* error when transferring data to/from memory */
4725 qc->err_mask |= AC_ERR_HOST_BUS;
4726 ap->hsm_task_state = HSM_ST_ERR;
4727 }
4728 }
4729 break;
4730 case HSM_ST:
4731 break;
4172 default: 4732 default:
4173 goto idle_irq; 4733 goto idle_irq;
4174 } 4734 }
4175 4735
4736 /* check altstatus */
4737 status = ata_altstatus(ap);
4738 if (status & ATA_BUSY)
4739 goto idle_irq;
4740
4741 /* check main status, clearing INTRQ */
4742 status = ata_chk_status(ap);
4743 if (unlikely(status & ATA_BUSY))
4744 goto idle_irq;
4745
4746 /* ack bmdma irq events */
4747 ap->ops->irq_clear(ap);
4748
4749 ata_hsm_move(ap, qc, status, 0);
4176 return 1; /* irq handled */ 4750 return 1; /* irq handled */
4177 4751
4178idle_irq: 4752idle_irq:
@@ -4181,7 +4755,7 @@ idle_irq:
4181#ifdef ATA_IRQ_TRAP 4755#ifdef ATA_IRQ_TRAP
4182 if ((ap->stats.idle_irq % 1000) == 0) { 4756 if ((ap->stats.idle_irq % 1000) == 0) {
4183 ata_irq_ack(ap, 0); /* debug trap */ 4757 ata_irq_ack(ap, 0); /* debug trap */
4184 printk(KERN_WARNING "ata%d: irq trap\n", ap->id); 4758 ata_port_printk(ap, KERN_WARNING, "irq trap\n");
4185 return 1; 4759 return 1;
4186 } 4760 }
4187#endif 4761#endif
@@ -4219,11 +4793,11 @@ irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
4219 4793
4220 ap = host_set->ports[i]; 4794 ap = host_set->ports[i];
4221 if (ap && 4795 if (ap &&
4222 !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) { 4796 !(ap->flags & ATA_FLAG_DISABLED)) {
4223 struct ata_queued_cmd *qc; 4797 struct ata_queued_cmd *qc;
4224 4798
4225 qc = ata_qc_from_tag(ap, ap->active_tag); 4799 qc = ata_qc_from_tag(ap, ap->active_tag);
4226 if (qc && (!(qc->tf.ctl & ATA_NIEN)) && 4800 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
4227 (qc->flags & ATA_QCFLAG_ACTIVE)) 4801 (qc->flags & ATA_QCFLAG_ACTIVE))
4228 handled |= ata_host_intr(ap, qc); 4802 handled |= ata_host_intr(ap, qc);
4229 } 4803 }
@@ -4234,32 +4808,168 @@ irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
4234 return IRQ_RETVAL(handled); 4808 return IRQ_RETVAL(handled);
4235} 4809}
4236 4810
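The ata_interrupt() hunk above shows the shared-IRQ pattern: walk every port on the host set, skip disabled ports and commands that have opted into polling (now the ATA_TFLAG_POLLING test instead of the old ATA_NIEN check), and OR together the per-port results so the core can tell the kernel whether the IRQ was ours. A compressed userspace model; the port array, flag bits and handle_port() are all invented for the sketch.

    #include <stdio.h>

    #define PORT_DISABLED 0x1
    #define CMD_POLLING   0x2

    struct port { int flags; int cmd_flags; int has_cmd; };

    static int handle_port(struct port *p)
    {
        (void)p;
        return 1;                     /* pretend we serviced an event */
    }

    static int interrupt(struct port *ports, int n)
    {
        int handled = 0;

        for (int i = 0; i < n; i++) {
            struct port *p = &ports[i];

            if (p->flags & PORT_DISABLED)
                continue;             /* port not up: not our event */
            if (!p->has_cmd || (p->cmd_flags & CMD_POLLING))
                continue;             /* polled commands ignore IRQs */
            handled |= handle_port(p);
        }
        return handled;               /* nonzero: the IRQ was ours */
    }

    int main(void)
    {
        struct port ports[2] = {
            { PORT_DISABLED, 0, 0 },
            { 0, 0, 1 },
        };
        printf("handled = %d\n", interrupt(ports, 2));
        return 0;
    }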
4811/**
4812 * sata_scr_valid - test whether SCRs are accessible
4813 * @ap: ATA port to test SCR accessibility for
4814 *
4815 * Test whether SCRs are accessible for @ap.
4816 *
4817 * LOCKING:
4818 * None.
4819 *
4820 * RETURNS:
4821 * 1 if SCRs are accessible, 0 otherwise.
4822 */
4823int sata_scr_valid(struct ata_port *ap)
4824{
4825 return ap->cbl == ATA_CBL_SATA && ap->ops->scr_read;
4826}
4827
4828/**
4829 * sata_scr_read - read SCR register of the specified port
4830 * @ap: ATA port to read SCR for
4831 * @reg: SCR to read
4832 * @val: Place to store read value
4833 *
4834 * Read SCR register @reg of @ap into *@val. This function is
4835 * guaranteed to succeed if the cable type of the port is SATA
4836 * and the port implements ->scr_read.
4837 *
4838 * LOCKING:
4839 * None.
4840 *
4841 * RETURNS:
4842 * 0 on success, negative errno on failure.
4843 */
4844int sata_scr_read(struct ata_port *ap, int reg, u32 *val)
4845{
4846 if (sata_scr_valid(ap)) {
4847 *val = ap->ops->scr_read(ap, reg);
4848 return 0;
4849 }
4850 return -EOPNOTSUPP;
4851}
4852
4853/**
4854 * sata_scr_write - write SCR register of the specified port
4855 * @ap: ATA port to write SCR for
4856 * @reg: SCR to write
4857 * @val: value to write
4858 *
4859 * Write @val to SCR register @reg of @ap. This function is
4860 * guaranteed to succeed if the cable type of the port is SATA
4861 * and the port implements ->scr_read.
4862 *
4863 * LOCKING:
4864 * None.
4865 *
4866 * RETURNS:
4867 * 0 on success, negative errno on failure.
4868 */
4869int sata_scr_write(struct ata_port *ap, int reg, u32 val)
4870{
4871 if (sata_scr_valid(ap)) {
4872 ap->ops->scr_write(ap, reg, val);
4873 return 0;
4874 }
4875 return -EOPNOTSUPP;
4876}
4877
4878/**
4879 * sata_scr_write_flush - write SCR register of the specified port and flush
4880 * @ap: ATA port to write SCR for
4881 * @reg: SCR to write
4882 * @val: value to write
4883 *
4884 * This function is identical to sata_scr_write() except that this
4885 * function performs a flush after writing to the register.
4886 *
4887 * LOCKING:
4888 * None.
4889 *
4890 * RETURNS:
4891 * 0 on success, negative errno on failure.
4892 */
4893int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val)
4894{
4895 if (sata_scr_valid(ap)) {
4896 ap->ops->scr_write(ap, reg, val);
4897 ap->ops->scr_read(ap, reg);
4898 return 0;
4899 }
4900 return -EOPNOTSUPP;
4901}
4902
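The sata_scr_*() helpers above wrap an optional hardware capability behind a validity check, so callers get a clean errno instead of dereferencing a missing method. The same defensive pattern in a freestanding sketch; the port struct, ops table and userspace-style negative-errno return are simplifications of the libata originals.

    #include <stdio.h>
    #include <stdint.h>
    #include <errno.h>

    struct port_ops {
        uint32_t (*scr_read)(int reg);   /* may be NULL on PATA-only HW */
    };

    struct port {
        int is_sata;
        const struct port_ops *ops;
    };

    static int scr_valid(const struct port *p)
    {
        return p->is_sata && p->ops->scr_read;
    }

    static int scr_read(const struct port *p, int reg, uint32_t *val)
    {
        if (!scr_valid(p))
            return -EOPNOTSUPP;          /* caller sees a clean error */
        *val = p->ops->scr_read(reg);
        return 0;
    }

    static uint32_t fake_scr_read(int reg) { (void)reg; return 0x123; }

    int main(void)
    {
        const struct port_ops ops = { fake_scr_read };
        struct port sata = { 1, &ops };
        uint32_t v;

        if (scr_read(&sata, 0, &v) == 0)
            printf("SStatus = 0x%x\n", v);
        return 0;
    }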
4903/**
4904 * ata_port_online - test whether the given port is online
4905 * @ap: ATA port to test
4906 *
4907 * Test whether @ap is online. Note that this function returns 0
4908 * if online status of @ap cannot be obtained, so
4909 * ata_port_online(ap) != !ata_port_offline(ap).
4910 *
4911 * LOCKING:
4912 * None.
4913 *
4914 * RETURNS:
4915 * 1 if the port online status is available and online.
4916 */
4917int ata_port_online(struct ata_port *ap)
4918{
4919 u32 sstatus;
4920
4921 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) == 0x3)
4922 return 1;
4923 return 0;
4924}
4925
4926/**
4927 * ata_port_offline - test whether the given port is offline
4928 * @ap: ATA port to test
4929 *
4930 * Test whether @ap is offline. Note that this function returns
4931 * 0 if offline status of @ap cannot be obtained, so
4932 * ata_port_online(ap) != !ata_port_offline(ap).
4933 *
4934 * LOCKING:
4935 * None.
4936 *
4937 * RETURNS:
4938 * 1 if the port offline status is available and offline.
4939 */
4940int ata_port_offline(struct ata_port *ap)
4941{
4942 u32 sstatus;
4943
4944 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) != 0x3)
4945 return 1;
4946 return 0;
4947}
4237 4948
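ata_port_online() and ata_port_offline() above both key off the DET field (bits 3:0) of SStatus, where the value 0x3 means a device is present and phy communication is established; because the SCR read itself can fail, "not online" and "offline" are deliberately not complements of each other. A sketch of just the DET decode; the SStatus value used is made up.

    #include <stdio.h>
    #include <stdint.h>

    /* DET (bits 3:0) of SStatus: 0x3 = device present, phy online */
    static int det_online(uint32_t sstatus)  { return (sstatus & 0xf) == 0x3; }
    static int det_offline(uint32_t sstatus) { return (sstatus & 0xf) != 0x3; }

    int main(void)
    {
        uint32_t sstatus = 0x113;   /* hypothetical: Gen1 speed, DET = 3 */

        printf("online:  %d\n", det_online(sstatus));   /* 1 */
        printf("offline: %d\n", det_offline(sstatus));  /* 0 */
        return 0;
    }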
4238/* 4949/*
4239 * Execute a 'simple' command, that only consists of the opcode 'cmd' itself, 4950 * Execute a 'simple' command, that only consists of the opcode 'cmd' itself,
4240 * without filling any other registers 4951 * without filling any other registers
4241 */ 4952 */
4242static int ata_do_simple_cmd(struct ata_port *ap, struct ata_device *dev, 4953static int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
4243 u8 cmd)
4244{ 4954{
4245 struct ata_taskfile tf; 4955 struct ata_taskfile tf;
4246 int err; 4956 int err;
4247 4957
4248 ata_tf_init(ap, &tf, dev->devno); 4958 ata_tf_init(dev, &tf);
4249 4959
4250 tf.command = cmd; 4960 tf.command = cmd;
4251 tf.flags |= ATA_TFLAG_DEVICE; 4961 tf.flags |= ATA_TFLAG_DEVICE;
4252 tf.protocol = ATA_PROT_NODATA; 4962 tf.protocol = ATA_PROT_NODATA;
4253 4963
4254 err = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0); 4964 err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
4255 if (err) 4965 if (err)
4256 printk(KERN_ERR "%s: ata command failed: %d\n", 4966 ata_dev_printk(dev, KERN_ERR, "%s: ata command failed: %d\n",
4257 __FUNCTION__, err); 4967 __FUNCTION__, err);
4258 4968
4259 return err; 4969 return err;
4260} 4970}
4261 4971
4262static int ata_flush_cache(struct ata_port *ap, struct ata_device *dev) 4972static int ata_flush_cache(struct ata_device *dev)
4263{ 4973{
4264 u8 cmd; 4974 u8 cmd;
4265 4975
@@ -4271,22 +4981,21 @@ static int ata_flush_cache(struct ata_port *ap, struct ata_device *dev)
4271 else 4981 else
4272 cmd = ATA_CMD_FLUSH; 4982 cmd = ATA_CMD_FLUSH;
4273 4983
4274 return ata_do_simple_cmd(ap, dev, cmd); 4984 return ata_do_simple_cmd(dev, cmd);
4275} 4985}
4276 4986
4277static int ata_standby_drive(struct ata_port *ap, struct ata_device *dev) 4987static int ata_standby_drive(struct ata_device *dev)
4278{ 4988{
4279 return ata_do_simple_cmd(ap, dev, ATA_CMD_STANDBYNOW1); 4989 return ata_do_simple_cmd(dev, ATA_CMD_STANDBYNOW1);
4280} 4990}
4281 4991
4282static int ata_start_drive(struct ata_port *ap, struct ata_device *dev) 4992static int ata_start_drive(struct ata_device *dev)
4283{ 4993{
4284 return ata_do_simple_cmd(ap, dev, ATA_CMD_IDLEIMMEDIATE); 4994 return ata_do_simple_cmd(dev, ATA_CMD_IDLEIMMEDIATE);
4285} 4995}
4286 4996
4287/** 4997/**
4288 * ata_device_resume - wake up a previously suspended device 4998 * ata_device_resume - wake up a previously suspended device
4289 * @ap: port the device is connected to
4290 * @dev: the device to resume 4999 * @dev: the device to resume
4291 * 5000 *
4292 * Kick the drive back into action by sending it an idle immediate 5001 * Kick the drive back into action by sending it an idle immediate
@@ -4294,40 +5003,47 @@ static int ata_start_drive(struct ata_port *ap, struct ata_device *dev)
4294 * and host. 5003 * and host.
4295 * 5004 *
4296 */ 5005 */
4297int ata_device_resume(struct ata_port *ap, struct ata_device *dev) 5006int ata_device_resume(struct ata_device *dev)
4298{ 5007{
5008 struct ata_port *ap = dev->ap;
5009
4299 if (ap->flags & ATA_FLAG_SUSPENDED) { 5010 if (ap->flags & ATA_FLAG_SUSPENDED) {
5011 struct ata_device *failed_dev;
5012
4300 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT); 5013 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
4301 ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 200000); 5014 ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 200000);
5015
4302 ap->flags &= ~ATA_FLAG_SUSPENDED; 5016 ap->flags &= ~ATA_FLAG_SUSPENDED;
4303 ata_set_mode(ap); 5017 while (ata_set_mode(ap, &failed_dev))
5018 ata_dev_disable(failed_dev);
4304 } 5019 }
4305 if (!ata_dev_present(dev)) 5020 if (!ata_dev_enabled(dev))
4306 return 0; 5021 return 0;
4307 if (dev->class == ATA_DEV_ATA) 5022 if (dev->class == ATA_DEV_ATA)
4308 ata_start_drive(ap, dev); 5023 ata_start_drive(dev);
4309 5024
4310 return 0; 5025 return 0;
4311} 5026}
4312 5027
4313/** 5028/**
4314 * ata_device_suspend - prepare a device for suspend 5029 * ata_device_suspend - prepare a device for suspend
4315 * @ap: port the device is connected to
4316 * @dev: the device to suspend 5030 * @dev: the device to suspend
4317 * @state: target power management state 5031 * @state: target power management state
4318 * 5032 *
4319 * Flush the cache on the drive, if appropriate, then issue a 5033 * Flush the cache on the drive, if appropriate, then issue a
4320 * standbynow command. 5034 * standbynow command.
4321 */ 5035 */
4322int ata_device_suspend(struct ata_port *ap, struct ata_device *dev, pm_message_t state) 5036int ata_device_suspend(struct ata_device *dev, pm_message_t state)
4323{ 5037{
4324 if (!ata_dev_present(dev)) 5038 struct ata_port *ap = dev->ap;
5039
5040 if (!ata_dev_enabled(dev))
4325 return 0; 5041 return 0;
4326 if (dev->class == ATA_DEV_ATA) 5042 if (dev->class == ATA_DEV_ATA)
4327 ata_flush_cache(ap, dev); 5043 ata_flush_cache(dev);
4328 5044
4329 if (state.event != PM_EVENT_FREEZE) 5045 if (state.event != PM_EVENT_FREEZE)
4330 ata_standby_drive(ap, dev); 5046 ata_standby_drive(dev);
4331 ap->flags |= ATA_FLAG_SUSPENDED; 5047 ap->flags |= ATA_FLAG_SUSPENDED;
4332 return 0; 5048 return 0;
4333} 5049}
@@ -4415,6 +5131,38 @@ static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister)
4415} 5131}
4416 5132
4417/** 5133/**
5134 * ata_dev_init - Initialize an ata_device structure
5135 * @dev: Device structure to initialize
5136 *
5137 * Initialize @dev in preparation for probing.
5138 *
5139 * LOCKING:
5140 * Inherited from caller.
5141 */
5142void ata_dev_init(struct ata_device *dev)
5143{
5144 struct ata_port *ap = dev->ap;
5145 unsigned long flags;
5146
5147 /* SATA spd limit is bound to the first device */
5148 ap->sata_spd_limit = ap->hw_sata_spd_limit;
5149
5150 /* High bits of dev->flags are used to record warm plug
5151 * requests which occur asynchronously. Synchronize using
5152 * host_set lock.
5153 */
5154 spin_lock_irqsave(ap->lock, flags);
5155 dev->flags &= ~ATA_DFLAG_INIT_MASK;
5156 spin_unlock_irqrestore(ap->lock, flags);
5157
5158 memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
5159 sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
5160 dev->pio_mask = UINT_MAX;
5161 dev->mwdma_mask = UINT_MAX;
5162 dev->udma_mask = UINT_MAX;
5163}
5164
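ata_dev_init() above clears only the tail of struct ata_device, everything at or past ATA_DEVICE_CLEAR_OFFSET, so fields that must survive reprobing (the ap backpointer and devno) are left intact. The trick relies on struct layout plus offsetof(); this standalone sketch uses an invented struct and names to show the mechanism.

    #include <stdio.h>
    #include <string.h>
    #include <stddef.h>

    struct device {
        /* fields before the marker survive re-init */
        int devno;
        void *port;
        /* everything from 'flags' on is wiped on re-init */
        unsigned int flags;
        unsigned int pio_mask;
    };

    #define DEVICE_CLEAR_OFFSET offsetof(struct device, flags)

    static void dev_init(struct device *dev)
    {
        /* clear only the probe-time tail of the structure */
        memset((char *)dev + DEVICE_CLEAR_OFFSET, 0,
               sizeof(*dev) - DEVICE_CLEAR_OFFSET);
        dev->pio_mask = ~0u;   /* then reset masks to "anything goes" */
    }

    int main(void)
    {
        struct device dev = { .devno = 1, .flags = 0xff, .pio_mask = 7 };

        dev_init(&dev);
        printf("devno=%d flags=%u pio_mask=0x%x\n",
               dev.devno, dev.flags, dev.pio_mask);  /* devno survives */
        return 0;
    }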
5165/**
4418 * ata_host_init - Initialize an ata_port structure 5166 * ata_host_init - Initialize an ata_port structure
4419 * @ap: Structure to initialize 5167 * @ap: Structure to initialize
4420 * @host: associated SCSI mid-layer structure 5168 * @host: associated SCSI mid-layer structure
@@ -4428,7 +5176,6 @@ static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister)
4428 * LOCKING: 5176 * LOCKING:
4429 * Inherited from caller. 5177 * Inherited from caller.
4430 */ 5178 */
4431
4432static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host, 5179static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
4433 struct ata_host_set *host_set, 5180 struct ata_host_set *host_set,
4434 const struct ata_probe_ent *ent, unsigned int port_no) 5181 const struct ata_probe_ent *ent, unsigned int port_no)
@@ -4441,7 +5188,8 @@ static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
4441 host->unique_id = ata_unique_id++; 5188 host->unique_id = ata_unique_id++;
4442 host->max_cmd_len = 12; 5189 host->max_cmd_len = 12;
4443 5190
4444 ap->flags = ATA_FLAG_PORT_DISABLED; 5191 ap->lock = &host_set->lock;
5192 ap->flags = ATA_FLAG_DISABLED;
4445 ap->id = host->unique_id; 5193 ap->id = host->unique_id;
4446 ap->host = host; 5194 ap->host = host;
4447 ap->ctl = ATA_DEVCTL_OBS; 5195 ap->ctl = ATA_DEVCTL_OBS;
@@ -4455,19 +5203,35 @@ static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
4455 ap->udma_mask = ent->udma_mask; 5203 ap->udma_mask = ent->udma_mask;
4456 ap->flags |= ent->host_flags; 5204 ap->flags |= ent->host_flags;
4457 ap->ops = ent->port_ops; 5205 ap->ops = ent->port_ops;
4458 ap->cbl = ATA_CBL_NONE; 5206 ap->hw_sata_spd_limit = UINT_MAX;
4459 ap->active_tag = ATA_TAG_POISON; 5207 ap->active_tag = ATA_TAG_POISON;
4460 ap->last_ctl = 0xFF; 5208 ap->last_ctl = 0xFF;
4461 5209
5210#if defined(ATA_VERBOSE_DEBUG)
5211 /* turn on all debugging levels */
5212 ap->msg_enable = 0x00FF;
5213#elif defined(ATA_DEBUG)
5214 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
5215#else
5216 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
5217#endif
5218
4462 INIT_WORK(&ap->port_task, NULL, NULL); 5219 INIT_WORK(&ap->port_task, NULL, NULL);
5220 INIT_WORK(&ap->hotplug_task, ata_scsi_hotplug, ap);
5221 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan, ap);
4463 INIT_LIST_HEAD(&ap->eh_done_q); 5222 INIT_LIST_HEAD(&ap->eh_done_q);
5223 init_waitqueue_head(&ap->eh_wait_q);
5224
5225 /* set cable type */
5226 ap->cbl = ATA_CBL_NONE;
5227 if (ap->flags & ATA_FLAG_SATA)
5228 ap->cbl = ATA_CBL_SATA;
4464 5229
4465 for (i = 0; i < ATA_MAX_DEVICES; i++) { 5230 for (i = 0; i < ATA_MAX_DEVICES; i++) {
4466 struct ata_device *dev = &ap->device[i]; 5231 struct ata_device *dev = &ap->device[i];
5232 dev->ap = ap;
4467 dev->devno = i; 5233 dev->devno = i;
4468 dev->pio_mask = UINT_MAX; 5234 ata_dev_init(dev);
4469 dev->mwdma_mask = UINT_MAX;
4470 dev->udma_mask = UINT_MAX;
4471 } 5235 }
4472 5236
4473#ifdef ATA_IRQ_TRAP 5237#ifdef ATA_IRQ_TRAP
@@ -4503,7 +5267,7 @@ static struct ata_port * ata_host_add(const struct ata_probe_ent *ent,
4503 5267
4504 DPRINTK("ENTER\n"); 5268 DPRINTK("ENTER\n");
4505 5269
4506 if (!ent->port_ops->probe_reset && 5270 if (!ent->port_ops->error_handler &&
4507 !(ent->host_flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST))) { 5271 !(ent->host_flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST))) {
4508 printk(KERN_ERR "ata%u: no reset mechanism available\n", 5272 printk(KERN_ERR "ata%u: no reset mechanism available\n",
4509 port_no); 5273 port_no);
@@ -4516,7 +5280,7 @@ static struct ata_port * ata_host_add(const struct ata_probe_ent *ent,
4516 5280
4517 host->transportt = &ata_scsi_transport_template; 5281 host->transportt = &ata_scsi_transport_template;
4518 5282
4519 ap = (struct ata_port *) &host->hostdata[0]; 5283 ap = ata_shost_to_port(host);
4520 5284
4521 ata_host_init(ap, host, host_set, ent, port_no); 5285 ata_host_init(ap, host, host_set, ent, port_no);
4522 5286
@@ -4549,12 +5313,12 @@ err_out:
4549 * RETURNS: 5313 * RETURNS:
4550 * Number of ports registered. Zero on error (no ports registered). 5314 * Number of ports registered. Zero on error (no ports registered).
4551 */ 5315 */
4552
4553int ata_device_add(const struct ata_probe_ent *ent) 5316int ata_device_add(const struct ata_probe_ent *ent)
4554{ 5317{
4555 unsigned int count = 0, i; 5318 unsigned int count = 0, i;
4556 struct device *dev = ent->dev; 5319 struct device *dev = ent->dev;
4557 struct ata_host_set *host_set; 5320 struct ata_host_set *host_set;
5321 int rc;
4558 5322
4559 DPRINTK("ENTER\n"); 5323 DPRINTK("ENTER\n");
4560 /* alloc a container for our list of ATA ports (buses) */ 5324 /* alloc a container for our list of ATA ports (buses) */
@@ -4587,18 +5351,18 @@ int ata_device_add(const struct ata_probe_ent *ent)
4587 (ap->pio_mask << ATA_SHIFT_PIO); 5351 (ap->pio_mask << ATA_SHIFT_PIO);
4588 5352
4589 /* print per-port info to dmesg */ 5353 /* print per-port info to dmesg */
4590 printk(KERN_INFO "ata%u: %cATA max %s cmd 0x%lX ctl 0x%lX " 5354 ata_port_printk(ap, KERN_INFO, "%cATA max %s cmd 0x%lX "
4591 "bmdma 0x%lX irq %lu\n", 5355 "ctl 0x%lX bmdma 0x%lX irq %lu\n",
4592 ap->id, 5356 ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
4593 ap->flags & ATA_FLAG_SATA ? 'S' : 'P', 5357 ata_mode_string(xfer_mode_mask),
4594 ata_mode_string(xfer_mode_mask), 5358 ap->ioaddr.cmd_addr,
4595 ap->ioaddr.cmd_addr, 5359 ap->ioaddr.ctl_addr,
4596 ap->ioaddr.ctl_addr, 5360 ap->ioaddr.bmdma_addr,
4597 ap->ioaddr.bmdma_addr, 5361 ent->irq);
4598 ent->irq);
4599 5362
4600 ata_chk_status(ap); 5363 ata_chk_status(ap);
4601 host_set->ops->irq_clear(ap); 5364 host_set->ops->irq_clear(ap);
5365 ata_eh_freeze_port(ap); /* freeze port before requesting IRQ */
4602 count++; 5366 count++;
4603 } 5367 }
4604 5368
@@ -4606,41 +5370,72 @@ int ata_device_add(const struct ata_probe_ent *ent)
4606 goto err_free_ret; 5370 goto err_free_ret;
4607 5371
4608 /* obtain irq, that is shared between channels */ 5372 /* obtain irq, that is shared between channels */
4609 if (request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags, 5373 rc = request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
4610 DRV_NAME, host_set)) 5374 DRV_NAME, host_set);
5375 if (rc) {
5376 dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n",
5377 ent->irq, rc);
4611 goto err_out; 5378 goto err_out;
5379 }
4612 5380
4613 /* perform each probe synchronously */ 5381 /* perform each probe synchronously */
4614 DPRINTK("probe begin\n"); 5382 DPRINTK("probe begin\n");
4615 for (i = 0; i < count; i++) { 5383 for (i = 0; i < count; i++) {
4616 struct ata_port *ap; 5384 struct ata_port *ap;
5385 u32 scontrol;
4617 int rc; 5386 int rc;
4618 5387
4619 ap = host_set->ports[i]; 5388 ap = host_set->ports[i];
4620 5389
4621 DPRINTK("ata%u: bus probe begin\n", ap->id); 5390 /* init sata_spd_limit to the current value */
4622 rc = ata_bus_probe(ap); 5391 if (sata_scr_read(ap, SCR_CONTROL, &scontrol) == 0) {
4623 DPRINTK("ata%u: bus probe end\n", ap->id); 5392 int spd = (scontrol >> 4) & 0xf;
4624 5393 ap->hw_sata_spd_limit &= (1 << spd) - 1;
4625 if (rc) {
4626 /* FIXME: do something useful here?
4627 * Current libata behavior will
4628 * tear down everything when
4629 * the module is removed
4630 * or the h/w is unplugged.
4631 */
4632 } 5394 }
5395 ap->sata_spd_limit = ap->hw_sata_spd_limit;
4633 5396
4634 rc = scsi_add_host(ap->host, dev); 5397 rc = scsi_add_host(ap->host, dev);
4635 if (rc) { 5398 if (rc) {
4636 printk(KERN_ERR "ata%u: scsi_add_host failed\n", 5399 ata_port_printk(ap, KERN_ERR, "scsi_add_host failed\n");
4637 ap->id);
4638 /* FIXME: do something useful here */ 5400 /* FIXME: do something useful here */
4639 /* FIXME: handle unconditional calls to 5401 /* FIXME: handle unconditional calls to
4640 * scsi_scan_host and ata_host_remove, below, 5402 * scsi_scan_host and ata_host_remove, below,
4641 * at the very least 5403 * at the very least
4642 */ 5404 */
4643 } 5405 }
5406
5407 if (ap->ops->error_handler) {
5408 unsigned long flags;
5409
5410 ata_port_probe(ap);
5411
5412 /* kick EH for boot probing */
5413 spin_lock_irqsave(ap->lock, flags);
5414
5415 ap->eh_info.probe_mask = (1 << ATA_MAX_DEVICES) - 1;
5416 ap->eh_info.action |= ATA_EH_SOFTRESET;
5417
5418 ap->flags |= ATA_FLAG_LOADING;
5419 ata_port_schedule_eh(ap);
5420
5421 spin_unlock_irqrestore(ap->lock, flags);
5422
5423 /* wait for EH to finish */
5424 ata_port_wait_eh(ap);
5425 } else {
5426 DPRINTK("ata%u: bus probe begin\n", ap->id);
5427 rc = ata_bus_probe(ap);
5428 DPRINTK("ata%u: bus probe end\n", ap->id);
5429
5430 if (rc) {
5431 /* FIXME: do something useful here?
5432 * Current libata behavior will
5433 * tear down everything when
5434 * the module is removed
5435 * or the h/w is unplugged.
5436 */
5437 }
5438 }
4644 } 5439 }
4645 5440
4646 /* probes are done, now scan each port's disk(s) */ 5441 /* probes are done, now scan each port's disk(s) */
@@ -4668,6 +5463,63 @@ err_free_ret:
4668} 5463}
4669 5464
4670/** 5465/**
5466 * ata_port_detach - Detach ATA port in preparation for device removal
5467 * @ap: ATA port to be detached
5468 *
5469 * Detach all ATA devices and the associated SCSI devices of @ap;
5470 * then, remove the associated SCSI host. @ap is guaranteed to
5471 * be quiescent on return from this function.
5472 *
5473 * LOCKING:
5474 * Kernel thread context (may sleep).
5475 */
5476void ata_port_detach(struct ata_port *ap)
5477{
5478 unsigned long flags;
5479 int i;
5480
5481 if (!ap->ops->error_handler)
5482 return;
5483
5484 /* tell EH we're leaving & flush EH */
5485 spin_lock_irqsave(ap->lock, flags);
5486 ap->flags |= ATA_FLAG_UNLOADING;
5487 spin_unlock_irqrestore(ap->lock, flags);
5488
5489 ata_port_wait_eh(ap);
5490
5491 /* EH is now guaranteed to see UNLOADING, so no new device
5492 * will be attached. Disable all existing devices.
5493 */
5494 spin_lock_irqsave(ap->lock, flags);
5495
5496 for (i = 0; i < ATA_MAX_DEVICES; i++)
5497 ata_dev_disable(&ap->device[i]);
5498
5499 spin_unlock_irqrestore(ap->lock, flags);
5500
5501 /* Final freeze & EH. All in-flight commands are aborted. EH
5502 * will be skipped and retries will be terminated with bad
5503 * target.
5504 */
5505 spin_lock_irqsave(ap->lock, flags);
5506 ata_port_freeze(ap); /* won't be thawed */
5507 spin_unlock_irqrestore(ap->lock, flags);
5508
5509 ata_port_wait_eh(ap);
5510
5511 /* Flush hotplug task. The sequence is similar to
5512 * ata_port_flush_task().
5513 */
5514 flush_workqueue(ata_aux_wq);
5515 cancel_delayed_work(&ap->hotplug_task);
5516 flush_workqueue(ata_aux_wq);
5517
5518 /* remove the associated SCSI host */
5519 scsi_remove_host(ap->host);
5520}
5521
5522/**
4671 * ata_host_set_remove - PCI layer callback for device removal 5523 * ata_host_set_remove - PCI layer callback for device removal
4672 * @host_set: ATA host set that was removed 5524 * @host_set: ATA host set that was removed
4673 * 5525 *
@@ -4680,18 +5532,15 @@ err_free_ret:
4680 5532
4681void ata_host_set_remove(struct ata_host_set *host_set) 5533void ata_host_set_remove(struct ata_host_set *host_set)
4682{ 5534{
4683 struct ata_port *ap;
4684 unsigned int i; 5535 unsigned int i;
4685 5536
4686 for (i = 0; i < host_set->n_ports; i++) { 5537 for (i = 0; i < host_set->n_ports; i++)
4687 ap = host_set->ports[i]; 5538 ata_port_detach(host_set->ports[i]);
4688 scsi_remove_host(ap->host);
4689 }
4690 5539
4691 free_irq(host_set->irq, host_set); 5540 free_irq(host_set->irq, host_set);
4692 5541
4693 for (i = 0; i < host_set->n_ports; i++) { 5542 for (i = 0; i < host_set->n_ports; i++) {
4694 ap = host_set->ports[i]; 5543 struct ata_port *ap = host_set->ports[i];
4695 5544
4696 ata_scsi_release(ap->host); 5545 ata_scsi_release(ap->host);
4697 5546
@@ -4729,15 +5578,12 @@ void ata_host_set_remove(struct ata_host_set *host_set)
4729 5578
4730int ata_scsi_release(struct Scsi_Host *host) 5579int ata_scsi_release(struct Scsi_Host *host)
4731{ 5580{
4732 struct ata_port *ap = (struct ata_port *) &host->hostdata[0]; 5581 struct ata_port *ap = ata_shost_to_port(host);
4733 int i;
4734 5582
4735 DPRINTK("ENTER\n"); 5583 DPRINTK("ENTER\n");
4736 5584
4737 ap->ops->port_disable(ap); 5585 ap->ops->port_disable(ap);
4738 ata_host_remove(ap, 0); 5586 ata_host_remove(ap, 0);
4739 for (i = 0; i < ATA_MAX_DEVICES; i++)
4740 kfree(ap->device[i].id);
4741 5587
4742 DPRINTK("EXIT\n"); 5588 DPRINTK("EXIT\n");
4743 return 1; 5589 return 1;
@@ -4797,8 +5643,12 @@ void ata_pci_remove_one (struct pci_dev *pdev)
4797{ 5643{
4798 struct device *dev = pci_dev_to_dev(pdev); 5644 struct device *dev = pci_dev_to_dev(pdev);
4799 struct ata_host_set *host_set = dev_get_drvdata(dev); 5645 struct ata_host_set *host_set = dev_get_drvdata(dev);
5646 struct ata_host_set *host_set2 = host_set->next;
4800 5647
4801 ata_host_set_remove(host_set); 5648 ata_host_set_remove(host_set);
5649 if (host_set2)
5650 ata_host_set_remove(host_set2);
5651
4802 pci_release_regions(pdev); 5652 pci_release_regions(pdev);
4803 pci_disable_device(pdev); 5653 pci_disable_device(pdev);
4804 dev_set_drvdata(dev, NULL); 5654 dev_set_drvdata(dev, NULL);
@@ -4863,6 +5713,12 @@ static int __init ata_init(void)
4863 if (!ata_wq) 5713 if (!ata_wq)
4864 return -ENOMEM; 5714 return -ENOMEM;
4865 5715
5716 ata_aux_wq = create_singlethread_workqueue("ata_aux");
5717 if (!ata_aux_wq) {
5718 destroy_workqueue(ata_wq);
5719 return -ENOMEM;
5720 }
5721
4866 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n"); 5722 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
4867 return 0; 5723 return 0;
4868} 5724}
@@ -4870,6 +5726,7 @@ static int __init ata_init(void)
4870static void __exit ata_exit(void) 5726static void __exit ata_exit(void)
4871{ 5727{
4872 destroy_workqueue(ata_wq); 5728 destroy_workqueue(ata_wq);
5729 destroy_workqueue(ata_aux_wq);
4873} 5730}
4874 5731
4875module_init(ata_init); 5732module_init(ata_init);
@@ -4896,6 +5753,52 @@ int ata_ratelimit(void)
4896 return rc; 5753 return rc;
4897} 5754}
4898 5755
5756/**
5757 * ata_wait_register - wait until register value changes
5758 * @reg: IO-mapped register
5759 * @mask: Mask to apply to read register value
5760 * @val: Wait condition
5761 * @interval_msec: polling interval in milliseconds
5762 * @timeout_msec: timeout in milliseconds
5763 *
5764 * Waiting for some bits of a register to change is a common
5765 * operation for ATA controllers. This function reads the 32-bit LE
5766 * IO-mapped register @reg and tests for the following condition.
5767 *
5768 * (*@reg & mask) != val
5769 *
5770 * If the condition is met, it returns; otherwise, the process is
5771 * repeated after @interval_msec until timeout.
5772 *
5773 * LOCKING:
5774 * Kernel thread context (may sleep)
5775 *
5776 * RETURNS:
5777 * The final register value.
5778 */
5779u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
5780 unsigned long interval_msec,
5781 unsigned long timeout_msec)
5782{
5783 unsigned long timeout;
5784 u32 tmp;
5785
5786 tmp = ioread32(reg);
5787
5788 /* Calculate timeout _after_ the first read to make sure
5789 * preceding writes reach the controller before starting to
5790 * eat away the timeout.
5791 */
5792 timeout = jiffies + (timeout_msec * HZ) / 1000;
5793
5794 while ((tmp & mask) == val && time_before(jiffies, timeout)) {
5795 msleep(interval_msec);
5796 tmp = ioread32(reg);
5797 }
5798
5799 return tmp;
5800}
5801
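The subtle point in ata_wait_register() above is that the deadline is computed after the first register read, so the read that flushes any posted writes does not eat into the timeout budget. A userspace equivalent using CLOCK_MONOTONIC; read_reg() is a stand-in for ioread32() and the fake register clears its bit after a few polls.

    #include <stdio.h>
    #include <stdint.h>
    #include <time.h>

    static int polls;
    static uint32_t read_reg(void) { return polls++ < 3 ? 0x1 : 0x0; }

    static long now_ms(void)
    {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
    }

    static uint32_t wait_register(uint32_t mask, uint32_t val,
                                  long interval_ms, long timeout_ms)
    {
        uint32_t tmp = read_reg();
        /* start the clock only after the first read has flushed writes */
        long deadline = now_ms() + timeout_ms;

        while ((tmp & mask) == val && now_ms() < deadline) {
            struct timespec ts = { interval_ms / 1000,
                                   (interval_ms % 1000) * 1000000L };
            nanosleep(&ts, NULL);
            tmp = read_reg();
        }
        return tmp;
    }

    int main(void)
    {
        /* wait until bit 0 clears */
        printf("final value 0x%x\n", wait_register(0x1, 0x1, 10, 1000));
        return 0;
    }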
4899/* 5802/*
4900 * libata is essentially a library of internal helper functions for 5803 * libata is essentially a library of internal helper functions for
4901 * low-level ATA host controller drivers. As such, the API/ABI is 5804 * low-level ATA host controller drivers. As such, the API/ABI is
@@ -4903,15 +5806,20 @@ int ata_ratelimit(void)
4903 * Do not depend on ABI/API stability. 5806 * Do not depend on ABI/API stability.
4904 */ 5807 */
4905 5808
5809EXPORT_SYMBOL_GPL(sata_deb_timing_boot);
5810EXPORT_SYMBOL_GPL(sata_deb_timing_eh);
5811EXPORT_SYMBOL_GPL(sata_deb_timing_before_fsrst);
4906EXPORT_SYMBOL_GPL(ata_std_bios_param); 5812EXPORT_SYMBOL_GPL(ata_std_bios_param);
4907EXPORT_SYMBOL_GPL(ata_std_ports); 5813EXPORT_SYMBOL_GPL(ata_std_ports);
4908EXPORT_SYMBOL_GPL(ata_device_add); 5814EXPORT_SYMBOL_GPL(ata_device_add);
5815EXPORT_SYMBOL_GPL(ata_port_detach);
4909EXPORT_SYMBOL_GPL(ata_host_set_remove); 5816EXPORT_SYMBOL_GPL(ata_host_set_remove);
4910EXPORT_SYMBOL_GPL(ata_sg_init); 5817EXPORT_SYMBOL_GPL(ata_sg_init);
4911EXPORT_SYMBOL_GPL(ata_sg_init_one); 5818EXPORT_SYMBOL_GPL(ata_sg_init_one);
4912EXPORT_SYMBOL_GPL(__ata_qc_complete); 5819EXPORT_SYMBOL_GPL(ata_hsm_move);
5820EXPORT_SYMBOL_GPL(ata_qc_complete);
5821EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
4913EXPORT_SYMBOL_GPL(ata_qc_issue_prot); 5822EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
4914EXPORT_SYMBOL_GPL(ata_eng_timeout);
4915EXPORT_SYMBOL_GPL(ata_tf_load); 5823EXPORT_SYMBOL_GPL(ata_tf_load);
4916EXPORT_SYMBOL_GPL(ata_tf_read); 5824EXPORT_SYMBOL_GPL(ata_tf_read);
4917EXPORT_SYMBOL_GPL(ata_noop_dev_select); 5825EXPORT_SYMBOL_GPL(ata_noop_dev_select);
@@ -4925,6 +5833,9 @@ EXPORT_SYMBOL_GPL(ata_port_start);
4925EXPORT_SYMBOL_GPL(ata_port_stop); 5833EXPORT_SYMBOL_GPL(ata_port_stop);
4926EXPORT_SYMBOL_GPL(ata_host_stop); 5834EXPORT_SYMBOL_GPL(ata_host_stop);
4927EXPORT_SYMBOL_GPL(ata_interrupt); 5835EXPORT_SYMBOL_GPL(ata_interrupt);
5836EXPORT_SYMBOL_GPL(ata_mmio_data_xfer);
5837EXPORT_SYMBOL_GPL(ata_pio_data_xfer);
5838EXPORT_SYMBOL_GPL(ata_pio_data_xfer_noirq);
4928EXPORT_SYMBOL_GPL(ata_qc_prep); 5839EXPORT_SYMBOL_GPL(ata_qc_prep);
4929EXPORT_SYMBOL_GPL(ata_noop_qc_prep); 5840EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
4930EXPORT_SYMBOL_GPL(ata_bmdma_setup); 5841EXPORT_SYMBOL_GPL(ata_bmdma_setup);
@@ -4932,33 +5843,46 @@ EXPORT_SYMBOL_GPL(ata_bmdma_start);
4932EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear); 5843EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
4933EXPORT_SYMBOL_GPL(ata_bmdma_status); 5844EXPORT_SYMBOL_GPL(ata_bmdma_status);
4934EXPORT_SYMBOL_GPL(ata_bmdma_stop); 5845EXPORT_SYMBOL_GPL(ata_bmdma_stop);
5846EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
5847EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
5848EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
5849EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
5850EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
4935EXPORT_SYMBOL_GPL(ata_port_probe); 5851EXPORT_SYMBOL_GPL(ata_port_probe);
5852EXPORT_SYMBOL_GPL(sata_set_spd);
5853EXPORT_SYMBOL_GPL(sata_phy_debounce);
5854EXPORT_SYMBOL_GPL(sata_phy_resume);
4936EXPORT_SYMBOL_GPL(sata_phy_reset); 5855EXPORT_SYMBOL_GPL(sata_phy_reset);
4937EXPORT_SYMBOL_GPL(__sata_phy_reset); 5856EXPORT_SYMBOL_GPL(__sata_phy_reset);
4938EXPORT_SYMBOL_GPL(ata_bus_reset); 5857EXPORT_SYMBOL_GPL(ata_bus_reset);
4939EXPORT_SYMBOL_GPL(ata_std_probeinit); 5858EXPORT_SYMBOL_GPL(ata_std_prereset);
4940EXPORT_SYMBOL_GPL(ata_std_softreset); 5859EXPORT_SYMBOL_GPL(ata_std_softreset);
4941EXPORT_SYMBOL_GPL(sata_std_hardreset); 5860EXPORT_SYMBOL_GPL(sata_std_hardreset);
4942EXPORT_SYMBOL_GPL(ata_std_postreset); 5861EXPORT_SYMBOL_GPL(ata_std_postreset);
4943EXPORT_SYMBOL_GPL(ata_std_probe_reset);
4944EXPORT_SYMBOL_GPL(ata_drive_probe_reset);
4945EXPORT_SYMBOL_GPL(ata_dev_revalidate); 5862EXPORT_SYMBOL_GPL(ata_dev_revalidate);
4946EXPORT_SYMBOL_GPL(ata_dev_classify); 5863EXPORT_SYMBOL_GPL(ata_dev_classify);
4947EXPORT_SYMBOL_GPL(ata_dev_pair); 5864EXPORT_SYMBOL_GPL(ata_dev_pair);
4948EXPORT_SYMBOL_GPL(ata_port_disable); 5865EXPORT_SYMBOL_GPL(ata_port_disable);
4949EXPORT_SYMBOL_GPL(ata_ratelimit); 5866EXPORT_SYMBOL_GPL(ata_ratelimit);
5867EXPORT_SYMBOL_GPL(ata_wait_register);
4950EXPORT_SYMBOL_GPL(ata_busy_sleep); 5868EXPORT_SYMBOL_GPL(ata_busy_sleep);
4951EXPORT_SYMBOL_GPL(ata_port_queue_task); 5869EXPORT_SYMBOL_GPL(ata_port_queue_task);
4952EXPORT_SYMBOL_GPL(ata_scsi_ioctl); 5870EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
4953EXPORT_SYMBOL_GPL(ata_scsi_queuecmd); 5871EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
4954EXPORT_SYMBOL_GPL(ata_scsi_slave_config); 5872EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
5873EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
5874EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
4955EXPORT_SYMBOL_GPL(ata_scsi_release); 5875EXPORT_SYMBOL_GPL(ata_scsi_release);
4956EXPORT_SYMBOL_GPL(ata_host_intr); 5876EXPORT_SYMBOL_GPL(ata_host_intr);
5877EXPORT_SYMBOL_GPL(sata_scr_valid);
5878EXPORT_SYMBOL_GPL(sata_scr_read);
5879EXPORT_SYMBOL_GPL(sata_scr_write);
5880EXPORT_SYMBOL_GPL(sata_scr_write_flush);
5881EXPORT_SYMBOL_GPL(ata_port_online);
5882EXPORT_SYMBOL_GPL(ata_port_offline);
4957EXPORT_SYMBOL_GPL(ata_id_string); 5883EXPORT_SYMBOL_GPL(ata_id_string);
4958EXPORT_SYMBOL_GPL(ata_id_c_string); 5884EXPORT_SYMBOL_GPL(ata_id_c_string);
4959EXPORT_SYMBOL_GPL(ata_scsi_simulate); 5885EXPORT_SYMBOL_GPL(ata_scsi_simulate);
4960EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
4961EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
4962 5886
4963EXPORT_SYMBOL_GPL(ata_pio_need_iordy); 5887EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
4964EXPORT_SYMBOL_GPL(ata_timing_compute); 5888EXPORT_SYMBOL_GPL(ata_timing_compute);
@@ -4980,3 +5904,13 @@ EXPORT_SYMBOL_GPL(ata_device_suspend);
4980EXPORT_SYMBOL_GPL(ata_device_resume); 5904EXPORT_SYMBOL_GPL(ata_device_resume);
4981EXPORT_SYMBOL_GPL(ata_scsi_device_suspend); 5905EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
4982EXPORT_SYMBOL_GPL(ata_scsi_device_resume); 5906EXPORT_SYMBOL_GPL(ata_scsi_device_resume);
5907
5908EXPORT_SYMBOL_GPL(ata_eng_timeout);
5909EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
5910EXPORT_SYMBOL_GPL(ata_port_abort);
5911EXPORT_SYMBOL_GPL(ata_port_freeze);
5912EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
5913EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
5914EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
5915EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
5916EXPORT_SYMBOL_GPL(ata_do_eh);
diff --git a/drivers/scsi/libata-eh.c b/drivers/scsi/libata-eh.c
new file mode 100644
index 000000000000..823385981a7a
--- /dev/null
+++ b/drivers/scsi/libata-eh.c
@@ -0,0 +1,1907 @@
1/*
2 * libata-eh.c - libata error handling
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2006 Tejun Heo <htejun@gmail.com>
9 *
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License as
13 * published by the Free Software Foundation; either version 2, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; see the file COPYING. If not, write to
23 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
24 * USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
33 */
34
35#include <linux/config.h>
36#include <linux/kernel.h>
37#include <scsi/scsi.h>
38#include <scsi/scsi_host.h>
39#include <scsi/scsi_eh.h>
40#include <scsi/scsi_device.h>
41#include <scsi/scsi_cmnd.h>
42#include "scsi_transport_api.h"
43
44#include <linux/libata.h>
45
46#include "libata.h"
47
48static void __ata_port_freeze(struct ata_port *ap);
49static void ata_eh_finish(struct ata_port *ap);
50
51static void ata_ering_record(struct ata_ering *ering, int is_io,
52 unsigned int err_mask)
53{
54 struct ata_ering_entry *ent;
55
56 WARN_ON(!err_mask);
57
58 ering->cursor++;
59 ering->cursor %= ATA_ERING_SIZE;
60
61 ent = &ering->ring[ering->cursor];
62 ent->is_io = is_io;
63 ent->err_mask = err_mask;
64 ent->timestamp = get_jiffies_64();
65}
66
67static struct ata_ering_entry * ata_ering_top(struct ata_ering *ering)
68{
69 struct ata_ering_entry *ent = &ering->ring[ering->cursor];
70 if (!ent->err_mask)
71 return NULL;
72 return ent;
73}
74
75static int ata_ering_map(struct ata_ering *ering,
76 int (*map_fn)(struct ata_ering_entry *, void *),
77 void *arg)
78{
79 int idx, rc = 0;
80 struct ata_ering_entry *ent;
81
82 idx = ering->cursor;
83 do {
84 ent = &ering->ring[idx];
85 if (!ent->err_mask)
86 break;
87 rc = map_fn(ent, arg);
88 if (rc)
89 break;
90 idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE;
91 } while (idx != ering->cursor);
92
93 return rc;
94}
95
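ata_ering_record() and ata_ering_map() above implement a small circular log: the cursor always points at the most recent entry, and iteration walks backwards from the cursor until it hits an empty slot or completes a full lap. A self-contained model of the same structure; RING_SIZE and the entry fields are simplified relative to struct ata_ering.

    #include <stdio.h>

    #define RING_SIZE 8

    struct entry { unsigned int err_mask; };

    struct ering {
        int cursor;                      /* most recent entry */
        struct entry ring[RING_SIZE];
    };

    static void ering_record(struct ering *e, unsigned int err_mask)
    {
        e->cursor = (e->cursor + 1) % RING_SIZE;
        e->ring[e->cursor].err_mask = err_mask;
    }

    /* Visit entries newest-first; stop at an empty slot or a full lap. */
    static void ering_map(struct ering *e,
                          void (*fn)(const struct entry *))
    {
        int idx = e->cursor;
        do {
            if (!e->ring[idx].err_mask)
                break;
            fn(&e->ring[idx]);
            idx = (idx - 1 + RING_SIZE) % RING_SIZE;
        } while (idx != e->cursor);
    }

    static void show(const struct entry *ent)
    {
        printf("err_mask 0x%x\n", ent->err_mask);
    }

    int main(void)
    {
        struct ering e = { 0 };

        ering_record(&e, 0x1);
        ering_record(&e, 0x4);
        ering_map(&e, show);             /* prints 0x4 then 0x1 */
        return 0;
    }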
96/**
97 * ata_scsi_timed_out - SCSI layer time out callback
98 * @cmd: timed out SCSI command
99 *
100 * Handles SCSI layer timeout. We race with normal completion of
101 * the qc for @cmd. If the qc is already gone, we lose and let
102 * the scsi command finish (EH_HANDLED). Otherwise, the qc has
103 * timed out and EH should be invoked. Prevent ata_qc_complete()
104 * from finishing it by setting EH_SCHEDULED and return
105 * EH_NOT_HANDLED.
106 *
107 * TODO: kill this function once old EH is gone.
108 *
109 * LOCKING:
110 * Called from timer context
111 *
112 * RETURNS:
113 * EH_HANDLED or EH_NOT_HANDLED
114 */
115enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
116{
117 struct Scsi_Host *host = cmd->device->host;
118 struct ata_port *ap = ata_shost_to_port(host);
119 unsigned long flags;
120 struct ata_queued_cmd *qc;
121 enum scsi_eh_timer_return ret;
122
123 DPRINTK("ENTER\n");
124
125 if (ap->ops->error_handler) {
126 ret = EH_NOT_HANDLED;
127 goto out;
128 }
129
130 ret = EH_HANDLED;
131 spin_lock_irqsave(ap->lock, flags);
132 qc = ata_qc_from_tag(ap, ap->active_tag);
133 if (qc) {
134 WARN_ON(qc->scsicmd != cmd);
135 qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
136 qc->err_mask |= AC_ERR_TIMEOUT;
137 ret = EH_NOT_HANDLED;
138 }
139 spin_unlock_irqrestore(ap->lock, flags);
140
141 out:
142 DPRINTK("EXIT, ret=%d\n", ret);
143 return ret;
144}
145
146/**
147 * ata_scsi_error - SCSI layer error handler callback
148 * @host: SCSI host on which error occurred
149 *
150 * Handles SCSI-layer-thrown error events.
151 *
152 * LOCKING:
153 * Inherited from SCSI layer (none, can sleep)
154 *
155 * RETURNS:
 156 * Nothing.
157 */
158void ata_scsi_error(struct Scsi_Host *host)
159{
160 struct ata_port *ap = ata_shost_to_port(host);
161 spinlock_t *ap_lock = ap->lock;
162 int i, repeat_cnt = ATA_EH_MAX_REPEAT;
163 unsigned long flags;
164
165 DPRINTK("ENTER\n");
166
167 /* synchronize with port task */
168 ata_port_flush_task(ap);
169
170 /* synchronize with host_set lock and sort out timeouts */
171
172 /* For new EH, all qcs are finished in one of three ways -
173 * normal completion, error completion, and SCSI timeout.
 174 * Both completions can race against the SCSI timeout. When normal
175 * completion wins, the qc never reaches EH. When error
176 * completion wins, the qc has ATA_QCFLAG_FAILED set.
177 *
178 * When SCSI timeout wins, things are a bit more complex.
179 * Normal or error completion can occur after the timeout but
180 * before this point. In such cases, both types of
181 * completions are honored. A scmd is determined to have
182 * timed out iff its associated qc is active and not failed.
183 */
184 if (ap->ops->error_handler) {
185 struct scsi_cmnd *scmd, *tmp;
186 int nr_timedout = 0;
187
188 spin_lock_irqsave(ap_lock, flags);
189
190 list_for_each_entry_safe(scmd, tmp, &host->eh_cmd_q, eh_entry) {
191 struct ata_queued_cmd *qc;
192
193 for (i = 0; i < ATA_MAX_QUEUE; i++) {
194 qc = __ata_qc_from_tag(ap, i);
195 if (qc->flags & ATA_QCFLAG_ACTIVE &&
196 qc->scsicmd == scmd)
197 break;
198 }
199
200 if (i < ATA_MAX_QUEUE) {
201 /* the scmd has an associated qc */
202 if (!(qc->flags & ATA_QCFLAG_FAILED)) {
203 /* which hasn't failed yet, timeout */
204 qc->err_mask |= AC_ERR_TIMEOUT;
205 qc->flags |= ATA_QCFLAG_FAILED;
206 nr_timedout++;
207 }
208 } else {
209 /* Normal completion occurred after
210 * SCSI timeout but before this point.
211 * Successfully complete it.
212 */
213 scmd->retries = scmd->allowed;
214 scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
215 }
216 }
217
 218 /* If we have timed out qcs, they belong to EH from
 219 * this point on, but the state of the controller is
220 * unknown. Freeze the port to make sure the IRQ
221 * handler doesn't diddle with those qcs. This must
222 * be done atomically w.r.t. setting QCFLAG_FAILED.
223 */
224 if (nr_timedout)
225 __ata_port_freeze(ap);
226
227 spin_unlock_irqrestore(ap_lock, flags);
228 } else
229 spin_unlock_wait(ap_lock);
230
231 repeat:
232 /* invoke error handler */
233 if (ap->ops->error_handler) {
234 /* fetch & clear EH info */
235 spin_lock_irqsave(ap_lock, flags);
236
237 memset(&ap->eh_context, 0, sizeof(ap->eh_context));
238 ap->eh_context.i = ap->eh_info;
239 memset(&ap->eh_info, 0, sizeof(ap->eh_info));
240
241 ap->flags |= ATA_FLAG_EH_IN_PROGRESS;
242 ap->flags &= ~ATA_FLAG_EH_PENDING;
243
244 spin_unlock_irqrestore(ap_lock, flags);
245
246 /* invoke EH. if unloading, just finish failed qcs */
247 if (!(ap->flags & ATA_FLAG_UNLOADING))
248 ap->ops->error_handler(ap);
249 else
250 ata_eh_finish(ap);
251
 252 /* An exception might have happened after ->error_handler
253 * recovered the port but before this point. Repeat
254 * EH in such case.
255 */
256 spin_lock_irqsave(ap_lock, flags);
257
258 if (ap->flags & ATA_FLAG_EH_PENDING) {
259 if (--repeat_cnt) {
260 ata_port_printk(ap, KERN_INFO,
261 "EH pending after completion, "
262 "repeating EH (cnt=%d)\n", repeat_cnt);
263 spin_unlock_irqrestore(ap_lock, flags);
264 goto repeat;
265 }
266 ata_port_printk(ap, KERN_ERR, "EH pending after %d "
267 "tries, giving up\n", ATA_EH_MAX_REPEAT);
268 }
269
270 /* this run is complete, make sure EH info is clear */
271 memset(&ap->eh_info, 0, sizeof(ap->eh_info));
272
273 /* Clear host_eh_scheduled while holding ap_lock such
274 * that if exception occurs after this point but
275 * before EH completion, SCSI midlayer will
276 * re-initiate EH.
277 */
278 host->host_eh_scheduled = 0;
279
280 spin_unlock_irqrestore(ap_lock, flags);
281 } else {
282 WARN_ON(ata_qc_from_tag(ap, ap->active_tag) == NULL);
283 ap->ops->eng_timeout(ap);
284 }
285
286 /* finish or retry handled scmd's and clean up */
287 WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q));
288
289 scsi_eh_flush_done_q(&ap->eh_done_q);
290
291 /* clean up */
292 spin_lock_irqsave(ap_lock, flags);
293
294 if (ap->flags & ATA_FLAG_LOADING) {
295 ap->flags &= ~ATA_FLAG_LOADING;
296 } else {
297 if (ap->flags & ATA_FLAG_SCSI_HOTPLUG)
298 queue_work(ata_aux_wq, &ap->hotplug_task);
299 if (ap->flags & ATA_FLAG_RECOVERED)
300 ata_port_printk(ap, KERN_INFO, "EH complete\n");
301 }
302
303 ap->flags &= ~(ATA_FLAG_SCSI_HOTPLUG | ATA_FLAG_RECOVERED);
304
305 /* tell wait_eh that we're done */
306 ap->flags &= ~ATA_FLAG_EH_IN_PROGRESS;
307 wake_up_all(&ap->eh_wait_q);
308
309 spin_unlock_irqrestore(ap_lock, flags);
310
311 DPRINTK("EXIT\n");
312}
313
314/**
315 * ata_port_wait_eh - Wait for the currently pending EH to complete
316 * @ap: Port to wait EH for
317 *
318 * Wait until the currently pending EH is complete.
319 *
320 * LOCKING:
321 * Kernel thread context (may sleep).
322 */
323void ata_port_wait_eh(struct ata_port *ap)
324{
325 unsigned long flags;
326 DEFINE_WAIT(wait);
327
328 retry:
329 spin_lock_irqsave(ap->lock, flags);
330
331 while (ap->flags & (ATA_FLAG_EH_PENDING | ATA_FLAG_EH_IN_PROGRESS)) {
332 prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
333 spin_unlock_irqrestore(ap->lock, flags);
334 schedule();
335 spin_lock_irqsave(ap->lock, flags);
336 }
337 finish_wait(&ap->eh_wait_q, &wait);
338
339 spin_unlock_irqrestore(ap->lock, flags);
340
341 /* make sure SCSI EH is complete */
342 if (scsi_host_in_recovery(ap->host)) {
343 msleep(10);
344 goto retry;
345 }
346}
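
For illustration only: a hypothetical thread-context helper that forces a recovery cycle by pairing ata_port_schedule_eh() (defined below) with ata_port_wait_eh().

/* Hypothetical helper: kick EH on @ap and sleep until it completes.
 * ata_port_schedule_eh() must be called under ap->lock;
 * ata_port_wait_eh() may sleep, so call from thread context only.
 */
static void example_force_eh_sync(struct ata_port *ap)
{
	unsigned long flags;

	spin_lock_irqsave(ap->lock, flags);
	ata_port_schedule_eh(ap);
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);
}
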
347
348/**
349 * ata_qc_timeout - Handle timeout of queued command
350 * @qc: Command that timed out
351 *
352 * Some part of the kernel (currently, only the SCSI layer)
353 * has noticed that the active command on port @ap has not
354 * completed after a specified length of time. Handle this
355 * condition by disabling DMA (if necessary) and completing
356 * transactions, with error if necessary.
357 *
358 * This also handles the case of the "lost interrupt", where
359 * for some reason (possibly hardware bug, possibly driver bug)
360 * an interrupt was not delivered to the driver, even though the
361 * transaction completed successfully.
362 *
363 * TODO: kill this function once old EH is gone.
364 *
365 * LOCKING:
366 * Inherited from SCSI layer (none, can sleep)
367 */
368static void ata_qc_timeout(struct ata_queued_cmd *qc)
369{
370 struct ata_port *ap = qc->ap;
371 u8 host_stat = 0, drv_stat;
372 unsigned long flags;
373
374 DPRINTK("ENTER\n");
375
376 ap->hsm_task_state = HSM_ST_IDLE;
377
378 spin_lock_irqsave(ap->lock, flags);
379
380 switch (qc->tf.protocol) {
381
382 case ATA_PROT_DMA:
383 case ATA_PROT_ATAPI_DMA:
384 host_stat = ap->ops->bmdma_status(ap);
385
386 /* before we do anything else, clear DMA-Start bit */
387 ap->ops->bmdma_stop(qc);
388
389 /* fall through */
390
391 default:
392 ata_altstatus(ap);
393 drv_stat = ata_chk_status(ap);
394
395 /* ack bmdma irq events */
396 ap->ops->irq_clear(ap);
397
398 ata_dev_printk(qc->dev, KERN_ERR, "command 0x%x timeout, "
399 "stat 0x%x host_stat 0x%x\n",
400 qc->tf.command, drv_stat, host_stat);
401
402 /* complete taskfile transaction */
403 qc->err_mask |= AC_ERR_TIMEOUT;
404 break;
405 }
406
407 spin_unlock_irqrestore(ap->lock, flags);
408
409 ata_eh_qc_complete(qc);
410
411 DPRINTK("EXIT\n");
412}
413
414/**
415 * ata_eng_timeout - Handle timeout of queued command
416 * @ap: Port on which timed-out command is active
417 *
418 * Some part of the kernel (currently, only the SCSI layer)
419 * has noticed that the active command on port @ap has not
420 * completed after a specified length of time. Handle this
421 * condition by disabling DMA (if necessary) and completing
422 * transactions, with error if necessary.
423 *
424 * This also handles the case of the "lost interrupt", where
425 * for some reason (possibly hardware bug, possibly driver bug)
426 * an interrupt was not delivered to the driver, even though the
427 * transaction completed successfully.
428 *
429 * TODO: kill this function once old EH is gone.
430 *
431 * LOCKING:
432 * Inherited from SCSI layer (none, can sleep)
433 */
434void ata_eng_timeout(struct ata_port *ap)
435{
436 DPRINTK("ENTER\n");
437
438 ata_qc_timeout(ata_qc_from_tag(ap, ap->active_tag));
439
440 DPRINTK("EXIT\n");
441}
442
443/**
444 * ata_qc_schedule_eh - schedule qc for error handling
445 * @qc: command to schedule error handling for
446 *
447 * Schedule error handling for @qc. EH will kick in as soon as
448 * other commands are drained.
449 *
450 * LOCKING:
451 * spin_lock_irqsave(host_set lock)
452 */
453void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
454{
455 struct ata_port *ap = qc->ap;
456
457 WARN_ON(!ap->ops->error_handler);
458
459 qc->flags |= ATA_QCFLAG_FAILED;
460 qc->ap->flags |= ATA_FLAG_EH_PENDING;
461
462 /* The following will fail if timeout has already expired.
463 * ata_scsi_error() takes care of such scmds on EH entry.
464 * Note that ATA_QCFLAG_FAILED is unconditionally set after
465 * this function completes.
466 */
467 scsi_req_abort_cmd(qc->scsicmd);
468}
469
470/**
471 * ata_port_schedule_eh - schedule error handling without a qc
472 * @ap: ATA port to schedule EH for
473 *
474 * Schedule error handling for @ap. EH will kick in as soon as
475 * all commands are drained.
476 *
477 * LOCKING:
478 * spin_lock_irqsave(host_set lock)
479 */
480void ata_port_schedule_eh(struct ata_port *ap)
481{
482 WARN_ON(!ap->ops->error_handler);
483
484 ap->flags |= ATA_FLAG_EH_PENDING;
485 scsi_schedule_eh(ap->host);
486
487 DPRINTK("port EH scheduled\n");
488}
489
490/**
491 * ata_port_abort - abort all qc's on the port
492 * @ap: ATA port to abort qc's for
493 *
494 * Abort all active qc's of @ap and schedule EH.
495 *
496 * LOCKING:
497 * spin_lock_irqsave(host_set lock)
498 *
499 * RETURNS:
500 * Number of aborted qc's.
501 */
502int ata_port_abort(struct ata_port *ap)
503{
504 int tag, nr_aborted = 0;
505
506 WARN_ON(!ap->ops->error_handler);
507
508 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
509 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
510
511 if (qc) {
512 qc->flags |= ATA_QCFLAG_FAILED;
513 ata_qc_complete(qc);
514 nr_aborted++;
515 }
516 }
517
518 if (!nr_aborted)
519 ata_port_schedule_eh(ap);
520
521 return nr_aborted;
522}
523
524/**
525 * __ata_port_freeze - freeze port
526 * @ap: ATA port to freeze
527 *
 528 * This function is called when an HSM violation or some other
 529 * condition disrupts normal operation of the port. A frozen
 530 * port is not allowed to perform any operation until the port
 531 * is thawed, which usually follows a successful reset.
532 *
 533 * The ap->ops->freeze() callback can be used to freeze the port
 534 * hardware-wise (e.g. mask interrupts and stop the DMA engine). If a
535 * port cannot be frozen hardware-wise, the interrupt handler
536 * must ack and clear interrupts unconditionally while the port
537 * is frozen.
538 *
539 * LOCKING:
540 * spin_lock_irqsave(host_set lock)
541 */
542static void __ata_port_freeze(struct ata_port *ap)
543{
544 WARN_ON(!ap->ops->error_handler);
545
546 if (ap->ops->freeze)
547 ap->ops->freeze(ap);
548
549 ap->flags |= ATA_FLAG_FROZEN;
550
551 DPRINTK("ata%u port frozen\n", ap->id);
552}
553
554/**
555 * ata_port_freeze - abort & freeze port
556 * @ap: ATA port to freeze
557 *
558 * Abort and freeze @ap.
559 *
560 * LOCKING:
561 * spin_lock_irqsave(host_set lock)
562 *
563 * RETURNS:
564 * Number of aborted commands.
565 */
566int ata_port_freeze(struct ata_port *ap)
567{
568 int nr_aborted;
569
570 WARN_ON(!ap->ops->error_handler);
571
572 nr_aborted = ata_port_abort(ap);
573 __ata_port_freeze(ap);
574
575 return nr_aborted;
576}
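
A hedged sketch of the usage pattern described above; the function and the error condition are placeholders, real drivers decode their own hardware status.

/* Illustrative fragment, assumed to run with the host lock held.
 * The error cause below is a stand-in for driver-specific decoding.
 */
static void example_handle_fatal_irq(struct ata_port *ap)
{
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);

	if (qc)
		qc->err_mask |= AC_ERR_HSM;	/* placeholder cause */

	/* abort all active qcs and freeze; EH will reset and thaw */
	ata_port_freeze(ap);
}
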
577
578/**
579 * ata_eh_freeze_port - EH helper to freeze port
580 * @ap: ATA port to freeze
581 *
582 * Freeze @ap.
583 *
584 * LOCKING:
585 * None.
586 */
587void ata_eh_freeze_port(struct ata_port *ap)
588{
589 unsigned long flags;
590
591 if (!ap->ops->error_handler)
592 return;
593
594 spin_lock_irqsave(ap->lock, flags);
595 __ata_port_freeze(ap);
596 spin_unlock_irqrestore(ap->lock, flags);
597}
598
599/**
 600 * ata_eh_thaw_port - EH helper to thaw port
601 * @ap: ATA port to thaw
602 *
603 * Thaw frozen port @ap.
604 *
605 * LOCKING:
606 * None.
607 */
608void ata_eh_thaw_port(struct ata_port *ap)
609{
610 unsigned long flags;
611
612 if (!ap->ops->error_handler)
613 return;
614
615 spin_lock_irqsave(ap->lock, flags);
616
617 ap->flags &= ~ATA_FLAG_FROZEN;
618
619 if (ap->ops->thaw)
620 ap->ops->thaw(ap);
621
622 spin_unlock_irqrestore(ap->lock, flags);
623
624 DPRINTK("ata%u port thawed\n", ap->id);
625}
626
627static void ata_eh_scsidone(struct scsi_cmnd *scmd)
628{
629 /* nada */
630}
631
632static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
633{
634 struct ata_port *ap = qc->ap;
635 struct scsi_cmnd *scmd = qc->scsicmd;
636 unsigned long flags;
637
638 spin_lock_irqsave(ap->lock, flags);
639 qc->scsidone = ata_eh_scsidone;
640 __ata_qc_complete(qc);
641 WARN_ON(ata_tag_valid(qc->tag));
642 spin_unlock_irqrestore(ap->lock, flags);
643
644 scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
645}
646
647/**
648 * ata_eh_qc_complete - Complete an active ATA command from EH
649 * @qc: Command to complete
650 *
651 * Indicate to the mid and upper layers that an ATA command has
652 * completed. To be used from EH.
653 */
654void ata_eh_qc_complete(struct ata_queued_cmd *qc)
655{
656 struct scsi_cmnd *scmd = qc->scsicmd;
657 scmd->retries = scmd->allowed;
658 __ata_eh_qc_complete(qc);
659}
660
661/**
662 * ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
663 * @qc: Command to retry
664 *
665 * Indicate to the mid and upper layers that an ATA command
666 * should be retried. To be used from EH.
667 *
668 * SCSI midlayer limits the number of retries to scmd->allowed.
669 * scmd->retries is decremented for commands which get retried
670 * due to unrelated failures (qc->err_mask is zero).
671 */
672void ata_eh_qc_retry(struct ata_queued_cmd *qc)
673{
674 struct scsi_cmnd *scmd = qc->scsicmd;
675 if (!qc->err_mask && scmd->retries)
676 scmd->retries--;
677 __ata_eh_qc_complete(qc);
678}
679
680/**
681 * ata_eh_detach_dev - detach ATA device
682 * @dev: ATA device to detach
683 *
684 * Detach @dev.
685 *
686 * LOCKING:
687 * None.
688 */
689static void ata_eh_detach_dev(struct ata_device *dev)
690{
691 struct ata_port *ap = dev->ap;
692 unsigned long flags;
693
694 ata_dev_disable(dev);
695
696 spin_lock_irqsave(ap->lock, flags);
697
698 dev->flags &= ~ATA_DFLAG_DETACH;
699
700 if (ata_scsi_offline_dev(dev)) {
701 dev->flags |= ATA_DFLAG_DETACHED;
702 ap->flags |= ATA_FLAG_SCSI_HOTPLUG;
703 }
704
705 spin_unlock_irqrestore(ap->lock, flags);
706}
707
708static void ata_eh_clear_action(struct ata_device *dev,
709 struct ata_eh_info *ehi, unsigned int action)
710{
711 int i;
712
713 if (!dev) {
714 ehi->action &= ~action;
715 for (i = 0; i < ATA_MAX_DEVICES; i++)
716 ehi->dev_action[i] &= ~action;
717 } else {
718 /* doesn't make sense for port-wide EH actions */
719 WARN_ON(!(action & ATA_EH_PERDEV_MASK));
720
721 /* break ehi->action into ehi->dev_action */
722 if (ehi->action & action) {
723 for (i = 0; i < ATA_MAX_DEVICES; i++)
724 ehi->dev_action[i] |= ehi->action & action;
725 ehi->action &= ~action;
726 }
727
728 /* turn off the specified per-dev action */
729 ehi->dev_action[dev->devno] &= ~action;
730 }
731}
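
A short worked example may help here; dev0 and the values are illustrative.

/* Worked example (illustrative values): with ATA_EH_REVALIDATE
 * pending port-wide,
 *
 *	ata_eh_clear_action(dev0, ehi, ATA_EH_REVALIDATE);
 *
 * first copies the bit into ehi->dev_action[] for every device,
 * clears it from ehi->action, then clears dev_action[0], leaving
 * the revalidation pending for device 1 alone.  (dev0 here is a
 * hypothetical pointer to device 0.)
 */
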
732
733/**
734 * ata_eh_about_to_do - about to perform eh_action
735 * @ap: target ATA port
736 * @dev: target ATA dev for per-dev action (can be NULL)
737 * @action: action about to be performed
738 *
739 * Called just before performing EH actions to clear related bits
 740 * in @ap->eh_info so that EH actions are not unnecessarily
741 * repeated.
742 *
743 * LOCKING:
744 * None.
745 */
746static void ata_eh_about_to_do(struct ata_port *ap, struct ata_device *dev,
747 unsigned int action)
748{
749 unsigned long flags;
750
751 spin_lock_irqsave(ap->lock, flags);
752 ata_eh_clear_action(dev, &ap->eh_info, action);
753 ap->flags |= ATA_FLAG_RECOVERED;
754 spin_unlock_irqrestore(ap->lock, flags);
755}
756
757/**
758 * ata_eh_done - EH action complete
759 * @ap: target ATA port
760 * @dev: target ATA dev for per-dev action (can be NULL)
761 * @action: action just completed
762 *
763 * Called right after performing EH actions to clear related bits
764 * in @ap->eh_context.
765 *
766 * LOCKING:
767 * None.
768 */
769static void ata_eh_done(struct ata_port *ap, struct ata_device *dev,
770 unsigned int action)
771{
772 ata_eh_clear_action(dev, &ap->eh_context.i, action);
773}
774
775/**
776 * ata_err_string - convert err_mask to descriptive string
777 * @err_mask: error mask to convert to string
778 *
779 * Convert @err_mask to descriptive string. Errors are
780 * prioritized according to severity and only the most severe
781 * error is reported.
782 *
783 * LOCKING:
784 * None.
785 *
786 * RETURNS:
787 * Descriptive string for @err_mask
788 */
789static const char * ata_err_string(unsigned int err_mask)
790{
791 if (err_mask & AC_ERR_HOST_BUS)
792 return "host bus error";
793 if (err_mask & AC_ERR_ATA_BUS)
794 return "ATA bus error";
795 if (err_mask & AC_ERR_TIMEOUT)
796 return "timeout";
797 if (err_mask & AC_ERR_HSM)
798 return "HSM violation";
799 if (err_mask & AC_ERR_SYSTEM)
800 return "internal error";
801 if (err_mask & AC_ERR_MEDIA)
802 return "media error";
803 if (err_mask & AC_ERR_INVALID)
804 return "invalid argument";
805 if (err_mask & AC_ERR_DEV)
806 return "device error";
807 return "unknown error";
808}
809
810/**
811 * ata_read_log_page - read a specific log page
812 * @dev: target device
813 * @page: page to read
814 * @buf: buffer to store read page
815 * @sectors: number of sectors to read
816 *
817 * Read log page using READ_LOG_EXT command.
818 *
819 * LOCKING:
820 * Kernel thread context (may sleep).
821 *
822 * RETURNS:
823 * 0 on success, AC_ERR_* mask otherwise.
824 */
825static unsigned int ata_read_log_page(struct ata_device *dev,
826 u8 page, void *buf, unsigned int sectors)
827{
828 struct ata_taskfile tf;
829 unsigned int err_mask;
830
831 DPRINTK("read log page - page %d\n", page);
832
833 ata_tf_init(dev, &tf);
834 tf.command = ATA_CMD_READ_LOG_EXT;
835 tf.lbal = page;
836 tf.nsect = sectors;
837 tf.hob_nsect = sectors >> 8;
838 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;
839 tf.protocol = ATA_PROT_PIO;
840
841 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
842 buf, sectors * ATA_SECT_SIZE);
843
844 DPRINTK("EXIT, err_mask=%x\n", err_mask);
845 return err_mask;
846}
847
848/**
849 * ata_eh_read_log_10h - Read log page 10h for NCQ error details
850 * @dev: Device to read log page 10h from
851 * @tag: Resulting tag of the failed command
852 * @tf: Resulting taskfile registers of the failed command
853 *
 854 * Read log page 10h to obtain NCQ error details and clear the
 855 * error condition.
856 *
857 * LOCKING:
858 * Kernel thread context (may sleep).
859 *
860 * RETURNS:
861 * 0 on success, -errno otherwise.
862 */
863static int ata_eh_read_log_10h(struct ata_device *dev,
864 int *tag, struct ata_taskfile *tf)
865{
866 u8 *buf = dev->ap->sector_buf;
867 unsigned int err_mask;
868 u8 csum;
869 int i;
870
871 err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, buf, 1);
872 if (err_mask)
873 return -EIO;
874
875 csum = 0;
876 for (i = 0; i < ATA_SECT_SIZE; i++)
877 csum += buf[i];
878 if (csum)
879 ata_dev_printk(dev, KERN_WARNING,
880 "invalid checksum 0x%x on log page 10h\n", csum);
881
882 if (buf[0] & 0x80)
883 return -ENOENT;
884
885 *tag = buf[0] & 0x1f;
886
887 tf->command = buf[2];
888 tf->feature = buf[3];
889 tf->lbal = buf[4];
890 tf->lbam = buf[5];
891 tf->lbah = buf[6];
892 tf->device = buf[7];
893 tf->hob_lbal = buf[8];
894 tf->hob_lbam = buf[9];
895 tf->hob_lbah = buf[10];
896 tf->nsect = buf[12];
897 tf->hob_nsect = buf[13];
898
899 return 0;
900}
901
902/**
903 * atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
904 * @dev: device to perform REQUEST_SENSE to
905 * @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
906 *
 907 * Perform ATAPI REQUEST_SENSE after the device reported CHECK
 908 * CONDITION. This function is an EH helper.
909 *
910 * LOCKING:
911 * Kernel thread context (may sleep).
912 *
913 * RETURNS:
914 * 0 on success, AC_ERR_* mask on failure
915 */
916static unsigned int atapi_eh_request_sense(struct ata_device *dev,
917 unsigned char *sense_buf)
918{
919 struct ata_port *ap = dev->ap;
920 struct ata_taskfile tf;
921 u8 cdb[ATAPI_CDB_LEN];
922
923 DPRINTK("ATAPI request sense\n");
924
925 ata_tf_init(dev, &tf);
926
927 /* FIXME: is this needed? */
928 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
929
930 /* XXX: why tf_read here? */
931 ap->ops->tf_read(ap, &tf);
932
933 /* fill these in, for the case where they are -not- overwritten */
934 sense_buf[0] = 0x70;
935 sense_buf[2] = tf.feature >> 4;
936
937 memset(cdb, 0, ATAPI_CDB_LEN);
938 cdb[0] = REQUEST_SENSE;
939 cdb[4] = SCSI_SENSE_BUFFERSIZE;
940
941 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
942 tf.command = ATA_CMD_PACKET;
943
944 /* is it pointless to prefer PIO for "safety reasons"? */
945 if (ap->flags & ATA_FLAG_PIO_DMA) {
946 tf.protocol = ATA_PROT_ATAPI_DMA;
947 tf.feature |= ATAPI_PKT_DMA;
948 } else {
949 tf.protocol = ATA_PROT_ATAPI;
950 tf.lbam = (8 * 1024) & 0xff;
951 tf.lbah = (8 * 1024) >> 8;
952 }
953
954 return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
955 sense_buf, SCSI_SENSE_BUFFERSIZE);
956}
957
958/**
959 * ata_eh_analyze_serror - analyze SError for a failed port
960 * @ap: ATA port to analyze SError for
961 *
962 * Analyze SError if available and further determine cause of
963 * failure.
964 *
965 * LOCKING:
966 * None.
967 */
968static void ata_eh_analyze_serror(struct ata_port *ap)
969{
970 struct ata_eh_context *ehc = &ap->eh_context;
971 u32 serror = ehc->i.serror;
972 unsigned int err_mask = 0, action = 0;
973
974 if (serror & SERR_PERSISTENT) {
975 err_mask |= AC_ERR_ATA_BUS;
976 action |= ATA_EH_HARDRESET;
977 }
978 if (serror &
979 (SERR_DATA_RECOVERED | SERR_COMM_RECOVERED | SERR_DATA)) {
980 err_mask |= AC_ERR_ATA_BUS;
981 action |= ATA_EH_SOFTRESET;
982 }
983 if (serror & SERR_PROTOCOL) {
984 err_mask |= AC_ERR_HSM;
985 action |= ATA_EH_SOFTRESET;
986 }
987 if (serror & SERR_INTERNAL) {
988 err_mask |= AC_ERR_SYSTEM;
989 action |= ATA_EH_SOFTRESET;
990 }
991 if (serror & (SERR_PHYRDY_CHG | SERR_DEV_XCHG))
992 ata_ehi_hotplugged(&ehc->i);
993
994 ehc->i.err_mask |= err_mask;
995 ehc->i.action |= action;
996}
997
998/**
999 * ata_eh_analyze_ncq_error - analyze NCQ error
1000 * @ap: ATA port to analyze NCQ error for
1001 *
1002 * Read log page 10h, determine the offending qc and acquire
 1003 * error status TF. For NCQ device errors, all an LLDD has to
 1004 * do is set AC_ERR_DEV in ehi->err_mask. This function takes
1005 * care of the rest.
1006 *
1007 * LOCKING:
1008 * Kernel thread context (may sleep).
1009 */
1010static void ata_eh_analyze_ncq_error(struct ata_port *ap)
1011{
1012 struct ata_eh_context *ehc = &ap->eh_context;
1013 struct ata_device *dev = ap->device;
1014 struct ata_queued_cmd *qc;
1015 struct ata_taskfile tf;
1016 int tag, rc;
1017
1018 /* if frozen, we can't do much */
1019 if (ap->flags & ATA_FLAG_FROZEN)
1020 return;
1021
1022 /* is it NCQ device error? */
1023 if (!ap->sactive || !(ehc->i.err_mask & AC_ERR_DEV))
1024 return;
1025
1026 /* has LLDD analyzed already? */
1027 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1028 qc = __ata_qc_from_tag(ap, tag);
1029
1030 if (!(qc->flags & ATA_QCFLAG_FAILED))
1031 continue;
1032
1033 if (qc->err_mask)
1034 return;
1035 }
1036
1037 /* okay, this error is ours */
1038 rc = ata_eh_read_log_10h(dev, &tag, &tf);
1039 if (rc) {
1040 ata_port_printk(ap, KERN_ERR, "failed to read log page 10h "
1041 "(errno=%d)\n", rc);
1042 return;
1043 }
1044
1045 if (!(ap->sactive & (1 << tag))) {
1046 ata_port_printk(ap, KERN_ERR, "log page 10h reported "
1047 "inactive tag %d\n", tag);
1048 return;
1049 }
1050
1051 /* we've got the perpetrator, condemn it */
1052 qc = __ata_qc_from_tag(ap, tag);
1053 memcpy(&qc->result_tf, &tf, sizeof(tf));
1054 qc->err_mask |= AC_ERR_DEV;
1055 ehc->i.err_mask &= ~AC_ERR_DEV;
1056}
1057
1058/**
1059 * ata_eh_analyze_tf - analyze taskfile of a failed qc
1060 * @qc: qc to analyze
1061 * @tf: Taskfile registers to analyze
1062 *
1063 * Analyze taskfile of @qc and further determine cause of
1064 * failure. This function also requests ATAPI sense data if
 1065 * available.
1066 *
1067 * LOCKING:
1068 * Kernel thread context (may sleep).
1069 *
1070 * RETURNS:
1071 * Determined recovery action
1072 */
1073static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
1074 const struct ata_taskfile *tf)
1075{
1076 unsigned int tmp, action = 0;
1077 u8 stat = tf->command, err = tf->feature;
1078
1079 if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) {
1080 qc->err_mask |= AC_ERR_HSM;
1081 return ATA_EH_SOFTRESET;
1082 }
1083
1084 if (!(qc->err_mask & AC_ERR_DEV))
1085 return 0;
1086
1087 switch (qc->dev->class) {
1088 case ATA_DEV_ATA:
1089 if (err & ATA_ICRC)
1090 qc->err_mask |= AC_ERR_ATA_BUS;
1091 if (err & ATA_UNC)
1092 qc->err_mask |= AC_ERR_MEDIA;
1093 if (err & ATA_IDNF)
1094 qc->err_mask |= AC_ERR_INVALID;
1095 break;
1096
1097 case ATA_DEV_ATAPI:
1098 tmp = atapi_eh_request_sense(qc->dev,
1099 qc->scsicmd->sense_buffer);
1100 if (!tmp) {
1101 /* ATA_QCFLAG_SENSE_VALID is used to tell
1102 * atapi_qc_complete() that sense data is
1103 * already valid.
1104 *
1105 * TODO: interpret sense data and set
1106 * appropriate err_mask.
1107 */
1108 qc->flags |= ATA_QCFLAG_SENSE_VALID;
1109 } else
1110 qc->err_mask |= tmp;
1111 }
1112
1113 if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS))
1114 action |= ATA_EH_SOFTRESET;
1115
1116 return action;
1117}
1118
1119static int ata_eh_categorize_ering_entry(struct ata_ering_entry *ent)
1120{
1121 if (ent->err_mask & (AC_ERR_ATA_BUS | AC_ERR_TIMEOUT))
1122 return 1;
1123
1124 if (ent->is_io) {
1125 if (ent->err_mask & AC_ERR_HSM)
1126 return 1;
1127 if ((ent->err_mask &
1128 (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV)
1129 return 2;
1130 }
1131
1132 return 0;
1133}
1134
1135struct speed_down_needed_arg {
1136 u64 since;
1137 int nr_errors[3];
1138};
1139
1140static int speed_down_needed_cb(struct ata_ering_entry *ent, void *void_arg)
1141{
1142 struct speed_down_needed_arg *arg = void_arg;
1143
1144 if (ent->timestamp < arg->since)
1145 return -1;
1146
1147 arg->nr_errors[ata_eh_categorize_ering_entry(ent)]++;
1148 return 0;
1149}
1150
1151/**
 1152 * ata_eh_speed_down_needed - Determine whether speed down is necessary
1153 * @dev: Device of interest
1154 *
1155 * This function examines error ring of @dev and determines
1156 * whether speed down is necessary. Speed down is necessary if
 1157 * there have been more than 3 Cat-1 errors or 10 Cat-2 errors
 1158 * during the last 15 minutes.
1159 *
 1160 * Cat-1 errors are ATA_BUS or TIMEOUT for any command, and HSM
 1161 * violations for known supported commands.
 1162 *
 1163 * Cat-2 errors are unclassified DEV errors for known supported
 1164 * commands.
1165 *
1166 * LOCKING:
1167 * Inherited from caller.
1168 *
1169 * RETURNS:
1170 * 1 if speed down is necessary, 0 otherwise
1171 */
1172static int ata_eh_speed_down_needed(struct ata_device *dev)
1173{
1174 const u64 interval = 15LLU * 60 * HZ;
1175 static const int err_limits[3] = { -1, 3, 10 };
1176 struct speed_down_needed_arg arg;
1177 struct ata_ering_entry *ent;
1178 int err_cat;
1179 u64 j64;
1180
1181 ent = ata_ering_top(&dev->ering);
1182 if (!ent)
1183 return 0;
1184
1185 err_cat = ata_eh_categorize_ering_entry(ent);
1186 if (err_cat == 0)
1187 return 0;
1188
1189 memset(&arg, 0, sizeof(arg));
1190
1191 j64 = get_jiffies_64();
1192 if (j64 >= interval)
1193 arg.since = j64 - interval;
1194 else
1195 arg.since = 0;
1196
1197 ata_ering_map(&dev->ering, speed_down_needed_cb, &arg);
1198
1199 return arg.nr_errors[err_cat] > err_limits[err_cat];
1200}
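
To make the thresholds concrete, here is a worked example with illustrative numbers only.

/* Worked example (illustrative): suppose the newest ring entry is an
 * AC_ERR_TIMEOUT, i.e. Cat-1.  If four timeouts fall inside the
 * 15-minute window, then
 *
 *	arg.nr_errors[1] == 4  >  err_limits[1] == 3
 *
 * and the function returns 1 (speed down needed).  Eleven or more
 * Cat-2 errors would exceed err_limits[2] == 10 the same way.
 */
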
1201
1202/**
1203 * ata_eh_speed_down - record error and speed down if necessary
1204 * @dev: Failed device
1205 * @is_io: Did the device fail during normal IO?
1206 * @err_mask: err_mask of the error
1207 *
1208 * Record error and examine error history to determine whether
1209 * adjusting transmission speed is necessary. It also sets
1210 * transmission limits appropriately if such adjustment is
1211 * necessary.
1212 *
1213 * LOCKING:
1214 * Kernel thread context (may sleep).
1215 *
1216 * RETURNS:
1217 * 0 on success, -errno otherwise
1218 */
1219static int ata_eh_speed_down(struct ata_device *dev, int is_io,
1220 unsigned int err_mask)
1221{
1222 if (!err_mask)
1223 return 0;
1224
1225 /* record error and determine whether speed down is necessary */
1226 ata_ering_record(&dev->ering, is_io, err_mask);
1227
1228 if (!ata_eh_speed_down_needed(dev))
1229 return 0;
1230
1231 /* speed down SATA link speed if possible */
1232 if (sata_down_spd_limit(dev->ap) == 0)
1233 return ATA_EH_HARDRESET;
1234
1235 /* lower transfer mode */
1236 if (ata_down_xfermask_limit(dev, 0) == 0)
1237 return ATA_EH_SOFTRESET;
1238
1239 ata_dev_printk(dev, KERN_ERR,
1240 "speed down requested but no transfer mode left\n");
1241 return 0;
1242}
1243
1244/**
1245 * ata_eh_autopsy - analyze error and determine recovery action
1246 * @ap: ATA port to perform autopsy on
1247 *
1248 * Analyze why @ap failed and determine which recovery action is
1249 * needed. This function also sets more detailed AC_ERR_* values
 1250 * and fills sense data for ATAPI CHECK CONDITION.
1251 *
1252 * LOCKING:
1253 * Kernel thread context (may sleep).
1254 */
1255static void ata_eh_autopsy(struct ata_port *ap)
1256{
1257 struct ata_eh_context *ehc = &ap->eh_context;
1258 unsigned int action = ehc->i.action;
1259 struct ata_device *failed_dev = NULL;
1260 unsigned int all_err_mask = 0;
1261 int tag, is_io = 0;
1262 u32 serror;
1263 int rc;
1264
1265 DPRINTK("ENTER\n");
1266
1267 /* obtain and analyze SError */
1268 rc = sata_scr_read(ap, SCR_ERROR, &serror);
1269 if (rc == 0) {
1270 ehc->i.serror |= serror;
1271 ata_eh_analyze_serror(ap);
1272 } else if (rc != -EOPNOTSUPP)
1273 action |= ATA_EH_HARDRESET;
1274
1275 /* analyze NCQ failure */
1276 ata_eh_analyze_ncq_error(ap);
1277
1278 /* any real error trumps AC_ERR_OTHER */
1279 if (ehc->i.err_mask & ~AC_ERR_OTHER)
1280 ehc->i.err_mask &= ~AC_ERR_OTHER;
1281
1282 all_err_mask |= ehc->i.err_mask;
1283
1284 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1285 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
1286
1287 if (!(qc->flags & ATA_QCFLAG_FAILED))
1288 continue;
1289
1290 /* inherit upper level err_mask */
1291 qc->err_mask |= ehc->i.err_mask;
1292
1293 /* analyze TF */
1294 action |= ata_eh_analyze_tf(qc, &qc->result_tf);
1295
1296 /* DEV errors are probably spurious in case of ATA_BUS error */
1297 if (qc->err_mask & AC_ERR_ATA_BUS)
1298 qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA |
1299 AC_ERR_INVALID);
1300
1301 /* any real error trumps unknown error */
1302 if (qc->err_mask & ~AC_ERR_OTHER)
1303 qc->err_mask &= ~AC_ERR_OTHER;
1304
1305 /* SENSE_VALID trumps dev/unknown error and revalidation */
1306 if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
1307 qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);
1308 action &= ~ATA_EH_REVALIDATE;
1309 }
1310
1311 /* accumulate error info */
1312 failed_dev = qc->dev;
1313 all_err_mask |= qc->err_mask;
1314 if (qc->flags & ATA_QCFLAG_IO)
1315 is_io = 1;
1316 }
1317
1318 /* enforce default EH actions */
1319 if (ap->flags & ATA_FLAG_FROZEN ||
1320 all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT))
1321 action |= ATA_EH_SOFTRESET;
1322 else if (all_err_mask)
1323 action |= ATA_EH_REVALIDATE;
1324
1325 /* if we have offending qcs and the associated failed device */
1326 if (failed_dev) {
1327 /* speed down */
1328 action |= ata_eh_speed_down(failed_dev, is_io, all_err_mask);
1329
1330 /* perform per-dev EH action only on the offending device */
1331 ehc->i.dev_action[failed_dev->devno] |=
1332 action & ATA_EH_PERDEV_MASK;
1333 action &= ~ATA_EH_PERDEV_MASK;
1334 }
1335
1336 /* record autopsy result */
1337 ehc->i.dev = failed_dev;
1338 ehc->i.action = action;
1339
1340 DPRINTK("EXIT\n");
1341}
1342
1343/**
1344 * ata_eh_report - report error handling to user
 1345 * @ap: ATA port on which EH is in progress
1346 *
1347 * Report EH to user.
1348 *
1349 * LOCKING:
1350 * None.
1351 */
1352static void ata_eh_report(struct ata_port *ap)
1353{
1354 struct ata_eh_context *ehc = &ap->eh_context;
1355 const char *frozen, *desc;
1356 int tag, nr_failed = 0;
1357
1358 desc = NULL;
1359 if (ehc->i.desc[0] != '\0')
1360 desc = ehc->i.desc;
1361
1362 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1363 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
1364
1365 if (!(qc->flags & ATA_QCFLAG_FAILED))
1366 continue;
1367 if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask)
1368 continue;
1369
1370 nr_failed++;
1371 }
1372
1373 if (!nr_failed && !ehc->i.err_mask)
1374 return;
1375
1376 frozen = "";
1377 if (ap->flags & ATA_FLAG_FROZEN)
1378 frozen = " frozen";
1379
1380 if (ehc->i.dev) {
1381 ata_dev_printk(ehc->i.dev, KERN_ERR, "exception Emask 0x%x "
1382 "SAct 0x%x SErr 0x%x action 0x%x%s\n",
1383 ehc->i.err_mask, ap->sactive, ehc->i.serror,
1384 ehc->i.action, frozen);
1385 if (desc)
1386 ata_dev_printk(ehc->i.dev, KERN_ERR, "(%s)\n", desc);
1387 } else {
1388 ata_port_printk(ap, KERN_ERR, "exception Emask 0x%x "
1389 "SAct 0x%x SErr 0x%x action 0x%x%s\n",
1390 ehc->i.err_mask, ap->sactive, ehc->i.serror,
1391 ehc->i.action, frozen);
1392 if (desc)
1393 ata_port_printk(ap, KERN_ERR, "(%s)\n", desc);
1394 }
1395
1396 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1397 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
1398
1399 if (!(qc->flags & ATA_QCFLAG_FAILED) || !qc->err_mask)
1400 continue;
1401
1402 ata_dev_printk(qc->dev, KERN_ERR, "tag %d cmd 0x%x "
1403 "Emask 0x%x stat 0x%x err 0x%x (%s)\n",
1404 qc->tag, qc->tf.command, qc->err_mask,
1405 qc->result_tf.command, qc->result_tf.feature,
1406 ata_err_string(qc->err_mask));
1407 }
1408}
1409
1410static int ata_do_reset(struct ata_port *ap, ata_reset_fn_t reset,
1411 unsigned int *classes)
1412{
1413 int i, rc;
1414
1415 for (i = 0; i < ATA_MAX_DEVICES; i++)
1416 classes[i] = ATA_DEV_UNKNOWN;
1417
1418 rc = reset(ap, classes);
1419 if (rc)
1420 return rc;
1421
 1422 /* If any class isn't ATA_DEV_UNKNOWN, consider classification
 1423 * complete and convert all ATA_DEV_UNKNOWN to
1424 * ATA_DEV_NONE.
1425 */
1426 for (i = 0; i < ATA_MAX_DEVICES; i++)
1427 if (classes[i] != ATA_DEV_UNKNOWN)
1428 break;
1429
1430 if (i < ATA_MAX_DEVICES)
1431 for (i = 0; i < ATA_MAX_DEVICES; i++)
1432 if (classes[i] == ATA_DEV_UNKNOWN)
1433 classes[i] = ATA_DEV_NONE;
1434
1435 return 0;
1436}
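
A brief worked example of the class fix-up above (values illustrative):

/* If reset() reports { ATA_DEV_ATA, ATA_DEV_UNKNOWN }, classification
 * is considered complete and the result becomes
 * { ATA_DEV_ATA, ATA_DEV_NONE }.  If both slots come back
 * ATA_DEV_UNKNOWN, nothing was classified and the array is left
 * untouched for a follow-up reset to sort out.
 */
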
1437
1438static int ata_eh_followup_srst_needed(int rc, int classify,
1439 const unsigned int *classes)
1440{
1441 if (rc == -EAGAIN)
1442 return 1;
1443 if (rc != 0)
1444 return 0;
1445 if (classify && classes[0] == ATA_DEV_UNKNOWN)
1446 return 1;
1447 return 0;
1448}
1449
1450static int ata_eh_reset(struct ata_port *ap, int classify,
1451 ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
1452 ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
1453{
1454 struct ata_eh_context *ehc = &ap->eh_context;
1455 unsigned int *classes = ehc->classes;
1456 int tries = ATA_EH_RESET_TRIES;
1457 int verbose = !(ap->flags & ATA_FLAG_LOADING);
1458 unsigned int action;
1459 ata_reset_fn_t reset;
1460 int i, did_followup_srst, rc;
1461
1462 /* Determine which reset to use and record in ehc->i.action.
1463 * prereset() may examine and modify it.
1464 */
1465 action = ehc->i.action;
1466 ehc->i.action &= ~ATA_EH_RESET_MASK;
1467 if (softreset && (!hardreset || (!sata_set_spd_needed(ap) &&
1468 !(action & ATA_EH_HARDRESET))))
1469 ehc->i.action |= ATA_EH_SOFTRESET;
1470 else
1471 ehc->i.action |= ATA_EH_HARDRESET;
1472
1473 if (prereset) {
1474 rc = prereset(ap);
1475 if (rc) {
1476 ata_port_printk(ap, KERN_ERR,
1477 "prereset failed (errno=%d)\n", rc);
1478 return rc;
1479 }
1480 }
1481
1482 /* prereset() might have modified ehc->i.action */
1483 if (ehc->i.action & ATA_EH_HARDRESET)
1484 reset = hardreset;
1485 else if (ehc->i.action & ATA_EH_SOFTRESET)
1486 reset = softreset;
1487 else {
1488 /* prereset told us not to reset, bang classes and return */
1489 for (i = 0; i < ATA_MAX_DEVICES; i++)
1490 classes[i] = ATA_DEV_NONE;
1491 return 0;
1492 }
1493
1494 /* did prereset() screw up? if so, fix up to avoid oopsing */
1495 if (!reset) {
1496 ata_port_printk(ap, KERN_ERR, "BUG: prereset() requested "
1497 "invalid reset type\n");
1498 if (softreset)
1499 reset = softreset;
1500 else
1501 reset = hardreset;
1502 }
1503
1504 retry:
1505 /* shut up during boot probing */
1506 if (verbose)
1507 ata_port_printk(ap, KERN_INFO, "%s resetting port\n",
1508 reset == softreset ? "soft" : "hard");
1509
1510 /* reset */
1511 ata_eh_about_to_do(ap, NULL, ATA_EH_RESET_MASK);
1512 ehc->i.flags |= ATA_EHI_DID_RESET;
1513
1514 rc = ata_do_reset(ap, reset, classes);
1515
1516 did_followup_srst = 0;
1517 if (reset == hardreset &&
1518 ata_eh_followup_srst_needed(rc, classify, classes)) {
1519 /* okay, let's do follow-up softreset */
1520 did_followup_srst = 1;
1521 reset = softreset;
1522
1523 if (!reset) {
1524 ata_port_printk(ap, KERN_ERR,
1525 "follow-up softreset required "
1526 "but no softreset avaliable\n");
1527 return -EINVAL;
1528 }
1529
1530 ata_eh_about_to_do(ap, NULL, ATA_EH_RESET_MASK);
1531 rc = ata_do_reset(ap, reset, classes);
1532
1533 if (rc == 0 && classify &&
1534 classes[0] == ATA_DEV_UNKNOWN) {
1535 ata_port_printk(ap, KERN_ERR,
1536 "classification failed\n");
1537 return -EINVAL;
1538 }
1539 }
1540
1541 if (rc && --tries) {
1542 const char *type;
1543
1544 if (reset == softreset) {
1545 if (did_followup_srst)
1546 type = "follow-up soft";
1547 else
1548 type = "soft";
1549 } else
1550 type = "hard";
1551
1552 ata_port_printk(ap, KERN_WARNING,
1553 "%sreset failed, retrying in 5 secs\n", type);
1554 ssleep(5);
1555
1556 if (reset == hardreset)
1557 sata_down_spd_limit(ap);
1558 if (hardreset)
1559 reset = hardreset;
1560 goto retry;
1561 }
1562
1563 if (rc == 0) {
1564 /* After the reset, the device state is PIO 0 and the
1565 * controller state is undefined. Record the mode.
1566 */
1567 for (i = 0; i < ATA_MAX_DEVICES; i++)
1568 ap->device[i].pio_mode = XFER_PIO_0;
1569
1570 if (postreset)
1571 postreset(ap, classes);
1572
1573 /* reset successful, schedule revalidation */
1574 ata_eh_done(ap, NULL, ATA_EH_RESET_MASK);
1575 ehc->i.action |= ATA_EH_REVALIDATE;
1576 }
1577
1578 return rc;
1579}
1580
1581static int ata_eh_revalidate_and_attach(struct ata_port *ap,
1582 struct ata_device **r_failed_dev)
1583{
1584 struct ata_eh_context *ehc = &ap->eh_context;
1585 struct ata_device *dev;
1586 unsigned long flags;
1587 int i, rc = 0;
1588
1589 DPRINTK("ENTER\n");
1590
1591 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1592 unsigned int action;
1593
1594 dev = &ap->device[i];
1595 action = ehc->i.action | ehc->i.dev_action[dev->devno];
1596
1597 if (action & ATA_EH_REVALIDATE && ata_dev_enabled(dev)) {
1598 if (ata_port_offline(ap)) {
1599 rc = -EIO;
1600 break;
1601 }
1602
1603 ata_eh_about_to_do(ap, dev, ATA_EH_REVALIDATE);
1604 rc = ata_dev_revalidate(dev,
1605 ehc->i.flags & ATA_EHI_DID_RESET);
1606 if (rc)
1607 break;
1608
1609 ata_eh_done(ap, dev, ATA_EH_REVALIDATE);
1610
1611 /* schedule the scsi_rescan_device() here */
1612 queue_work(ata_aux_wq, &(ap->scsi_rescan_task));
1613 } else if (dev->class == ATA_DEV_UNKNOWN &&
1614 ehc->tries[dev->devno] &&
1615 ata_class_enabled(ehc->classes[dev->devno])) {
1616 dev->class = ehc->classes[dev->devno];
1617
1618 rc = ata_dev_read_id(dev, &dev->class, 1, dev->id);
1619 if (rc == 0)
1620 rc = ata_dev_configure(dev, 1);
1621
1622 if (rc) {
1623 dev->class = ATA_DEV_UNKNOWN;
1624 break;
1625 }
1626
1627 spin_lock_irqsave(ap->lock, flags);
1628 ap->flags |= ATA_FLAG_SCSI_HOTPLUG;
1629 spin_unlock_irqrestore(ap->lock, flags);
1630 }
1631 }
1632
1633 if (rc)
1634 *r_failed_dev = dev;
1635
1636 DPRINTK("EXIT\n");
1637 return rc;
1638}
1639
1640static int ata_port_nr_enabled(struct ata_port *ap)
1641{
1642 int i, cnt = 0;
1643
1644 for (i = 0; i < ATA_MAX_DEVICES; i++)
1645 if (ata_dev_enabled(&ap->device[i]))
1646 cnt++;
1647 return cnt;
1648}
1649
1650static int ata_port_nr_vacant(struct ata_port *ap)
1651{
1652 int i, cnt = 0;
1653
1654 for (i = 0; i < ATA_MAX_DEVICES; i++)
1655 if (ap->device[i].class == ATA_DEV_UNKNOWN)
1656 cnt++;
1657 return cnt;
1658}
1659
1660static int ata_eh_skip_recovery(struct ata_port *ap)
1661{
1662 struct ata_eh_context *ehc = &ap->eh_context;
1663 int i;
1664
1665 if (ap->flags & ATA_FLAG_FROZEN || ata_port_nr_enabled(ap))
1666 return 0;
1667
1668 /* skip if class codes for all vacant slots are ATA_DEV_NONE */
1669 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1670 struct ata_device *dev = &ap->device[i];
1671
1672 if (dev->class == ATA_DEV_UNKNOWN &&
1673 ehc->classes[dev->devno] != ATA_DEV_NONE)
1674 return 0;
1675 }
1676
1677 return 1;
1678}
1679
1680/**
1681 * ata_eh_recover - recover host port after error
1682 * @ap: host port to recover
1683 * @prereset: prereset method (can be NULL)
1684 * @softreset: softreset method (can be NULL)
1685 * @hardreset: hardreset method (can be NULL)
1686 * @postreset: postreset method (can be NULL)
1687 *
1688 * This is the alpha and omega, eum and yang, heart and soul of
1689 * libata exception handling. On entry, actions required to
1690 * recover the port and hotplug requests are recorded in
1691 * eh_context. This function executes all the operations with
 1692 * appropriate retries and fallbacks to resurrect failed
1693 * devices, detach goners and greet newcomers.
1694 *
1695 * LOCKING:
1696 * Kernel thread context (may sleep).
1697 *
1698 * RETURNS:
1699 * 0 on success, -errno on failure.
1700 */
1701static int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
1702 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
1703 ata_postreset_fn_t postreset)
1704{
1705 struct ata_eh_context *ehc = &ap->eh_context;
1706 struct ata_device *dev;
1707 int down_xfermask, i, rc;
1708
1709 DPRINTK("ENTER\n");
1710
1711 /* prep for recovery */
1712 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1713 dev = &ap->device[i];
1714
1715 ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
1716
1717 /* process hotplug request */
1718 if (dev->flags & ATA_DFLAG_DETACH)
1719 ata_eh_detach_dev(dev);
1720
1721 if (!ata_dev_enabled(dev) &&
1722 ((ehc->i.probe_mask & (1 << dev->devno)) &&
1723 !(ehc->did_probe_mask & (1 << dev->devno)))) {
1724 ata_eh_detach_dev(dev);
1725 ata_dev_init(dev);
1726 ehc->did_probe_mask |= (1 << dev->devno);
1727 ehc->i.action |= ATA_EH_SOFTRESET;
1728 }
1729 }
1730
1731 retry:
1732 down_xfermask = 0;
1733 rc = 0;
1734
1735 /* if UNLOADING, finish immediately */
1736 if (ap->flags & ATA_FLAG_UNLOADING)
1737 goto out;
1738
1739 /* skip EH if possible. */
1740 if (ata_eh_skip_recovery(ap))
1741 ehc->i.action = 0;
1742
1743 for (i = 0; i < ATA_MAX_DEVICES; i++)
1744 ehc->classes[i] = ATA_DEV_UNKNOWN;
1745
1746 /* reset */
1747 if (ehc->i.action & ATA_EH_RESET_MASK) {
1748 ata_eh_freeze_port(ap);
1749
1750 rc = ata_eh_reset(ap, ata_port_nr_vacant(ap), prereset,
1751 softreset, hardreset, postreset);
1752 if (rc) {
1753 ata_port_printk(ap, KERN_ERR,
1754 "reset failed, giving up\n");
1755 goto out;
1756 }
1757
1758 ata_eh_thaw_port(ap);
1759 }
1760
1761 /* revalidate existing devices and attach new ones */
1762 rc = ata_eh_revalidate_and_attach(ap, &dev);
1763 if (rc)
1764 goto dev_fail;
1765
1766 /* configure transfer mode if the port has been reset */
1767 if (ehc->i.flags & ATA_EHI_DID_RESET) {
1768 rc = ata_set_mode(ap, &dev);
1769 if (rc) {
1770 down_xfermask = 1;
1771 goto dev_fail;
1772 }
1773 }
1774
1775 goto out;
1776
1777 dev_fail:
1778 switch (rc) {
1779 case -ENODEV:
1780 /* device missing, schedule probing */
1781 ehc->i.probe_mask |= (1 << dev->devno);
1782 case -EINVAL:
1783 ehc->tries[dev->devno] = 0;
1784 break;
1785 case -EIO:
1786 sata_down_spd_limit(ap);
1787 default:
1788 ehc->tries[dev->devno]--;
1789 if (down_xfermask &&
1790 ata_down_xfermask_limit(dev, ehc->tries[dev->devno] == 1))
1791 ehc->tries[dev->devno] = 0;
1792 }
1793
1794 if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) {
1795 /* disable device if it has used up all its chances */
1796 ata_dev_disable(dev);
1797
1798 /* detach if offline */
1799 if (ata_port_offline(ap))
1800 ata_eh_detach_dev(dev);
1801
1802 /* probe if requested */
1803 if ((ehc->i.probe_mask & (1 << dev->devno)) &&
1804 !(ehc->did_probe_mask & (1 << dev->devno))) {
1805 ata_eh_detach_dev(dev);
1806 ata_dev_init(dev);
1807
1808 ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
1809 ehc->did_probe_mask |= (1 << dev->devno);
1810 ehc->i.action |= ATA_EH_SOFTRESET;
1811 }
1812 } else {
1813 /* soft didn't work? be haaaaard */
1814 if (ehc->i.flags & ATA_EHI_DID_RESET)
1815 ehc->i.action |= ATA_EH_HARDRESET;
1816 else
1817 ehc->i.action |= ATA_EH_SOFTRESET;
1818 }
1819
1820 if (ata_port_nr_enabled(ap)) {
1821 ata_port_printk(ap, KERN_WARNING, "failed to recover some "
1822 "devices, retrying in 5 secs\n");
1823 ssleep(5);
1824 } else {
1825 /* no device left, repeat fast */
1826 msleep(500);
1827 }
1828
1829 goto retry;
1830
1831 out:
1832 if (rc) {
1833 for (i = 0; i < ATA_MAX_DEVICES; i++)
1834 ata_dev_disable(&ap->device[i]);
1835 }
1836
1837 DPRINTK("EXIT, rc=%d\n", rc);
1838 return rc;
1839}
1840
1841/**
1842 * ata_eh_finish - finish up EH
1843 * @ap: host port to finish EH for
1844 *
1845 * Recovery is complete. Clean up EH states and retry or finish
1846 * failed qcs.
1847 *
1848 * LOCKING:
1849 * None.
1850 */
1851static void ata_eh_finish(struct ata_port *ap)
1852{
1853 int tag;
1854
1855 /* retry or finish qcs */
1856 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1857 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
1858
1859 if (!(qc->flags & ATA_QCFLAG_FAILED))
1860 continue;
1861
1862 if (qc->err_mask) {
1863 /* FIXME: Once EH migration is complete,
1864 * generate sense data in this function,
1865 * considering both err_mask and tf.
1866 */
1867 if (qc->err_mask & AC_ERR_INVALID)
1868 ata_eh_qc_complete(qc);
1869 else
1870 ata_eh_qc_retry(qc);
1871 } else {
1872 if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
1873 ata_eh_qc_complete(qc);
1874 } else {
1875 /* feed zero TF to sense generation */
1876 memset(&qc->result_tf, 0, sizeof(qc->result_tf));
1877 ata_eh_qc_retry(qc);
1878 }
1879 }
1880 }
1881}
1882
1883/**
1884 * ata_do_eh - do standard error handling
1885 * @ap: host port to handle error for
1886 * @prereset: prereset method (can be NULL)
1887 * @softreset: softreset method (can be NULL)
1888 * @hardreset: hardreset method (can be NULL)
1889 * @postreset: postreset method (can be NULL)
1890 *
1891 * Perform standard error handling sequence.
1892 *
1893 * LOCKING:
1894 * Kernel thread context (may sleep).
1895 */
1896void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
1897 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
1898 ata_postreset_fn_t postreset)
1899{
1900 if (!(ap->flags & ATA_FLAG_LOADING)) {
1901 ata_eh_autopsy(ap);
1902 ata_eh_report(ap);
1903 }
1904
1905 ata_eh_recover(ap, prereset, softreset, hardreset, postreset);
1906 ata_eh_finish(ap);
1907}
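
To close the file out, a hedged sketch (not part of this patch) of how an LLDD might wire the new EH entry point; ata_std_prereset(), ata_std_softreset(), sata_std_hardreset() and ata_std_postreset() are assumed to be the stock reset helpers exported by libata-core in this tree.

/* Hypothetical LLDD glue, assuming the stock reset helpers exist
 * under these names in this tree.
 */
static void example_error_handler(struct ata_port *ap)
{
	ata_do_eh(ap, ata_std_prereset, ata_std_softreset,
		  sata_std_hardreset, ata_std_postreset);
}

/* in the driver's struct ata_port_operations:
 *	.error_handler	= example_error_handler,
 *	.freeze		= ...,
 *	.thaw		= ...,
 */
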
diff --git a/drivers/scsi/libata-scsi.c b/drivers/scsi/libata-scsi.c
index f1e129b7c972..93d18a74c401 100644
--- a/drivers/scsi/libata-scsi.c
+++ b/drivers/scsi/libata-scsi.c
@@ -41,6 +41,7 @@
41#include <scsi/scsi_cmnd.h> 41#include <scsi/scsi_cmnd.h>
42#include <scsi/scsi_eh.h> 42#include <scsi/scsi_eh.h>
43#include <scsi/scsi_device.h> 43#include <scsi/scsi_device.h>
44#include <scsi/scsi_tcq.h>
44#include <scsi/scsi_transport.h> 45#include <scsi/scsi_transport.h>
45#include <linux/libata.h> 46#include <linux/libata.h>
46#include <linux/hdreg.h> 47#include <linux/hdreg.h>
@@ -51,10 +52,14 @@
51#define SECTOR_SIZE 512 52#define SECTOR_SIZE 512
52 53
53typedef unsigned int (*ata_xlat_func_t)(struct ata_queued_cmd *qc, const u8 *scsicmd); 54typedef unsigned int (*ata_xlat_func_t)(struct ata_queued_cmd *qc, const u8 *scsicmd);
54static struct ata_device * 55
55ata_scsi_find_dev(struct ata_port *ap, const struct scsi_device *scsidev); 56static struct ata_device * __ata_scsi_find_dev(struct ata_port *ap,
56static void ata_scsi_error(struct Scsi_Host *host); 57 const struct scsi_device *scsidev);
57enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd); 58static struct ata_device * ata_scsi_find_dev(struct ata_port *ap,
59 const struct scsi_device *scsidev);
60static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
61 unsigned int id, unsigned int lun);
62
58 63
59#define RW_RECOVERY_MPAGE 0x1 64#define RW_RECOVERY_MPAGE 0x1
60#define RW_RECOVERY_MPAGE_LEN 12 65#define RW_RECOVERY_MPAGE_LEN 12
@@ -102,6 +107,7 @@ static const u8 def_control_mpage[CONTROL_MPAGE_LEN] = {
102struct scsi_transport_template ata_scsi_transport_template = { 107struct scsi_transport_template ata_scsi_transport_template = {
103 .eh_strategy_handler = ata_scsi_error, 108 .eh_strategy_handler = ata_scsi_error,
104 .eh_timed_out = ata_scsi_timed_out, 109 .eh_timed_out = ata_scsi_timed_out,
110 .user_scan = ata_scsi_user_scan,
105}; 111};
106 112
107 113
@@ -304,7 +310,6 @@ int ata_scsi_ioctl(struct scsi_device *scsidev, int cmd, void __user *arg)
304 310
305/** 311/**
306 * ata_scsi_qc_new - acquire new ata_queued_cmd reference 312 * ata_scsi_qc_new - acquire new ata_queued_cmd reference
307 * @ap: ATA port to which the new command is attached
308 * @dev: ATA device to which the new command is attached 313 * @dev: ATA device to which the new command is attached
309 * @cmd: SCSI command that originated this ATA command 314 * @cmd: SCSI command that originated this ATA command
310 * @done: SCSI command completion function 315 * @done: SCSI command completion function
@@ -323,14 +328,13 @@ int ata_scsi_ioctl(struct scsi_device *scsidev, int cmd, void __user *arg)
323 * RETURNS: 328 * RETURNS:
324 * Command allocated, or %NULL if none available. 329 * Command allocated, or %NULL if none available.
325 */ 330 */
326struct ata_queued_cmd *ata_scsi_qc_new(struct ata_port *ap, 331struct ata_queued_cmd *ata_scsi_qc_new(struct ata_device *dev,
327 struct ata_device *dev,
328 struct scsi_cmnd *cmd, 332 struct scsi_cmnd *cmd,
329 void (*done)(struct scsi_cmnd *)) 333 void (*done)(struct scsi_cmnd *))
330{ 334{
331 struct ata_queued_cmd *qc; 335 struct ata_queued_cmd *qc;
332 336
333 qc = ata_qc_new_init(ap, dev); 337 qc = ata_qc_new_init(dev);
334 if (qc) { 338 if (qc) {
335 qc->scsicmd = cmd; 339 qc->scsicmd = cmd;
336 qc->scsidone = done; 340 qc->scsidone = done;
@@ -397,18 +401,18 @@ void ata_dump_status(unsigned id, struct ata_taskfile *tf)
397 401
398int ata_scsi_device_resume(struct scsi_device *sdev) 402int ata_scsi_device_resume(struct scsi_device *sdev)
399{ 403{
400 struct ata_port *ap = (struct ata_port *) &sdev->host->hostdata[0]; 404 struct ata_port *ap = ata_shost_to_port(sdev->host);
401 struct ata_device *dev = &ap->device[sdev->id]; 405 struct ata_device *dev = __ata_scsi_find_dev(ap, sdev);
402 406
403 return ata_device_resume(ap, dev); 407 return ata_device_resume(dev);
404} 408}
405 409
406int ata_scsi_device_suspend(struct scsi_device *sdev, pm_message_t state) 410int ata_scsi_device_suspend(struct scsi_device *sdev, pm_message_t state)
407{ 411{
408 struct ata_port *ap = (struct ata_port *) &sdev->host->hostdata[0]; 412 struct ata_port *ap = ata_shost_to_port(sdev->host);
409 struct ata_device *dev = &ap->device[sdev->id]; 413 struct ata_device *dev = __ata_scsi_find_dev(ap, sdev);
410 414
411 return ata_device_suspend(ap, dev, state); 415 return ata_device_suspend(dev, state);
412} 416}
413 417
414/** 418/**
@@ -419,6 +423,7 @@ int ata_scsi_device_suspend(struct scsi_device *sdev, pm_message_t state)
419 * @sk: the sense key we'll fill out 423 * @sk: the sense key we'll fill out
420 * @asc: the additional sense code we'll fill out 424 * @asc: the additional sense code we'll fill out
421 * @ascq: the additional sense code qualifier we'll fill out 425 * @ascq: the additional sense code qualifier we'll fill out
426 * @verbose: be verbose
422 * 427 *
423 * Converts an ATA error into a SCSI error. Fill out pointers to 428 * Converts an ATA error into a SCSI error. Fill out pointers to
424 * SK, ASC, and ASCQ bytes for later use in fixed or descriptor 429 * SK, ASC, and ASCQ bytes for later use in fixed or descriptor
@@ -428,7 +433,7 @@ int ata_scsi_device_suspend(struct scsi_device *sdev, pm_message_t state)
428 * spin_lock_irqsave(host_set lock) 433 * spin_lock_irqsave(host_set lock)
429 */ 434 */
430void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk, u8 *asc, 435void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk, u8 *asc,
431 u8 *ascq) 436 u8 *ascq, int verbose)
432{ 437{
433 int i; 438 int i;
434 439
@@ -493,8 +498,9 @@ void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk, u8 *asc,
493 } 498 }
494 } 499 }
495 /* No immediate match */ 500 /* No immediate match */
496 printk(KERN_WARNING "ata%u: no sense translation for " 501 if (verbose)
497 "error 0x%02x\n", id, drv_err); 502 printk(KERN_WARNING "ata%u: no sense translation for "
503 "error 0x%02x\n", id, drv_err);
498 } 504 }
499 505
500 /* Fall back to interpreting status bits */ 506 /* Fall back to interpreting status bits */
@@ -507,8 +513,9 @@ void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk, u8 *asc,
507 } 513 }
508 } 514 }
509 /* No error? Undecoded? */ 515 /* No error? Undecoded? */
510 printk(KERN_WARNING "ata%u: no sense translation for status: 0x%02x\n", 516 if (verbose)
511 id, drv_stat); 517 printk(KERN_WARNING "ata%u: no sense translation for "
518 "status: 0x%02x\n", id, drv_stat);
512 519
513 /* We need a sensible error return here, which is tricky, and one 520 /* We need a sensible error return here, which is tricky, and one
514 that won't cause people to do things like return a disk wrongly */ 521 that won't cause people to do things like return a disk wrongly */
@@ -517,9 +524,10 @@ void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk, u8 *asc,
517 *ascq = 0x00; 524 *ascq = 0x00;
518 525
519 translate_done: 526 translate_done:
520 printk(KERN_ERR "ata%u: translated ATA stat/err 0x%02x/%02x to " 527 if (verbose)
521 "SCSI SK/ASC/ASCQ 0x%x/%02x/%02x\n", id, drv_stat, drv_err, 528 printk(KERN_ERR "ata%u: translated ATA stat/err 0x%02x/%02x "
522 *sk, *asc, *ascq); 529 "to SCSI SK/ASC/ASCQ 0x%x/%02x/%02x\n",
530 id, drv_stat, drv_err, *sk, *asc, *ascq);
523 return; 531 return;
524} 532}
525 533
@@ -539,27 +547,23 @@ void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk, u8 *asc,
539void ata_gen_ata_desc_sense(struct ata_queued_cmd *qc) 547void ata_gen_ata_desc_sense(struct ata_queued_cmd *qc)
540{ 548{
541 struct scsi_cmnd *cmd = qc->scsicmd; 549 struct scsi_cmnd *cmd = qc->scsicmd;
542 struct ata_taskfile *tf = &qc->tf; 550 struct ata_taskfile *tf = &qc->result_tf;
543 unsigned char *sb = cmd->sense_buffer; 551 unsigned char *sb = cmd->sense_buffer;
544 unsigned char *desc = sb + 8; 552 unsigned char *desc = sb + 8;
553 int verbose = qc->ap->ops->error_handler == NULL;
545 554
546 memset(sb, 0, SCSI_SENSE_BUFFERSIZE); 555 memset(sb, 0, SCSI_SENSE_BUFFERSIZE);
547 556
548 cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION; 557 cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
549 558
550 /* 559 /*
551 * Read the controller registers.
552 */
553 WARN_ON(qc->ap->ops->tf_read == NULL);
554 qc->ap->ops->tf_read(qc->ap, tf);
555
556 /*
557 * Use ata_to_sense_error() to map status register bits 560 * Use ata_to_sense_error() to map status register bits
558 * onto sense key, asc & ascq. 561 * onto sense key, asc & ascq.
559 */ 562 */
560 if (tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) { 563 if (qc->err_mask ||
564 tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
561 ata_to_sense_error(qc->ap->id, tf->command, tf->feature, 565 ata_to_sense_error(qc->ap->id, tf->command, tf->feature,
562 &sb[1], &sb[2], &sb[3]); 566 &sb[1], &sb[2], &sb[3], verbose);
563 sb[1] &= 0x0f; 567 sb[1] &= 0x0f;
564 } 568 }
565 569
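
The other change here is the switch from qc->tf to qc->result_tf: sense generation no longer reads the shadow registers itself (hence the removed tf_read() call); instead the EH core snapshots them into qc->result_tf when a command fails or explicitly asks for it. The request side, as the pass-thru hunk further down does it:

	/* ask completion/EH to capture the device's registers into
	 * qc->result_tf so the sense generators can consume them */
	qc->flags |= ATA_QCFLAG_RESULT_TF;
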
@@ -615,26 +619,22 @@ void ata_gen_ata_desc_sense(struct ata_queued_cmd *qc)
615void ata_gen_fixed_sense(struct ata_queued_cmd *qc) 619void ata_gen_fixed_sense(struct ata_queued_cmd *qc)
616{ 620{
617 struct scsi_cmnd *cmd = qc->scsicmd; 621 struct scsi_cmnd *cmd = qc->scsicmd;
618 struct ata_taskfile *tf = &qc->tf; 622 struct ata_taskfile *tf = &qc->result_tf;
619 unsigned char *sb = cmd->sense_buffer; 623 unsigned char *sb = cmd->sense_buffer;
624 int verbose = qc->ap->ops->error_handler == NULL;
620 625
621 memset(sb, 0, SCSI_SENSE_BUFFERSIZE); 626 memset(sb, 0, SCSI_SENSE_BUFFERSIZE);
622 627
623 cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION; 628 cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
624 629
625 /* 630 /*
626 * Read the controller registers.
627 */
628 WARN_ON(qc->ap->ops->tf_read == NULL);
629 qc->ap->ops->tf_read(qc->ap, tf);
630
631 /*
632 * Use ata_to_sense_error() to map status register bits 631 * Use ata_to_sense_error() to map status register bits
633 * onto sense key, asc & ascq. 632 * onto sense key, asc & ascq.
634 */ 633 */
635 if (tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) { 634 if (qc->err_mask ||
635 tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
636 ata_to_sense_error(qc->ap->id, tf->command, tf->feature, 636 ata_to_sense_error(qc->ap->id, tf->command, tf->feature,
637 &sb[2], &sb[12], &sb[13]); 637 &sb[2], &sb[12], &sb[13], verbose);
638 sb[2] &= 0x0f; 638 sb[2] &= 0x0f;
639 } 639 }
640 640
@@ -677,7 +677,7 @@ static void ata_scsi_dev_config(struct scsi_device *sdev,
677 */ 677 */
678 max_sectors = ATA_MAX_SECTORS; 678 max_sectors = ATA_MAX_SECTORS;
679 if (dev->flags & ATA_DFLAG_LBA48) 679 if (dev->flags & ATA_DFLAG_LBA48)
680 max_sectors = 2048; 680 max_sectors = ATA_MAX_SECTORS_LBA48;
681 if (dev->max_sectors) 681 if (dev->max_sectors)
682 max_sectors = dev->max_sectors; 682 max_sectors = dev->max_sectors;
683 683
@@ -692,6 +692,14 @@ static void ata_scsi_dev_config(struct scsi_device *sdev,
692 request_queue_t *q = sdev->request_queue; 692 request_queue_t *q = sdev->request_queue;
693 blk_queue_max_hw_segments(q, q->max_hw_segments - 1); 693 blk_queue_max_hw_segments(q, q->max_hw_segments - 1);
694 } 694 }
695
696 if (dev->flags & ATA_DFLAG_NCQ) {
697 int depth;
698
699 depth = min(sdev->host->can_queue, ata_id_queue_depth(dev->id));
700 depth = min(ATA_MAX_QUEUE - 1, depth);
701 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, depth);
702 }
695} 703}
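
The depth picked here comes straight from IDENTIFY DEVICE: word 75, bits 4:0, advertise (queue depth - 1), which is what ata_id_queue_depth() decodes. The same clamp as a standalone sketch (hypothetical helper, for illustration only):

static int ncq_depth(const u16 *id, int host_can_queue)
{
	int depth = (id[75] & 0x1f) + 1;	/* device's advertised depth */

	depth = min(host_can_queue, depth);	/* host queue limit */
	return min(ATA_MAX_QUEUE - 1, depth);	/* one tag stays reserved */
}
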
696 704
697/** 705/**
@@ -708,152 +716,88 @@ static void ata_scsi_dev_config(struct scsi_device *sdev,
708 716
709int ata_scsi_slave_config(struct scsi_device *sdev) 717int ata_scsi_slave_config(struct scsi_device *sdev)
710{ 718{
719 struct ata_port *ap = ata_shost_to_port(sdev->host);
720 struct ata_device *dev = __ata_scsi_find_dev(ap, sdev);
721
711 ata_scsi_sdev_config(sdev); 722 ata_scsi_sdev_config(sdev);
712 723
713 blk_queue_max_phys_segments(sdev->request_queue, LIBATA_MAX_PRD); 724 blk_queue_max_phys_segments(sdev->request_queue, LIBATA_MAX_PRD);
714 725
715 if (sdev->id < ATA_MAX_DEVICES) { 726 if (dev)
716 struct ata_port *ap;
717 struct ata_device *dev;
718
719 ap = (struct ata_port *) &sdev->host->hostdata[0];
720 dev = &ap->device[sdev->id];
721
722 ata_scsi_dev_config(sdev, dev); 727 ata_scsi_dev_config(sdev, dev);
723 }
724 728
725 return 0; /* scsi layer doesn't check return value, sigh */ 729 return 0; /* scsi layer doesn't check return value, sigh */
726} 730}
727 731
728/** 732/**
729 * ata_scsi_timed_out - SCSI layer time out callback 733 * ata_scsi_slave_destroy - SCSI device is about to be destroyed
730 * @cmd: timed out SCSI command 734 * @sdev: SCSI device to be destroyed
731 * 735 *
732 * Handles SCSI layer timeout. We race with normal completion of 736 * @sdev is about to be destroyed for hot/warm unplugging. If
733 * the qc for @cmd. If the qc is already gone, we lose and let 737 * this unplugging was initiated by libata as indicated by NULL
734 * the scsi command finish (EH_HANDLED). Otherwise, the qc has 738 * dev->sdev, this function doesn't have to do anything.
735 * timed out and EH should be invoked. Prevent ata_qc_complete() 739 * Otherwise, SCSI layer initiated warm-unplug is in progress.
736 * from finishing it by setting EH_SCHEDULED and return 740 * Clear dev->sdev, schedule the device for ATA detach and invoke
737 * EH_NOT_HANDLED. 741 * EH.
738 * 742 *
739 * LOCKING: 743 * LOCKING:
740 * Called from timer context 744 * Defined by SCSI layer. We don't really care.
741 *
742 * RETURNS:
743 * EH_HANDLED or EH_NOT_HANDLED
744 */ 745 */
745enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd) 746void ata_scsi_slave_destroy(struct scsi_device *sdev)
746{ 747{
747 struct Scsi_Host *host = cmd->device->host; 748 struct ata_port *ap = ata_shost_to_port(sdev->host);
748 struct ata_port *ap = (struct ata_port *) &host->hostdata[0];
749 unsigned long flags; 749 unsigned long flags;
750 struct ata_queued_cmd *qc; 750 struct ata_device *dev;
751 enum scsi_eh_timer_return ret = EH_HANDLED;
752 751
753 DPRINTK("ENTER\n"); 752 if (!ap->ops->error_handler)
753 return;
754 754
755 spin_lock_irqsave(&ap->host_set->lock, flags); 755 spin_lock_irqsave(ap->lock, flags);
756 qc = ata_qc_from_tag(ap, ap->active_tag); 756 dev = __ata_scsi_find_dev(ap, sdev);
757 if (qc) { 757 if (dev && dev->sdev) {
758 WARN_ON(qc->scsicmd != cmd); 758 /* SCSI device already in CANCEL state, no need to offline it */
759 qc->flags |= ATA_QCFLAG_EH_SCHEDULED; 759 dev->sdev = NULL;
760 qc->err_mask |= AC_ERR_TIMEOUT; 760 dev->flags |= ATA_DFLAG_DETACH;
761 ret = EH_NOT_HANDLED; 761 ata_port_schedule_eh(ap);
762 } 762 }
763 spin_unlock_irqrestore(&ap->host_set->lock, flags); 763 spin_unlock_irqrestore(ap->lock, flags);
764
765 DPRINTK("EXIT, ret=%d\n", ret);
766 return ret;
767} 764}
768 765
769/** 766/**
770 * ata_scsi_error - SCSI layer error handler callback 767 * ata_scsi_change_queue_depth - SCSI callback for queue depth config
771 * @host: SCSI host on which error occurred 768 * @sdev: SCSI device to configure queue depth for
769 * @queue_depth: new queue depth
772 * 770 *
773 * Handles SCSI-layer-thrown error events. 771 * This is libata standard hostt->change_queue_depth callback.
 772 * SCSI will call into this callback when the user tries to set the
 773 * queue depth via sysfs.
774 * 774 *
775 * LOCKING: 775 * LOCKING:
776 * Inherited from SCSI layer (none, can sleep) 776 * SCSI layer (we don't care)
777 *
778 * RETURNS:
779 * Newly configured queue depth.
777 */ 780 */
778 781int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth)
779static void ata_scsi_error(struct Scsi_Host *host)
780{ 782{
781 struct ata_port *ap; 783 struct ata_port *ap = ata_shost_to_port(sdev->host);
782 unsigned long flags; 784 struct ata_device *dev;
783 785 int max_depth;
784 DPRINTK("ENTER\n");
785
786 ap = (struct ata_port *) &host->hostdata[0];
787
788 spin_lock_irqsave(&ap->host_set->lock, flags);
789 WARN_ON(ap->flags & ATA_FLAG_IN_EH);
790 ap->flags |= ATA_FLAG_IN_EH;
791 WARN_ON(ata_qc_from_tag(ap, ap->active_tag) == NULL);
792 spin_unlock_irqrestore(&ap->host_set->lock, flags);
793
794 ata_port_flush_task(ap);
795
796 ap->ops->eng_timeout(ap);
797
798 WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q));
799
800 scsi_eh_flush_done_q(&ap->eh_done_q);
801
802 spin_lock_irqsave(&ap->host_set->lock, flags);
803 ap->flags &= ~ATA_FLAG_IN_EH;
804 spin_unlock_irqrestore(&ap->host_set->lock, flags);
805
806 DPRINTK("EXIT\n");
807}
808
809static void ata_eh_scsidone(struct scsi_cmnd *scmd)
810{
811 /* nada */
812}
813
814static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
815{
816 struct ata_port *ap = qc->ap;
817 struct scsi_cmnd *scmd = qc->scsicmd;
818 unsigned long flags;
819 786
820 spin_lock_irqsave(&ap->host_set->lock, flags); 787 if (queue_depth < 1)
821 qc->scsidone = ata_eh_scsidone; 788 return sdev->queue_depth;
822 __ata_qc_complete(qc);
823 WARN_ON(ata_tag_valid(qc->tag));
824 spin_unlock_irqrestore(&ap->host_set->lock, flags);
825 789
826 scsi_eh_finish_cmd(scmd, &ap->eh_done_q); 790 dev = ata_scsi_find_dev(ap, sdev);
827} 791 if (!dev || !ata_dev_enabled(dev))
792 return sdev->queue_depth;
828 793
829/** 794 max_depth = min(sdev->host->can_queue, ata_id_queue_depth(dev->id));
830 * ata_eh_qc_complete - Complete an active ATA command from EH 795 max_depth = min(ATA_MAX_QUEUE - 1, max_depth);
831 * @qc: Command to complete 796 if (queue_depth > max_depth)
832 * 797 queue_depth = max_depth;
833 * Indicate to the mid and upper layers that an ATA command has
834 * completed. To be used from EH.
835 */
836void ata_eh_qc_complete(struct ata_queued_cmd *qc)
837{
838 struct scsi_cmnd *scmd = qc->scsicmd;
839 scmd->retries = scmd->allowed;
840 __ata_eh_qc_complete(qc);
841}
842 798
843/** 799 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, queue_depth);
844 * ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH 800 return queue_depth;
845 * @qc: Command to retry
846 *
847 * Indicate to the mid and upper layers that an ATA command
848 * should be retried. To be used from EH.
849 *
850 * SCSI midlayer limits the number of retries to scmd->allowed.
851 * This function might need to adjust scmd->retries for commands
852 * which get retried due to unrelated NCQ failures.
853 */
854void ata_eh_qc_retry(struct ata_queued_cmd *qc)
855{
856 __ata_eh_qc_complete(qc);
857} 801}
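
In practice this fires when the admin writes the sdev's queue_depth sysfs attribute, e.g. "echo 16 > /sys/block/sda/device/queue_depth" (path illustrative). Requests above the IDENTIFY-advertised depth or the host's can_queue are clamped rather than rejected, and the value actually applied is handed back as the return value.
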
858 802
859/** 803/**
@@ -891,7 +835,7 @@ static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc,
891 tf->nsect = 1; /* 1 sector, lba=0 */ 835 tf->nsect = 1; /* 1 sector, lba=0 */
892 836
893 if (qc->dev->flags & ATA_DFLAG_LBA) { 837 if (qc->dev->flags & ATA_DFLAG_LBA) {
894 qc->tf.flags |= ATA_TFLAG_LBA; 838 tf->flags |= ATA_TFLAG_LBA;
895 839
896 tf->lbah = 0x0; 840 tf->lbah = 0x0;
897 tf->lbam = 0x0; 841 tf->lbam = 0x0;
@@ -1195,6 +1139,7 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, const u8 *scsicm
1195 u64 block; 1139 u64 block;
1196 u32 n_block; 1140 u32 n_block;
1197 1141
1142 qc->flags |= ATA_QCFLAG_IO;
1198 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 1143 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1199 1144
1200 if (scsicmd[0] == WRITE_10 || scsicmd[0] == WRITE_6 || 1145 if (scsicmd[0] == WRITE_10 || scsicmd[0] == WRITE_6 ||
@@ -1241,7 +1186,36 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, const u8 *scsicm
1241 */ 1186 */
1242 goto nothing_to_do; 1187 goto nothing_to_do;
1243 1188
1244 if (dev->flags & ATA_DFLAG_LBA) { 1189 if ((dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ)) == ATA_DFLAG_NCQ) {
1190 /* yay, NCQ */
1191 if (!lba_48_ok(block, n_block))
1192 goto out_of_range;
1193
1194 tf->protocol = ATA_PROT_NCQ;
1195 tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
1196
1197 if (tf->flags & ATA_TFLAG_WRITE)
1198 tf->command = ATA_CMD_FPDMA_WRITE;
1199 else
1200 tf->command = ATA_CMD_FPDMA_READ;
1201
1202 qc->nsect = n_block;
1203
1204 tf->nsect = qc->tag << 3;
1205 tf->hob_feature = (n_block >> 8) & 0xff;
1206 tf->feature = n_block & 0xff;
1207
1208 tf->hob_lbah = (block >> 40) & 0xff;
1209 tf->hob_lbam = (block >> 32) & 0xff;
1210 tf->hob_lbal = (block >> 24) & 0xff;
1211 tf->lbah = (block >> 16) & 0xff;
1212 tf->lbam = (block >> 8) & 0xff;
1213 tf->lbal = block & 0xff;
1214
1215 tf->device = 1 << 6;
1216 if (tf->flags & ATA_TFLAG_FUA)
1217 tf->device |= 1 << 7;
1218 } else if (dev->flags & ATA_DFLAG_LBA) {
1245 tf->flags |= ATA_TFLAG_LBA; 1219 tf->flags |= ATA_TFLAG_LBA;
1246 1220
1247 if (lba_28_ok(block, n_block)) { 1221 if (lba_28_ok(block, n_block)) {
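
For reference, the packing the NCQ branch above performs, spelled out with concrete (purely illustrative) values -- tag 5, 8 sectors at LBA 0x1234:

	tf->command     = ATA_CMD_FPDMA_READ;
	tf->nsect       = 5 << 3;		/* tag in bits 7:3 of nsect */
	tf->feature     = 8 & 0xff;		/* sector count moves to feature */
	tf->hob_feature = (8 >> 8) & 0xff;
	tf->lbal        = 0x34;			/* 48-bit LBA across six registers */
	tf->lbam        = 0x12;
	tf->lbah        = 0x00;
	tf->device      = 1 << 6;		/* LBA mode; bit 7 would be FUA */
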
@@ -1332,6 +1306,17 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
1332 u8 *cdb = cmd->cmnd; 1306 u8 *cdb = cmd->cmnd;
1333 int need_sense = (qc->err_mask != 0); 1307 int need_sense = (qc->err_mask != 0);
1334 1308
1309 /* We snoop the SET_FEATURES - Write Cache ON/OFF command, and
1310 * schedule EH_REVALIDATE operation to update the IDENTIFY DEVICE
1311 * cache
1312 */
1313 if (!need_sense && (qc->tf.command == ATA_CMD_SET_FEATURES) &&
1314 ((qc->tf.feature == SETFEATURES_WC_ON) ||
1315 (qc->tf.feature == SETFEATURES_WC_OFF))) {
1316 qc->ap->eh_info.action |= ATA_EH_REVALIDATE;
1317 ata_port_schedule_eh(qc->ap);
1318 }
1319
1335 /* For ATA pass thru (SAT) commands, generate a sense block if 1320 /* For ATA pass thru (SAT) commands, generate a sense block if
1336 * user mandated it or if there's an error. Note that if we 1321 * user mandated it or if there's an error. Note that if we
1337 * generate because the user forced us to, a check condition 1322 * generate because the user forced us to, a check condition
@@ -1356,10 +1341,8 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
1356 } 1341 }
1357 } 1342 }
1358 1343
1359 if (need_sense) { 1344 if (need_sense && !qc->ap->ops->error_handler)
1360 /* The ata_gen_..._sense routines fill in tf */ 1345 ata_dump_status(qc->ap->id, &qc->result_tf);
1361 ata_dump_status(qc->ap->id, &qc->tf);
1362 }
1363 1346
1364 qc->scsidone(cmd); 1347 qc->scsidone(cmd);
1365 1348
@@ -1367,8 +1350,40 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
1367} 1350}
1368 1351
1369/** 1352/**
1353 * ata_scmd_need_defer - Check whether we need to defer scmd
1354 * @dev: ATA device to which the command is addressed
1355 * @is_io: Is the command IO (and thus possibly NCQ)?
1356 *
 1357 * NCQ and non-NCQ commands cannot run together. As the upper layer
1358 * only knows the queue depth, we are responsible for maintaining
1359 * exclusion. This function checks whether a new command can be
1360 * issued to @dev.
1361 *
1362 * LOCKING:
1363 * spin_lock_irqsave(host_set lock)
1364 *
1365 * RETURNS:
1366 * 1 if deferring is needed, 0 otherwise.
1367 */
1368static int ata_scmd_need_defer(struct ata_device *dev, int is_io)
1369{
1370 struct ata_port *ap = dev->ap;
1371
1372 if (!(dev->flags & ATA_DFLAG_NCQ))
1373 return 0;
1374
1375 if (is_io) {
1376 if (!ata_tag_valid(ap->active_tag))
1377 return 0;
1378 } else {
1379 if (!ata_tag_valid(ap->active_tag) && !ap->sactive)
1380 return 0;
1381 }
1382 return 1;
1383}
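
The rule the function implements, in tabular form (active_tag tracks an in-flight non-NCQ command, sactive the set of in-flight NCQ tags):

/*
 *  incoming command  non-NCQ in flight  NCQ in flight  verdict
 *  ----------------  -----------------  -------------  -------
 *  IO (NCQ-able)     yes                -              defer
 *  IO (NCQ-able)     no                 either         issue
 *  non-IO            yes                -              defer
 *  non-IO            no                 yes            defer
 *  non-IO            no                 no             issue
 */
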
1384
1385/**
1370 * ata_scsi_translate - Translate then issue SCSI command to ATA device 1386 * ata_scsi_translate - Translate then issue SCSI command to ATA device
1371 * @ap: ATA port to which the command is addressed
1372 * @dev: ATA device to which the command is addressed 1387 * @dev: ATA device to which the command is addressed
1373 * @cmd: SCSI command to execute 1388 * @cmd: SCSI command to execute
1374 * @done: SCSI command completion function 1389 * @done: SCSI command completion function
@@ -1389,19 +1404,25 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
1389 * 1404 *
1390 * LOCKING: 1405 * LOCKING:
1391 * spin_lock_irqsave(host_set lock) 1406 * spin_lock_irqsave(host_set lock)
1407 *
1408 * RETURNS:
1409 * 0 on success, SCSI_ML_QUEUE_DEVICE_BUSY if the command
1410 * needs to be deferred.
1392 */ 1411 */
1393 1412static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd,
1394static void ata_scsi_translate(struct ata_port *ap, struct ata_device *dev,
1395 struct scsi_cmnd *cmd,
1396 void (*done)(struct scsi_cmnd *), 1413 void (*done)(struct scsi_cmnd *),
1397 ata_xlat_func_t xlat_func) 1414 ata_xlat_func_t xlat_func)
1398{ 1415{
1399 struct ata_queued_cmd *qc; 1416 struct ata_queued_cmd *qc;
1400 u8 *scsicmd = cmd->cmnd; 1417 u8 *scsicmd = cmd->cmnd;
1418 int is_io = xlat_func == ata_scsi_rw_xlat;
1401 1419
1402 VPRINTK("ENTER\n"); 1420 VPRINTK("ENTER\n");
1403 1421
1404 qc = ata_scsi_qc_new(ap, dev, cmd, done); 1422 if (unlikely(ata_scmd_need_defer(dev, is_io)))
1423 goto defer;
1424
1425 qc = ata_scsi_qc_new(dev, cmd, done);
1405 if (!qc) 1426 if (!qc)
1406 goto err_mem; 1427 goto err_mem;
1407 1428
@@ -1409,8 +1430,8 @@ static void ata_scsi_translate(struct ata_port *ap, struct ata_device *dev,
1409 if (cmd->sc_data_direction == DMA_FROM_DEVICE || 1430 if (cmd->sc_data_direction == DMA_FROM_DEVICE ||
1410 cmd->sc_data_direction == DMA_TO_DEVICE) { 1431 cmd->sc_data_direction == DMA_TO_DEVICE) {
1411 if (unlikely(cmd->request_bufflen < 1)) { 1432 if (unlikely(cmd->request_bufflen < 1)) {
1412 printk(KERN_WARNING "ata%u(%u): WARNING: zero len r/w req\n", 1433 ata_dev_printk(dev, KERN_WARNING,
1413 ap->id, dev->devno); 1434 "WARNING: zero len r/w req\n");
1414 goto err_did; 1435 goto err_did;
1415 } 1436 }
1416 1437
@@ -1432,13 +1453,13 @@ static void ata_scsi_translate(struct ata_port *ap, struct ata_device *dev,
1432 ata_qc_issue(qc); 1453 ata_qc_issue(qc);
1433 1454
1434 VPRINTK("EXIT\n"); 1455 VPRINTK("EXIT\n");
1435 return; 1456 return 0;
1436 1457
1437early_finish: 1458early_finish:
1438 ata_qc_free(qc); 1459 ata_qc_free(qc);
1439 done(cmd); 1460 done(cmd);
1440 DPRINTK("EXIT - early finish (good or error)\n"); 1461 DPRINTK("EXIT - early finish (good or error)\n");
1441 return; 1462 return 0;
1442 1463
1443err_did: 1464err_did:
1444 ata_qc_free(qc); 1465 ata_qc_free(qc);
@@ -1446,7 +1467,11 @@ err_mem:
1446 cmd->result = (DID_ERROR << 16); 1467 cmd->result = (DID_ERROR << 16);
1447 done(cmd); 1468 done(cmd);
1448 DPRINTK("EXIT - internal\n"); 1469 DPRINTK("EXIT - internal\n");
1449 return; 1470 return 0;
1471
1472defer:
1473 DPRINTK("EXIT - defer\n");
1474 return SCSI_MLQUEUE_DEVICE_BUSY;
1450} 1475}
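
Returning SCSI_MLQUEUE_DEVICE_BUSY tells the midlayer to plug the device's queue and re-issue the command once an outstanding one completes, which is exactly the retry behaviour the NCQ/non-NCQ exclusion needs; a zero return means libata owns the command from here on, including the early-finish and error paths, which complete it immediately.
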
1451 1476
1452/** 1477/**
@@ -1944,7 +1969,7 @@ unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf,
1944 return 0; 1969 return 0;
1945 1970
1946 dpofua = 0; 1971 dpofua = 0;
1947 if (ata_dev_supports_fua(args->id) && dev->flags & ATA_DFLAG_LBA48 && 1972 if (ata_dev_supports_fua(args->id) && (dev->flags & ATA_DFLAG_LBA48) &&
1948 (!(dev->flags & ATA_DFLAG_PIO) || dev->multi_count)) 1973 (!(dev->flags & ATA_DFLAG_PIO) || dev->multi_count))
1949 dpofua = 1 << 4; 1974 dpofua = 1 << 4;
1950 1975
@@ -2137,13 +2162,14 @@ void ata_scsi_badcmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *), u8
2137 2162
2138static void atapi_sense_complete(struct ata_queued_cmd *qc) 2163static void atapi_sense_complete(struct ata_queued_cmd *qc)
2139{ 2164{
2140 if (qc->err_mask && ((qc->err_mask & AC_ERR_DEV) == 0)) 2165 if (qc->err_mask && ((qc->err_mask & AC_ERR_DEV) == 0)) {
2141 /* FIXME: not quite right; we don't want the 2166 /* FIXME: not quite right; we don't want the
2142 * translation of taskfile registers into 2167 * translation of taskfile registers into
 2143 * sense descriptors, since that's only 2168 * sense descriptors, since that's only
2144 * correct for ATA, not ATAPI 2169 * correct for ATA, not ATAPI
2145 */ 2170 */
2146 ata_gen_ata_desc_sense(qc); 2171 ata_gen_ata_desc_sense(qc);
2172 }
2147 2173
2148 qc->scsidone(qc->scsicmd); 2174 qc->scsidone(qc->scsicmd);
2149 ata_qc_free(qc); 2175 ata_qc_free(qc);
@@ -2207,21 +2233,38 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc)
2207 2233
2208 VPRINTK("ENTER, err_mask 0x%X\n", err_mask); 2234 VPRINTK("ENTER, err_mask 0x%X\n", err_mask);
2209 2235
2236 /* handle completion from new EH */
2237 if (unlikely(qc->ap->ops->error_handler &&
2238 (err_mask || qc->flags & ATA_QCFLAG_SENSE_VALID))) {
2239
2240 if (!(qc->flags & ATA_QCFLAG_SENSE_VALID)) {
2241 /* FIXME: not quite right; we don't want the
2242 * translation of taskfile registers into a
2243 * sense descriptors, since that's only
2244 * correct for ATA, not ATAPI
2245 */
2246 ata_gen_ata_desc_sense(qc);
2247 }
2248
2249 qc->scsicmd->result = SAM_STAT_CHECK_CONDITION;
2250 qc->scsidone(cmd);
2251 ata_qc_free(qc);
2252 return;
2253 }
2254
2255 /* successful completion or old EH failure path */
2210 if (unlikely(err_mask & AC_ERR_DEV)) { 2256 if (unlikely(err_mask & AC_ERR_DEV)) {
2211 cmd->result = SAM_STAT_CHECK_CONDITION; 2257 cmd->result = SAM_STAT_CHECK_CONDITION;
2212 atapi_request_sense(qc); 2258 atapi_request_sense(qc);
2213 return; 2259 return;
2214 } 2260 } else if (unlikely(err_mask)) {
2215
2216 else if (unlikely(err_mask))
2217 /* FIXME: not quite right; we don't want the 2261 /* FIXME: not quite right; we don't want the
2218 * translation of taskfile registers into 2262 * translation of taskfile registers into
 2219 * sense descriptors, since that's only 2263 * sense descriptors, since that's only
2220 * correct for ATA, not ATAPI 2264 * correct for ATA, not ATAPI
2221 */ 2265 */
2222 ata_gen_ata_desc_sense(qc); 2266 ata_gen_ata_desc_sense(qc);
2223 2267 } else {
2224 else {
2225 u8 *scsicmd = cmd->cmnd; 2268 u8 *scsicmd = cmd->cmnd;
2226 2269
2227 if ((scsicmd[0] == INQUIRY) && ((scsicmd[1] & 0x03) == 0)) { 2270 if ((scsicmd[0] == INQUIRY) && ((scsicmd[1] & 0x03) == 0)) {
@@ -2303,11 +2346,9 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc, const u8 *scsicmd)
2303 qc->tf.protocol = ATA_PROT_ATAPI_DMA; 2346 qc->tf.protocol = ATA_PROT_ATAPI_DMA;
2304 qc->tf.feature |= ATAPI_PKT_DMA; 2347 qc->tf.feature |= ATAPI_PKT_DMA;
2305 2348
2306#ifdef ATAPI_ENABLE_DMADIR 2349 if (atapi_dmadir && (cmd->sc_data_direction != DMA_TO_DEVICE))
2307 /* some SATA bridges need us to indicate data xfer direction */ 2350 /* some SATA bridges need us to indicate data xfer direction */
2308 if (cmd->sc_data_direction != DMA_TO_DEVICE)
2309 qc->tf.feature |= ATAPI_DMADIR; 2351 qc->tf.feature |= ATAPI_DMADIR;
2310#endif
2311 } 2352 }
2312 2353
2313 qc->nbytes = cmd->request_bufflen; 2354 qc->nbytes = cmd->request_bufflen;
@@ -2315,6 +2356,53 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc, const u8 *scsicmd)
2315 return 0; 2356 return 0;
2316} 2357}
2317 2358
2359static struct ata_device * ata_find_dev(struct ata_port *ap, int id)
2360{
2361 if (likely(id < ATA_MAX_DEVICES))
2362 return &ap->device[id];
2363 return NULL;
2364}
2365
2366static struct ata_device * __ata_scsi_find_dev(struct ata_port *ap,
2367 const struct scsi_device *scsidev)
2368{
2369 /* skip commands not addressed to targets we simulate */
2370 if (unlikely(scsidev->channel || scsidev->lun))
2371 return NULL;
2372
2373 return ata_find_dev(ap, scsidev->id);
2374}
2375
2376/**
2377 * ata_scsi_dev_enabled - determine if device is enabled
2378 * @dev: ATA device
2379 *
2380 * Determine if commands should be sent to the specified device.
2381 *
2382 * LOCKING:
2383 * spin_lock_irqsave(host_set lock)
2384 *
2385 * RETURNS:
2386 * 0 if commands are not allowed / 1 if commands are allowed
2387 */
2388
2389static int ata_scsi_dev_enabled(struct ata_device *dev)
2390{
2391 if (unlikely(!ata_dev_enabled(dev)))
2392 return 0;
2393
2394 if (!atapi_enabled || (dev->ap->flags & ATA_FLAG_NO_ATAPI)) {
2395 if (unlikely(dev->class == ATA_DEV_ATAPI)) {
2396 ata_dev_printk(dev, KERN_WARNING,
2397 "WARNING: ATAPI is %s, device ignored.\n",
2398 atapi_enabled ? "not supported with this driver" : "disabled");
2399 return 0;
2400 }
2401 }
2402
2403 return 1;
2404}
2405
2318/** 2406/**
2319 * ata_scsi_find_dev - lookup ata_device from scsi_cmnd 2407 * ata_scsi_find_dev - lookup ata_device from scsi_cmnd
2320 * @ap: ATA port to which the device is attached 2408 * @ap: ATA port to which the device is attached
@@ -2331,33 +2419,14 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc, const u8 *scsicmd)
2331 * RETURNS: 2419 * RETURNS:
2332 * Associated ATA device, or %NULL if not found. 2420 * Associated ATA device, or %NULL if not found.
2333 */ 2421 */
2334
2335static struct ata_device * 2422static struct ata_device *
2336ata_scsi_find_dev(struct ata_port *ap, const struct scsi_device *scsidev) 2423ata_scsi_find_dev(struct ata_port *ap, const struct scsi_device *scsidev)
2337{ 2424{
2338 struct ata_device *dev; 2425 struct ata_device *dev = __ata_scsi_find_dev(ap, scsidev);
2339 2426
2340 /* skip commands not addressed to targets we simulate */ 2427 if (unlikely(!dev || !ata_scsi_dev_enabled(dev)))
2341 if (likely(scsidev->id < ATA_MAX_DEVICES))
2342 dev = &ap->device[scsidev->id];
2343 else
2344 return NULL; 2428 return NULL;
2345 2429
2346 if (unlikely((scsidev->channel != 0) ||
2347 (scsidev->lun != 0)))
2348 return NULL;
2349
2350 if (unlikely(!ata_dev_present(dev)))
2351 return NULL;
2352
2353 if (!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) {
2354 if (unlikely(dev->class == ATA_DEV_ATAPI)) {
2355 printk(KERN_WARNING "ata%u(%u): WARNING: ATAPI is %s, device ignored.\n",
2356 ap->id, dev->devno, atapi_enabled ? "not supported with this driver" : "disabled");
2357 return NULL;
2358 }
2359 }
2360
2361 return dev; 2430 return dev;
2362} 2431}
2363 2432
@@ -2414,10 +2483,15 @@ ata_scsi_pass_thru(struct ata_queued_cmd *qc, const u8 *scsicmd)
2414{ 2483{
2415 struct ata_taskfile *tf = &(qc->tf); 2484 struct ata_taskfile *tf = &(qc->tf);
2416 struct scsi_cmnd *cmd = qc->scsicmd; 2485 struct scsi_cmnd *cmd = qc->scsicmd;
2486 struct ata_device *dev = qc->dev;
2417 2487
2418 if ((tf->protocol = ata_scsi_map_proto(scsicmd[1])) == ATA_PROT_UNKNOWN) 2488 if ((tf->protocol = ata_scsi_map_proto(scsicmd[1])) == ATA_PROT_UNKNOWN)
2419 goto invalid_fld; 2489 goto invalid_fld;
2420 2490
2491 /* We may not issue DMA commands if no DMA mode is set */
2492 if (tf->protocol == ATA_PROT_DMA && dev->dma_mode == 0)
2493 goto invalid_fld;
2494
2421 if (scsicmd[1] & 0xe0) 2495 if (scsicmd[1] & 0xe0)
2422 /* PIO multi not supported yet */ 2496 /* PIO multi not supported yet */
2423 goto invalid_fld; 2497 goto invalid_fld;
@@ -2502,6 +2576,9 @@ ata_scsi_pass_thru(struct ata_queued_cmd *qc, const u8 *scsicmd)
2502 */ 2576 */
2503 qc->nsect = cmd->request_bufflen / ATA_SECT_SIZE; 2577 qc->nsect = cmd->request_bufflen / ATA_SECT_SIZE;
2504 2578
2579 /* request result TF */
2580 qc->flags |= ATA_QCFLAG_RESULT_TF;
2581
2505 return 0; 2582 return 0;
2506 2583
2507 invalid_fld: 2584 invalid_fld:
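
The new dev->dma_mode check mainly affects SAT pass-thru initiators. A hedged sketch of an ATA PASS-THROUGH (12) CDB that would land here requesting the DMA protocol (SAT protocol code 6; layout per SAT, values illustrative) -- with the hunk above, such a command now fails with ILLEGAL REQUEST until a DMA mode has been configured:

	u8 cdb[12] = { 0 };

	cdb[0] = 0xa1;		/* ATA PASS-THROUGH (12) */
	cdb[1] = 6 << 1;	/* protocol 6 = DMA, multiple_count = 0 */
	cdb[2] = 0x0e;		/* T_DIR=in, BYT_BLOK=blocks, T_LENGTH=nsect */
	cdb[4] = 1;		/* sector count */
	cdb[9] = 0xc8;		/* ATA READ DMA */
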
@@ -2578,19 +2655,24 @@ static inline void ata_scsi_dump_cdb(struct ata_port *ap,
2578#endif 2655#endif
2579} 2656}
2580 2657
2581static inline void __ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *), 2658static inline int __ata_scsi_queuecmd(struct scsi_cmnd *cmd,
2582 struct ata_port *ap, struct ata_device *dev) 2659 void (*done)(struct scsi_cmnd *),
2660 struct ata_device *dev)
2583{ 2661{
2662 int rc = 0;
2663
2584 if (dev->class == ATA_DEV_ATA) { 2664 if (dev->class == ATA_DEV_ATA) {
2585 ata_xlat_func_t xlat_func = ata_get_xlat_func(dev, 2665 ata_xlat_func_t xlat_func = ata_get_xlat_func(dev,
2586 cmd->cmnd[0]); 2666 cmd->cmnd[0]);
2587 2667
2588 if (xlat_func) 2668 if (xlat_func)
2589 ata_scsi_translate(ap, dev, cmd, done, xlat_func); 2669 rc = ata_scsi_translate(dev, cmd, done, xlat_func);
2590 else 2670 else
2591 ata_scsi_simulate(ap, dev, cmd, done); 2671 ata_scsi_simulate(dev, cmd, done);
2592 } else 2672 } else
2593 ata_scsi_translate(ap, dev, cmd, done, atapi_xlat); 2673 rc = ata_scsi_translate(dev, cmd, done, atapi_xlat);
2674
2675 return rc;
2594} 2676}
2595 2677
2596/** 2678/**
@@ -2609,39 +2691,39 @@ static inline void __ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struc
2609 * Releases scsi-layer-held lock, and obtains host_set lock. 2691 * Releases scsi-layer-held lock, and obtains host_set lock.
2610 * 2692 *
2611 * RETURNS: 2693 * RETURNS:
2612 * Zero. 2694 * Return value from __ata_scsi_queuecmd() if @cmd can be queued,
2695 * 0 otherwise.
2613 */ 2696 */
2614
2615int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) 2697int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
2616{ 2698{
2617 struct ata_port *ap; 2699 struct ata_port *ap;
2618 struct ata_device *dev; 2700 struct ata_device *dev;
2619 struct scsi_device *scsidev = cmd->device; 2701 struct scsi_device *scsidev = cmd->device;
2620 struct Scsi_Host *shost = scsidev->host; 2702 struct Scsi_Host *shost = scsidev->host;
2703 int rc = 0;
2621 2704
2622 ap = (struct ata_port *) &shost->hostdata[0]; 2705 ap = ata_shost_to_port(shost);
2623 2706
2624 spin_unlock(shost->host_lock); 2707 spin_unlock(shost->host_lock);
2625 spin_lock(&ap->host_set->lock); 2708 spin_lock(ap->lock);
2626 2709
2627 ata_scsi_dump_cdb(ap, cmd); 2710 ata_scsi_dump_cdb(ap, cmd);
2628 2711
2629 dev = ata_scsi_find_dev(ap, scsidev); 2712 dev = ata_scsi_find_dev(ap, scsidev);
2630 if (likely(dev)) 2713 if (likely(dev))
2631 __ata_scsi_queuecmd(cmd, done, ap, dev); 2714 rc = __ata_scsi_queuecmd(cmd, done, dev);
2632 else { 2715 else {
2633 cmd->result = (DID_BAD_TARGET << 16); 2716 cmd->result = (DID_BAD_TARGET << 16);
2634 done(cmd); 2717 done(cmd);
2635 } 2718 }
2636 2719
2637 spin_unlock(&ap->host_set->lock); 2720 spin_unlock(ap->lock);
2638 spin_lock(shost->host_lock); 2721 spin_lock(shost->host_lock);
2639 return 0; 2722 return rc;
2640} 2723}
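
The unlock/lock shuffle survives the conversion: the midlayer enters queuecommand holding shost->host_lock, but everything here must serialize on libata's own ap->lock (with the new EH this is a pointer, typically into the host_set, rather than a direct &ap->host_set->lock dereference), so the host lock is dropped before the port lock is taken and the order is reversed on the way out.
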
2641 2724
2642/** 2725/**
2643 * ata_scsi_simulate - simulate SCSI command on ATA device 2726 * ata_scsi_simulate - simulate SCSI command on ATA device
2644 * @ap: port the device is connected to
2645 * @dev: the target device 2727 * @dev: the target device
2646 * @cmd: SCSI command being sent to device. 2728 * @cmd: SCSI command being sent to device.
2647 * @done: SCSI command completion function. 2729 * @done: SCSI command completion function.
@@ -2653,14 +2735,12 @@ int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
2653 * spin_lock_irqsave(host_set lock) 2735 * spin_lock_irqsave(host_set lock)
2654 */ 2736 */
2655 2737
2656void ata_scsi_simulate(struct ata_port *ap, struct ata_device *dev, 2738void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd,
2657 struct scsi_cmnd *cmd,
2658 void (*done)(struct scsi_cmnd *)) 2739 void (*done)(struct scsi_cmnd *))
2659{ 2740{
2660 struct ata_scsi_args args; 2741 struct ata_scsi_args args;
2661 const u8 *scsicmd = cmd->cmnd; 2742 const u8 *scsicmd = cmd->cmnd;
2662 2743
2663 args.ap = ap;
2664 args.dev = dev; 2744 args.dev = dev;
2665 args.id = dev->id; 2745 args.id = dev->id;
2666 args.cmd = cmd; 2746 args.cmd = cmd;
@@ -2732,17 +2812,241 @@ void ata_scsi_simulate(struct ata_port *ap, struct ata_device *dev,
2732 2812
2733void ata_scsi_scan_host(struct ata_port *ap) 2813void ata_scsi_scan_host(struct ata_port *ap)
2734{ 2814{
2735 struct ata_device *dev;
2736 unsigned int i; 2815 unsigned int i;
2737 2816
2738 if (ap->flags & ATA_FLAG_PORT_DISABLED) 2817 if (ap->flags & ATA_FLAG_DISABLED)
2739 return; 2818 return;
2740 2819
2741 for (i = 0; i < ATA_MAX_DEVICES; i++) { 2820 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2742 dev = &ap->device[i]; 2821 struct ata_device *dev = &ap->device[i];
2822 struct scsi_device *sdev;
2743 2823
2744 if (ata_dev_present(dev)) 2824 if (!ata_dev_enabled(dev) || dev->sdev)
2745 scsi_scan_target(&ap->host->shost_gendev, 0, i, 0, 0); 2825 continue;
2826
2827 sdev = __scsi_add_device(ap->host, 0, i, 0, NULL);
2828 if (!IS_ERR(sdev)) {
2829 dev->sdev = sdev;
2830 scsi_device_put(sdev);
2831 }
2832 }
2833}
2834
2835/**
2836 * ata_scsi_offline_dev - offline attached SCSI device
2837 * @dev: ATA device to offline attached SCSI device for
2838 *
 2839 * This function is called from ata_eh_hotplug() and is responsible
2840 * for taking the SCSI device attached to @dev offline. This
 2841 * function is called with the host_set lock held, which protects dev->sdev
2842 * against clearing.
2843 *
2844 * LOCKING:
2845 * spin_lock_irqsave(host_set lock)
2846 *
2847 * RETURNS:
2848 * 1 if attached SCSI device exists, 0 otherwise.
2849 */
2850int ata_scsi_offline_dev(struct ata_device *dev)
2851{
2852 if (dev->sdev) {
2853 scsi_device_set_state(dev->sdev, SDEV_OFFLINE);
2854 return 1;
2746 } 2855 }
2856 return 0;
2747} 2857}
2748 2858
2859/**
2860 * ata_scsi_remove_dev - remove attached SCSI device
2861 * @dev: ATA device to remove attached SCSI device for
2862 *
 2863 * This function is called from ata_eh_scsi_hotplug() and is
2864 * responsible for removing the SCSI device attached to @dev.
2865 *
2866 * LOCKING:
2867 * Kernel thread context (may sleep).
2868 */
2869static void ata_scsi_remove_dev(struct ata_device *dev)
2870{
2871 struct ata_port *ap = dev->ap;
2872 struct scsi_device *sdev;
2873 unsigned long flags;
2874
2875 /* Alas, we need to grab scan_mutex to ensure SCSI device
2876 * state doesn't change underneath us and thus
2877 * scsi_device_get() always succeeds. The mutex locking can
2878 * be removed if there is __scsi_device_get() interface which
2879 * increments reference counts regardless of device state.
2880 */
2881 mutex_lock(&ap->host->scan_mutex);
2882 spin_lock_irqsave(ap->lock, flags);
2883
2884 /* clearing dev->sdev is protected by host_set lock */
2885 sdev = dev->sdev;
2886 dev->sdev = NULL;
2887
2888 if (sdev) {
2889 /* If user initiated unplug races with us, sdev can go
2890 * away underneath us after the host_set lock and
2891 * scan_mutex are released. Hold onto it.
2892 */
2893 if (scsi_device_get(sdev) == 0) {
2894 /* The following ensures the attached sdev is
2895 * offline on return from ata_scsi_offline_dev()
 2896 * regardless of whether it wins or loses the race
2897 * against this function.
2898 */
2899 scsi_device_set_state(sdev, SDEV_OFFLINE);
2900 } else {
2901 WARN_ON(1);
2902 sdev = NULL;
2903 }
2904 }
2905
2906 spin_unlock_irqrestore(ap->lock, flags);
2907 mutex_unlock(&ap->host->scan_mutex);
2908
2909 if (sdev) {
2910 ata_dev_printk(dev, KERN_INFO, "detaching (SCSI %s)\n",
2911 sdev->sdev_gendev.bus_id);
2912
2913 scsi_remove_device(sdev);
2914 scsi_device_put(sdev);
2915 }
2916}
2917
2918/**
2919 * ata_scsi_hotplug - SCSI part of hotplug
2920 * @data: Pointer to ATA port to perform SCSI hotplug on
2921 *
2922 * Perform SCSI part of hotplug. It's executed from a separate
2923 * workqueue after EH completes. This is necessary because SCSI
2924 * hot plugging requires working EH and hot unplugging is
 2925 * synchronized with hot plugging by a mutex.
2926 *
2927 * LOCKING:
2928 * Kernel thread context (may sleep).
2929 */
2930void ata_scsi_hotplug(void *data)
2931{
2932 struct ata_port *ap = data;
2933 int i;
2934
2935 if (ap->flags & ATA_FLAG_UNLOADING) {
2936 DPRINTK("ENTER/EXIT - unloading\n");
2937 return;
2938 }
2939
2940 DPRINTK("ENTER\n");
2941
2942 /* unplug detached devices */
2943 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2944 struct ata_device *dev = &ap->device[i];
2945 unsigned long flags;
2946
2947 if (!(dev->flags & ATA_DFLAG_DETACHED))
2948 continue;
2949
2950 spin_lock_irqsave(ap->lock, flags);
2951 dev->flags &= ~ATA_DFLAG_DETACHED;
2952 spin_unlock_irqrestore(ap->lock, flags);
2953
2954 ata_scsi_remove_dev(dev);
2955 }
2956
2957 /* scan for new ones */
2958 ata_scsi_scan_host(ap);
2959
2960 /* If we scanned while EH was in progress, scan would have
2961 * failed silently. Requeue if there are enabled but
2962 * unattached devices.
2963 */
2964 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2965 struct ata_device *dev = &ap->device[i];
2966 if (ata_dev_enabled(dev) && !dev->sdev) {
2967 queue_delayed_work(ata_aux_wq, &ap->hotplug_task, HZ);
2968 break;
2969 }
2970 }
2971
2972 DPRINTK("EXIT\n");
2973}
2974
2975/**
2976 * ata_scsi_user_scan - indication for user-initiated bus scan
2977 * @shost: SCSI host to scan
2978 * @channel: Channel to scan
2979 * @id: ID to scan
2980 * @lun: LUN to scan
2981 *
 2982 * This function is called when the user explicitly requests a bus
 2983 * scan. Set the probe pending flag and invoke EH.
2984 *
2985 * LOCKING:
2986 * SCSI layer (we don't care)
2987 *
2988 * RETURNS:
2989 * Zero.
2990 */
2991static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
2992 unsigned int id, unsigned int lun)
2993{
2994 struct ata_port *ap = ata_shost_to_port(shost);
2995 unsigned long flags;
2996 int rc = 0;
2997
2998 if (!ap->ops->error_handler)
2999 return -EOPNOTSUPP;
3000
3001 if ((channel != SCAN_WILD_CARD && channel != 0) ||
3002 (lun != SCAN_WILD_CARD && lun != 0))
3003 return -EINVAL;
3004
3005 spin_lock_irqsave(ap->lock, flags);
3006
3007 if (id == SCAN_WILD_CARD) {
3008 ap->eh_info.probe_mask |= (1 << ATA_MAX_DEVICES) - 1;
3009 ap->eh_info.action |= ATA_EH_SOFTRESET;
3010 } else {
3011 struct ata_device *dev = ata_find_dev(ap, id);
3012
3013 if (dev) {
3014 ap->eh_info.probe_mask |= 1 << dev->devno;
3015 ap->eh_info.action |= ATA_EH_SOFTRESET;
3016 } else
3017 rc = -EINVAL;
3018 }
3019
3020 if (rc == 0)
3021 ata_port_schedule_eh(ap);
3022
3023 spin_unlock_irqrestore(ap->lock, flags);
3024
3025 return rc;
3026}
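
This is the handler behind the SCSI host's sysfs scan attribute -- e.g. "echo '- - -' > /sys/class/scsi_host/host0/scan" (wildcards for channel/id/lun) ends up here -- so on a new-EH port a user-requested scan becomes a probe mask plus soft reset executed by EH instead of a direct SCSI scan.
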
3027
3028/**
3029 * ata_scsi_dev_rescan - initiate scsi_rescan_device()
3030 * @data: Pointer to ATA port to perform scsi_rescan_device()
3031 *
3032 * After ATA pass thru (SAT) commands are executed successfully,
 3033 * libata needs to propagate the changes to the SCSI layer. This
3034 * function must be executed from ata_aux_wq such that sdev
3035 * attach/detach don't race with rescan.
3036 *
3037 * LOCKING:
3038 * Kernel thread context (may sleep).
3039 */
3040void ata_scsi_dev_rescan(void *data)
3041{
3042 struct ata_port *ap = data;
3043 struct ata_device *dev;
3044 unsigned int i;
3045
3046 for (i = 0; i < ATA_MAX_DEVICES; i++) {
3047 dev = &ap->device[i];
3048
3049 if (ata_dev_enabled(dev) && dev->sdev)
3050 scsi_rescan_device(&(dev->sdev->sdev_gendev));
3051 }
3052}
diff --git a/drivers/scsi/libata.h b/drivers/scsi/libata.h
index bac8cbae06fe..bdd488897096 100644
--- a/drivers/scsi/libata.h
+++ b/drivers/scsi/libata.h
@@ -29,10 +29,9 @@
29#define __LIBATA_H__ 29#define __LIBATA_H__
30 30
31#define DRV_NAME "libata" 31#define DRV_NAME "libata"
32#define DRV_VERSION "1.20" /* must be exactly four chars */ 32#define DRV_VERSION "1.30" /* must be exactly four chars */
33 33
34struct ata_scsi_args { 34struct ata_scsi_args {
35 struct ata_port *ap;
36 struct ata_device *dev; 35 struct ata_device *dev;
37 u16 *id; 36 u16 *id;
38 struct scsi_cmnd *cmd; 37 struct scsi_cmnd *cmd;
@@ -40,18 +39,32 @@ struct ata_scsi_args {
40}; 39};
41 40
42/* libata-core.c */ 41/* libata-core.c */
42extern struct workqueue_struct *ata_aux_wq;
43extern int atapi_enabled; 43extern int atapi_enabled;
44extern int atapi_dmadir;
44extern int libata_fua; 45extern int libata_fua;
45extern struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap, 46extern struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev);
46 struct ata_device *dev);
47extern int ata_rwcmd_protocol(struct ata_queued_cmd *qc); 47extern int ata_rwcmd_protocol(struct ata_queued_cmd *qc);
48extern void ata_dev_disable(struct ata_device *dev);
48extern void ata_port_flush_task(struct ata_port *ap); 49extern void ata_port_flush_task(struct ata_port *ap);
50extern unsigned ata_exec_internal(struct ata_device *dev,
51 struct ata_taskfile *tf, const u8 *cdb,
52 int dma_dir, void *buf, unsigned int buflen);
53extern int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
54 int post_reset, u16 *id);
55extern int ata_dev_configure(struct ata_device *dev, int print_info);
56extern int sata_down_spd_limit(struct ata_port *ap);
57extern int sata_set_spd_needed(struct ata_port *ap);
58extern int ata_down_xfermask_limit(struct ata_device *dev, int force_pio0);
59extern int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev);
49extern void ata_qc_free(struct ata_queued_cmd *qc); 60extern void ata_qc_free(struct ata_queued_cmd *qc);
50extern void ata_qc_issue(struct ata_queued_cmd *qc); 61extern void ata_qc_issue(struct ata_queued_cmd *qc);
62extern void __ata_qc_complete(struct ata_queued_cmd *qc);
51extern int ata_check_atapi_dma(struct ata_queued_cmd *qc); 63extern int ata_check_atapi_dma(struct ata_queued_cmd *qc);
52extern void ata_dev_select(struct ata_port *ap, unsigned int device, 64extern void ata_dev_select(struct ata_port *ap, unsigned int device,
53 unsigned int wait, unsigned int can_sleep); 65 unsigned int wait, unsigned int can_sleep);
54extern void swap_buf_le16(u16 *buf, unsigned int buf_words); 66extern void swap_buf_le16(u16 *buf, unsigned int buf_words);
67extern void ata_dev_init(struct ata_device *dev);
55extern int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg); 68extern int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg);
56extern int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg); 69extern int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg);
57 70
@@ -60,6 +73,8 @@ extern int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg);
60extern struct scsi_transport_template ata_scsi_transport_template; 73extern struct scsi_transport_template ata_scsi_transport_template;
61 74
62extern void ata_scsi_scan_host(struct ata_port *ap); 75extern void ata_scsi_scan_host(struct ata_port *ap);
76extern int ata_scsi_offline_dev(struct ata_device *dev);
77extern void ata_scsi_hotplug(void *data);
63extern unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf, 78extern unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf,
64 unsigned int buflen); 79 unsigned int buflen);
65 80
@@ -88,5 +103,13 @@ extern void ata_scsi_set_sense(struct scsi_cmnd *cmd,
88extern void ata_scsi_rbuf_fill(struct ata_scsi_args *args, 103extern void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
89 unsigned int (*actor) (struct ata_scsi_args *args, 104 unsigned int (*actor) (struct ata_scsi_args *args,
90 u8 *rbuf, unsigned int buflen)); 105 u8 *rbuf, unsigned int buflen));
106extern void ata_schedule_scsi_eh(struct Scsi_Host *shost);
107extern void ata_scsi_dev_rescan(void *data);
108
109/* libata-eh.c */
110extern enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd);
111extern void ata_scsi_error(struct Scsi_Host *host);
112extern void ata_port_wait_eh(struct ata_port *ap);
113extern void ata_qc_schedule_eh(struct ata_queued_cmd *qc);
91 114
92#endif /* __LIBATA_H__ */ 115#endif /* __LIBATA_H__ */
diff --git a/drivers/scsi/pdc_adma.c b/drivers/scsi/pdc_adma.c
index 5cda16cfacb0..7ebe8e03aa96 100644
--- a/drivers/scsi/pdc_adma.c
+++ b/drivers/scsi/pdc_adma.c
@@ -46,7 +46,7 @@
46#include <linux/libata.h> 46#include <linux/libata.h>
47 47
48#define DRV_NAME "pdc_adma" 48#define DRV_NAME "pdc_adma"
49#define DRV_VERSION "0.03" 49#define DRV_VERSION "0.04"
50 50
51/* macro to calculate base address for ATA regs */ 51/* macro to calculate base address for ATA regs */
52#define ADMA_ATA_REGS(base,port_no) ((base) + ((port_no) * 0x40)) 52#define ADMA_ATA_REGS(base,port_no) ((base) + ((port_no) * 0x40))
@@ -152,6 +152,7 @@ static struct scsi_host_template adma_ata_sht = {
152 .proc_name = DRV_NAME, 152 .proc_name = DRV_NAME,
153 .dma_boundary = ADMA_DMA_BOUNDARY, 153 .dma_boundary = ADMA_DMA_BOUNDARY,
154 .slave_configure = ata_scsi_slave_config, 154 .slave_configure = ata_scsi_slave_config,
155 .slave_destroy = ata_scsi_slave_destroy,
155 .bios_param = ata_std_bios_param, 156 .bios_param = ata_std_bios_param,
156}; 157};
157 158
@@ -167,6 +168,7 @@ static const struct ata_port_operations adma_ata_ops = {
167 .qc_prep = adma_qc_prep, 168 .qc_prep = adma_qc_prep,
168 .qc_issue = adma_qc_issue, 169 .qc_issue = adma_qc_issue,
169 .eng_timeout = adma_eng_timeout, 170 .eng_timeout = adma_eng_timeout,
171 .data_xfer = ata_mmio_data_xfer,
170 .irq_handler = adma_intr, 172 .irq_handler = adma_intr,
171 .irq_clear = adma_irq_clear, 173 .irq_clear = adma_irq_clear,
172 .port_start = adma_port_start, 174 .port_start = adma_port_start,
@@ -455,13 +457,13 @@ static inline unsigned int adma_intr_pkt(struct ata_host_set *host_set)
455 continue; 457 continue;
456 handled = 1; 458 handled = 1;
457 adma_enter_reg_mode(ap); 459 adma_enter_reg_mode(ap);
458 if (ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR)) 460 if (ap->flags & ATA_FLAG_DISABLED)
459 continue; 461 continue;
460 pp = ap->private_data; 462 pp = ap->private_data;
461 if (!pp || pp->state != adma_state_pkt) 463 if (!pp || pp->state != adma_state_pkt)
462 continue; 464 continue;
463 qc = ata_qc_from_tag(ap, ap->active_tag); 465 qc = ata_qc_from_tag(ap, ap->active_tag);
464 if (qc && (!(qc->tf.ctl & ATA_NIEN))) { 466 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
465 if ((status & (aPERR | aPSD | aUIRQ))) 467 if ((status & (aPERR | aPSD | aUIRQ)))
466 qc->err_mask |= AC_ERR_OTHER; 468 qc->err_mask |= AC_ERR_OTHER;
467 else if (pp->pkt[0] != cDONE) 469 else if (pp->pkt[0] != cDONE)
@@ -480,13 +482,13 @@ static inline unsigned int adma_intr_mmio(struct ata_host_set *host_set)
480 for (port_no = 0; port_no < host_set->n_ports; ++port_no) { 482 for (port_no = 0; port_no < host_set->n_ports; ++port_no) {
481 struct ata_port *ap; 483 struct ata_port *ap;
482 ap = host_set->ports[port_no]; 484 ap = host_set->ports[port_no];
483 if (ap && (!(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR)))) { 485 if (ap && (!(ap->flags & ATA_FLAG_DISABLED))) {
484 struct ata_queued_cmd *qc; 486 struct ata_queued_cmd *qc;
485 struct adma_port_priv *pp = ap->private_data; 487 struct adma_port_priv *pp = ap->private_data;
486 if (!pp || pp->state != adma_state_mmio) 488 if (!pp || pp->state != adma_state_mmio)
487 continue; 489 continue;
488 qc = ata_qc_from_tag(ap, ap->active_tag); 490 qc = ata_qc_from_tag(ap, ap->active_tag);
489 if (qc && (!(qc->tf.ctl & ATA_NIEN))) { 491 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
490 492
491 /* check main status, clearing INTRQ */ 493 /* check main status, clearing INTRQ */
492 u8 status = ata_check_status(ap); 494 u8 status = ata_check_status(ap);
diff --git a/drivers/scsi/sata_mv.c b/drivers/scsi/sata_mv.c
index f16f92a6ec0f..4a71578df3c1 100644
--- a/drivers/scsi/sata_mv.c
+++ b/drivers/scsi/sata_mv.c
@@ -93,7 +93,7 @@ enum {
93 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */ 93 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
94 MV_COMMON_FLAGS = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 94 MV_COMMON_FLAGS = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
95 ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO | 95 ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
96 ATA_FLAG_NO_ATAPI), 96 ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING),
97 MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE, 97 MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,
98 98
99 CRQB_FLAG_READ = (1 << 0), 99 CRQB_FLAG_READ = (1 << 0),
@@ -272,33 +272,33 @@ enum chip_type {
272 272
273/* Command ReQuest Block: 32B */ 273/* Command ReQuest Block: 32B */
274struct mv_crqb { 274struct mv_crqb {
275 u32 sg_addr; 275 __le32 sg_addr;
276 u32 sg_addr_hi; 276 __le32 sg_addr_hi;
277 u16 ctrl_flags; 277 __le16 ctrl_flags;
278 u16 ata_cmd[11]; 278 __le16 ata_cmd[11];
279}; 279};
280 280
281struct mv_crqb_iie { 281struct mv_crqb_iie {
282 u32 addr; 282 __le32 addr;
283 u32 addr_hi; 283 __le32 addr_hi;
284 u32 flags; 284 __le32 flags;
285 u32 len; 285 __le32 len;
286 u32 ata_cmd[4]; 286 __le32 ata_cmd[4];
287}; 287};
288 288
289/* Command ResPonse Block: 8B */ 289/* Command ResPonse Block: 8B */
290struct mv_crpb { 290struct mv_crpb {
291 u16 id; 291 __le16 id;
292 u16 flags; 292 __le16 flags;
293 u32 tmstmp; 293 __le32 tmstmp;
294}; 294};
295 295
296/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */ 296/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
297struct mv_sg { 297struct mv_sg {
298 u32 addr; 298 __le32 addr;
299 u32 flags_size; 299 __le32 flags_size;
300 u32 addr_hi; 300 __le32 addr_hi;
301 u32 reserved; 301 __le32 reserved;
302}; 302};
303 303
304struct mv_port_priv { 304struct mv_port_priv {
@@ -390,6 +390,7 @@ static struct scsi_host_template mv_sht = {
390 .proc_name = DRV_NAME, 390 .proc_name = DRV_NAME,
391 .dma_boundary = MV_DMA_BOUNDARY, 391 .dma_boundary = MV_DMA_BOUNDARY,
392 .slave_configure = ata_scsi_slave_config, 392 .slave_configure = ata_scsi_slave_config,
393 .slave_destroy = ata_scsi_slave_destroy,
393 .bios_param = ata_std_bios_param, 394 .bios_param = ata_std_bios_param,
394}; 395};
395 396
@@ -406,6 +407,7 @@ static const struct ata_port_operations mv5_ops = {
406 407
407 .qc_prep = mv_qc_prep, 408 .qc_prep = mv_qc_prep,
408 .qc_issue = mv_qc_issue, 409 .qc_issue = mv_qc_issue,
410 .data_xfer = ata_mmio_data_xfer,
409 411
410 .eng_timeout = mv_eng_timeout, 412 .eng_timeout = mv_eng_timeout,
411 413
@@ -433,6 +435,7 @@ static const struct ata_port_operations mv6_ops = {
433 435
434 .qc_prep = mv_qc_prep, 436 .qc_prep = mv_qc_prep,
435 .qc_issue = mv_qc_issue, 437 .qc_issue = mv_qc_issue,
438 .data_xfer = ata_mmio_data_xfer,
436 439
437 .eng_timeout = mv_eng_timeout, 440 .eng_timeout = mv_eng_timeout,
438 441
@@ -683,7 +686,7 @@ static void mv_stop_dma(struct ata_port *ap)
683 } 686 }
684 687
685 if (EDMA_EN & reg) { 688 if (EDMA_EN & reg) {
686 printk(KERN_ERR "ata%u: Unable to stop eDMA\n", ap->id); 689 ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
687 /* FIXME: Consider doing a reset here to recover */ 690 /* FIXME: Consider doing a reset here to recover */
688 } 691 }
689} 692}
@@ -1028,7 +1031,7 @@ static inline unsigned mv_inc_q_index(unsigned index)
1028 return (index + 1) & MV_MAX_Q_DEPTH_MASK; 1031 return (index + 1) & MV_MAX_Q_DEPTH_MASK;
1029} 1032}
1030 1033
1031static inline void mv_crqb_pack_cmd(u16 *cmdw, u8 data, u8 addr, unsigned last) 1034static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
1032{ 1035{
1033 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS | 1036 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
1034 (last ? CRQB_CMD_LAST : 0); 1037 (last ? CRQB_CMD_LAST : 0);
@@ -1051,7 +1054,7 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
1051{ 1054{
1052 struct ata_port *ap = qc->ap; 1055 struct ata_port *ap = qc->ap;
1053 struct mv_port_priv *pp = ap->private_data; 1056 struct mv_port_priv *pp = ap->private_data;
1054 u16 *cw; 1057 __le16 *cw;
1055 struct ata_taskfile *tf; 1058 struct ata_taskfile *tf;
1056 u16 flags = 0; 1059 u16 flags = 0;
1057 unsigned in_index; 1060 unsigned in_index;
@@ -1307,8 +1310,8 @@ static void mv_err_intr(struct ata_port *ap, int reset_allowed)
1307 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); 1310 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1308 1311
1309 if (EDMA_ERR_SERR & edma_err_cause) { 1312 if (EDMA_ERR_SERR & edma_err_cause) {
1310 serr = scr_read(ap, SCR_ERROR); 1313 sata_scr_read(ap, SCR_ERROR, &serr);
1311 scr_write_flush(ap, SCR_ERROR, serr); 1314 sata_scr_write_flush(ap, SCR_ERROR, serr);
1312 } 1315 }
1313 if (EDMA_ERR_SELF_DIS & edma_err_cause) { 1316 if (EDMA_ERR_SELF_DIS & edma_err_cause) {
1314 struct mv_port_priv *pp = ap->private_data; 1317 struct mv_port_priv *pp = ap->private_data;
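
The scr_read()/scr_write_flush() calls give way to the new sata_scr_read()/sata_scr_write_flush() API, which returns an error code and hands the register value back through a pointer so callers can cope with ports lacking SCR access. The pattern in isolation:

	u32 serr;

	if (sata_scr_read(ap, SCR_ERROR, &serr) == 0)
		sata_scr_write_flush(ap, SCR_ERROR, serr);	/* clear what we read */
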
@@ -1377,7 +1380,7 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
1377 /* Note that DEV_IRQ might happen spuriously during EDMA, 1380 /* Note that DEV_IRQ might happen spuriously during EDMA,
1378 * and should be ignored in such cases. 1381 * and should be ignored in such cases.
1379 * The cause of this is still under investigation. 1382 * The cause of this is still under investigation.
1380 */ 1383 */
1381 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) { 1384 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1382 /* EDMA: check for response queue interrupt */ 1385 /* EDMA: check for response queue interrupt */
1383 if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause) { 1386 if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause) {
@@ -1398,7 +1401,7 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
1398 } 1401 }
1399 } 1402 }
1400 1403
1401 if (ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR)) 1404 if (ap && (ap->flags & ATA_FLAG_DISABLED))
1402 continue; 1405 continue;
1403 1406
1404 err_mask = ac_err_mask(ata_status); 1407 err_mask = ac_err_mask(ata_status);
@@ -1419,7 +1422,7 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
1419 VPRINTK("port %u IRQ found for qc, " 1422 VPRINTK("port %u IRQ found for qc, "
1420 "ata_status 0x%x\n", port,ata_status); 1423 "ata_status 0x%x\n", port,ata_status);
1421 /* mark qc status appropriately */ 1424 /* mark qc status appropriately */
1422 if (!(qc->tf.ctl & ATA_NIEN)) { 1425 if (!(qc->tf.flags & ATA_TFLAG_POLLING)) {
1423 qc->err_mask |= err_mask; 1426 qc->err_mask |= err_mask;
1424 ata_qc_complete(qc); 1427 ata_qc_complete(qc);
1425 } 1428 }
@@ -1949,15 +1952,16 @@ static void __mv_phy_reset(struct ata_port *ap, int can_sleep)
1949 1952
1950 /* Issue COMRESET via SControl */ 1953 /* Issue COMRESET via SControl */
1951comreset_retry: 1954comreset_retry:
1952 scr_write_flush(ap, SCR_CONTROL, 0x301); 1955 sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
1953 __msleep(1, can_sleep); 1956 __msleep(1, can_sleep);
1954 1957
1955 scr_write_flush(ap, SCR_CONTROL, 0x300); 1958 sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
1956 __msleep(20, can_sleep); 1959 __msleep(20, can_sleep);
1957 1960
1958 timeout = jiffies + msecs_to_jiffies(200); 1961 timeout = jiffies + msecs_to_jiffies(200);
1959 do { 1962 do {
1960 sstatus = scr_read(ap, SCR_STATUS) & 0x3; 1963 sata_scr_read(ap, SCR_STATUS, &sstatus);
1964 sstatus &= 0x3;
1961 if ((sstatus == 3) || (sstatus == 0)) 1965 if ((sstatus == 3) || (sstatus == 0))
1962 break; 1966 break;
1963 1967
@@ -1974,11 +1978,12 @@ comreset_retry:
1974 "SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS), 1978 "SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
1975 mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL)); 1979 mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));
1976 1980
1977 if (sata_dev_present(ap)) { 1981 if (ata_port_online(ap)) {
1978 ata_port_probe(ap); 1982 ata_port_probe(ap);
1979 } else { 1983 } else {
1980 printk(KERN_INFO "ata%u: no device found (phy stat %08x)\n", 1984 sata_scr_read(ap, SCR_STATUS, &sstatus);
1981 ap->id, scr_read(ap, SCR_STATUS)); 1985 ata_port_printk(ap, KERN_INFO,
1986 "no device found (phy stat %08x)\n", sstatus);
1982 ata_port_disable(ap); 1987 ata_port_disable(ap);
1983 return; 1988 return;
1984 } 1989 }
@@ -2005,7 +2010,7 @@ comreset_retry:
2005 tf.nsect = readb((void __iomem *) ap->ioaddr.nsect_addr); 2010 tf.nsect = readb((void __iomem *) ap->ioaddr.nsect_addr);
2006 2011
2007 dev->class = ata_dev_classify(&tf); 2012 dev->class = ata_dev_classify(&tf);
2008 if (!ata_dev_present(dev)) { 2013 if (!ata_dev_enabled(dev)) {
2009 VPRINTK("Port disabled post-sig: No device present.\n"); 2014 VPRINTK("Port disabled post-sig: No device present.\n");
2010 ata_port_disable(ap); 2015 ata_port_disable(ap);
2011 } 2016 }
@@ -2037,7 +2042,7 @@ static void mv_eng_timeout(struct ata_port *ap)
2037 struct ata_queued_cmd *qc; 2042 struct ata_queued_cmd *qc;
2038 unsigned long flags; 2043 unsigned long flags;
2039 2044
2040 printk(KERN_ERR "ata%u: Entering mv_eng_timeout\n",ap->id); 2045 ata_port_printk(ap, KERN_ERR, "Entering mv_eng_timeout\n");
2041 DPRINTK("All regs @ start of eng_timeout\n"); 2046 DPRINTK("All regs @ start of eng_timeout\n");
2042 mv_dump_all_regs(ap->host_set->mmio_base, ap->port_no, 2047 mv_dump_all_regs(ap->host_set->mmio_base, ap->port_no,
2043 to_pci_dev(ap->host_set->dev)); 2048 to_pci_dev(ap->host_set->dev));
diff --git a/drivers/scsi/sata_nv.c b/drivers/scsi/sata_nv.c
index 9f553081b5e8..d18e7e0932ef 100644
--- a/drivers/scsi/sata_nv.c
+++ b/drivers/scsi/sata_nv.c
@@ -44,7 +44,7 @@
44#include <linux/libata.h> 44#include <linux/libata.h>
45 45
46#define DRV_NAME "sata_nv" 46#define DRV_NAME "sata_nv"
47#define DRV_VERSION "0.8" 47#define DRV_VERSION "0.9"
48 48
49enum { 49enum {
50 NV_PORTS = 2, 50 NV_PORTS = 2,
@@ -54,40 +54,25 @@ enum {
54 NV_PORT0_SCR_REG_OFFSET = 0x00, 54 NV_PORT0_SCR_REG_OFFSET = 0x00,
55 NV_PORT1_SCR_REG_OFFSET = 0x40, 55 NV_PORT1_SCR_REG_OFFSET = 0x40,
56 56
57 /* INT_STATUS/ENABLE */
57 NV_INT_STATUS = 0x10, 58 NV_INT_STATUS = 0x10,
58 NV_INT_STATUS_CK804 = 0x440,
59 NV_INT_STATUS_PDEV_INT = 0x01,
60 NV_INT_STATUS_PDEV_PM = 0x02,
61 NV_INT_STATUS_PDEV_ADDED = 0x04,
62 NV_INT_STATUS_PDEV_REMOVED = 0x08,
63 NV_INT_STATUS_SDEV_INT = 0x10,
64 NV_INT_STATUS_SDEV_PM = 0x20,
65 NV_INT_STATUS_SDEV_ADDED = 0x40,
66 NV_INT_STATUS_SDEV_REMOVED = 0x80,
67 NV_INT_STATUS_PDEV_HOTPLUG = (NV_INT_STATUS_PDEV_ADDED |
68 NV_INT_STATUS_PDEV_REMOVED),
69 NV_INT_STATUS_SDEV_HOTPLUG = (NV_INT_STATUS_SDEV_ADDED |
70 NV_INT_STATUS_SDEV_REMOVED),
71 NV_INT_STATUS_HOTPLUG = (NV_INT_STATUS_PDEV_HOTPLUG |
72 NV_INT_STATUS_SDEV_HOTPLUG),
73
74 NV_INT_ENABLE = 0x11, 59 NV_INT_ENABLE = 0x11,
60 NV_INT_STATUS_CK804 = 0x440,
75 NV_INT_ENABLE_CK804 = 0x441, 61 NV_INT_ENABLE_CK804 = 0x441,
76 NV_INT_ENABLE_PDEV_MASK = 0x01,
77 NV_INT_ENABLE_PDEV_PM = 0x02,
78 NV_INT_ENABLE_PDEV_ADDED = 0x04,
79 NV_INT_ENABLE_PDEV_REMOVED = 0x08,
80 NV_INT_ENABLE_SDEV_MASK = 0x10,
81 NV_INT_ENABLE_SDEV_PM = 0x20,
82 NV_INT_ENABLE_SDEV_ADDED = 0x40,
83 NV_INT_ENABLE_SDEV_REMOVED = 0x80,
84 NV_INT_ENABLE_PDEV_HOTPLUG = (NV_INT_ENABLE_PDEV_ADDED |
85 NV_INT_ENABLE_PDEV_REMOVED),
86 NV_INT_ENABLE_SDEV_HOTPLUG = (NV_INT_ENABLE_SDEV_ADDED |
87 NV_INT_ENABLE_SDEV_REMOVED),
88 NV_INT_ENABLE_HOTPLUG = (NV_INT_ENABLE_PDEV_HOTPLUG |
89 NV_INT_ENABLE_SDEV_HOTPLUG),
90 62
63 /* INT_STATUS/ENABLE bits */
64 NV_INT_DEV = 0x01,
65 NV_INT_PM = 0x02,
66 NV_INT_ADDED = 0x04,
67 NV_INT_REMOVED = 0x08,
68
69 NV_INT_PORT_SHIFT = 4, /* each port occupies 4 bits */
70
71 NV_INT_ALL = 0x0f,
72 NV_INT_MASK = NV_INT_DEV |
73 NV_INT_ADDED | NV_INT_REMOVED,
74
75 /* INT_CONFIG */
91 NV_INT_CONFIG = 0x12, 76 NV_INT_CONFIG = 0x12,
92 NV_INT_CONFIG_METHD = 0x01, // 0 = INT, 1 = SMI 77 NV_INT_CONFIG_METHD = 0x01, // 0 = INT, 1 = SMI
93 78
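The rewritten enum above is the heart of the sata_nv cleanup: the old per-device PDEV_*/SDEV_* constants collapse into one 4-bit group (DEV, PM, ADDED, REMOVED) that repeats at a 4-bit stride per port, so a single status byte covers both ports. A hedged user-space sketch of the decode loop this enables (constants copied from the hunk; the shift-per-port walk mirrors nv_do_interrupt() later in this diff):

    #include <stdint.h>
    #include <stdio.h>

    enum {
        NV_INT_DEV        = 0x01,
        NV_INT_PM         = 0x02,
        NV_INT_ADDED      = 0x04,
        NV_INT_REMOVED    = 0x08,
        NV_INT_PORT_SHIFT = 4,      /* each port occupies 4 bits */
        NV_INT_ALL        = 0x0f,
    };

    int main(void)
    {
        uint8_t irq_stat = 0x41;    /* port 0: DEV, port 1: ADDED */
        int port;

        for (port = 0; port < 2; port++) {
            uint8_t bits = irq_stat & NV_INT_ALL;

            printf("port %d:%s%s%s%s\n", port,
                   (bits & NV_INT_DEV)     ? " DEV"     : "",
                   (bits & NV_INT_PM)      ? " PM"      : "",
                   (bits & NV_INT_ADDED)   ? " ADDED"   : "",
                   (bits & NV_INT_REMOVED) ? " REMOVED" : "");
            irq_stat >>= NV_INT_PORT_SHIFT;
        }
        return 0;
    }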
@@ -97,23 +82,27 @@ enum {
97}; 82};
98 83
99static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); 84static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
100static irqreturn_t nv_interrupt (int irq, void *dev_instance, 85static void nv_ck804_host_stop(struct ata_host_set *host_set);
101 struct pt_regs *regs); 86static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance,
87 struct pt_regs *regs);
88static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance,
89 struct pt_regs *regs);
90static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance,
91 struct pt_regs *regs);
102static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg); 92static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg);
103static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); 93static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
104static void nv_host_stop (struct ata_host_set *host_set); 94
105static void nv_enable_hotplug(struct ata_probe_ent *probe_ent); 95static void nv_nf2_freeze(struct ata_port *ap);
106static void nv_disable_hotplug(struct ata_host_set *host_set); 96static void nv_nf2_thaw(struct ata_port *ap);
107static int nv_check_hotplug(struct ata_host_set *host_set); 97static void nv_ck804_freeze(struct ata_port *ap);
108static void nv_enable_hotplug_ck804(struct ata_probe_ent *probe_ent); 98static void nv_ck804_thaw(struct ata_port *ap);
109static void nv_disable_hotplug_ck804(struct ata_host_set *host_set); 99static void nv_error_handler(struct ata_port *ap);
110static int nv_check_hotplug_ck804(struct ata_host_set *host_set);
111 100
112enum nv_host_type 101enum nv_host_type
113{ 102{
114 GENERIC, 103 GENERIC,
115 NFORCE2, 104 NFORCE2,
116 NFORCE3, 105 NFORCE3 = NFORCE2, /* NF2 == NF3 as far as sata_nv is concerned */
117 CK804 106 CK804
118}; 107};
119 108
@@ -140,6 +129,16 @@ static const struct pci_device_id nv_pci_tbl[] = {
140 PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC }, 129 PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
141 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2, 130 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2,
142 PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC }, 131 PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
132 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA,
133 PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
134 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2,
135 PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
136 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3,
137 PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
138 { PCI_VENDOR_ID_NVIDIA, 0x045c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
139 { PCI_VENDOR_ID_NVIDIA, 0x045d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
140 { PCI_VENDOR_ID_NVIDIA, 0x045e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
141 { PCI_VENDOR_ID_NVIDIA, 0x045f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
143 { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, 142 { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
144 PCI_ANY_ID, PCI_ANY_ID, 143 PCI_ANY_ID, PCI_ANY_ID,
145 PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC }, 144 PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC },
@@ -149,46 +148,6 @@ static const struct pci_device_id nv_pci_tbl[] = {
149 { 0, } /* terminate list */ 148 { 0, } /* terminate list */
150}; 149};
151 150
152struct nv_host_desc
153{
154 enum nv_host_type host_type;
155 void (*enable_hotplug)(struct ata_probe_ent *probe_ent);
156 void (*disable_hotplug)(struct ata_host_set *host_set);
157 int (*check_hotplug)(struct ata_host_set *host_set);
158
159};
160static struct nv_host_desc nv_device_tbl[] = {
161 {
162 .host_type = GENERIC,
163 .enable_hotplug = NULL,
164 .disable_hotplug= NULL,
165 .check_hotplug = NULL,
166 },
167 {
168 .host_type = NFORCE2,
169 .enable_hotplug = nv_enable_hotplug,
170 .disable_hotplug= nv_disable_hotplug,
171 .check_hotplug = nv_check_hotplug,
172 },
173 {
174 .host_type = NFORCE3,
175 .enable_hotplug = nv_enable_hotplug,
176 .disable_hotplug= nv_disable_hotplug,
177 .check_hotplug = nv_check_hotplug,
178 },
179 { .host_type = CK804,
180 .enable_hotplug = nv_enable_hotplug_ck804,
181 .disable_hotplug= nv_disable_hotplug_ck804,
182 .check_hotplug = nv_check_hotplug_ck804,
183 },
184};
185
186struct nv_host
187{
188 struct nv_host_desc *host_desc;
189 unsigned long host_flags;
190};
191
192static struct pci_driver nv_pci_driver = { 151static struct pci_driver nv_pci_driver = {
193 .name = DRV_NAME, 152 .name = DRV_NAME,
194 .id_table = nv_pci_tbl, 153 .id_table = nv_pci_tbl,
@@ -210,51 +169,119 @@ static struct scsi_host_template nv_sht = {
210 .proc_name = DRV_NAME, 169 .proc_name = DRV_NAME,
211 .dma_boundary = ATA_DMA_BOUNDARY, 170 .dma_boundary = ATA_DMA_BOUNDARY,
212 .slave_configure = ata_scsi_slave_config, 171 .slave_configure = ata_scsi_slave_config,
172 .slave_destroy = ata_scsi_slave_destroy,
213 .bios_param = ata_std_bios_param, 173 .bios_param = ata_std_bios_param,
214}; 174};
215 175
216static const struct ata_port_operations nv_ops = { 176static const struct ata_port_operations nv_generic_ops = {
217 .port_disable = ata_port_disable, 177 .port_disable = ata_port_disable,
218 .tf_load = ata_tf_load, 178 .tf_load = ata_tf_load,
219 .tf_read = ata_tf_read, 179 .tf_read = ata_tf_read,
220 .exec_command = ata_exec_command, 180 .exec_command = ata_exec_command,
221 .check_status = ata_check_status, 181 .check_status = ata_check_status,
222 .dev_select = ata_std_dev_select, 182 .dev_select = ata_std_dev_select,
223 .phy_reset = sata_phy_reset,
224 .bmdma_setup = ata_bmdma_setup, 183 .bmdma_setup = ata_bmdma_setup,
225 .bmdma_start = ata_bmdma_start, 184 .bmdma_start = ata_bmdma_start,
226 .bmdma_stop = ata_bmdma_stop, 185 .bmdma_stop = ata_bmdma_stop,
227 .bmdma_status = ata_bmdma_status, 186 .bmdma_status = ata_bmdma_status,
228 .qc_prep = ata_qc_prep, 187 .qc_prep = ata_qc_prep,
229 .qc_issue = ata_qc_issue_prot, 188 .qc_issue = ata_qc_issue_prot,
230 .eng_timeout = ata_eng_timeout, 189 .freeze = ata_bmdma_freeze,
231 .irq_handler = nv_interrupt, 190 .thaw = ata_bmdma_thaw,
191 .error_handler = nv_error_handler,
192 .post_internal_cmd = ata_bmdma_post_internal_cmd,
193 .data_xfer = ata_pio_data_xfer,
194 .irq_handler = nv_generic_interrupt,
232 .irq_clear = ata_bmdma_irq_clear, 195 .irq_clear = ata_bmdma_irq_clear,
233 .scr_read = nv_scr_read, 196 .scr_read = nv_scr_read,
234 .scr_write = nv_scr_write, 197 .scr_write = nv_scr_write,
235 .port_start = ata_port_start, 198 .port_start = ata_port_start,
236 .port_stop = ata_port_stop, 199 .port_stop = ata_port_stop,
237 .host_stop = nv_host_stop, 200 .host_stop = ata_pci_host_stop,
238}; 201};
239 202
240/* FIXME: The hardware provides the necessary SATA PHY controls 203static const struct ata_port_operations nv_nf2_ops = {
241 * to support ATA_FLAG_SATA_RESET. However, it is currently 204 .port_disable = ata_port_disable,
242 * necessary to disable that flag, to solve misdetection problems. 205 .tf_load = ata_tf_load,
243 * See http://bugme.osdl.org/show_bug.cgi?id=3352 for more info. 206 .tf_read = ata_tf_read,
244 * 207 .exec_command = ata_exec_command,
245 * This problem really needs to be investigated further. But in the 208 .check_status = ata_check_status,
246 * meantime, we avoid ATA_FLAG_SATA_RESET to get people working. 209 .dev_select = ata_std_dev_select,
247 */ 210 .bmdma_setup = ata_bmdma_setup,
248static struct ata_port_info nv_port_info = { 211 .bmdma_start = ata_bmdma_start,
249 .sht = &nv_sht, 212 .bmdma_stop = ata_bmdma_stop,
250 .host_flags = ATA_FLAG_SATA | 213 .bmdma_status = ata_bmdma_status,
251 /* ATA_FLAG_SATA_RESET | */ 214 .qc_prep = ata_qc_prep,
252 ATA_FLAG_SRST | 215 .qc_issue = ata_qc_issue_prot,
253 ATA_FLAG_NO_LEGACY, 216 .freeze = nv_nf2_freeze,
254 .pio_mask = NV_PIO_MASK, 217 .thaw = nv_nf2_thaw,
255 .mwdma_mask = NV_MWDMA_MASK, 218 .error_handler = nv_error_handler,
256 .udma_mask = NV_UDMA_MASK, 219 .post_internal_cmd = ata_bmdma_post_internal_cmd,
257 .port_ops = &nv_ops, 220 .data_xfer = ata_pio_data_xfer,
221 .irq_handler = nv_nf2_interrupt,
222 .irq_clear = ata_bmdma_irq_clear,
223 .scr_read = nv_scr_read,
224 .scr_write = nv_scr_write,
225 .port_start = ata_port_start,
226 .port_stop = ata_port_stop,
227 .host_stop = ata_pci_host_stop,
228};
229
230static const struct ata_port_operations nv_ck804_ops = {
231 .port_disable = ata_port_disable,
232 .tf_load = ata_tf_load,
233 .tf_read = ata_tf_read,
234 .exec_command = ata_exec_command,
235 .check_status = ata_check_status,
236 .dev_select = ata_std_dev_select,
237 .bmdma_setup = ata_bmdma_setup,
238 .bmdma_start = ata_bmdma_start,
239 .bmdma_stop = ata_bmdma_stop,
240 .bmdma_status = ata_bmdma_status,
241 .qc_prep = ata_qc_prep,
242 .qc_issue = ata_qc_issue_prot,
243 .freeze = nv_ck804_freeze,
244 .thaw = nv_ck804_thaw,
245 .error_handler = nv_error_handler,
246 .post_internal_cmd = ata_bmdma_post_internal_cmd,
247 .data_xfer = ata_pio_data_xfer,
248 .irq_handler = nv_ck804_interrupt,
249 .irq_clear = ata_bmdma_irq_clear,
250 .scr_read = nv_scr_read,
251 .scr_write = nv_scr_write,
252 .port_start = ata_port_start,
253 .port_stop = ata_port_stop,
254 .host_stop = nv_ck804_host_stop,
255};
256
257static struct ata_port_info nv_port_info[] = {
258 /* generic */
259 {
260 .sht = &nv_sht,
261 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
262 .pio_mask = NV_PIO_MASK,
263 .mwdma_mask = NV_MWDMA_MASK,
264 .udma_mask = NV_UDMA_MASK,
265 .port_ops = &nv_generic_ops,
266 },
267 /* nforce2/3 */
268 {
269 .sht = &nv_sht,
270 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
271 .pio_mask = NV_PIO_MASK,
272 .mwdma_mask = NV_MWDMA_MASK,
273 .udma_mask = NV_UDMA_MASK,
274 .port_ops = &nv_nf2_ops,
275 },
276 /* ck804 */
277 {
278 .sht = &nv_sht,
279 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
280 .pio_mask = NV_PIO_MASK,
281 .mwdma_mask = NV_MWDMA_MASK,
282 .udma_mask = NV_UDMA_MASK,
283 .port_ops = &nv_ck804_ops,
284 },
258}; 285};
259 286
260MODULE_AUTHOR("NVIDIA"); 287MODULE_AUTHOR("NVIDIA");
@@ -263,11 +290,10 @@ MODULE_LICENSE("GPL");
263MODULE_DEVICE_TABLE(pci, nv_pci_tbl); 290MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
264MODULE_VERSION(DRV_VERSION); 291MODULE_VERSION(DRV_VERSION);
265 292
266static irqreturn_t nv_interrupt (int irq, void *dev_instance, 293static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance,
267 struct pt_regs *regs) 294 struct pt_regs *regs)
268{ 295{
269 struct ata_host_set *host_set = dev_instance; 296 struct ata_host_set *host_set = dev_instance;
270 struct nv_host *host = host_set->private_data;
271 unsigned int i; 297 unsigned int i;
272 unsigned int handled = 0; 298 unsigned int handled = 0;
273 unsigned long flags; 299 unsigned long flags;
@@ -279,11 +305,11 @@ static irqreturn_t nv_interrupt (int irq, void *dev_instance,
279 305
280 ap = host_set->ports[i]; 306 ap = host_set->ports[i];
281 if (ap && 307 if (ap &&
282 !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) { 308 !(ap->flags & ATA_FLAG_DISABLED)) {
283 struct ata_queued_cmd *qc; 309 struct ata_queued_cmd *qc;
284 310
285 qc = ata_qc_from_tag(ap, ap->active_tag); 311 qc = ata_qc_from_tag(ap, ap->active_tag);
286 if (qc && (!(qc->tf.ctl & ATA_NIEN))) 312 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
287 handled += ata_host_intr(ap, qc); 313 handled += ata_host_intr(ap, qc);
288 else 314 else
289 // No request pending? Clear interrupt status 315 // No request pending? Clear interrupt status
@@ -293,14 +319,88 @@ static irqreturn_t nv_interrupt (int irq, void *dev_instance,
293 319
294 } 320 }
295 321
296 if (host->host_desc->check_hotplug)
297 handled += host->host_desc->check_hotplug(host_set);
298
299 spin_unlock_irqrestore(&host_set->lock, flags); 322 spin_unlock_irqrestore(&host_set->lock, flags);
300 323
301 return IRQ_RETVAL(handled); 324 return IRQ_RETVAL(handled);
302} 325}
303 326
327static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
328{
329 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
330 int handled;
331
332 /* freeze if hotplugged */
333 if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
334 ata_port_freeze(ap);
335 return 1;
336 }
337
338 /* bail out if not our interrupt */
339 if (!(irq_stat & NV_INT_DEV))
340 return 0;
341
342 /* DEV interrupt w/ no active qc? */
343 if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
344 ata_check_status(ap);
345 return 1;
346 }
347
348 /* handle interrupt */
349 handled = ata_host_intr(ap, qc);
350 if (unlikely(!handled)) {
351 /* spurious, clear it */
352 ata_check_status(ap);
353 }
354
355 return 1;
356}
357
358static irqreturn_t nv_do_interrupt(struct ata_host_set *host_set, u8 irq_stat)
359{
360 int i, handled = 0;
361
362 for (i = 0; i < host_set->n_ports; i++) {
363 struct ata_port *ap = host_set->ports[i];
364
365 if (ap && !(ap->flags & ATA_FLAG_DISABLED))
366 handled += nv_host_intr(ap, irq_stat);
367
368 irq_stat >>= NV_INT_PORT_SHIFT;
369 }
370
371 return IRQ_RETVAL(handled);
372}
373
374static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance,
375 struct pt_regs *regs)
376{
377 struct ata_host_set *host_set = dev_instance;
378 u8 irq_stat;
379 irqreturn_t ret;
380
381 spin_lock(&host_set->lock);
382 irq_stat = inb(host_set->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
383 ret = nv_do_interrupt(host_set, irq_stat);
384 spin_unlock(&host_set->lock);
385
386 return ret;
387}
388
389static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance,
390 struct pt_regs *regs)
391{
392 struct ata_host_set *host_set = dev_instance;
393 u8 irq_stat;
394 irqreturn_t ret;
395
396 spin_lock(&host_set->lock);
397 irq_stat = readb(host_set->mmio_base + NV_INT_STATUS_CK804);
398 ret = nv_do_interrupt(host_set, irq_stat);
399 spin_unlock(&host_set->lock);
400
401 return ret;
402}
403
304static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg) 404static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg)
305{ 405{
306 if (sc_reg > SCR_CONTROL) 406 if (sc_reg > SCR_CONTROL)
@@ -317,23 +417,74 @@ static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
317 iowrite32(val, (void __iomem *)ap->ioaddr.scr_addr + (sc_reg * 4)); 417 iowrite32(val, (void __iomem *)ap->ioaddr.scr_addr + (sc_reg * 4));
318} 418}
319 419
320static void nv_host_stop (struct ata_host_set *host_set) 420static void nv_nf2_freeze(struct ata_port *ap)
321{ 421{
322 struct nv_host *host = host_set->private_data; 422 unsigned long scr_addr = ap->host_set->ports[0]->ioaddr.scr_addr;
423 int shift = ap->port_no * NV_INT_PORT_SHIFT;
424 u8 mask;
323 425
324 // Disable hotplug event interrupts. 426 mask = inb(scr_addr + NV_INT_ENABLE);
325 if (host->host_desc->disable_hotplug) 427 mask &= ~(NV_INT_ALL << shift);
326 host->host_desc->disable_hotplug(host_set); 428 outb(mask, scr_addr + NV_INT_ENABLE);
429}
327 430
328 kfree(host); 431static void nv_nf2_thaw(struct ata_port *ap)
432{
433 unsigned long scr_addr = ap->host_set->ports[0]->ioaddr.scr_addr;
434 int shift = ap->port_no * NV_INT_PORT_SHIFT;
435 u8 mask;
329 436
330 ata_pci_host_stop(host_set); 437 outb(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);
438
439 mask = inb(scr_addr + NV_INT_ENABLE);
440 mask |= (NV_INT_MASK << shift);
441 outb(mask, scr_addr + NV_INT_ENABLE);
442}
443
444static void nv_ck804_freeze(struct ata_port *ap)
445{
446 void __iomem *mmio_base = ap->host_set->mmio_base;
447 int shift = ap->port_no * NV_INT_PORT_SHIFT;
448 u8 mask;
449
450 mask = readb(mmio_base + NV_INT_ENABLE_CK804);
451 mask &= ~(NV_INT_ALL << shift);
452 writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
453}
454
455static void nv_ck804_thaw(struct ata_port *ap)
456{
457 void __iomem *mmio_base = ap->host_set->mmio_base;
458 int shift = ap->port_no * NV_INT_PORT_SHIFT;
459 u8 mask;
460
461 writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);
462
463 mask = readb(mmio_base + NV_INT_ENABLE_CK804);
464 mask |= (NV_INT_MASK << shift);
465 writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
466}
467
468static int nv_hardreset(struct ata_port *ap, unsigned int *class)
469{
470 unsigned int dummy;
471
472 /* SATA hardreset fails to retrieve proper device signature on
473 * some controllers. Don't classify on hardreset. For more
474 * info, see http://bugme.osdl.org/show_bug.cgi?id=3352
475 */
476 return sata_std_hardreset(ap, &dummy);
477}
478
479static void nv_error_handler(struct ata_port *ap)
480{
481 ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
482 nv_hardreset, ata_std_postreset);
331} 483}
332 484
333static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) 485static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
334{ 486{
335 static int printed_version = 0; 487 static int printed_version = 0;
336 struct nv_host *host;
337 struct ata_port_info *ppi; 488 struct ata_port_info *ppi;
338 struct ata_probe_ent *probe_ent; 489 struct ata_probe_ent *probe_ent;
339 int pci_dev_busy = 0; 490 int pci_dev_busy = 0;
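The freeze/thaw hooks above do the same mask arithmetic on both register banks (I/O ports for nf2, MMIO for ck804): freeze clears the port's entire 4-bit group in the interrupt-enable register, while thaw first acks any latched status and then re-enables NV_INT_MASK, which deliberately leaves NV_INT_PM disabled. A small sketch of just the mask math, reusing the constants from the enum hunk earlier (sketch only, not driver code):

    #include <stdint.h>
    #include <stdio.h>

    enum {
        NV_INT_PORT_SHIFT = 4,
        NV_INT_ALL        = 0x0f,
        NV_INT_MASK       = 0x01 | 0x04 | 0x08, /* DEV | ADDED | REMOVED */
    };

    static uint8_t freeze_mask(uint8_t enable, int port_no)
    {
        /* mask off every interrupt source for this port */
        return enable & ~(uint8_t)(NV_INT_ALL << (port_no * NV_INT_PORT_SHIFT));
    }

    static uint8_t thaw_mask(uint8_t enable, int port_no)
    {
        /* re-enable the usual sources (PM stays off) */
        return enable | (uint8_t)(NV_INT_MASK << (port_no * NV_INT_PORT_SHIFT));
    }

    int main(void)
    {
        uint8_t en = 0xdd;

        en = freeze_mask(en, 1);
        printf("after freeze: 0x%02x\n", en);   /* 0x0d */
        en = thaw_mask(en, 1);
        printf("after thaw:   0x%02x\n", en);   /* 0xdd */
        return 0;
    }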
@@ -370,24 +521,15 @@ static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
370 521
371 rc = -ENOMEM; 522 rc = -ENOMEM;
372 523
373 ppi = &nv_port_info; 524 ppi = &nv_port_info[ent->driver_data];
374 probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY); 525 probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
375 if (!probe_ent) 526 if (!probe_ent)
376 goto err_out_regions; 527 goto err_out_regions;
377 528
378 host = kmalloc(sizeof(struct nv_host), GFP_KERNEL);
379 if (!host)
380 goto err_out_free_ent;
381
382 memset(host, 0, sizeof(struct nv_host));
383 host->host_desc = &nv_device_tbl[ent->driver_data];
384
385 probe_ent->private_data = host;
386
387 probe_ent->mmio_base = pci_iomap(pdev, 5, 0); 529 probe_ent->mmio_base = pci_iomap(pdev, 5, 0);
388 if (!probe_ent->mmio_base) { 530 if (!probe_ent->mmio_base) {
389 rc = -EIO; 531 rc = -EIO;
390 goto err_out_free_host; 532 goto err_out_free_ent;
391 } 533 }
392 534
393 base = (unsigned long)probe_ent->mmio_base; 535 base = (unsigned long)probe_ent->mmio_base;
@@ -395,24 +537,27 @@ static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
395 probe_ent->port[0].scr_addr = base + NV_PORT0_SCR_REG_OFFSET; 537 probe_ent->port[0].scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
396 probe_ent->port[1].scr_addr = base + NV_PORT1_SCR_REG_OFFSET; 538 probe_ent->port[1].scr_addr = base + NV_PORT1_SCR_REG_OFFSET;
397 539
540 /* enable SATA space for CK804 */
541 if (ent->driver_data == CK804) {
542 u8 regval;
543
544 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
545 regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
546 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
547 }
548
398 pci_set_master(pdev); 549 pci_set_master(pdev);
399 550
400 rc = ata_device_add(probe_ent); 551 rc = ata_device_add(probe_ent);
401 if (rc != NV_PORTS) 552 if (rc != NV_PORTS)
402 goto err_out_iounmap; 553 goto err_out_iounmap;
403 554
404 // Enable hotplug event interrupts.
405 if (host->host_desc->enable_hotplug)
406 host->host_desc->enable_hotplug(probe_ent);
407
408 kfree(probe_ent); 555 kfree(probe_ent);
409 556
410 return 0; 557 return 0;
411 558
412err_out_iounmap: 559err_out_iounmap:
413 pci_iounmap(pdev, probe_ent->mmio_base); 560 pci_iounmap(pdev, probe_ent->mmio_base);
414err_out_free_host:
415 kfree(host);
416err_out_free_ent: 561err_out_free_ent:
417 kfree(probe_ent); 562 kfree(probe_ent);
418err_out_regions: 563err_out_regions:
@@ -424,127 +569,17 @@ err_out:
424 return rc; 569 return rc;
425} 570}
426 571
427static void nv_enable_hotplug(struct ata_probe_ent *probe_ent) 572static void nv_ck804_host_stop(struct ata_host_set *host_set)
428{
429 u8 intr_mask;
430
431 outb(NV_INT_STATUS_HOTPLUG,
432 probe_ent->port[0].scr_addr + NV_INT_STATUS);
433
434 intr_mask = inb(probe_ent->port[0].scr_addr + NV_INT_ENABLE);
435 intr_mask |= NV_INT_ENABLE_HOTPLUG;
436
437 outb(intr_mask, probe_ent->port[0].scr_addr + NV_INT_ENABLE);
438}
439
440static void nv_disable_hotplug(struct ata_host_set *host_set)
441{
442 u8 intr_mask;
443
444 intr_mask = inb(host_set->ports[0]->ioaddr.scr_addr + NV_INT_ENABLE);
445
446 intr_mask &= ~(NV_INT_ENABLE_HOTPLUG);
447
448 outb(intr_mask, host_set->ports[0]->ioaddr.scr_addr + NV_INT_ENABLE);
449}
450
451static int nv_check_hotplug(struct ata_host_set *host_set)
452{
453 u8 intr_status;
454
455 intr_status = inb(host_set->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
456
457 // Clear interrupt status.
458 outb(0xff, host_set->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
459
460 if (intr_status & NV_INT_STATUS_HOTPLUG) {
461 if (intr_status & NV_INT_STATUS_PDEV_ADDED)
462 printk(KERN_WARNING "nv_sata: "
463 "Primary device added\n");
464
465 if (intr_status & NV_INT_STATUS_PDEV_REMOVED)
466 printk(KERN_WARNING "nv_sata: "
467 "Primary device removed\n");
468
469 if (intr_status & NV_INT_STATUS_SDEV_ADDED)
470 printk(KERN_WARNING "nv_sata: "
471 "Secondary device added\n");
472
473 if (intr_status & NV_INT_STATUS_SDEV_REMOVED)
474 printk(KERN_WARNING "nv_sata: "
475 "Secondary device removed\n");
476
477 return 1;
478 }
479
480 return 0;
481}
482
483static void nv_enable_hotplug_ck804(struct ata_probe_ent *probe_ent)
484{
485 struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
486 u8 intr_mask;
487 u8 regval;
488
489 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
490 regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
491 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
492
493 writeb(NV_INT_STATUS_HOTPLUG, probe_ent->mmio_base + NV_INT_STATUS_CK804);
494
495 intr_mask = readb(probe_ent->mmio_base + NV_INT_ENABLE_CK804);
496 intr_mask |= NV_INT_ENABLE_HOTPLUG;
497
498 writeb(intr_mask, probe_ent->mmio_base + NV_INT_ENABLE_CK804);
499}
500
501static void nv_disable_hotplug_ck804(struct ata_host_set *host_set)
502{ 573{
503 struct pci_dev *pdev = to_pci_dev(host_set->dev); 574 struct pci_dev *pdev = to_pci_dev(host_set->dev);
504 u8 intr_mask;
505 u8 regval; 575 u8 regval;
506 576
507 intr_mask = readb(host_set->mmio_base + NV_INT_ENABLE_CK804); 577 /* disable SATA space for CK804 */
508
509 intr_mask &= ~(NV_INT_ENABLE_HOTPLUG);
510
511 writeb(intr_mask, host_set->mmio_base + NV_INT_ENABLE_CK804);
512
513 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval); 578 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
514 regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN; 579 regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
515 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval); 580 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
516}
517
518static int nv_check_hotplug_ck804(struct ata_host_set *host_set)
519{
520 u8 intr_status;
521 581
522 intr_status = readb(host_set->mmio_base + NV_INT_STATUS_CK804); 582 ata_pci_host_stop(host_set);
523
524 // Clear interrupt status.
525 writeb(0xff, host_set->mmio_base + NV_INT_STATUS_CK804);
526
527 if (intr_status & NV_INT_STATUS_HOTPLUG) {
528 if (intr_status & NV_INT_STATUS_PDEV_ADDED)
529 printk(KERN_WARNING "nv_sata: "
530 "Primary device added\n");
531
532 if (intr_status & NV_INT_STATUS_PDEV_REMOVED)
533 printk(KERN_WARNING "nv_sata: "
534 "Primary device removed\n");
535
536 if (intr_status & NV_INT_STATUS_SDEV_ADDED)
537 printk(KERN_WARNING "nv_sata: "
538 "Secondary device added\n");
539
540 if (intr_status & NV_INT_STATUS_SDEV_REMOVED)
541 printk(KERN_WARNING "nv_sata: "
542 "Secondary device removed\n");
543
544 return 1;
545 }
546
547 return 0;
548} 583}
549 584
550static int __init nv_init(void) 585static int __init nv_init(void)
diff --git a/drivers/scsi/sata_promise.c b/drivers/scsi/sata_promise.c
index 7eb67a6bdc64..b2b6ed5216e0 100644
--- a/drivers/scsi/sata_promise.c
+++ b/drivers/scsi/sata_promise.c
@@ -76,7 +76,8 @@ enum {
76 PDC_RESET = (1 << 11), /* HDMA reset */ 76 PDC_RESET = (1 << 11), /* HDMA reset */
77 77
78 PDC_COMMON_FLAGS = ATA_FLAG_NO_LEGACY | ATA_FLAG_SRST | 78 PDC_COMMON_FLAGS = ATA_FLAG_NO_LEGACY | ATA_FLAG_SRST |
79 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI, 79 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
80 ATA_FLAG_PIO_POLLING,
80}; 81};
81 82
82 83
@@ -120,6 +121,7 @@ static struct scsi_host_template pdc_ata_sht = {
120 .proc_name = DRV_NAME, 121 .proc_name = DRV_NAME,
121 .dma_boundary = ATA_DMA_BOUNDARY, 122 .dma_boundary = ATA_DMA_BOUNDARY,
122 .slave_configure = ata_scsi_slave_config, 123 .slave_configure = ata_scsi_slave_config,
124 .slave_destroy = ata_scsi_slave_destroy,
123 .bios_param = ata_std_bios_param, 125 .bios_param = ata_std_bios_param,
124}; 126};
125 127
@@ -136,6 +138,7 @@ static const struct ata_port_operations pdc_sata_ops = {
136 .qc_prep = pdc_qc_prep, 138 .qc_prep = pdc_qc_prep,
137 .qc_issue = pdc_qc_issue_prot, 139 .qc_issue = pdc_qc_issue_prot,
138 .eng_timeout = pdc_eng_timeout, 140 .eng_timeout = pdc_eng_timeout,
141 .data_xfer = ata_mmio_data_xfer,
139 .irq_handler = pdc_interrupt, 142 .irq_handler = pdc_interrupt,
140 .irq_clear = pdc_irq_clear, 143 .irq_clear = pdc_irq_clear,
141 144
@@ -158,6 +161,7 @@ static const struct ata_port_operations pdc_pata_ops = {
158 161
159 .qc_prep = pdc_qc_prep, 162 .qc_prep = pdc_qc_prep,
160 .qc_issue = pdc_qc_issue_prot, 163 .qc_issue = pdc_qc_issue_prot,
164 .data_xfer = ata_mmio_data_xfer,
161 .eng_timeout = pdc_eng_timeout, 165 .eng_timeout = pdc_eng_timeout,
162 .irq_handler = pdc_interrupt, 166 .irq_handler = pdc_interrupt,
163 .irq_clear = pdc_irq_clear, 167 .irq_clear = pdc_irq_clear,
@@ -363,12 +367,23 @@ static void pdc_sata_phy_reset(struct ata_port *ap)
363 sata_phy_reset(ap); 367 sata_phy_reset(ap);
364} 368}
365 369
366static void pdc_pata_phy_reset(struct ata_port *ap) 370static void pdc_pata_cbl_detect(struct ata_port *ap)
367{ 371{
368 /* FIXME: add cable detect. Don't assume 40-pin cable */ 372 u8 tmp;
369 ap->cbl = ATA_CBL_PATA40; 373 void __iomem *mmio = (void *) ap->ioaddr.cmd_addr + PDC_CTLSTAT + 0x03;
370 ap->udma_mask &= ATA_UDMA_MASK_40C; 374
375 tmp = readb(mmio);
376
377 if (tmp & 0x01) {
378 ap->cbl = ATA_CBL_PATA40;
379 ap->udma_mask &= ATA_UDMA_MASK_40C;
380 } else
381 ap->cbl = ATA_CBL_PATA80;
382}
371 383
384static void pdc_pata_phy_reset(struct ata_port *ap)
385{
386 pdc_pata_cbl_detect(ap);
372 pdc_reset_port(ap); 387 pdc_reset_port(ap);
373 ata_port_probe(ap); 388 ata_port_probe(ap);
374 ata_bus_reset(ap); 389 ata_bus_reset(ap);
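pdc_pata_cbl_detect() above replaces the old hard-coded 40-wire assumption with an actual read of the cable-detect bit. A hedged sketch of the decision it encodes (bit position taken from the hunk above; the register access is simulated):

    #include <stdint.h>
    #include <stdio.h>

    /* bit 0 of the byte at CTLSTAT+3: 1 = 40-wire cable (limit UDMA),
     * 0 = 80-wire cable (sketch of the hunk above, not driver code) */
    static const char *pdc_cable_type(uint8_t ctlstat_byte)
    {
        return (ctlstat_byte & 0x01) ? "PATA40, UDMA limited" : "PATA80";
    }

    int main(void)
    {
        printf("0x01 -> %s\n", pdc_cable_type(0x01));
        printf("0x00 -> %s\n", pdc_cable_type(0x00));
        return 0;
    }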
@@ -435,7 +450,7 @@ static void pdc_eng_timeout(struct ata_port *ap)
435 switch (qc->tf.protocol) { 450 switch (qc->tf.protocol) {
436 case ATA_PROT_DMA: 451 case ATA_PROT_DMA:
437 case ATA_PROT_NODATA: 452 case ATA_PROT_NODATA:
438 printk(KERN_ERR "ata%u: command timeout\n", ap->id); 453 ata_port_printk(ap, KERN_ERR, "command timeout\n");
439 drv_stat = ata_wait_idle(ap); 454 drv_stat = ata_wait_idle(ap);
440 qc->err_mask |= __ac_err_mask(drv_stat); 455 qc->err_mask |= __ac_err_mask(drv_stat);
441 break; 456 break;
@@ -443,8 +458,9 @@ static void pdc_eng_timeout(struct ata_port *ap)
443 default: 458 default:
444 drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000); 459 drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
445 460
446 printk(KERN_ERR "ata%u: unknown timeout, cmd 0x%x stat 0x%x\n", 461 ata_port_printk(ap, KERN_ERR,
447 ap->id, qc->tf.command, drv_stat); 462 "unknown timeout, cmd 0x%x stat 0x%x\n",
463 qc->tf.command, drv_stat);
448 464
449 qc->err_mask |= ac_err_mask(drv_stat); 465 qc->err_mask |= ac_err_mask(drv_stat);
450 break; 466 break;
@@ -533,11 +549,11 @@ static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *r
533 ap = host_set->ports[i]; 549 ap = host_set->ports[i];
534 tmp = mask & (1 << (i + 1)); 550 tmp = mask & (1 << (i + 1));
535 if (tmp && ap && 551 if (tmp && ap &&
536 !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) { 552 !(ap->flags & ATA_FLAG_DISABLED)) {
537 struct ata_queued_cmd *qc; 553 struct ata_queued_cmd *qc;
538 554
539 qc = ata_qc_from_tag(ap, ap->active_tag); 555 qc = ata_qc_from_tag(ap, ap->active_tag);
540 if (qc && (!(qc->tf.ctl & ATA_NIEN))) 556 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
541 handled += pdc_host_intr(ap, qc); 557 handled += pdc_host_intr(ap, qc);
542 } 558 }
543 } 559 }
@@ -676,10 +692,6 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
676 if (!printed_version++) 692 if (!printed_version++)
677 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); 693 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
678 694
679 /*
680 * If this driver happens to only be useful on Apple's K2, then
681 * we should check that here as it has a normal Serverworks ID
682 */
683 rc = pci_enable_device(pdev); 695 rc = pci_enable_device(pdev);
684 if (rc) 696 if (rc)
685 return rc; 697 return rc;
diff --git a/drivers/scsi/sata_qstor.c b/drivers/scsi/sata_qstor.c
index 886f3447dd48..98ddc25655f0 100644
--- a/drivers/scsi/sata_qstor.c
+++ b/drivers/scsi/sata_qstor.c
@@ -41,7 +41,7 @@
41#include <linux/libata.h> 41#include <linux/libata.h>
42 42
43#define DRV_NAME "sata_qstor" 43#define DRV_NAME "sata_qstor"
44#define DRV_VERSION "0.05" 44#define DRV_VERSION "0.06"
45 45
46enum { 46enum {
47 QS_PORTS = 4, 47 QS_PORTS = 4,
@@ -142,6 +142,7 @@ static struct scsi_host_template qs_ata_sht = {
142 .proc_name = DRV_NAME, 142 .proc_name = DRV_NAME,
143 .dma_boundary = QS_DMA_BOUNDARY, 143 .dma_boundary = QS_DMA_BOUNDARY,
144 .slave_configure = ata_scsi_slave_config, 144 .slave_configure = ata_scsi_slave_config,
145 .slave_destroy = ata_scsi_slave_destroy,
145 .bios_param = ata_std_bios_param, 146 .bios_param = ata_std_bios_param,
146}; 147};
147 148
@@ -156,6 +157,7 @@ static const struct ata_port_operations qs_ata_ops = {
156 .phy_reset = qs_phy_reset, 157 .phy_reset = qs_phy_reset,
157 .qc_prep = qs_qc_prep, 158 .qc_prep = qs_qc_prep,
158 .qc_issue = qs_qc_issue, 159 .qc_issue = qs_qc_issue,
160 .data_xfer = ata_mmio_data_xfer,
159 .eng_timeout = qs_eng_timeout, 161 .eng_timeout = qs_eng_timeout,
160 .irq_handler = qs_intr, 162 .irq_handler = qs_intr,
161 .irq_clear = qs_irq_clear, 163 .irq_clear = qs_irq_clear,
@@ -175,7 +177,7 @@ static const struct ata_port_info qs_port_info[] = {
175 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 177 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
176 ATA_FLAG_SATA_RESET | 178 ATA_FLAG_SATA_RESET |
177 //FIXME ATA_FLAG_SRST | 179 //FIXME ATA_FLAG_SRST |
178 ATA_FLAG_MMIO, 180 ATA_FLAG_MMIO | ATA_FLAG_PIO_POLLING,
179 .pio_mask = 0x10, /* pio4 */ 181 .pio_mask = 0x10, /* pio4 */
180 .udma_mask = 0x7f, /* udma0-6 */ 182 .udma_mask = 0x7f, /* udma0-6 */
181 .port_ops = &qs_ata_ops, 183 .port_ops = &qs_ata_ops,
@@ -394,14 +396,13 @@ static inline unsigned int qs_intr_pkt(struct ata_host_set *host_set)
394 DPRINTK("SFF=%08x%08x: sCHAN=%u sHST=%d sDST=%02x\n", 396 DPRINTK("SFF=%08x%08x: sCHAN=%u sHST=%d sDST=%02x\n",
395 sff1, sff0, port_no, sHST, sDST); 397 sff1, sff0, port_no, sHST, sDST);
396 handled = 1; 398 handled = 1;
397 if (ap && !(ap->flags & 399 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
398 (ATA_FLAG_PORT_DISABLED|ATA_FLAG_NOINTR))) {
399 struct ata_queued_cmd *qc; 400 struct ata_queued_cmd *qc;
400 struct qs_port_priv *pp = ap->private_data; 401 struct qs_port_priv *pp = ap->private_data;
401 if (!pp || pp->state != qs_state_pkt) 402 if (!pp || pp->state != qs_state_pkt)
402 continue; 403 continue;
403 qc = ata_qc_from_tag(ap, ap->active_tag); 404 qc = ata_qc_from_tag(ap, ap->active_tag);
404 if (qc && (!(qc->tf.ctl & ATA_NIEN))) { 405 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
405 switch (sHST) { 406 switch (sHST) {
406 case 0: /* successful CPB */ 407 case 0: /* successful CPB */
407 case 3: /* device error */ 408 case 3: /* device error */
@@ -428,13 +429,13 @@ static inline unsigned int qs_intr_mmio(struct ata_host_set *host_set)
428 struct ata_port *ap; 429 struct ata_port *ap;
429 ap = host_set->ports[port_no]; 430 ap = host_set->ports[port_no];
430 if (ap && 431 if (ap &&
431 !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) { 432 !(ap->flags & ATA_FLAG_DISABLED)) {
432 struct ata_queued_cmd *qc; 433 struct ata_queued_cmd *qc;
433 struct qs_port_priv *pp = ap->private_data; 434 struct qs_port_priv *pp = ap->private_data;
434 if (!pp || pp->state != qs_state_mmio) 435 if (!pp || pp->state != qs_state_mmio)
435 continue; 436 continue;
436 qc = ata_qc_from_tag(ap, ap->active_tag); 437 qc = ata_qc_from_tag(ap, ap->active_tag);
437 if (qc && (!(qc->tf.ctl & ATA_NIEN))) { 438 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
438 439
439 /* check main status, clearing INTRQ */ 440 /* check main status, clearing INTRQ */
440 u8 status = ata_check_status(ap); 441 u8 status = ata_check_status(ap);
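Note the substitution recurring across the nv, promise, and qstor interrupt paths: instead of peeking at the hardware control byte (qc->tf.ctl & ATA_NIEN), the handlers now test the software flag ATA_TFLAG_POLLING, which records the driver's intent to poll the command regardless of what the device control register happens to hold. A trivial sketch of the test (the flag's numeric value here is illustrative, not the kernel's):

    #include <stdio.h>

    enum { ATA_TFLAG_POLLING = (1 << 3) };  /* illustrative value only */

    struct taskfile { unsigned long flags; };

    /* irq path: skip commands that the issuing path chose to poll */
    static int wants_irq_service(const struct taskfile *tf)
    {
        return !(tf->flags & ATA_TFLAG_POLLING);
    }

    int main(void)
    {
        struct taskfile irq_cmd = { 0 };
        struct taskfile polled_cmd = { ATA_TFLAG_POLLING };

        printf("irq_cmd: %d, polled_cmd: %d\n",
               wants_irq_service(&irq_cmd),
               wants_irq_service(&polled_cmd));
        return 0;
    }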
diff --git a/drivers/scsi/sata_sil.c b/drivers/scsi/sata_sil.c
index 106627299d55..bc9f918a7f28 100644
--- a/drivers/scsi/sata_sil.c
+++ b/drivers/scsi/sata_sil.c
@@ -46,7 +46,7 @@
46#include <linux/libata.h> 46#include <linux/libata.h>
47 47
48#define DRV_NAME "sata_sil" 48#define DRV_NAME "sata_sil"
49#define DRV_VERSION "0.9" 49#define DRV_VERSION "1.0"
50 50
51enum { 51enum {
52 /* 52 /*
@@ -54,8 +54,9 @@ enum {
54 */ 54 */
55 SIL_FLAG_RERR_ON_DMA_ACT = (1 << 29), 55 SIL_FLAG_RERR_ON_DMA_ACT = (1 << 29),
56 SIL_FLAG_MOD15WRITE = (1 << 30), 56 SIL_FLAG_MOD15WRITE = (1 << 30),
57
57 SIL_DFL_HOST_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 58 SIL_DFL_HOST_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
58 ATA_FLAG_MMIO, 59 ATA_FLAG_MMIO | ATA_FLAG_HRST_TO_RESUME,
59 60
60 /* 61 /*
61 * Controller IDs 62 * Controller IDs
@@ -84,6 +85,20 @@ enum {
84 /* BMDMA/BMDMA2 */ 85 /* BMDMA/BMDMA2 */
85 SIL_INTR_STEERING = (1 << 1), 86 SIL_INTR_STEERING = (1 << 1),
86 87
88 SIL_DMA_ENABLE = (1 << 0), /* DMA run switch */
89 SIL_DMA_RDWR = (1 << 3), /* DMA Rd-Wr */
90 SIL_DMA_SATA_IRQ = (1 << 4), /* OR of all SATA IRQs */
91 SIL_DMA_ACTIVE = (1 << 16), /* DMA running */
92 SIL_DMA_ERROR = (1 << 17), /* PCI bus error */
93 SIL_DMA_COMPLETE = (1 << 18), /* cmd complete / IRQ pending */
94 SIL_DMA_N_SATA_IRQ = (1 << 6), /* SATA_IRQ for the next channel */
95 SIL_DMA_N_ACTIVE = (1 << 24), /* ACTIVE for the next channel */
96 SIL_DMA_N_ERROR = (1 << 25), /* ERROR for the next channel */
97 SIL_DMA_N_COMPLETE = (1 << 26), /* COMPLETE for the next channel */
98
99 /* SIEN */
100 SIL_SIEN_N = (1 << 16), /* triggered by SError.N */
101
87 /* 102 /*
88 * Others 103 * Others
89 */ 104 */
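The new SIL_DMA_* bits give sil_interrupt() everything it needs from one 32-bit read of the second BMDMA register: completion, error, and the OR'd SATA IRQ for the channel, plus mirrored *_N bits for the neighboring channel. A sketch of the per-port service test the handler applies, including the all-ones check that catches a surprise-removed controller (constants copied from the hunk; behavior hedged):

    #include <stdint.h>
    #include <stdio.h>

    enum {
        SIL_DMA_SATA_IRQ = (1 << 4),
        SIL_DMA_ERROR    = (1 << 17),
        SIL_DMA_COMPLETE = (1 << 18),
    };

    static int bmdma2_wants_service(uint32_t bmdma2)
    {
        if (bmdma2 == 0xffffffff)       /* bus returned all-ones: no HBA */
            return 0;
        return (bmdma2 & (SIL_DMA_COMPLETE | SIL_DMA_SATA_IRQ)) != 0;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               bmdma2_wants_service(SIL_DMA_COMPLETE),  /* 1 */
               bmdma2_wants_service(SIL_DMA_ERROR),     /* 0: error alone
                                                           is left pending */
               bmdma2_wants_service(0xffffffff));       /* 0 */
        return 0;
    }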
@@ -96,6 +111,10 @@ static void sil_dev_config(struct ata_port *ap, struct ata_device *dev);
96static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg); 111static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg);
97static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); 112static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
98static void sil_post_set_mode (struct ata_port *ap); 113static void sil_post_set_mode (struct ata_port *ap);
114static irqreturn_t sil_interrupt(int irq, void *dev_instance,
115 struct pt_regs *regs);
116static void sil_freeze(struct ata_port *ap);
117static void sil_thaw(struct ata_port *ap);
99 118
100 119
101static const struct pci_device_id sil_pci_tbl[] = { 120static const struct pci_device_id sil_pci_tbl[] = {
@@ -155,6 +174,7 @@ static struct scsi_host_template sil_sht = {
155 .proc_name = DRV_NAME, 174 .proc_name = DRV_NAME,
156 .dma_boundary = ATA_DMA_BOUNDARY, 175 .dma_boundary = ATA_DMA_BOUNDARY,
157 .slave_configure = ata_scsi_slave_config, 176 .slave_configure = ata_scsi_slave_config,
177 .slave_destroy = ata_scsi_slave_destroy,
158 .bios_param = ata_std_bios_param, 178 .bios_param = ata_std_bios_param,
159}; 179};
160 180
@@ -166,7 +186,6 @@ static const struct ata_port_operations sil_ops = {
166 .check_status = ata_check_status, 186 .check_status = ata_check_status,
167 .exec_command = ata_exec_command, 187 .exec_command = ata_exec_command,
168 .dev_select = ata_std_dev_select, 188 .dev_select = ata_std_dev_select,
169 .probe_reset = ata_std_probe_reset,
170 .post_set_mode = sil_post_set_mode, 189 .post_set_mode = sil_post_set_mode,
171 .bmdma_setup = ata_bmdma_setup, 190 .bmdma_setup = ata_bmdma_setup,
172 .bmdma_start = ata_bmdma_start, 191 .bmdma_start = ata_bmdma_start,
@@ -174,8 +193,12 @@ static const struct ata_port_operations sil_ops = {
174 .bmdma_status = ata_bmdma_status, 193 .bmdma_status = ata_bmdma_status,
175 .qc_prep = ata_qc_prep, 194 .qc_prep = ata_qc_prep,
176 .qc_issue = ata_qc_issue_prot, 195 .qc_issue = ata_qc_issue_prot,
177 .eng_timeout = ata_eng_timeout, 196 .data_xfer = ata_mmio_data_xfer,
178 .irq_handler = ata_interrupt, 197 .freeze = sil_freeze,
198 .thaw = sil_thaw,
199 .error_handler = ata_bmdma_error_handler,
200 .post_internal_cmd = ata_bmdma_post_internal_cmd,
201 .irq_handler = sil_interrupt,
179 .irq_clear = ata_bmdma_irq_clear, 202 .irq_clear = ata_bmdma_irq_clear,
180 .scr_read = sil_scr_read, 203 .scr_read = sil_scr_read,
181 .scr_write = sil_scr_write, 204 .scr_write = sil_scr_write,
@@ -220,6 +243,7 @@ static const struct {
220 unsigned long tf; /* ATA taskfile register block */ 243 unsigned long tf; /* ATA taskfile register block */
221 unsigned long ctl; /* ATA control/altstatus register block */ 244 unsigned long ctl; /* ATA control/altstatus register block */
222 unsigned long bmdma; /* DMA register block */ 245 unsigned long bmdma; /* DMA register block */
246 unsigned long bmdma2; /* DMA register block #2 */
223 unsigned long fifo_cfg; /* FIFO Valid Byte Count and Control */ 247 unsigned long fifo_cfg; /* FIFO Valid Byte Count and Control */
224 unsigned long scr; /* SATA control register block */ 248 unsigned long scr; /* SATA control register block */
225 unsigned long sien; /* SATA Interrupt Enable register */ 249 unsigned long sien; /* SATA Interrupt Enable register */
@@ -227,10 +251,10 @@ static const struct {
227 unsigned long sfis_cfg; /* SATA FIS reception config register */ 251 unsigned long sfis_cfg; /* SATA FIS reception config register */
228} sil_port[] = { 252} sil_port[] = {
229 /* port 0 ... */ 253 /* port 0 ... */
230 { 0x80, 0x8A, 0x00, 0x40, 0x100, 0x148, 0xb4, 0x14c }, 254 { 0x80, 0x8A, 0x00, 0x10, 0x40, 0x100, 0x148, 0xb4, 0x14c },
231 { 0xC0, 0xCA, 0x08, 0x44, 0x180, 0x1c8, 0xf4, 0x1cc }, 255 { 0xC0, 0xCA, 0x08, 0x18, 0x44, 0x180, 0x1c8, 0xf4, 0x1cc },
232 { 0x280, 0x28A, 0x200, 0x240, 0x300, 0x348, 0x2b4, 0x34c }, 256 { 0x280, 0x28A, 0x200, 0x210, 0x240, 0x300, 0x348, 0x2b4, 0x34c },
233 { 0x2C0, 0x2CA, 0x208, 0x244, 0x380, 0x3c8, 0x2f4, 0x3cc }, 257 { 0x2C0, 0x2CA, 0x208, 0x218, 0x244, 0x380, 0x3c8, 0x2f4, 0x3cc },
234 /* ... port 3 */ 258 /* ... port 3 */
235}; 259};
236 260
@@ -263,7 +287,7 @@ static void sil_post_set_mode (struct ata_port *ap)
263 287
264 for (i = 0; i < 2; i++) { 288 for (i = 0; i < 2; i++) {
265 dev = &ap->device[i]; 289 dev = &ap->device[i];
266 if (!ata_dev_present(dev)) 290 if (!ata_dev_enabled(dev))
267 dev_mode[i] = 0; /* PIO0/1/2 */ 291 dev_mode[i] = 0; /* PIO0/1/2 */
268 else if (dev->flags & ATA_DFLAG_PIO) 292 else if (dev->flags & ATA_DFLAG_PIO)
269 dev_mode[i] = 1; /* PIO3/4 */ 293 dev_mode[i] = 1; /* PIO3/4 */
@@ -314,6 +338,151 @@ static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
314 writel(val, mmio); 338 writel(val, mmio);
315} 339}
316 340
341static void sil_host_intr(struct ata_port *ap, u32 bmdma2)
342{
343 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
344 u8 status;
345
346 if (unlikely(bmdma2 & SIL_DMA_SATA_IRQ)) {
347 u32 serror;
348
349 /* SIEN doesn't mask SATA IRQs on some 3112s. Those
350 * controllers continue to assert IRQ as long as
351 * SError bits are pending. Clear SError immediately.
352 */
353 serror = sil_scr_read(ap, SCR_ERROR);
354 sil_scr_write(ap, SCR_ERROR, serror);
355
356 /* Trigger hotplug and accumulate SError only if the
357 * port isn't already frozen. Otherwise, PHY events
 358 * during hardreset make controllers with broken SIEN
359 * repeat probing needlessly.
360 */
361 if (!(ap->flags & ATA_FLAG_FROZEN)) {
362 ata_ehi_hotplugged(&ap->eh_info);
363 ap->eh_info.serror |= serror;
364 }
365
366 goto freeze;
367 }
368
369 if (unlikely(!qc || qc->tf.ctl & ATA_NIEN))
370 goto freeze;
371
372 /* Check whether we are expecting interrupt in this state */
373 switch (ap->hsm_task_state) {
374 case HSM_ST_FIRST:
375 /* Some pre-ATAPI-4 devices assert INTRQ
 376 * in this state when ready to receive the CDB.
377 */
378
 379 /* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
 380 * The flag is set only for ATAPI devices, so there is
 381 * no need to check is_atapi_taskfile(&qc->tf) again.
382 */
383 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
384 goto err_hsm;
385 break;
386 case HSM_ST_LAST:
387 if (qc->tf.protocol == ATA_PROT_DMA ||
388 qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
389 /* clear DMA-Start bit */
390 ap->ops->bmdma_stop(qc);
391
392 if (bmdma2 & SIL_DMA_ERROR) {
393 qc->err_mask |= AC_ERR_HOST_BUS;
394 ap->hsm_task_state = HSM_ST_ERR;
395 }
396 }
397 break;
398 case HSM_ST:
399 break;
400 default:
401 goto err_hsm;
402 }
403
404 /* check main status, clearing INTRQ */
405 status = ata_chk_status(ap);
406 if (unlikely(status & ATA_BUSY))
407 goto err_hsm;
408
409 /* ack bmdma irq events */
410 ata_bmdma_irq_clear(ap);
411
 412 /* advance the HSM */
413 ata_hsm_move(ap, qc, status, 0);
414
415 return;
416
417 err_hsm:
418 qc->err_mask |= AC_ERR_HSM;
419 freeze:
420 ata_port_freeze(ap);
421}
422
423static irqreturn_t sil_interrupt(int irq, void *dev_instance,
424 struct pt_regs *regs)
425{
426 struct ata_host_set *host_set = dev_instance;
427 void __iomem *mmio_base = host_set->mmio_base;
428 int handled = 0;
429 int i;
430
431 spin_lock(&host_set->lock);
432
433 for (i = 0; i < host_set->n_ports; i++) {
434 struct ata_port *ap = host_set->ports[i];
435 u32 bmdma2 = readl(mmio_base + sil_port[ap->port_no].bmdma2);
436
437 if (unlikely(!ap || ap->flags & ATA_FLAG_DISABLED))
438 continue;
439
440 if (bmdma2 == 0xffffffff ||
441 !(bmdma2 & (SIL_DMA_COMPLETE | SIL_DMA_SATA_IRQ)))
442 continue;
443
444 sil_host_intr(ap, bmdma2);
445 handled = 1;
446 }
447
448 spin_unlock(&host_set->lock);
449
450 return IRQ_RETVAL(handled);
451}
452
453static void sil_freeze(struct ata_port *ap)
454{
455 void __iomem *mmio_base = ap->host_set->mmio_base;
456 u32 tmp;
457
458 /* global IRQ mask doesn't block SATA IRQ, turn off explicitly */
459 writel(0, mmio_base + sil_port[ap->port_no].sien);
460
461 /* plug IRQ */
462 tmp = readl(mmio_base + SIL_SYSCFG);
463 tmp |= SIL_MASK_IDE0_INT << ap->port_no;
464 writel(tmp, mmio_base + SIL_SYSCFG);
465 readl(mmio_base + SIL_SYSCFG); /* flush */
466}
467
468static void sil_thaw(struct ata_port *ap)
469{
470 void __iomem *mmio_base = ap->host_set->mmio_base;
471 u32 tmp;
472
473 /* clear IRQ */
474 ata_chk_status(ap);
475 ata_bmdma_irq_clear(ap);
476
477 /* turn on SATA IRQ */
478 writel(SIL_SIEN_N, mmio_base + sil_port[ap->port_no].sien);
479
480 /* turn on IRQ */
481 tmp = readl(mmio_base + SIL_SYSCFG);
482 tmp &= ~(SIL_MASK_IDE0_INT << ap->port_no);
483 writel(tmp, mmio_base + SIL_SYSCFG);
484}
485
317/** 486/**
318 * sil_dev_config - Apply device/host-specific errata fixups 487 * sil_dev_config - Apply device/host-specific errata fixups
319 * @ap: Port containing device to be examined 488 * @ap: Port containing device to be examined
@@ -360,16 +529,16 @@ static void sil_dev_config(struct ata_port *ap, struct ata_device *dev)
360 if (slow_down || 529 if (slow_down ||
361 ((ap->flags & SIL_FLAG_MOD15WRITE) && 530 ((ap->flags & SIL_FLAG_MOD15WRITE) &&
362 (quirks & SIL_QUIRK_MOD15WRITE))) { 531 (quirks & SIL_QUIRK_MOD15WRITE))) {
363 printk(KERN_INFO "ata%u(%u): applying Seagate errata fix (mod15write workaround)\n", 532 ata_dev_printk(dev, KERN_INFO, "applying Seagate errata fix "
364 ap->id, dev->devno); 533 "(mod15write workaround)\n");
365 dev->max_sectors = 15; 534 dev->max_sectors = 15;
366 return; 535 return;
367 } 536 }
368 537
369 /* limit to udma5 */ 538 /* limit to udma5 */
370 if (quirks & SIL_QUIRK_UDMA5MAX) { 539 if (quirks & SIL_QUIRK_UDMA5MAX) {
371 printk(KERN_INFO "ata%u(%u): applying Maxtor errata fix %s\n", 540 ata_dev_printk(dev, KERN_INFO,
372 ap->id, dev->devno, model_num); 541 "applying Maxtor errata fix %s\n", model_num);
373 dev->udma_mask &= ATA_UDMA5; 542 dev->udma_mask &= ATA_UDMA5;
374 return; 543 return;
375 } 544 }
@@ -384,16 +553,12 @@ static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
384 int rc; 553 int rc;
385 unsigned int i; 554 unsigned int i;
386 int pci_dev_busy = 0; 555 int pci_dev_busy = 0;
387 u32 tmp, irq_mask; 556 u32 tmp;
388 u8 cls; 557 u8 cls;
389 558
390 if (!printed_version++) 559 if (!printed_version++)
391 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); 560 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
392 561
393 /*
394 * If this driver happens to only be useful on Apple's K2, then
395 * we should check that here as it has a normal Serverworks ID
396 */
397 rc = pci_enable_device(pdev); 562 rc = pci_enable_device(pdev);
398 if (rc) 563 if (rc)
399 return rc; 564 return rc;
@@ -478,31 +643,13 @@ static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
478 } 643 }
479 644
480 if (ent->driver_data == sil_3114) { 645 if (ent->driver_data == sil_3114) {
481 irq_mask = SIL_MASK_4PORT;
482
483 /* flip the magic "make 4 ports work" bit */ 646 /* flip the magic "make 4 ports work" bit */
484 tmp = readl(mmio_base + sil_port[2].bmdma); 647 tmp = readl(mmio_base + sil_port[2].bmdma);
485 if ((tmp & SIL_INTR_STEERING) == 0) 648 if ((tmp & SIL_INTR_STEERING) == 0)
486 writel(tmp | SIL_INTR_STEERING, 649 writel(tmp | SIL_INTR_STEERING,
487 mmio_base + sil_port[2].bmdma); 650 mmio_base + sil_port[2].bmdma);
488
489 } else {
490 irq_mask = SIL_MASK_2PORT;
491 }
492
493 /* make sure IDE0/1/2/3 interrupts are not masked */
494 tmp = readl(mmio_base + SIL_SYSCFG);
495 if (tmp & irq_mask) {
496 tmp &= ~irq_mask;
497 writel(tmp, mmio_base + SIL_SYSCFG);
498 readl(mmio_base + SIL_SYSCFG); /* flush */
499 } 651 }
500 652
501 /* mask all SATA phy-related interrupts */
502 /* TODO: unmask bit 6 (SError N bit) for hotplug */
503 for (i = 0; i < probe_ent->n_ports; i++)
504 writel(0, mmio_base + sil_port[i].sien);
505
506 pci_set_master(pdev); 653 pci_set_master(pdev);
507 654
508 /* FIXME: check ata_device_add return value */ 655 /* FIXME: check ata_device_add return value */
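One detail worth calling out in the sil_host_intr() hunk above: SError is a write-1-to-clear register, which is why the handler writes back exactly the value it just read to ack the pending bits before deciding whether to record a hotplug event. The idiom in isolation, with a simulated register (sketch only):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t serror_reg = 0x00010002;    /* simulated SError */

    static uint32_t scr_read(void)      { return serror_reg; }
    static void scr_write(uint32_t v)   { serror_reg &= ~v; /* W1C */ }

    int main(void)
    {
        uint32_t serror = scr_read();

        scr_write(serror);              /* ack everything we saw */
        printf("read 0x%08x, now 0x%08x\n", serror, scr_read());
        return 0;
    }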
diff --git a/drivers/scsi/sata_sil24.c b/drivers/scsi/sata_sil24.c
index cb9082fd7e2f..c8b477c67247 100644
--- a/drivers/scsi/sata_sil24.c
+++ b/drivers/scsi/sata_sil24.c
@@ -31,15 +31,15 @@
31#include <asm/io.h> 31#include <asm/io.h>
32 32
33#define DRV_NAME "sata_sil24" 33#define DRV_NAME "sata_sil24"
34#define DRV_VERSION "0.23" 34#define DRV_VERSION "0.24"
35 35
36/* 36/*
37 * Port request block (PRB) 32 bytes 37 * Port request block (PRB) 32 bytes
38 */ 38 */
39struct sil24_prb { 39struct sil24_prb {
40 u16 ctrl; 40 __le16 ctrl;
41 u16 prot; 41 __le16 prot;
42 u32 rx_cnt; 42 __le32 rx_cnt;
43 u8 fis[6 * 4]; 43 u8 fis[6 * 4];
44}; 44};
45 45
@@ -47,17 +47,17 @@ struct sil24_prb {
47 * Scatter gather entry (SGE) 16 bytes 47 * Scatter gather entry (SGE) 16 bytes
48 */ 48 */
49struct sil24_sge { 49struct sil24_sge {
50 u64 addr; 50 __le64 addr;
51 u32 cnt; 51 __le32 cnt;
52 u32 flags; 52 __le32 flags;
53}; 53};
54 54
55/* 55/*
56 * Port multiplier 56 * Port multiplier
57 */ 57 */
58struct sil24_port_multiplier { 58struct sil24_port_multiplier {
59 u32 diag; 59 __le32 diag;
60 u32 sactive; 60 __le32 sactive;
61}; 61};
62 62
63enum { 63enum {
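The u16/u32 to __le16/__le32 changes above don't alter the generated code; they annotate the PRB, SGE, and port-multiplier structures as fixed little-endian hardware layouts so sparse can flag any access that skips the byte-order conversion helpers. Outside the kernel the same discipline looks roughly like this (the kernel uses cpu_to_le32()/le32_to_cpu() instead of hand-rolled byte stores):

    #include <stdint.h>
    #include <stdio.h>

    /* store/load a CPU value as little-endian, independent of host order */
    static void put_le32(uint8_t *p, uint32_t v)
    {
        p[0] = v; p[1] = v >> 8; p[2] = v >> 16; p[3] = v >> 24;
    }

    static uint32_t get_le32(const uint8_t *p)
    {
        return (uint32_t)p[0]       | (uint32_t)p[1] << 8 |
               (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
    }

    int main(void)
    {
        uint8_t field[4];               /* e.g. one __le32 struct member */

        put_le32(field, 0x12345678);
        printf("0x%08x\n", get_le32(field));
        return 0;
    }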
@@ -86,12 +86,21 @@ enum {
86 /* HOST_SLOT_STAT bits */ 86 /* HOST_SLOT_STAT bits */
87 HOST_SSTAT_ATTN = (1 << 31), 87 HOST_SSTAT_ATTN = (1 << 31),
88 88
89 /* HOST_CTRL bits */
90 HOST_CTRL_M66EN = (1 << 16), /* M66EN PCI bus signal */
91 HOST_CTRL_TRDY = (1 << 17), /* latched PCI TRDY */
92 HOST_CTRL_STOP = (1 << 18), /* latched PCI STOP */
93 HOST_CTRL_DEVSEL = (1 << 19), /* latched PCI DEVSEL */
94 HOST_CTRL_REQ64 = (1 << 20), /* latched PCI REQ64 */
95
89 /* 96 /*
90 * Port registers 97 * Port registers
91 * (8192 bytes @ +0x0000, +0x2000, +0x4000 and +0x6000 @ BAR2) 98 * (8192 bytes @ +0x0000, +0x2000, +0x4000 and +0x6000 @ BAR2)
92 */ 99 */
93 PORT_REGS_SIZE = 0x2000, 100 PORT_REGS_SIZE = 0x2000,
94 PORT_PRB = 0x0000, /* (32 bytes PRB + 16 bytes SGEs * 6) * 31 (3968 bytes) */ 101
102 PORT_LRAM = 0x0000, /* 31 LRAM slots and PM regs */
103 PORT_LRAM_SLOT_SZ = 0x0080, /* 32 bytes PRB + 2 SGE, ACT... */
95 104
96 PORT_PM = 0x0f80, /* 8 bytes PM * 16 (128 bytes) */ 105 PORT_PM = 0x0f80, /* 8 bytes PM * 16 (128 bytes) */
97 /* 32 bit regs */ 106 /* 32 bit regs */
@@ -142,8 +151,16 @@ enum {
142 PORT_IRQ_PWR_CHG = (1 << 3), /* power management change */ 151 PORT_IRQ_PWR_CHG = (1 << 3), /* power management change */
143 PORT_IRQ_PHYRDY_CHG = (1 << 4), /* PHY ready change */ 152 PORT_IRQ_PHYRDY_CHG = (1 << 4), /* PHY ready change */
144 PORT_IRQ_COMWAKE = (1 << 5), /* COMWAKE received */ 153 PORT_IRQ_COMWAKE = (1 << 5), /* COMWAKE received */
145 PORT_IRQ_UNK_FIS = (1 << 6), /* Unknown FIS received */ 154 PORT_IRQ_UNK_FIS = (1 << 6), /* unknown FIS received */
146 PORT_IRQ_SDB_FIS = (1 << 11), /* SDB FIS received */ 155 PORT_IRQ_DEV_XCHG = (1 << 7), /* device exchanged */
156 PORT_IRQ_8B10B = (1 << 8), /* 8b/10b decode error threshold */
157 PORT_IRQ_CRC = (1 << 9), /* CRC error threshold */
158 PORT_IRQ_HANDSHAKE = (1 << 10), /* handshake error threshold */
159 PORT_IRQ_SDB_NOTIFY = (1 << 11), /* SDB notify received */
160
161 DEF_PORT_IRQ = PORT_IRQ_COMPLETE | PORT_IRQ_ERROR |
162 PORT_IRQ_PHYRDY_CHG | PORT_IRQ_DEV_XCHG |
163 PORT_IRQ_UNK_FIS,
147 164
148 /* bits[27:16] are unmasked (raw) */ 165 /* bits[27:16] are unmasked (raw) */
149 PORT_IRQ_RAW_SHIFT = 16, 166 PORT_IRQ_RAW_SHIFT = 16,
@@ -174,7 +191,7 @@ enum {
174 PORT_CERR_CMD_PCIPERR = 27, /* ctrl[15:13] 110 - PCI parity err while fetching PRB */ 191 PORT_CERR_CMD_PCIPERR = 27, /* ctrl[15:13] 110 - PCI parity err while fetching PRB */
175 PORT_CERR_XFR_UNDEF = 32, /* PSD ecode 00 - undefined */ 192 PORT_CERR_XFR_UNDEF = 32, /* PSD ecode 00 - undefined */
176 PORT_CERR_XFR_TGTABRT = 33, /* PSD ecode 01 - target abort */ 193 PORT_CERR_XFR_TGTABRT = 33, /* PSD ecode 01 - target abort */
177 PORT_CERR_XFR_MSGABRT = 34, /* PSD ecode 10 - master abort */ 194 PORT_CERR_XFR_MSTABRT = 34, /* PSD ecode 10 - master abort */
 178 PORT_CERR_XFR_PCIPERR = 35, /* PSD ecode 11 - PCI parity err during transfer */ 195 PORT_CERR_XFR_PCIPERR = 35, /* PSD ecode 11 - PCI parity err during transfer */
179 PORT_CERR_SENDSERVICE = 36, /* FIS received while sending service */ 196 PORT_CERR_SENDSERVICE = 36, /* FIS received while sending service */
180 197
@@ -202,11 +219,19 @@ enum {
202 SGE_DRD = (1 << 29), /* discard data read (/dev/null) 219 SGE_DRD = (1 << 29), /* discard data read (/dev/null)
203 data address ignored */ 220 data address ignored */
204 221
222 SIL24_MAX_CMDS = 31,
223
205 /* board id */ 224 /* board id */
206 BID_SIL3124 = 0, 225 BID_SIL3124 = 0,
207 BID_SIL3132 = 1, 226 BID_SIL3132 = 1,
208 BID_SIL3131 = 2, 227 BID_SIL3131 = 2,
209 228
229 /* host flags */
230 SIL24_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
231 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
232 ATA_FLAG_NCQ | ATA_FLAG_SKIP_D2H_BSY,
233 SIL24_FLAG_PCIX_IRQ_WOC = (1 << 24), /* IRQ loss errata on PCI-X */
234
210 IRQ_STAT_4PORTS = 0xf, 235 IRQ_STAT_4PORTS = 0xf,
211}; 236};
212 237
@@ -226,6 +251,58 @@ union sil24_cmd_block {
226 struct sil24_atapi_block atapi; 251 struct sil24_atapi_block atapi;
227}; 252};
228 253
254static struct sil24_cerr_info {
255 unsigned int err_mask, action;
256 const char *desc;
257} sil24_cerr_db[] = {
258 [0] = { AC_ERR_DEV, ATA_EH_REVALIDATE,
259 "device error" },
260 [PORT_CERR_DEV] = { AC_ERR_DEV, ATA_EH_REVALIDATE,
261 "device error via D2H FIS" },
262 [PORT_CERR_SDB] = { AC_ERR_DEV, ATA_EH_REVALIDATE,
263 "device error via SDB FIS" },
264 [PORT_CERR_DATA] = { AC_ERR_ATA_BUS, ATA_EH_SOFTRESET,
265 "error in data FIS" },
266 [PORT_CERR_SEND] = { AC_ERR_ATA_BUS, ATA_EH_SOFTRESET,
267 "failed to transmit command FIS" },
268 [PORT_CERR_INCONSISTENT] = { AC_ERR_HSM, ATA_EH_SOFTRESET,
269 "protocol mismatch" },
270 [PORT_CERR_DIRECTION] = { AC_ERR_HSM, ATA_EH_SOFTRESET,
271 "data directon mismatch" },
272 [PORT_CERR_UNDERRUN] = { AC_ERR_HSM, ATA_EH_SOFTRESET,
273 "ran out of SGEs while writing" },
274 [PORT_CERR_OVERRUN] = { AC_ERR_HSM, ATA_EH_SOFTRESET,
275 "ran out of SGEs while reading" },
276 [PORT_CERR_PKT_PROT] = { AC_ERR_HSM, ATA_EH_SOFTRESET,
277 "invalid data directon for ATAPI CDB" },
278 [PORT_CERR_SGT_BOUNDARY] = { AC_ERR_SYSTEM, ATA_EH_SOFTRESET,
279 "SGT no on qword boundary" },
280 [PORT_CERR_SGT_TGTABRT] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
281 "PCI target abort while fetching SGT" },
282 [PORT_CERR_SGT_MSTABRT] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
283 "PCI master abort while fetching SGT" },
284 [PORT_CERR_SGT_PCIPERR] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
285 "PCI parity error while fetching SGT" },
286 [PORT_CERR_CMD_BOUNDARY] = { AC_ERR_SYSTEM, ATA_EH_SOFTRESET,
287 "PRB not on qword boundary" },
288 [PORT_CERR_CMD_TGTABRT] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
289 "PCI target abort while fetching PRB" },
290 [PORT_CERR_CMD_MSTABRT] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
291 "PCI master abort while fetching PRB" },
292 [PORT_CERR_CMD_PCIPERR] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
293 "PCI parity error while fetching PRB" },
294 [PORT_CERR_XFR_UNDEF] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
295 "undefined error while transferring data" },
296 [PORT_CERR_XFR_TGTABRT] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
297 "PCI target abort while transferring data" },
298 [PORT_CERR_XFR_MSTABRT] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
299 "PCI master abort while transferring data" },
300 [PORT_CERR_XFR_PCIPERR] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
301 "PCI parity error while transferring data" },
302 [PORT_CERR_SENDSERVICE] = { AC_ERR_HSM, ATA_EH_SOFTRESET,
303 "FIS received while sending service FIS" },
304};
305
229/* 306/*
230 * ap->private_data 307 * ap->private_data
231 * 308 *
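sil24_cerr_db[] above replaces ad-hoc error classification with one table indexed by the controller's error code, each entry carrying the libata err_mask, the EH action to request, and a log string. A user-space sketch of the lookup pattern (the out-of-range fallback to entry [0] is an assumption for the sketch; the table's actual consumer sits in a later hunk not shown here):

    #include <stdio.h>

    struct cerr_info { const char *desc; };

    static const struct cerr_info db[] = {
        [0] = { "device error" },               /* default */
        [1] = { "device error via D2H FIS" },
        [2] = { "device error via SDB FIS" },
    };

    static const struct cerr_info *decode(unsigned int cerr)
    {
        if (cerr < sizeof(db) / sizeof(db[0]) && db[cerr].desc)
            return &db[cerr];
        return &db[0];                          /* unknown code */
    }

    int main(void)
    {
        printf("%s\n", decode(2)->desc);
        printf("%s\n", decode(99)->desc);
        return 0;
    }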
@@ -249,12 +326,14 @@ static u8 sil24_check_status(struct ata_port *ap);
249static u32 sil24_scr_read(struct ata_port *ap, unsigned sc_reg); 326static u32 sil24_scr_read(struct ata_port *ap, unsigned sc_reg);
250static void sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val); 327static void sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val);
251static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf); 328static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
252static int sil24_probe_reset(struct ata_port *ap, unsigned int *classes);
253static void sil24_qc_prep(struct ata_queued_cmd *qc); 329static void sil24_qc_prep(struct ata_queued_cmd *qc);
254static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc); 330static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc);
255static void sil24_irq_clear(struct ata_port *ap); 331static void sil24_irq_clear(struct ata_port *ap);
256static void sil24_eng_timeout(struct ata_port *ap);
257static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *regs); 332static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
333static void sil24_freeze(struct ata_port *ap);
334static void sil24_thaw(struct ata_port *ap);
335static void sil24_error_handler(struct ata_port *ap);
336static void sil24_post_internal_cmd(struct ata_queued_cmd *qc);
258static int sil24_port_start(struct ata_port *ap); 337static int sil24_port_start(struct ata_port *ap);
259static void sil24_port_stop(struct ata_port *ap); 338static void sil24_port_stop(struct ata_port *ap);
260static void sil24_host_stop(struct ata_host_set *host_set); 339static void sil24_host_stop(struct ata_host_set *host_set);
@@ -281,7 +360,8 @@ static struct scsi_host_template sil24_sht = {
281 .name = DRV_NAME, 360 .name = DRV_NAME,
282 .ioctl = ata_scsi_ioctl, 361 .ioctl = ata_scsi_ioctl,
283 .queuecommand = ata_scsi_queuecmd, 362 .queuecommand = ata_scsi_queuecmd,
284 .can_queue = ATA_DEF_QUEUE, 363 .change_queue_depth = ata_scsi_change_queue_depth,
364 .can_queue = SIL24_MAX_CMDS,
285 .this_id = ATA_SHT_THIS_ID, 365 .this_id = ATA_SHT_THIS_ID,
286 .sg_tablesize = LIBATA_MAX_PRD, 366 .sg_tablesize = LIBATA_MAX_PRD,
287 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 367 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
@@ -290,6 +370,7 @@ static struct scsi_host_template sil24_sht = {
290 .proc_name = DRV_NAME, 370 .proc_name = DRV_NAME,
291 .dma_boundary = ATA_DMA_BOUNDARY, 371 .dma_boundary = ATA_DMA_BOUNDARY,
292 .slave_configure = ata_scsi_slave_config, 372 .slave_configure = ata_scsi_slave_config,
373 .slave_destroy = ata_scsi_slave_destroy,
293 .bios_param = ata_std_bios_param, 374 .bios_param = ata_std_bios_param,
294}; 375};
295 376
@@ -304,19 +385,20 @@ static const struct ata_port_operations sil24_ops = {
304 385
305 .tf_read = sil24_tf_read, 386 .tf_read = sil24_tf_read,
306 387
307 .probe_reset = sil24_probe_reset,
308
309 .qc_prep = sil24_qc_prep, 388 .qc_prep = sil24_qc_prep,
310 .qc_issue = sil24_qc_issue, 389 .qc_issue = sil24_qc_issue,
311 390
312 .eng_timeout = sil24_eng_timeout,
313
314 .irq_handler = sil24_interrupt, 391 .irq_handler = sil24_interrupt,
315 .irq_clear = sil24_irq_clear, 392 .irq_clear = sil24_irq_clear,
316 393
317 .scr_read = sil24_scr_read, 394 .scr_read = sil24_scr_read,
318 .scr_write = sil24_scr_write, 395 .scr_write = sil24_scr_write,
319 396
397 .freeze = sil24_freeze,
398 .thaw = sil24_thaw,
399 .error_handler = sil24_error_handler,
400 .post_internal_cmd = sil24_post_internal_cmd,
401
320 .port_start = sil24_port_start, 402 .port_start = sil24_port_start,
321 .port_stop = sil24_port_stop, 403 .port_stop = sil24_port_stop,
322 .host_stop = sil24_host_stop, 404 .host_stop = sil24_host_stop,
@@ -333,9 +415,8 @@ static struct ata_port_info sil24_port_info[] = {
333 /* sil_3124 */ 415 /* sil_3124 */
334 { 416 {
335 .sht = &sil24_sht, 417 .sht = &sil24_sht,
336 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 418 .host_flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(4) |
337 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA | 419 SIL24_FLAG_PCIX_IRQ_WOC,
338 SIL24_NPORTS2FLAG(4),
339 .pio_mask = 0x1f, /* pio0-4 */ 420 .pio_mask = 0x1f, /* pio0-4 */
340 .mwdma_mask = 0x07, /* mwdma0-2 */ 421 .mwdma_mask = 0x07, /* mwdma0-2 */
341 .udma_mask = 0x3f, /* udma0-5 */ 422 .udma_mask = 0x3f, /* udma0-5 */
@@ -344,9 +425,7 @@ static struct ata_port_info sil24_port_info[] = {
344 /* sil_3132 */ 425 /* sil_3132 */
345 { 426 {
346 .sht = &sil24_sht, 427 .sht = &sil24_sht,
347 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 428 .host_flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(2),
348 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
349 SIL24_NPORTS2FLAG(2),
350 .pio_mask = 0x1f, /* pio0-4 */ 429 .pio_mask = 0x1f, /* pio0-4 */
351 .mwdma_mask = 0x07, /* mwdma0-2 */ 430 .mwdma_mask = 0x07, /* mwdma0-2 */
352 .udma_mask = 0x3f, /* udma0-5 */ 431 .udma_mask = 0x3f, /* udma0-5 */
@@ -355,9 +434,7 @@ static struct ata_port_info sil24_port_info[] = {
355 /* sil_3131/sil_3531 */ 434 /* sil_3131/sil_3531 */
356 { 435 {
357 .sht = &sil24_sht, 436 .sht = &sil24_sht,
358 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 437 .host_flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(1),
359 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
360 SIL24_NPORTS2FLAG(1),
361 .pio_mask = 0x1f, /* pio0-4 */ 438 .pio_mask = 0x1f, /* pio0-4 */
362 .mwdma_mask = 0x07, /* mwdma0-2 */ 439 .mwdma_mask = 0x07, /* mwdma0-2 */
363 .udma_mask = 0x3f, /* udma0-5 */ 440 .udma_mask = 0x3f, /* udma0-5 */
@@ -365,6 +442,13 @@ static struct ata_port_info sil24_port_info[] = {
365 }, 442 },
366}; 443};
367 444
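/* libata reserves tag ATA_TAG_INTERNAL (ATA_MAX_QUEUE - 1) for
 * internal commands; map it onto hardware slot 0, which should be
 * safe since internal commands are only issued while the port is
 * otherwise idle.
 */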
445static int sil24_tag(int tag)
446{
447 if (unlikely(ata_tag_internal(tag)))
448 return 0;
449 return tag;
450}
451
368static void sil24_dev_config(struct ata_port *ap, struct ata_device *dev) 452static void sil24_dev_config(struct ata_port *ap, struct ata_device *dev)
369{ 453{
370 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr; 454 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
@@ -426,56 +510,65 @@ static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
426 *tf = pp->tf; 510 *tf = pp->tf;
427} 511}
428 512
429static int sil24_softreset(struct ata_port *ap, int verbose, 513static int sil24_init_port(struct ata_port *ap)
430 unsigned int *class) 514{
515 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
516 u32 tmp;
517
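/* Request port init.  ata_wait_register() polls while
 * (reg & mask) == val, so the first call waits for INIT to clear
 * and the second for RDY to assert (10ms interval, 100ms timeout).
 */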
518 writel(PORT_CS_INIT, port + PORT_CTRL_STAT);
519 ata_wait_register(port + PORT_CTRL_STAT,
520 PORT_CS_INIT, PORT_CS_INIT, 10, 100);
521 tmp = ata_wait_register(port + PORT_CTRL_STAT,
522 PORT_CS_RDY, 0, 10, 100);
523
524 if ((tmp & (PORT_CS_INIT | PORT_CS_RDY)) != PORT_CS_RDY)
525 return -EIO;
526 return 0;
527}
528
529static int sil24_softreset(struct ata_port *ap, unsigned int *class)
431{ 530{
432 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr; 531 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
433 struct sil24_port_priv *pp = ap->private_data; 532 struct sil24_port_priv *pp = ap->private_data;
434 struct sil24_prb *prb = &pp->cmd_block[0].ata.prb; 533 struct sil24_prb *prb = &pp->cmd_block[0].ata.prb;
435 dma_addr_t paddr = pp->cmd_block_dma; 534 dma_addr_t paddr = pp->cmd_block_dma;
436 unsigned long timeout = jiffies + ATA_TMOUT_BOOT * HZ; 535 u32 mask, irq_stat;
437 u32 irq_enable, irq_stat; 536 const char *reason;
438 537
439 DPRINTK("ENTER\n"); 538 DPRINTK("ENTER\n");
440 539
441 if (!sata_dev_present(ap)) { 540 if (ata_port_offline(ap)) {
442 DPRINTK("PHY reports no device\n"); 541 DPRINTK("PHY reports no device\n");
443 *class = ATA_DEV_NONE; 542 *class = ATA_DEV_NONE;
444 goto out; 543 goto out;
445 } 544 }
446 545
447 /* temporarily turn off IRQs during SRST */ 546 /* put the port into known state */
448 irq_enable = readl(port + PORT_IRQ_ENABLE_SET); 547 if (sil24_init_port(ap)) {
449 writel(irq_enable, port + PORT_IRQ_ENABLE_CLR); 548 reason = "port not ready";
450 549 goto err;
451 /* 550 }
452 * XXX: Not sure whether the following sleep is needed or not.
453 * The original driver had it. So....
454 */
455 msleep(10);
456 551
552 /* do SRST */
457 prb->ctrl = cpu_to_le16(PRB_CTRL_SRST); 553 prb->ctrl = cpu_to_le16(PRB_CTRL_SRST);
458 prb->fis[1] = 0; /* no PM yet */ 554 prb->fis[1] = 0; /* no PM yet */
459 555
460 writel((u32)paddr, port + PORT_CMD_ACTIVATE); 556 writel((u32)paddr, port + PORT_CMD_ACTIVATE);
557 writel((u64)paddr >> 32, port + PORT_CMD_ACTIVATE + 4);
461 558
462 do { 559 mask = (PORT_IRQ_COMPLETE | PORT_IRQ_ERROR) << PORT_IRQ_RAW_SHIFT;
463 irq_stat = readl(port + PORT_IRQ_STAT); 560 irq_stat = ata_wait_register(port + PORT_IRQ_STAT, mask, 0x0,
464 writel(irq_stat, port + PORT_IRQ_STAT); /* clear irq */ 561 100, ATA_TMOUT_BOOT / HZ * 1000);
465
466 irq_stat >>= PORT_IRQ_RAW_SHIFT;
467 if (irq_stat & (PORT_IRQ_COMPLETE | PORT_IRQ_ERROR))
468 break;
469 562
470 msleep(100); 563 writel(irq_stat, port + PORT_IRQ_STAT); /* clear IRQs */
471 } while (time_before(jiffies, timeout)); 564 irq_stat >>= PORT_IRQ_RAW_SHIFT;
472
473 /* restore IRQs */
474 writel(irq_enable, port + PORT_IRQ_ENABLE_SET);
475 565
476 if (!(irq_stat & PORT_IRQ_COMPLETE)) { 566 if (!(irq_stat & PORT_IRQ_COMPLETE)) {
477 DPRINTK("EXIT, srst failed\n"); 567 if (irq_stat & PORT_IRQ_ERROR)
478 return -EIO; 568 reason = "SRST command error";
569 else
570 reason = "timeout";
571 goto err;
479 } 572 }
480 573
481 sil24_update_tf(ap); 574 sil24_update_tf(ap);
@@ -487,22 +580,57 @@ static int sil24_softreset(struct ata_port *ap, int verbose,
487 out: 580 out:
488 DPRINTK("EXIT, class=%u\n", *class); 581 DPRINTK("EXIT, class=%u\n", *class);
489 return 0; 582 return 0;
583
584 err:
585 ata_port_printk(ap, KERN_ERR, "softreset failed (%s)\n", reason);
586 return -EIO;
490} 587}
491 588
492static int sil24_hardreset(struct ata_port *ap, int verbose, 589static int sil24_hardreset(struct ata_port *ap, unsigned int *class)
493 unsigned int *class)
494{ 590{
495 unsigned int dummy_class; 591 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
592 const char *reason;
593 int tout_msec, rc;
594 u32 tmp;
496 595
497 /* sil24 doesn't report device signature after hard reset */ 596 /* sil24 does the right thing(tm) without any protection */
498 return sata_std_hardreset(ap, verbose, &dummy_class); 597 sata_set_spd(ap);
499}
500 598
501static int sil24_probe_reset(struct ata_port *ap, unsigned int *classes) 599 tout_msec = 100;
502{ 600 if (ata_port_online(ap))
503 return ata_drive_probe_reset(ap, ata_std_probeinit, 601 tout_msec = 5000;
504 sil24_softreset, sil24_hardreset, 602
505 ata_std_postreset, classes); 603 writel(PORT_CS_DEV_RST, port + PORT_CTRL_STAT);
604 tmp = ata_wait_register(port + PORT_CTRL_STAT,
605 PORT_CS_DEV_RST, PORT_CS_DEV_RST, 10, tout_msec);
606
607 /* SStatus oscillates between zero and valid status after
608 * DEV_RST, debounce it.
609 */
610 rc = sata_phy_debounce(ap, sata_deb_timing_before_fsrst);
611 if (rc) {
612 reason = "PHY debouncing failed";
613 goto err;
614 }
615
616 if (tmp & PORT_CS_DEV_RST) {
617 if (ata_port_offline(ap))
618 return 0;
619 reason = "link not ready";
620 goto err;
621 }
622
623 /* Sil24 doesn't store signature FIS after hardreset, so we
624 * can't wait for BSY to clear. Some devices take a long time
625 * to get ready and those devices will choke if we don't wait
626 * for BSY clearance here. Tell libata to perform follow-up
627 * softreset.
628 */
629 return -EAGAIN;
630
631 err:
632 ata_port_printk(ap, KERN_ERR, "hardreset failed (%s)\n", reason);
633 return -EIO;
506} 634}
507 635
508static inline void sil24_fill_sg(struct ata_queued_cmd *qc, 636static inline void sil24_fill_sg(struct ata_queued_cmd *qc,
@@ -528,17 +656,20 @@ static void sil24_qc_prep(struct ata_queued_cmd *qc)
528{ 656{
529 struct ata_port *ap = qc->ap; 657 struct ata_port *ap = qc->ap;
530 struct sil24_port_priv *pp = ap->private_data; 658 struct sil24_port_priv *pp = ap->private_data;
531 union sil24_cmd_block *cb = pp->cmd_block + qc->tag; 659 union sil24_cmd_block *cb;
532 struct sil24_prb *prb; 660 struct sil24_prb *prb;
533 struct sil24_sge *sge; 661 struct sil24_sge *sge;
662 u16 ctrl = 0;
663
664 cb = &pp->cmd_block[sil24_tag(qc->tag)];
534 665
535 switch (qc->tf.protocol) { 666 switch (qc->tf.protocol) {
536 case ATA_PROT_PIO: 667 case ATA_PROT_PIO:
537 case ATA_PROT_DMA: 668 case ATA_PROT_DMA:
669 case ATA_PROT_NCQ:
538 case ATA_PROT_NODATA: 670 case ATA_PROT_NODATA:
539 prb = &cb->ata.prb; 671 prb = &cb->ata.prb;
540 sge = cb->ata.sge; 672 sge = cb->ata.sge;
541 prb->ctrl = 0;
542 break; 673 break;
543 674
544 case ATA_PROT_ATAPI: 675 case ATA_PROT_ATAPI:
@@ -551,12 +682,10 @@ static void sil24_qc_prep(struct ata_queued_cmd *qc)
551 682
552 if (qc->tf.protocol != ATA_PROT_ATAPI_NODATA) { 683 if (qc->tf.protocol != ATA_PROT_ATAPI_NODATA) {
553 if (qc->tf.flags & ATA_TFLAG_WRITE) 684 if (qc->tf.flags & ATA_TFLAG_WRITE)
554 prb->ctrl = cpu_to_le16(PRB_CTRL_PACKET_WRITE); 685 ctrl = PRB_CTRL_PACKET_WRITE;
555 else 686 else
556 prb->ctrl = cpu_to_le16(PRB_CTRL_PACKET_READ); 687 ctrl = PRB_CTRL_PACKET_READ;
557 } else 688 }
558 prb->ctrl = 0;
559
560 break; 689 break;
561 690
562 default: 691 default:
@@ -565,6 +694,7 @@ static void sil24_qc_prep(struct ata_queued_cmd *qc)
565 BUG(); 694 BUG();
566 } 695 }
567 696
697 prb->ctrl = cpu_to_le16(ctrl);
568 ata_tf_to_fis(&qc->tf, prb->fis, 0); 698 ata_tf_to_fis(&qc->tf, prb->fis, 0);
569 699
570 if (qc->flags & ATA_QCFLAG_DMAMAP) 700 if (qc->flags & ATA_QCFLAG_DMAMAP)
@@ -574,11 +704,18 @@ static void sil24_qc_prep(struct ata_queued_cmd *qc)
574static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc) 704static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc)
575{ 705{
576 struct ata_port *ap = qc->ap; 706 struct ata_port *ap = qc->ap;
577 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
578 struct sil24_port_priv *pp = ap->private_data; 707 struct sil24_port_priv *pp = ap->private_data;
579 dma_addr_t paddr = pp->cmd_block_dma + qc->tag * sizeof(*pp->cmd_block); 708 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
709 unsigned int tag = sil24_tag(qc->tag);
710 dma_addr_t paddr;
711 void __iomem *activate;
712
713 paddr = pp->cmd_block_dma + tag * sizeof(*pp->cmd_block);
714 activate = port + PORT_CMD_ACTIVATE + tag * 8;
715
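/* Each slot has its own 8-byte activation register pair.  With
 * 64-bit activation enabled at init time, the upper-dword write
 * presumably completes the activation and starts the slot.
 */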
716 writel((u32)paddr, activate);
717 writel((u64)paddr >> 32, activate + 4);
580 718
581 writel((u32)paddr, port + PORT_CMD_ACTIVATE);
582 return 0; 719 return 0;
583} 720}
584 721
@@ -587,162 +724,139 @@ static void sil24_irq_clear(struct ata_port *ap)
587 /* unused */ 724 /* unused */
588} 725}
589 726
590static int __sil24_restart_controller(void __iomem *port) 727static void sil24_freeze(struct ata_port *ap)
591{ 728{
592 u32 tmp; 729 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
593 int cnt;
594
595 writel(PORT_CS_INIT, port + PORT_CTRL_STAT);
596
597 /* Max ~10ms */
598 for (cnt = 0; cnt < 10000; cnt++) {
599 tmp = readl(port + PORT_CTRL_STAT);
600 if (tmp & PORT_CS_RDY)
601 return 0;
602 udelay(1);
603 }
604 730
605 return -1; 731 /* Port-wide IRQ mask in HOST_CTRL doesn't really work, clear
732 * PORT_IRQ_ENABLE instead.
733 */
734 writel(0xffff, port + PORT_IRQ_ENABLE_CLR);
606} 735}
607 736
608static void sil24_restart_controller(struct ata_port *ap) 737static void sil24_thaw(struct ata_port *ap)
609{ 738{
610 if (__sil24_restart_controller((void __iomem *)ap->ioaddr.cmd_addr)) 739 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
611 printk(KERN_ERR DRV_NAME 740 u32 tmp;
612 " ata%u: failed to restart controller\n", ap->id); 741
742 /* clear IRQ */
743 tmp = readl(port + PORT_IRQ_STAT);
744 writel(tmp, port + PORT_IRQ_STAT);
745
746 /* turn IRQ back on */
747 writel(DEF_PORT_IRQ, port + PORT_IRQ_ENABLE_SET);
613} 748}
614 749
615static int __sil24_reset_controller(void __iomem *port) 750static void sil24_error_intr(struct ata_port *ap)
616{ 751{
617 int cnt; 752 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
618 u32 tmp; 753 struct ata_eh_info *ehi = &ap->eh_info;
754 int freeze = 0;
755 u32 irq_stat;
619 756
620 /* Reset controller state. Is this correct? */ 757 /* on error, we need to clear IRQ explicitly */
621 writel(PORT_CS_DEV_RST, port + PORT_CTRL_STAT); 758 irq_stat = readl(port + PORT_IRQ_STAT);
622 readl(port + PORT_CTRL_STAT); /* sync */ 759 writel(irq_stat, port + PORT_IRQ_STAT);
623 760
624 /* Max ~100ms */ 761 /* first, analyze and record host port events */
625 for (cnt = 0; cnt < 1000; cnt++) { 762 ata_ehi_clear_desc(ehi);
626 udelay(100);
627 tmp = readl(port + PORT_CTRL_STAT);
628 if (!(tmp & PORT_CS_DEV_RST))
629 break;
630 }
631 763
632 if (tmp & PORT_CS_DEV_RST) 764 ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);
633 return -1;
634 765
635 if (tmp & PORT_CS_RDY) 766 if (irq_stat & (PORT_IRQ_PHYRDY_CHG | PORT_IRQ_DEV_XCHG)) {
636 return 0; 767 ata_ehi_hotplugged(ehi);
768 ata_ehi_push_desc(ehi, ", %s",
769 irq_stat & PORT_IRQ_PHYRDY_CHG ?
770 "PHY RDY changed" : "device exchanged");
771 freeze = 1;
772 }
637 773
638 return __sil24_restart_controller(port); 774 if (irq_stat & PORT_IRQ_UNK_FIS) {
639} 775 ehi->err_mask |= AC_ERR_HSM;
776 ehi->action |= ATA_EH_SOFTRESET;
777 ata_ehi_push_desc(ehi, ", unknown FIS");
778 freeze = 1;
779 }
640 780
641static void sil24_reset_controller(struct ata_port *ap) 781 /* deal with command error */
642{ 782 if (irq_stat & PORT_IRQ_ERROR) {
643 printk(KERN_NOTICE DRV_NAME 783 struct sil24_cerr_info *ci = NULL;
644 " ata%u: resetting controller...\n", ap->id); 784 unsigned int err_mask = 0, action = 0;
645 if (__sil24_reset_controller((void __iomem *)ap->ioaddr.cmd_addr)) 785 struct ata_queued_cmd *qc;
646 printk(KERN_ERR DRV_NAME 786 u32 cerr;
647 " ata%u: failed to reset controller\n", ap->id); 787
648} 788 /* analyze CMD_ERR */
789 cerr = readl(port + PORT_CMD_ERR);
790 if (cerr < ARRAY_SIZE(sil24_cerr_db))
791 ci = &sil24_cerr_db[cerr];
792
793 if (ci && ci->desc) {
794 err_mask |= ci->err_mask;
795 action |= ci->action;
796 ata_ehi_push_desc(ehi, ", %s", ci->desc);
797 } else {
798 err_mask |= AC_ERR_OTHER;
799 action |= ATA_EH_SOFTRESET;
800 ata_ehi_push_desc(ehi, ", unknown command error %d",
801 cerr);
802 }
649 803
650static void sil24_eng_timeout(struct ata_port *ap) 804 /* record error info */
651{ 805 qc = ata_qc_from_tag(ap, ap->active_tag);
652 struct ata_queued_cmd *qc; 806 if (qc) {
807 sil24_update_tf(ap);
808 qc->err_mask |= err_mask;
809 } else
810 ehi->err_mask |= err_mask;
653 811
654 qc = ata_qc_from_tag(ap, ap->active_tag); 812 ehi->action |= action;
813 }
655 814
656 printk(KERN_ERR "ata%u: command timeout\n", ap->id); 815 /* freeze or abort */
657 qc->err_mask |= AC_ERR_TIMEOUT; 816 if (freeze)
658 ata_eh_qc_complete(qc); 817 ata_port_freeze(ap);
818 else
819 ata_port_abort(ap);
820}
659 821
660 sil24_reset_controller(ap); 822static void sil24_finish_qc(struct ata_queued_cmd *qc)
823{
824 if (qc->flags & ATA_QCFLAG_RESULT_TF)
825 sil24_update_tf(qc->ap);
661} 826}
662 827
663static void sil24_error_intr(struct ata_port *ap, u32 slot_stat) 828static inline void sil24_host_intr(struct ata_port *ap)
664{ 829{
665 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
666 struct sil24_port_priv *pp = ap->private_data;
667 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr; 830 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
668 u32 irq_stat, cmd_err, sstatus, serror; 831 u32 slot_stat, qc_active;
669 unsigned int err_mask; 832 int rc;
670 833
671 irq_stat = readl(port + PORT_IRQ_STAT); 834 slot_stat = readl(port + PORT_SLOT_STAT);
672 writel(irq_stat, port + PORT_IRQ_STAT); /* clear irq */
673 835
674 if (!(irq_stat & PORT_IRQ_ERROR)) { 836 if (unlikely(slot_stat & HOST_SSTAT_ATTN)) {
675 /* ignore non-completion, non-error irqs for now */ 837 sil24_error_intr(ap);
676 printk(KERN_WARNING DRV_NAME
677 "ata%u: non-error exception irq (irq_stat %x)\n",
678 ap->id, irq_stat);
679 return; 838 return;
680 } 839 }
681 840
682 cmd_err = readl(port + PORT_CMD_ERR); 841 if (ap->flags & SIL24_FLAG_PCIX_IRQ_WOC)
683 sstatus = readl(port + PORT_SSTATUS); 842 writel(PORT_IRQ_COMPLETE, port + PORT_IRQ_STAT);
684 serror = readl(port + PORT_SERROR);
685 if (serror)
686 writel(serror, port + PORT_SERROR);
687 843
688 /* 844 qc_active = slot_stat & ~HOST_SSTAT_ATTN;
689 * Don't log ATAPI device errors. They're supposed to happen 845 rc = ata_qc_complete_multiple(ap, qc_active, sil24_finish_qc);
690 * and any serious errors will be logged using sense data by 846 if (rc > 0)
691 * the SCSI layer. 847 return;
692 */ 848 if (rc < 0) {
693 if (ap->device[0].class != ATA_DEV_ATAPI || cmd_err > PORT_CERR_SDB) 849 struct ata_eh_info *ehi = &ap->eh_info;
694 printk("ata%u: error interrupt on port%d\n" 850 ehi->err_mask |= AC_ERR_HSM;
695 " stat=0x%x irq=0x%x cmd_err=%d sstatus=0x%x serror=0x%x\n", 851 ehi->action |= ATA_EH_SOFTRESET;
696 ap->id, ap->port_no, slot_stat, irq_stat, cmd_err, sstatus, serror); 852 ata_port_freeze(ap);
697 853 return;
698 if (cmd_err == PORT_CERR_DEV || cmd_err == PORT_CERR_SDB) {
699 /*
700 * Device is reporting error, tf registers are valid.
701 */
702 sil24_update_tf(ap);
703 err_mask = ac_err_mask(pp->tf.command);
704 sil24_restart_controller(ap);
705 } else {
706 /*
707 * Other errors. libata currently doesn't have any
708 * mechanism to report these errors. Just turn on
709 * ATA_ERR.
710 */
711 err_mask = AC_ERR_OTHER;
712 sil24_reset_controller(ap);
713 } 854 }
714 855
715 if (qc) { 856 if (ata_ratelimit())
716 qc->err_mask |= err_mask; 857 ata_port_printk(ap, KERN_INFO, "spurious interrupt "
717 ata_qc_complete(qc); 858 "(slot_stat 0x%x active_tag %d sactive 0x%x)\n",
718 } 859 slot_stat, ap->active_tag, ap->sactive);
719}
720
721static inline void sil24_host_intr(struct ata_port *ap)
722{
723 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
724 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
725 u32 slot_stat;
726
727 slot_stat = readl(port + PORT_SLOT_STAT);
728 if (!(slot_stat & HOST_SSTAT_ATTN)) {
729 struct sil24_port_priv *pp = ap->private_data;
730 /*
731 * !HOST_SSAT_ATTN guarantees successful completion,
732 * so reading back tf registers is unnecessary for
733 * most commands. TODO: read tf registers for
734 * commands which require these values on successful
735 * completion (EXECUTE DEVICE DIAGNOSTIC, CHECK POWER,
736 * DEVICE RESET and READ PORT MULTIPLIER (any more?).
737 */
738 sil24_update_tf(ap);
739
740 if (qc) {
741 qc->err_mask |= ac_err_mask(pp->tf.command);
742 ata_qc_complete(qc);
743 }
744 } else
745 sil24_error_intr(ap, slot_stat);
746} 860}
747 861
748static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *regs) 862static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
@@ -769,7 +883,7 @@ static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *
769 for (i = 0; i < host_set->n_ports; i++) 883 for (i = 0; i < host_set->n_ports; i++)
770 if (status & (1 << i)) { 884 if (status & (1 << i)) {
771 struct ata_port *ap = host_set->ports[i]; 885 struct ata_port *ap = host_set->ports[i];
772 if (ap && !(ap->flags & ATA_FLAG_PORT_DISABLED)) { 886 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
773 sil24_host_intr(host_set->ports[i]); 887 sil24_host_intr(host_set->ports[i]);
774 handled++; 888 handled++;
775 } else 889 } else
@@ -782,9 +896,35 @@ static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *
782 return IRQ_RETVAL(handled); 896 return IRQ_RETVAL(handled);
783} 897}
784 898
899static void sil24_error_handler(struct ata_port *ap)
900{
901 struct ata_eh_context *ehc = &ap->eh_context;
902
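/* If the port won't return to the idle state, its interrupt and
 * command machinery can't be trusted; keep the port frozen and
 * force a hardreset before attempting recovery.
 */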
903 if (sil24_init_port(ap)) {
904 ata_eh_freeze_port(ap);
905 ehc->i.action |= ATA_EH_HARDRESET;
906 }
907
908 /* perform recovery */
909 ata_do_eh(ap, ata_std_prereset, sil24_softreset, sil24_hardreset,
910 ata_std_postreset);
911}
912
913static void sil24_post_internal_cmd(struct ata_queued_cmd *qc)
914{
915 struct ata_port *ap = qc->ap;
916
917 if (qc->flags & ATA_QCFLAG_FAILED)
918 qc->err_mask |= AC_ERR_OTHER;
919
920 /* make DMA engine forget about the failed command */
921 if (qc->err_mask)
922 sil24_init_port(ap);
923}
924
785static inline void sil24_cblk_free(struct sil24_port_priv *pp, struct device *dev) 925static inline void sil24_cblk_free(struct sil24_port_priv *pp, struct device *dev)
786{ 926{
787 const size_t cb_size = sizeof(*pp->cmd_block); 927 const size_t cb_size = sizeof(*pp->cmd_block) * SIL24_MAX_CMDS;
788 928
789 dma_free_coherent(dev, cb_size, pp->cmd_block, pp->cmd_block_dma); 929 dma_free_coherent(dev, cb_size, pp->cmd_block, pp->cmd_block_dma);
790} 930}
@@ -794,7 +934,7 @@ static int sil24_port_start(struct ata_port *ap)
794 struct device *dev = ap->host_set->dev; 934 struct device *dev = ap->host_set->dev;
795 struct sil24_port_priv *pp; 935 struct sil24_port_priv *pp;
796 union sil24_cmd_block *cb; 936 union sil24_cmd_block *cb;
797 size_t cb_size = sizeof(*cb); 937 size_t cb_size = sizeof(*cb) * SIL24_MAX_CMDS;
798 dma_addr_t cb_dma; 938 dma_addr_t cb_dma;
799 int rc = -ENOMEM; 939 int rc = -ENOMEM;
800 940
@@ -858,6 +998,7 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
858 void __iomem *host_base = NULL; 998 void __iomem *host_base = NULL;
859 void __iomem *port_base = NULL; 999 void __iomem *port_base = NULL;
860 int i, rc; 1000 int i, rc;
1001 u32 tmp;
861 1002
862 if (!printed_version++) 1003 if (!printed_version++)
863 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); 1004 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
@@ -910,37 +1051,53 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
910 /* 1051 /*
911 * Configure the device 1052 * Configure the device
912 */ 1053 */
913 /* 1054 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
914 * FIXME: This device is certainly 64-bit capable. We just 1055 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
915 * don't know how to use it. After fixing 32bit activation in 1056 if (rc) {
916 * this function, enable 64bit masks here. 1057 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
917 */ 1058 if (rc) {
918 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK); 1059 dev_printk(KERN_ERR, &pdev->dev,
919 if (rc) { 1060 "64-bit DMA enable failed\n");
920 dev_printk(KERN_ERR, &pdev->dev, 1061 goto out_free;
921 "32-bit DMA enable failed\n"); 1062 }
922 goto out_free; 1063 }
923 } 1064 } else {
924 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); 1065 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
925 if (rc) { 1066 if (rc) {
926 dev_printk(KERN_ERR, &pdev->dev, 1067 dev_printk(KERN_ERR, &pdev->dev,
927 "32-bit consistent DMA enable failed\n"); 1068 "32-bit DMA enable failed\n");
928 goto out_free; 1069 goto out_free;
1070 }
1071 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
1072 if (rc) {
1073 dev_printk(KERN_ERR, &pdev->dev,
1074 "32-bit consistent DMA enable failed\n");
1075 goto out_free;
1076 }
929 } 1077 }
930 1078
931 /* GPIO off */ 1079 /* GPIO off */
932 writel(0, host_base + HOST_FLASH_CMD); 1080 writel(0, host_base + HOST_FLASH_CMD);
933 1081
934 /* Mask interrupts during initialization */ 1082 /* Apply workaround for completion IRQ loss on PCI-X errata */
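/* HOST_CTRL_TRDY/STOP/DEVSEL appear to sample live PCI-X bus
 * signals; if any is set, assume a PCI-X bus and keep the
 * write-one-to-clear IRQ workaround, otherwise drop the flag.
 */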
1083 if (probe_ent->host_flags & SIL24_FLAG_PCIX_IRQ_WOC) {
1084 tmp = readl(host_base + HOST_CTRL);
1085 if (tmp & (HOST_CTRL_TRDY | HOST_CTRL_STOP | HOST_CTRL_DEVSEL))
1086 dev_printk(KERN_INFO, &pdev->dev,
1087 "Applying completion IRQ loss on PCI-X "
1088 "errata fix\n");
1089 else
1090 probe_ent->host_flags &= ~SIL24_FLAG_PCIX_IRQ_WOC;
1091 }
1092
1093 /* clear global reset & mask interrupts during initialization */
935 writel(0, host_base + HOST_CTRL); 1094 writel(0, host_base + HOST_CTRL);
936 1095
937 for (i = 0; i < probe_ent->n_ports; i++) { 1096 for (i = 0; i < probe_ent->n_ports; i++) {
938 void __iomem *port = port_base + i * PORT_REGS_SIZE; 1097 void __iomem *port = port_base + i * PORT_REGS_SIZE;
939 unsigned long portu = (unsigned long)port; 1098 unsigned long portu = (unsigned long)port;
940 u32 tmp;
941 int cnt;
942 1099
943 probe_ent->port[i].cmd_addr = portu + PORT_PRB; 1100 probe_ent->port[i].cmd_addr = portu;
944 probe_ent->port[i].scr_addr = portu + PORT_SCONTROL; 1101 probe_ent->port[i].scr_addr = portu + PORT_SCONTROL;
945 1102
946 ata_std_ports(&probe_ent->port[i]); 1103 ata_std_ports(&probe_ent->port[i]);
@@ -952,18 +1109,20 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
952 tmp = readl(port + PORT_CTRL_STAT); 1109 tmp = readl(port + PORT_CTRL_STAT);
953 if (tmp & PORT_CS_PORT_RST) { 1110 if (tmp & PORT_CS_PORT_RST) {
954 writel(PORT_CS_PORT_RST, port + PORT_CTRL_CLR); 1111 writel(PORT_CS_PORT_RST, port + PORT_CTRL_CLR);
955 readl(port + PORT_CTRL_STAT); /* sync */ 1112 tmp = ata_wait_register(port + PORT_CTRL_STAT,
956 for (cnt = 0; cnt < 10; cnt++) { 1113 PORT_CS_PORT_RST,
957 msleep(10); 1114 PORT_CS_PORT_RST, 10, 100);
958 tmp = readl(port + PORT_CTRL_STAT);
959 if (!(tmp & PORT_CS_PORT_RST))
960 break;
961 }
962 if (tmp & PORT_CS_PORT_RST) 1115 if (tmp & PORT_CS_PORT_RST)
963 dev_printk(KERN_ERR, &pdev->dev, 1116 dev_printk(KERN_ERR, &pdev->dev,
964 "failed to clear port RST\n"); 1117 "failed to clear port RST\n");
965 } 1118 }
966 1119
1120 /* Configure IRQ WoC */
1121 if (probe_ent->host_flags & SIL24_FLAG_PCIX_IRQ_WOC)
1122 writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_STAT);
1123 else
1124 writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_CLR);
1125
967 /* Zero error counters. */ 1126 /* Zero error counters. */
968 writel(0x8000, port + PORT_DECODE_ERR_THRESH); 1127 writel(0x8000, port + PORT_DECODE_ERR_THRESH);
969 writel(0x8000, port + PORT_CRC_ERR_THRESH); 1128 writel(0x8000, port + PORT_CRC_ERR_THRESH);
@@ -972,26 +1131,11 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
972 writel(0x0000, port + PORT_CRC_ERR_CNT); 1131 writel(0x0000, port + PORT_CRC_ERR_CNT);
973 writel(0x0000, port + PORT_HSHK_ERR_CNT); 1132 writel(0x0000, port + PORT_HSHK_ERR_CNT);
974 1133
975 /* FIXME: 32bit activation? */ 1134 /* Always use 64bit activation */
976 writel(0, port + PORT_ACTIVATE_UPPER_ADDR); 1135 writel(PORT_CS_32BIT_ACTV, port + PORT_CTRL_CLR);
977 writel(PORT_CS_32BIT_ACTV, port + PORT_CTRL_STAT);
978
979 /* Configure interrupts */
980 writel(0xffff, port + PORT_IRQ_ENABLE_CLR);
981 writel(PORT_IRQ_COMPLETE | PORT_IRQ_ERROR | PORT_IRQ_SDB_FIS,
982 port + PORT_IRQ_ENABLE_SET);
983
984 /* Clear interrupts */
985 writel(0x0fff0fff, port + PORT_IRQ_STAT);
986 writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_CLR);
987 1136
988 /* Clear port multiplier enable and resume bits */ 1137 /* Clear port multiplier enable and resume bits */
989 writel(PORT_CS_PM_EN | PORT_CS_RESUME, port + PORT_CTRL_CLR); 1138 writel(PORT_CS_PM_EN | PORT_CS_RESUME, port + PORT_CTRL_CLR);
990
991 /* Reset itself */
992 if (__sil24_reset_controller(port))
993 dev_printk(KERN_ERR, &pdev->dev,
994 "failed to reset controller\n");
995 } 1139 }
996 1140
997 /* Turn on interrupts */ 1141 /* Turn on interrupts */
diff --git a/drivers/scsi/sata_sis.c b/drivers/scsi/sata_sis.c
index 728530df2e07..809d337ed641 100644
--- a/drivers/scsi/sata_sis.c
+++ b/drivers/scsi/sata_sis.c
@@ -43,7 +43,7 @@
43#include <linux/libata.h> 43#include <linux/libata.h>
44 44
45#define DRV_NAME "sata_sis" 45#define DRV_NAME "sata_sis"
46#define DRV_VERSION "0.5" 46#define DRV_VERSION "0.6"
47 47
48enum { 48enum {
49 sis_180 = 0, 49 sis_180 = 0,
@@ -96,6 +96,7 @@ static struct scsi_host_template sis_sht = {
96 .proc_name = DRV_NAME, 96 .proc_name = DRV_NAME,
97 .dma_boundary = ATA_DMA_BOUNDARY, 97 .dma_boundary = ATA_DMA_BOUNDARY,
98 .slave_configure = ata_scsi_slave_config, 98 .slave_configure = ata_scsi_slave_config,
99 .slave_destroy = ata_scsi_slave_destroy,
99 .bios_param = ata_std_bios_param, 100 .bios_param = ata_std_bios_param,
100}; 101};
101 102
@@ -106,14 +107,17 @@ static const struct ata_port_operations sis_ops = {
106 .check_status = ata_check_status, 107 .check_status = ata_check_status,
107 .exec_command = ata_exec_command, 108 .exec_command = ata_exec_command,
108 .dev_select = ata_std_dev_select, 109 .dev_select = ata_std_dev_select,
109 .phy_reset = sata_phy_reset,
110 .bmdma_setup = ata_bmdma_setup, 110 .bmdma_setup = ata_bmdma_setup,
111 .bmdma_start = ata_bmdma_start, 111 .bmdma_start = ata_bmdma_start,
112 .bmdma_stop = ata_bmdma_stop, 112 .bmdma_stop = ata_bmdma_stop,
113 .bmdma_status = ata_bmdma_status, 113 .bmdma_status = ata_bmdma_status,
114 .qc_prep = ata_qc_prep, 114 .qc_prep = ata_qc_prep,
115 .qc_issue = ata_qc_issue_prot, 115 .qc_issue = ata_qc_issue_prot,
116 .eng_timeout = ata_eng_timeout, 116 .data_xfer = ata_pio_data_xfer,
117 .freeze = ata_bmdma_freeze,
118 .thaw = ata_bmdma_thaw,
119 .error_handler = ata_bmdma_error_handler,
120 .post_internal_cmd = ata_bmdma_post_internal_cmd,
117 .irq_handler = ata_interrupt, 121 .irq_handler = ata_interrupt,
118 .irq_clear = ata_bmdma_irq_clear, 122 .irq_clear = ata_bmdma_irq_clear,
119 .scr_read = sis_scr_read, 123 .scr_read = sis_scr_read,
@@ -125,8 +129,7 @@ static const struct ata_port_operations sis_ops = {
125 129
126static struct ata_port_info sis_port_info = { 130static struct ata_port_info sis_port_info = {
127 .sht = &sis_sht, 131 .sht = &sis_sht,
128 .host_flags = ATA_FLAG_SATA | ATA_FLAG_SATA_RESET | 132 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
129 ATA_FLAG_NO_LEGACY,
130 .pio_mask = 0x1f, 133 .pio_mask = 0x1f,
131 .mwdma_mask = 0x7, 134 .mwdma_mask = 0x7,
132 .udma_mask = 0x7f, 135 .udma_mask = 0x7f,
diff --git a/drivers/scsi/sata_svw.c b/drivers/scsi/sata_svw.c
index 53b0d5c0a61f..c94b870cf378 100644
--- a/drivers/scsi/sata_svw.c
+++ b/drivers/scsi/sata_svw.c
@@ -54,7 +54,7 @@
54#endif /* CONFIG_PPC_OF */ 54#endif /* CONFIG_PPC_OF */
55 55
56#define DRV_NAME "sata_svw" 56#define DRV_NAME "sata_svw"
57#define DRV_VERSION "1.07" 57#define DRV_VERSION "1.8"
58 58
59enum { 59enum {
60 /* Taskfile registers offsets */ 60 /* Taskfile registers offsets */
@@ -257,7 +257,7 @@ static int k2_sata_proc_info(struct Scsi_Host *shost, char *page, char **start,
257 int len, index; 257 int len, index;
258 258
259 /* Find the ata_port */ 259 /* Find the ata_port */
260 ap = (struct ata_port *) &shost->hostdata[0]; 260 ap = ata_shost_to_port(shost);
261 if (ap == NULL) 261 if (ap == NULL)
262 return 0; 262 return 0;
263 263
@@ -299,6 +299,7 @@ static struct scsi_host_template k2_sata_sht = {
299 .proc_name = DRV_NAME, 299 .proc_name = DRV_NAME,
300 .dma_boundary = ATA_DMA_BOUNDARY, 300 .dma_boundary = ATA_DMA_BOUNDARY,
301 .slave_configure = ata_scsi_slave_config, 301 .slave_configure = ata_scsi_slave_config,
302 .slave_destroy = ata_scsi_slave_destroy,
302#ifdef CONFIG_PPC_OF 303#ifdef CONFIG_PPC_OF
303 .proc_info = k2_sata_proc_info, 304 .proc_info = k2_sata_proc_info,
304#endif 305#endif
@@ -313,14 +314,17 @@ static const struct ata_port_operations k2_sata_ops = {
313 .check_status = k2_stat_check_status, 314 .check_status = k2_stat_check_status,
314 .exec_command = ata_exec_command, 315 .exec_command = ata_exec_command,
315 .dev_select = ata_std_dev_select, 316 .dev_select = ata_std_dev_select,
316 .phy_reset = sata_phy_reset,
317 .bmdma_setup = k2_bmdma_setup_mmio, 317 .bmdma_setup = k2_bmdma_setup_mmio,
318 .bmdma_start = k2_bmdma_start_mmio, 318 .bmdma_start = k2_bmdma_start_mmio,
319 .bmdma_stop = ata_bmdma_stop, 319 .bmdma_stop = ata_bmdma_stop,
320 .bmdma_status = ata_bmdma_status, 320 .bmdma_status = ata_bmdma_status,
321 .qc_prep = ata_qc_prep, 321 .qc_prep = ata_qc_prep,
322 .qc_issue = ata_qc_issue_prot, 322 .qc_issue = ata_qc_issue_prot,
323 .eng_timeout = ata_eng_timeout, 323 .data_xfer = ata_mmio_data_xfer,
324 .freeze = ata_bmdma_freeze,
325 .thaw = ata_bmdma_thaw,
326 .error_handler = ata_bmdma_error_handler,
327 .post_internal_cmd = ata_bmdma_post_internal_cmd,
324 .irq_handler = ata_interrupt, 328 .irq_handler = ata_interrupt,
325 .irq_clear = ata_bmdma_irq_clear, 329 .irq_clear = ata_bmdma_irq_clear,
326 .scr_read = k2_sata_scr_read, 330 .scr_read = k2_sata_scr_read,
@@ -420,8 +424,8 @@ static int k2_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
420 writel(0x0, mmio_base + K2_SATA_SIM_OFFSET); 424 writel(0x0, mmio_base + K2_SATA_SIM_OFFSET);
421 425
422 probe_ent->sht = &k2_sata_sht; 426 probe_ent->sht = &k2_sata_sht;
423 probe_ent->host_flags = ATA_FLAG_SATA | ATA_FLAG_SATA_RESET | 427 probe_ent->host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
424 ATA_FLAG_NO_LEGACY | ATA_FLAG_MMIO; 428 ATA_FLAG_MMIO;
425 probe_ent->port_ops = &k2_sata_ops; 429 probe_ent->port_ops = &k2_sata_ops;
426 probe_ent->n_ports = 4; 430 probe_ent->n_ports = 4;
427 probe_ent->irq = pdev->irq; 431 probe_ent->irq = pdev->irq;
diff --git a/drivers/scsi/sata_sx4.c b/drivers/scsi/sata_sx4.c
index 4139ad4b1df0..7f864410f7c2 100644
--- a/drivers/scsi/sata_sx4.c
+++ b/drivers/scsi/sata_sx4.c
@@ -46,7 +46,7 @@
46#include "sata_promise.h" 46#include "sata_promise.h"
47 47
48#define DRV_NAME "sata_sx4" 48#define DRV_NAME "sata_sx4"
49#define DRV_VERSION "0.8" 49#define DRV_VERSION "0.9"
50 50
51 51
52enum { 52enum {
@@ -191,6 +191,7 @@ static struct scsi_host_template pdc_sata_sht = {
191 .proc_name = DRV_NAME, 191 .proc_name = DRV_NAME,
192 .dma_boundary = ATA_DMA_BOUNDARY, 192 .dma_boundary = ATA_DMA_BOUNDARY,
193 .slave_configure = ata_scsi_slave_config, 193 .slave_configure = ata_scsi_slave_config,
194 .slave_destroy = ata_scsi_slave_destroy,
194 .bios_param = ata_std_bios_param, 195 .bios_param = ata_std_bios_param,
195}; 196};
196 197
@@ -204,6 +205,7 @@ static const struct ata_port_operations pdc_20621_ops = {
204 .phy_reset = pdc_20621_phy_reset, 205 .phy_reset = pdc_20621_phy_reset,
205 .qc_prep = pdc20621_qc_prep, 206 .qc_prep = pdc20621_qc_prep,
206 .qc_issue = pdc20621_qc_issue_prot, 207 .qc_issue = pdc20621_qc_issue_prot,
208 .data_xfer = ata_mmio_data_xfer,
207 .eng_timeout = pdc_eng_timeout, 209 .eng_timeout = pdc_eng_timeout,
208 .irq_handler = pdc20621_interrupt, 210 .irq_handler = pdc20621_interrupt,
209 .irq_clear = pdc20621_irq_clear, 211 .irq_clear = pdc20621_irq_clear,
@@ -218,7 +220,7 @@ static const struct ata_port_info pdc_port_info[] = {
218 .sht = &pdc_sata_sht, 220 .sht = &pdc_sata_sht,
219 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 221 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
220 ATA_FLAG_SRST | ATA_FLAG_MMIO | 222 ATA_FLAG_SRST | ATA_FLAG_MMIO |
221 ATA_FLAG_NO_ATAPI, 223 ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING,
222 .pio_mask = 0x1f, /* pio0-4 */ 224 .pio_mask = 0x1f, /* pio0-4 */
223 .mwdma_mask = 0x07, /* mwdma0-2 */ 225 .mwdma_mask = 0x07, /* mwdma0-2 */
224 .udma_mask = 0x7f, /* udma0-6 ; FIXME */ 226 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
@@ -833,11 +835,11 @@ static irqreturn_t pdc20621_interrupt (int irq, void *dev_instance, struct pt_re
833 tmp = mask & (1 << i); 835 tmp = mask & (1 << i);
834 VPRINTK("seq %u, port_no %u, ap %p, tmp %x\n", i, port_no, ap, tmp); 836 VPRINTK("seq %u, port_no %u, ap %p, tmp %x\n", i, port_no, ap, tmp);
835 if (tmp && ap && 837 if (tmp && ap &&
836 !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) { 838 !(ap->flags & ATA_FLAG_DISABLED)) {
837 struct ata_queued_cmd *qc; 839 struct ata_queued_cmd *qc;
838 840
839 qc = ata_qc_from_tag(ap, ap->active_tag); 841 qc = ata_qc_from_tag(ap, ap->active_tag);
840 if (qc && (!(qc->tf.ctl & ATA_NIEN))) 842 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
841 handled += pdc20621_host_intr(ap, qc, (i > 4), 843 handled += pdc20621_host_intr(ap, qc, (i > 4),
842 mmio_base); 844 mmio_base);
843 } 845 }
@@ -868,15 +870,16 @@ static void pdc_eng_timeout(struct ata_port *ap)
868 switch (qc->tf.protocol) { 870 switch (qc->tf.protocol) {
869 case ATA_PROT_DMA: 871 case ATA_PROT_DMA:
870 case ATA_PROT_NODATA: 872 case ATA_PROT_NODATA:
871 printk(KERN_ERR "ata%u: command timeout\n", ap->id); 873 ata_port_printk(ap, KERN_ERR, "command timeout\n");
872 qc->err_mask |= __ac_err_mask(ata_wait_idle(ap)); 874 qc->err_mask |= __ac_err_mask(ata_wait_idle(ap));
873 break; 875 break;
874 876
875 default: 877 default:
876 drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000); 878 drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
877 879
878 printk(KERN_ERR "ata%u: unknown timeout, cmd 0x%x stat 0x%x\n", 880 ata_port_printk(ap, KERN_ERR,
879 ap->id, qc->tf.command, drv_stat); 881 "unknown timeout, cmd 0x%x stat 0x%x\n",
882 qc->tf.command, drv_stat);
880 883
881 qc->err_mask |= ac_err_mask(drv_stat); 884 qc->err_mask |= ac_err_mask(drv_stat);
882 break; 885 break;
@@ -1375,10 +1378,6 @@ static int pdc_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *
1375 if (!printed_version++) 1378 if (!printed_version++)
1376 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); 1379 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
1377 1380
1378 /*
1379 * If this driver happens to only be useful on Apple's K2, then
1380 * we should check that here as it has a normal Serverworks ID
1381 */
1382 rc = pci_enable_device(pdev); 1381 rc = pci_enable_device(pdev);
1383 if (rc) 1382 if (rc)
1384 return rc; 1383 return rc;
diff --git a/drivers/scsi/sata_uli.c b/drivers/scsi/sata_uli.c
index 38b52bd3fa3f..f668c997e9af 100644
--- a/drivers/scsi/sata_uli.c
+++ b/drivers/scsi/sata_uli.c
@@ -37,7 +37,7 @@
37#include <linux/libata.h> 37#include <linux/libata.h>
38 38
39#define DRV_NAME "sata_uli" 39#define DRV_NAME "sata_uli"
40#define DRV_VERSION "0.5" 40#define DRV_VERSION "0.6"
41 41
42enum { 42enum {
43 uli_5289 = 0, 43 uli_5289 = 0,
@@ -90,6 +90,7 @@ static struct scsi_host_template uli_sht = {
90 .proc_name = DRV_NAME, 90 .proc_name = DRV_NAME,
91 .dma_boundary = ATA_DMA_BOUNDARY, 91 .dma_boundary = ATA_DMA_BOUNDARY,
92 .slave_configure = ata_scsi_slave_config, 92 .slave_configure = ata_scsi_slave_config,
93 .slave_destroy = ata_scsi_slave_destroy,
93 .bios_param = ata_std_bios_param, 94 .bios_param = ata_std_bios_param,
94}; 95};
95 96
@@ -102,16 +103,18 @@ static const struct ata_port_operations uli_ops = {
102 .exec_command = ata_exec_command, 103 .exec_command = ata_exec_command,
103 .dev_select = ata_std_dev_select, 104 .dev_select = ata_std_dev_select,
104 105
105 .phy_reset = sata_phy_reset,
106
107 .bmdma_setup = ata_bmdma_setup, 106 .bmdma_setup = ata_bmdma_setup,
108 .bmdma_start = ata_bmdma_start, 107 .bmdma_start = ata_bmdma_start,
109 .bmdma_stop = ata_bmdma_stop, 108 .bmdma_stop = ata_bmdma_stop,
110 .bmdma_status = ata_bmdma_status, 109 .bmdma_status = ata_bmdma_status,
111 .qc_prep = ata_qc_prep, 110 .qc_prep = ata_qc_prep,
112 .qc_issue = ata_qc_issue_prot, 111 .qc_issue = ata_qc_issue_prot,
112 .data_xfer = ata_pio_data_xfer,
113 113
114 .eng_timeout = ata_eng_timeout, 114 .freeze = ata_bmdma_freeze,
115 .thaw = ata_bmdma_thaw,
116 .error_handler = ata_bmdma_error_handler,
117 .post_internal_cmd = ata_bmdma_post_internal_cmd,
115 118
116 .irq_handler = ata_interrupt, 119 .irq_handler = ata_interrupt,
117 .irq_clear = ata_bmdma_irq_clear, 120 .irq_clear = ata_bmdma_irq_clear,
@@ -126,8 +129,7 @@ static const struct ata_port_operations uli_ops = {
126 129
127static struct ata_port_info uli_port_info = { 130static struct ata_port_info uli_port_info = {
128 .sht = &uli_sht, 131 .sht = &uli_sht,
129 .host_flags = ATA_FLAG_SATA | ATA_FLAG_SATA_RESET | 132 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
130 ATA_FLAG_NO_LEGACY,
131 .pio_mask = 0x1f, /* pio0-4 */ 133 .pio_mask = 0x1f, /* pio0-4 */
132 .udma_mask = 0x7f, /* udma0-6 */ 134 .udma_mask = 0x7f, /* udma0-6 */
133 .port_ops = &uli_ops, 135 .port_ops = &uli_ops,
diff --git a/drivers/scsi/sata_via.c b/drivers/scsi/sata_via.c
index 9e7ae4e0db32..322890b400a6 100644
--- a/drivers/scsi/sata_via.c
+++ b/drivers/scsi/sata_via.c
@@ -47,7 +47,7 @@
47#include <asm/io.h> 47#include <asm/io.h>
48 48
49#define DRV_NAME "sata_via" 49#define DRV_NAME "sata_via"
50#define DRV_VERSION "1.1" 50#define DRV_VERSION "1.2"
51 51
52enum board_ids_enum { 52enum board_ids_enum {
53 vt6420, 53 vt6420,
@@ -103,6 +103,7 @@ static struct scsi_host_template svia_sht = {
103 .proc_name = DRV_NAME, 103 .proc_name = DRV_NAME,
104 .dma_boundary = ATA_DMA_BOUNDARY, 104 .dma_boundary = ATA_DMA_BOUNDARY,
105 .slave_configure = ata_scsi_slave_config, 105 .slave_configure = ata_scsi_slave_config,
106 .slave_destroy = ata_scsi_slave_destroy,
106 .bios_param = ata_std_bios_param, 107 .bios_param = ata_std_bios_param,
107}; 108};
108 109
@@ -115,8 +116,6 @@ static const struct ata_port_operations svia_sata_ops = {
115 .exec_command = ata_exec_command, 116 .exec_command = ata_exec_command,
116 .dev_select = ata_std_dev_select, 117 .dev_select = ata_std_dev_select,
117 118
118 .phy_reset = sata_phy_reset,
119
120 .bmdma_setup = ata_bmdma_setup, 119 .bmdma_setup = ata_bmdma_setup,
121 .bmdma_start = ata_bmdma_start, 120 .bmdma_start = ata_bmdma_start,
122 .bmdma_stop = ata_bmdma_stop, 121 .bmdma_stop = ata_bmdma_stop,
@@ -124,8 +123,12 @@ static const struct ata_port_operations svia_sata_ops = {
124 123
125 .qc_prep = ata_qc_prep, 124 .qc_prep = ata_qc_prep,
126 .qc_issue = ata_qc_issue_prot, 125 .qc_issue = ata_qc_issue_prot,
126 .data_xfer = ata_pio_data_xfer,
127 127
128 .eng_timeout = ata_eng_timeout, 128 .freeze = ata_bmdma_freeze,
129 .thaw = ata_bmdma_thaw,
130 .error_handler = ata_bmdma_error_handler,
131 .post_internal_cmd = ata_bmdma_post_internal_cmd,
129 132
130 .irq_handler = ata_interrupt, 133 .irq_handler = ata_interrupt,
131 .irq_clear = ata_bmdma_irq_clear, 134 .irq_clear = ata_bmdma_irq_clear,
@@ -140,7 +143,7 @@ static const struct ata_port_operations svia_sata_ops = {
140 143
141static struct ata_port_info svia_port_info = { 144static struct ata_port_info svia_port_info = {
142 .sht = &svia_sht, 145 .sht = &svia_sht,
143 .host_flags = ATA_FLAG_SATA | ATA_FLAG_SRST | ATA_FLAG_NO_LEGACY, 146 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
144 .pio_mask = 0x1f, 147 .pio_mask = 0x1f,
145 .mwdma_mask = 0x07, 148 .mwdma_mask = 0x07,
146 .udma_mask = 0x7f, 149 .udma_mask = 0x7f,
@@ -235,8 +238,7 @@ static struct ata_probe_ent *vt6421_init_probe_ent(struct pci_dev *pdev)
235 INIT_LIST_HEAD(&probe_ent->node); 238 INIT_LIST_HEAD(&probe_ent->node);
236 239
237 probe_ent->sht = &svia_sht; 240 probe_ent->sht = &svia_sht;
238 probe_ent->host_flags = ATA_FLAG_SATA | ATA_FLAG_SATA_RESET | 241 probe_ent->host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY;
239 ATA_FLAG_NO_LEGACY;
240 probe_ent->port_ops = &svia_sata_ops; 242 probe_ent->port_ops = &svia_sata_ops;
241 probe_ent->n_ports = N_PORTS; 243 probe_ent->n_ports = N_PORTS;
242 probe_ent->irq = pdev->irq; 244 probe_ent->irq = pdev->irq;
diff --git a/drivers/scsi/sata_vsc.c b/drivers/scsi/sata_vsc.c
index 27d658704cf9..6d0c4f18e652 100644
--- a/drivers/scsi/sata_vsc.c
+++ b/drivers/scsi/sata_vsc.c
@@ -221,14 +221,21 @@ static irqreturn_t vsc_sata_interrupt (int irq, void *dev_instance,
221 221
222 ap = host_set->ports[i]; 222 ap = host_set->ports[i];
223 223
224 if (ap && !(ap->flags & 224 if (is_vsc_sata_int_err(i, int_status)) {
225 (ATA_FLAG_PORT_DISABLED|ATA_FLAG_NOINTR))) { 225 u32 err_status;
226 printk(KERN_DEBUG "%s: ignoring interrupt(s)\n", __FUNCTION__);
227 err_status = ap ? vsc_sata_scr_read(ap, SCR_ERROR) : 0;
228 vsc_sata_scr_write(ap, SCR_ERROR, err_status);
229 handled++;
230 }
231
232 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
226 struct ata_queued_cmd *qc; 233 struct ata_queued_cmd *qc;
227 234
228 qc = ata_qc_from_tag(ap, ap->active_tag); 235 qc = ata_qc_from_tag(ap, ap->active_tag);
229 if (qc && (!(qc->tf.ctl & ATA_NIEN))) { 236 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
230 handled += ata_host_intr(ap, qc); 237 handled += ata_host_intr(ap, qc);
231 } else if (is_vsc_sata_int_err(i, int_status)) { 238 else if (is_vsc_sata_int_err(i, int_status)) {
232 /* 239 /*
233 * On some chips (i.e. Intel 31244), an error 240 * On some chips (i.e. Intel 31244), an error
234 * interrupt will sneak in at initialization 241 * interrupt will sneak in at initialization
@@ -272,6 +279,7 @@ static struct scsi_host_template vsc_sata_sht = {
272 .proc_name = DRV_NAME, 279 .proc_name = DRV_NAME,
273 .dma_boundary = ATA_DMA_BOUNDARY, 280 .dma_boundary = ATA_DMA_BOUNDARY,
274 .slave_configure = ata_scsi_slave_config, 281 .slave_configure = ata_scsi_slave_config,
282 .slave_destroy = ata_scsi_slave_destroy,
275 .bios_param = ata_std_bios_param, 283 .bios_param = ata_std_bios_param,
276}; 284};
277 285
@@ -283,14 +291,17 @@ static const struct ata_port_operations vsc_sata_ops = {
283 .exec_command = ata_exec_command, 291 .exec_command = ata_exec_command,
284 .check_status = ata_check_status, 292 .check_status = ata_check_status,
285 .dev_select = ata_std_dev_select, 293 .dev_select = ata_std_dev_select,
286 .phy_reset = sata_phy_reset,
287 .bmdma_setup = ata_bmdma_setup, 294 .bmdma_setup = ata_bmdma_setup,
288 .bmdma_start = ata_bmdma_start, 295 .bmdma_start = ata_bmdma_start,
289 .bmdma_stop = ata_bmdma_stop, 296 .bmdma_stop = ata_bmdma_stop,
290 .bmdma_status = ata_bmdma_status, 297 .bmdma_status = ata_bmdma_status,
291 .qc_prep = ata_qc_prep, 298 .qc_prep = ata_qc_prep,
292 .qc_issue = ata_qc_issue_prot, 299 .qc_issue = ata_qc_issue_prot,
293 .eng_timeout = ata_eng_timeout, 300 .data_xfer = ata_pio_data_xfer,
301 .freeze = ata_bmdma_freeze,
302 .thaw = ata_bmdma_thaw,
303 .error_handler = ata_bmdma_error_handler,
304 .post_internal_cmd = ata_bmdma_post_internal_cmd,
294 .irq_handler = vsc_sata_interrupt, 305 .irq_handler = vsc_sata_interrupt,
295 .irq_clear = ata_bmdma_irq_clear, 306 .irq_clear = ata_bmdma_irq_clear,
296 .scr_read = vsc_sata_scr_read, 307 .scr_read = vsc_sata_scr_read,
@@ -385,7 +396,7 @@ static int __devinit vsc_sata_init_one (struct pci_dev *pdev, const struct pci_d
385 396
386 probe_ent->sht = &vsc_sata_sht; 397 probe_ent->sht = &vsc_sata_sht;
387 probe_ent->host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 398 probe_ent->host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
388 ATA_FLAG_MMIO | ATA_FLAG_SATA_RESET; 399 ATA_FLAG_MMIO;
389 probe_ent->port_ops = &vsc_sata_ops; 400 probe_ent->port_ops = &vsc_sata_ops;
390 probe_ent->n_ports = 4; 401 probe_ent->n_ports = 4;
391 probe_ent->irq = pdev->irq; 402 probe_ent->irq = pdev->irq;
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 3e90ba797df2..2ab7df0dcfe8 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -579,6 +579,24 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
579static DEFINE_PER_CPU(struct list_head, scsi_done_q); 579static DEFINE_PER_CPU(struct list_head, scsi_done_q);
580 580
581/** 581/**
582 * scsi_req_abort_cmd -- Request command recovery for the specified command
583 * @cmd: pointer to the SCSI command of interest
584 *
585 * This function requests that SCSI Core start recovery for the
586 * command by deleting the timer and adding the command to the eh
587 * queue. It can be called by either LLDDs or SCSI Core. LLDDs that
588 * implement their own error recovery MAY ignore the timeout event if
589 * they generated scsi_req_abort_cmd.
590 */
591void scsi_req_abort_cmd(struct scsi_cmnd *cmd)
592{
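/* scsi_delete_timer() returns 0 when the timer has already fired,
 * i.e. normal timeout handling is already under way; don't queue
 * the command for recovery a second time.
 */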
593 if (!scsi_delete_timer(cmd))
594 return;
595 scsi_times_out(cmd);
596}
597EXPORT_SYMBOL(scsi_req_abort_cmd);
598
599/**
582 * scsi_done - Enqueue the finished SCSI command into the done queue. 600 * scsi_done - Enqueue the finished SCSI command into the done queue.
583 * @cmd: The SCSI Command for which a low-level device driver (LLDD) gives 601 * @cmd: The SCSI Command for which a low-level device driver (LLDD) gives
584 * ownership back to SCSI Core -- i.e. the LLDD has finished with it. 602 * ownership back to SCSI Core -- i.e. the LLDD has finished with it.
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 1c7d993fa8ad..6a7a60fc0a4e 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -58,6 +58,28 @@ void scsi_eh_wakeup(struct Scsi_Host *shost)
58} 58}
59 59
60/** 60/**
61 * scsi_schedule_eh - schedule EH for SCSI host
62 * @shost: SCSI host to invoke error handling on.
63 *
64 * Schedule SCSI EH without scmd.
65 **/
66void scsi_schedule_eh(struct Scsi_Host *shost)
67{
68 unsigned long flags;
69
70 spin_lock_irqsave(shost->host_lock, flags);
71
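/* host_eh_scheduled lets scsi_error_handler() run even though no
 * individual command has failed (host_failed == 0).
 */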
72 if (scsi_host_set_state(shost, SHOST_RECOVERY) == 0 ||
73 scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY) == 0) {
74 shost->host_eh_scheduled++;
75 scsi_eh_wakeup(shost);
76 }
77
78 spin_unlock_irqrestore(shost->host_lock, flags);
79}
80EXPORT_SYMBOL_GPL(scsi_schedule_eh);
81
82/**
61 * scsi_eh_scmd_add - add scsi cmd to error handling. 83 * scsi_eh_scmd_add - add scsi cmd to error handling.
62 * @scmd: scmd to run eh on. 84 * @scmd: scmd to run eh on.
63 * @eh_flag: optional SCSI_EH flag. 85 * @eh_flag: optional SCSI_EH flag.
@@ -1515,7 +1537,7 @@ int scsi_error_handler(void *data)
1515 */ 1537 */
1516 set_current_state(TASK_INTERRUPTIBLE); 1538 set_current_state(TASK_INTERRUPTIBLE);
1517 while (!kthread_should_stop()) { 1539 while (!kthread_should_stop()) {
1518 if (shost->host_failed == 0 || 1540 if ((shost->host_failed == 0 && shost->host_eh_scheduled == 0) ||
1519 shost->host_failed != shost->host_busy) { 1541 shost->host_failed != shost->host_busy) {
1520 SCSI_LOG_ERROR_RECOVERY(1, 1542 SCSI_LOG_ERROR_RECOVERY(1,
1521 printk("Error handler scsi_eh_%d sleeping\n", 1543 printk("Error handler scsi_eh_%d sleeping\n",
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 3302d8068c41..3d04a9f386ac 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -500,7 +500,7 @@ void scsi_device_unbusy(struct scsi_device *sdev)
500 spin_lock_irqsave(shost->host_lock, flags); 500 spin_lock_irqsave(shost->host_lock, flags);
501 shost->host_busy--; 501 shost->host_busy--;
502 if (unlikely(scsi_host_in_recovery(shost) && 502 if (unlikely(scsi_host_in_recovery(shost) &&
503 shost->host_failed)) 503 (shost->host_failed || shost->host_eh_scheduled)))
504 scsi_eh_wakeup(shost); 504 scsi_eh_wakeup(shost);
505 spin_unlock(shost->host_lock); 505 spin_unlock(shost->host_lock);
506 spin_lock(sdev->request_queue->queue_lock); 506 spin_lock(sdev->request_queue->queue_lock);
diff --git a/drivers/scsi/scsi_transport_api.h b/drivers/scsi/scsi_transport_api.h
new file mode 100644
index 000000000000..934f0e62bb5c
--- /dev/null
+++ b/drivers/scsi/scsi_transport_api.h
@@ -0,0 +1,6 @@
1#ifndef _SCSI_TRANSPORT_API_H
2#define _SCSI_TRANSPORT_API_H
3
4void scsi_schedule_eh(struct Scsi_Host *shost);
5
6#endif /* _SCSI_TRANSPORT_API_H */
diff --git a/include/linux/ata.h b/include/linux/ata.h
index 312a2c0c64e6..3671af869696 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -97,6 +97,9 @@ enum {
97 ATA_DRQ = (1 << 3), /* data request i/o */ 97 ATA_DRQ = (1 << 3), /* data request i/o */
98 ATA_ERR = (1 << 0), /* have an error */ 98 ATA_ERR = (1 << 0), /* have an error */
99 ATA_SRST = (1 << 2), /* software reset */ 99 ATA_SRST = (1 << 2), /* software reset */
100 ATA_ICRC = (1 << 7), /* interface CRC error */
101 ATA_UNC = (1 << 6), /* uncorrectable media error */
102 ATA_IDNF = (1 << 4), /* ID not found */
100 ATA_ABORTED = (1 << 2), /* command aborted */ 103 ATA_ABORTED = (1 << 2), /* command aborted */
101 104
102 /* ATA command block registers */ 105 /* ATA command block registers */
@@ -130,6 +133,8 @@ enum {
130 ATA_CMD_WRITE = 0xCA, 133 ATA_CMD_WRITE = 0xCA,
131 ATA_CMD_WRITE_EXT = 0x35, 134 ATA_CMD_WRITE_EXT = 0x35,
132 ATA_CMD_WRITE_FUA_EXT = 0x3D, 135 ATA_CMD_WRITE_FUA_EXT = 0x3D,
136 ATA_CMD_FPDMA_READ = 0x60,
137 ATA_CMD_FPDMA_WRITE = 0x61,
133 ATA_CMD_PIO_READ = 0x20, 138 ATA_CMD_PIO_READ = 0x20,
134 ATA_CMD_PIO_READ_EXT = 0x24, 139 ATA_CMD_PIO_READ_EXT = 0x24,
135 ATA_CMD_PIO_WRITE = 0x30, 140 ATA_CMD_PIO_WRITE = 0x30,
@@ -148,6 +153,10 @@ enum {
148 ATA_CMD_INIT_DEV_PARAMS = 0x91, 153 ATA_CMD_INIT_DEV_PARAMS = 0x91,
149 ATA_CMD_READ_NATIVE_MAX = 0xF8, 154 ATA_CMD_READ_NATIVE_MAX = 0xF8,
150 ATA_CMD_READ_NATIVE_MAX_EXT = 0x27, 155 ATA_CMD_READ_NATIVE_MAX_EXT = 0x27,
156 ATA_CMD_READ_LOG_EXT = 0x2f,
157
158 /* READ_LOG_EXT pages */
159 ATA_LOG_SATA_NCQ = 0x10,
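/* (log page 10h reports the tag and result taskfile of the failed
 * NCQ command) */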
151 160
152 /* SETFEATURES stuff */ 161 /* SETFEATURES stuff */
153 SETFEATURES_XFER = 0x03, 162 SETFEATURES_XFER = 0x03,
@@ -172,6 +181,9 @@ enum {
172 XFER_PIO_0 = 0x08, 181 XFER_PIO_0 = 0x08,
173 XFER_PIO_SLOW = 0x00, 182 XFER_PIO_SLOW = 0x00,
174 183
184 SETFEATURES_WC_ON = 0x02, /* Enable write cache */
185 SETFEATURES_WC_OFF = 0x82, /* Disable write cache */
186
175 /* ATAPI stuff */ 187 /* ATAPI stuff */
176 ATAPI_PKT_DMA = (1 << 0), 188 ATAPI_PKT_DMA = (1 << 0),
177 ATAPI_DMADIR = (1 << 2), /* ATAPI data dir: 189 ATAPI_DMADIR = (1 << 2), /* ATAPI data dir:
@@ -192,6 +204,16 @@ enum {
192 SCR_ACTIVE = 3, 204 SCR_ACTIVE = 3,
193 SCR_NOTIFICATION = 4, 205 SCR_NOTIFICATION = 4,
194 206
207 /* SError bits */
208 SERR_DATA_RECOVERED = (1 << 0), /* recovered data error */
209 SERR_COMM_RECOVERED = (1 << 1), /* recovered comm failure */
210 SERR_DATA = (1 << 8), /* unrecovered data error */
211 SERR_PERSISTENT = (1 << 9), /* persistent data/comm error */
212 SERR_PROTOCOL = (1 << 10), /* protocol violation */
213 SERR_INTERNAL = (1 << 11), /* host internal error */
214 SERR_PHYRDY_CHG = (1 << 16), /* PHY RDY changed */
215 SERR_DEV_XCHG = (1 << 26), /* device exchanged */
216
195 /* struct ata_taskfile flags */ 217 /* struct ata_taskfile flags */
196 ATA_TFLAG_LBA48 = (1 << 0), /* enable 48-bit LBA and "HOB" */ 218 ATA_TFLAG_LBA48 = (1 << 0), /* enable 48-bit LBA and "HOB" */
197 ATA_TFLAG_ISADDR = (1 << 1), /* enable r/w to nsect/lba regs */ 219 ATA_TFLAG_ISADDR = (1 << 1), /* enable r/w to nsect/lba regs */
@@ -199,6 +221,7 @@ enum {
199 ATA_TFLAG_WRITE = (1 << 3), /* data dir: host->dev==1 (write) */ 221 ATA_TFLAG_WRITE = (1 << 3), /* data dir: host->dev==1 (write) */
200 ATA_TFLAG_LBA = (1 << 4), /* enable LBA */ 222 ATA_TFLAG_LBA = (1 << 4), /* enable LBA */
201 ATA_TFLAG_FUA = (1 << 5), /* enable FUA */ 223 ATA_TFLAG_FUA = (1 << 5), /* enable FUA */
224 ATA_TFLAG_POLLING = (1 << 6), /* set nIEN to 1 and use polling */
202}; 225};
203 226
204enum ata_tf_protocols { 227enum ata_tf_protocols {
@@ -207,6 +230,7 @@ enum ata_tf_protocols {
207 ATA_PROT_NODATA, /* no data */ 230 ATA_PROT_NODATA, /* no data */
208 ATA_PROT_PIO, /* PIO single sector */ 231 ATA_PROT_PIO, /* PIO single sector */
209 ATA_PROT_DMA, /* DMA */ 232 ATA_PROT_DMA, /* DMA */
233 ATA_PROT_NCQ, /* NCQ */
210 ATA_PROT_ATAPI, /* packet command, PIO data xfer*/ 234 ATA_PROT_ATAPI, /* packet command, PIO data xfer*/
211 ATA_PROT_ATAPI_NODATA, /* packet command, no data */ 235 ATA_PROT_ATAPI_NODATA, /* packet command, no data */
212 ATA_PROT_ATAPI_DMA, /* packet command with special DMA sauce */ 236 ATA_PROT_ATAPI_DMA, /* packet command with special DMA sauce */
@@ -262,6 +286,8 @@ struct ata_taskfile {
262#define ata_id_has_pm(id) ((id)[82] & (1 << 3)) 286#define ata_id_has_pm(id) ((id)[82] & (1 << 3))
263#define ata_id_has_lba(id) ((id)[49] & (1 << 9)) 287#define ata_id_has_lba(id) ((id)[49] & (1 << 9))
264#define ata_id_has_dma(id) ((id)[49] & (1 << 8)) 288#define ata_id_has_dma(id) ((id)[49] & (1 << 8))
289#define ata_id_has_ncq(id) ((id)[76] & (1 << 8))
290#define ata_id_queue_depth(id) (((id)[75] & 0x1f) + 1)
265#define ata_id_removeable(id) ((id)[0] & (1 << 7)) 291#define ata_id_removeable(id) ((id)[0] & (1 << 7))
266#define ata_id_has_dword_io(id) ((id)[50] & (1 << 0)) 292#define ata_id_has_dword_io(id) ((id)[50] & (1 << 0))
267#define ata_id_u32(id,n) \ 293#define ata_id_u32(id,n) \
@@ -272,6 +298,8 @@ struct ata_taskfile {
272 ((u64) (id)[(n) + 1] << 16) | \ 298 ((u64) (id)[(n) + 1] << 16) | \
273 ((u64) (id)[(n) + 0]) ) 299 ((u64) (id)[(n) + 0]) )
274 300
301#define ata_id_cdb_intr(id) (((id)[0] & 0x60) == 0x20)
302
275static inline unsigned int ata_id_major_version(const u16 *id) 303static inline unsigned int ata_id_major_version(const u16 *id)
276{ 304{
277 unsigned int mver; 305 unsigned int mver;
@@ -311,6 +339,15 @@ static inline int is_atapi_taskfile(const struct ata_taskfile *tf)
311 (tf->protocol == ATA_PROT_ATAPI_DMA); 339 (tf->protocol == ATA_PROT_ATAPI_DMA);
312} 340}
313 341
342static inline int is_multi_taskfile(struct ata_taskfile *tf)
343{
344 return (tf->command == ATA_CMD_READ_MULTI) ||
345 (tf->command == ATA_CMD_WRITE_MULTI) ||
346 (tf->command == ATA_CMD_READ_MULTI_EXT) ||
347 (tf->command == ATA_CMD_WRITE_MULTI_EXT) ||
348 (tf->command == ATA_CMD_WRITE_MULTI_FUA_EXT);
349}
350
314static inline int ata_ok(u8 status) 351static inline int ata_ok(u8 status)
315{ 352{
316 return ((status & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | ATA_ERR)) 353 return ((status & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | ATA_ERR))
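
The two IDENTIFY helpers added above are all a driver needs to size an NCQ queue. A minimal sketch (hypothetical helper, not part of this patch) of how they might be consulted during device configuration:

static int my_ncq_depth(const u16 *id)
{
	/* word 76 bit 8 advertises NCQ; word 75 bits 4:0 hold depth - 1 */
	if (!ata_id_has_ncq(id))
		return 1;			/* non-NCQ device: one command at a time */
	return ata_id_queue_depth(id);		/* 1..32 */
}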
diff --git a/include/linux/libata.h b/include/linux/libata.h
index b80d2e7fa6d2..20b1cf527c60 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -33,6 +33,7 @@
33#include <asm/io.h> 33#include <asm/io.h>
34#include <linux/ata.h> 34#include <linux/ata.h>
35#include <linux/workqueue.h> 35#include <linux/workqueue.h>
36#include <scsi/scsi_host.h>
36 37
37/* 38/*
38 * compile-time options: to be removed as soon as all the drivers are 39 * compile-time options: to be removed as soon as all the drivers are
@@ -44,7 +45,6 @@
44#undef ATA_NDEBUG /* define to disable quick runtime checks */ 45#undef ATA_NDEBUG /* define to disable quick runtime checks */
45#undef ATA_ENABLE_PATA /* define to enable PATA support in some 46#undef ATA_ENABLE_PATA /* define to enable PATA support in some
46 * low-level drivers */ 47 * low-level drivers */
47#undef ATAPI_ENABLE_DMADIR /* enables ATAPI DMADIR bridge support */
48 48
49 49
50/* note: prints function name for you */ 50/* note: prints function name for you */
@@ -108,8 +108,11 @@ enum {
108 LIBATA_MAX_PRD = ATA_MAX_PRD / 2, 108 LIBATA_MAX_PRD = ATA_MAX_PRD / 2,
109 ATA_MAX_PORTS = 8, 109 ATA_MAX_PORTS = 8,
110 ATA_DEF_QUEUE = 1, 110 ATA_DEF_QUEUE = 1,
111 ATA_MAX_QUEUE = 1, 111 /* tag ATA_MAX_QUEUE - 1 is reserved for internal commands */
112 ATA_MAX_QUEUE = 32,
113 ATA_TAG_INTERNAL = ATA_MAX_QUEUE - 1,
112 ATA_MAX_SECTORS = 200, /* FIXME */ 114 ATA_MAX_SECTORS = 200, /* FIXME */
115 ATA_MAX_SECTORS_LBA48 = 65535,
113 ATA_MAX_BUS = 2, 116 ATA_MAX_BUS = 2,
114 ATA_DEF_BUSY_WAIT = 10000, 117 ATA_DEF_BUSY_WAIT = 10000,
115 ATA_SHORT_PAUSE = (HZ >> 6) + 1, 118 ATA_SHORT_PAUSE = (HZ >> 6) + 1,
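
With ATA_MAX_QUEUE raised to 32, the top tag is set aside so libata-internal commands can be issued even while NCQ commands are in flight. A rough sketch of tag allocation honoring the reservation (hypothetical helper; assumes the qc_allocated bitmap introduced further down in this diff):

static int my_find_free_tag(struct ata_port *ap)
{
	unsigned int tag;

	/* tags 0..ATA_MAX_QUEUE-2 serve normal commands;
	 * ATA_TAG_INTERNAL is reserved */
	for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++)
		if (!test_bit(tag, &ap->qc_allocated))
			return tag;
	return -1;				/* all regular tags busy */
}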
@@ -120,9 +123,17 @@ enum {
120 ATA_SHT_USE_CLUSTERING = 1, 123 ATA_SHT_USE_CLUSTERING = 1,
121 124
122 /* struct ata_device stuff */ 125 /* struct ata_device stuff */
123 ATA_DFLAG_LBA48 = (1 << 0), /* device supports LBA48 */ 126 ATA_DFLAG_LBA = (1 << 0), /* device supports LBA */
124 ATA_DFLAG_PIO = (1 << 1), /* device currently in PIO mode */ 127 ATA_DFLAG_LBA48 = (1 << 1), /* device supports LBA48 */
125 ATA_DFLAG_LBA = (1 << 2), /* device supports LBA */ 128 ATA_DFLAG_CDB_INTR = (1 << 2), /* device asserts INTRQ when ready for CDB */
129 ATA_DFLAG_NCQ = (1 << 3), /* device supports NCQ */
130 ATA_DFLAG_CFG_MASK = (1 << 8) - 1,
131
132 ATA_DFLAG_PIO = (1 << 8), /* device currently in PIO mode */
133 ATA_DFLAG_INIT_MASK = (1 << 16) - 1,
134
135 ATA_DFLAG_DETACH = (1 << 16),
136 ATA_DFLAG_DETACHED = (1 << 17),
126 137
127 ATA_DEV_UNKNOWN = 0, /* unknown device */ 138 ATA_DEV_UNKNOWN = 0, /* unknown device */
128 ATA_DEV_ATA = 1, /* ATA device */ 139 ATA_DEV_ATA = 1, /* ATA device */
@@ -132,43 +143,57 @@ enum {
132 ATA_DEV_NONE = 5, /* no device */ 143 ATA_DEV_NONE = 5, /* no device */
133 144
134 /* struct ata_port flags */ 145 /* struct ata_port flags */
135 ATA_FLAG_SLAVE_POSS = (1 << 1), /* host supports slave dev */ 146 ATA_FLAG_SLAVE_POSS = (1 << 0), /* host supports slave dev */
136 /* (doesn't imply presence) */ 147 /* (doesn't imply presence) */
137 ATA_FLAG_PORT_DISABLED = (1 << 2), /* port is disabled, ignore it */ 148 ATA_FLAG_SATA = (1 << 1),
138 ATA_FLAG_SATA = (1 << 3), 149 ATA_FLAG_NO_LEGACY = (1 << 2), /* no legacy mode check */
139 ATA_FLAG_NO_LEGACY = (1 << 4), /* no legacy mode check */ 150 ATA_FLAG_MMIO = (1 << 3), /* use MMIO, not PIO */
140 ATA_FLAG_SRST = (1 << 5), /* (obsolete) use ATA SRST, not E.D.D. */ 151 ATA_FLAG_SRST = (1 << 4), /* (obsolete) use ATA SRST, not E.D.D. */
141 ATA_FLAG_MMIO = (1 << 6), /* use MMIO, not PIO */ 152 ATA_FLAG_SATA_RESET = (1 << 5), /* (obsolete) use COMRESET */
142 ATA_FLAG_SATA_RESET = (1 << 7), /* (obsolete) use COMRESET */ 153 ATA_FLAG_NO_ATAPI = (1 << 6), /* No ATAPI support */
143 ATA_FLAG_PIO_DMA = (1 << 8), /* PIO cmds via DMA */ 154 ATA_FLAG_PIO_DMA = (1 << 7), /* PIO cmds via DMA */
144 ATA_FLAG_NOINTR = (1 << 9), /* FIXME: Remove this once 155 ATA_FLAG_PIO_LBA48 = (1 << 8), /* Host DMA engine is LBA28 only */
145 * proper HSM is in place. */ 156 ATA_FLAG_PIO_POLLING = (1 << 9), /* use polling PIO if LLD
146 ATA_FLAG_DEBUGMSG = (1 << 10), 157 * doesn't handle PIO interrupts */
147 ATA_FLAG_NO_ATAPI = (1 << 11), /* No ATAPI support */ 158 ATA_FLAG_NCQ = (1 << 10), /* host supports NCQ */
148 159 ATA_FLAG_HRST_TO_RESUME = (1 << 11), /* hardreset to resume phy */
149 ATA_FLAG_SUSPENDED = (1 << 12), /* port is suspended */ 160 ATA_FLAG_SKIP_D2H_BSY = (1 << 12), /* can't wait for the first D2H
150 161 * Register FIS clearing BSY */
151 ATA_FLAG_PIO_LBA48 = (1 << 13), /* Host DMA engine is LBA28 only */ 162
152 ATA_FLAG_IRQ_MASK = (1 << 14), /* Mask IRQ in PIO xfers */ 163 ATA_FLAG_DEBUGMSG = (1 << 13),
153 164 ATA_FLAG_FLUSH_PORT_TASK = (1 << 14), /* flush port task */
154 ATA_FLAG_FLUSH_PORT_TASK = (1 << 15), /* Flush port task */ 165
155 ATA_FLAG_IN_EH = (1 << 16), /* EH in progress */ 166 ATA_FLAG_EH_PENDING = (1 << 15), /* EH pending */
156 167 ATA_FLAG_EH_IN_PROGRESS = (1 << 16), /* EH in progress */
 157 ATA_QCFLAG_ACTIVE = (1 << 1), /* cmd not yet ack'd to scsi layer */ 168 ATA_FLAG_FROZEN = (1 << 17), /* port is frozen */
158 ATA_QCFLAG_SG = (1 << 3), /* have s/g table? */ 169 ATA_FLAG_RECOVERED = (1 << 18), /* recovery action performed */
159 ATA_QCFLAG_SINGLE = (1 << 4), /* no s/g, just a single buffer */ 170 ATA_FLAG_LOADING = (1 << 19), /* boot/loading probe */
171 ATA_FLAG_UNLOADING = (1 << 20), /* module is unloading */
172 ATA_FLAG_SCSI_HOTPLUG = (1 << 21), /* SCSI hotplug scheduled */
173
174 ATA_FLAG_DISABLED = (1 << 22), /* port is disabled, ignore it */
175 ATA_FLAG_SUSPENDED = (1 << 23), /* port is suspended (power) */
176
177 /* bits 24:31 of ap->flags are reserved for LLDD specific flags */
178
179 /* struct ata_queued_cmd flags */
 180 ATA_QCFLAG_ACTIVE = (1 << 0), /* cmd not yet ack'd to scsi layer */
181 ATA_QCFLAG_SG = (1 << 1), /* have s/g table? */
182 ATA_QCFLAG_SINGLE = (1 << 2), /* no s/g, just a single buffer */
160 ATA_QCFLAG_DMAMAP = ATA_QCFLAG_SG | ATA_QCFLAG_SINGLE, 183 ATA_QCFLAG_DMAMAP = ATA_QCFLAG_SG | ATA_QCFLAG_SINGLE,
161 ATA_QCFLAG_EH_SCHEDULED = (1 << 5), /* EH scheduled */ 184 ATA_QCFLAG_IO = (1 << 3), /* standard IO command */
185 ATA_QCFLAG_RESULT_TF = (1 << 4), /* result TF requested */
186
187 ATA_QCFLAG_FAILED = (1 << 16), /* cmd failed and is owned by EH */
188 ATA_QCFLAG_SENSE_VALID = (1 << 17), /* sense data valid */
189 ATA_QCFLAG_EH_SCHEDULED = (1 << 18), /* EH scheduled (obsolete) */
162 190
163 /* host set flags */ 191 /* host set flags */
164 ATA_HOST_SIMPLEX = (1 << 0), /* Host is simplex, one DMA channel per host_set only */ 192 ATA_HOST_SIMPLEX = (1 << 0), /* Host is simplex, one DMA channel per host_set only */
165 193
166 /* various lengths of time */ 194 /* various lengths of time */
167 ATA_TMOUT_PIO = 30 * HZ,
168 ATA_TMOUT_BOOT = 30 * HZ, /* heuristic */ 195 ATA_TMOUT_BOOT = 30 * HZ, /* heuristic */
169 ATA_TMOUT_BOOT_QUICK = 7 * HZ, /* heuristic */ 196 ATA_TMOUT_BOOT_QUICK = 7 * HZ, /* heuristic */
170 ATA_TMOUT_CDB = 30 * HZ,
171 ATA_TMOUT_CDB_QUICK = 5 * HZ,
172 ATA_TMOUT_INTERNAL = 30 * HZ, 197 ATA_TMOUT_INTERNAL = 30 * HZ,
173 ATA_TMOUT_INTERNAL_QUICK = 5 * HZ, 198 ATA_TMOUT_INTERNAL_QUICK = 5 * HZ,
174 199
@@ -207,21 +232,56 @@ enum {
207 /* size of buffer to pad xfers ending on unaligned boundaries */ 232 /* size of buffer to pad xfers ending on unaligned boundaries */
208 ATA_DMA_PAD_SZ = 4, 233 ATA_DMA_PAD_SZ = 4,
209 ATA_DMA_PAD_BUF_SZ = ATA_DMA_PAD_SZ * ATA_MAX_QUEUE, 234 ATA_DMA_PAD_BUF_SZ = ATA_DMA_PAD_SZ * ATA_MAX_QUEUE,
210 235
211 /* Masks for port functions */ 236 /* masks for port functions */
212 ATA_PORT_PRIMARY = (1 << 0), 237 ATA_PORT_PRIMARY = (1 << 0),
213 ATA_PORT_SECONDARY = (1 << 1), 238 ATA_PORT_SECONDARY = (1 << 1),
239
240 /* ering size */
241 ATA_ERING_SIZE = 32,
242
243 /* desc_len for ata_eh_info and context */
244 ATA_EH_DESC_LEN = 80,
245
246 /* reset / recovery action types */
247 ATA_EH_REVALIDATE = (1 << 0),
248 ATA_EH_SOFTRESET = (1 << 1),
249 ATA_EH_HARDRESET = (1 << 2),
250
251 ATA_EH_RESET_MASK = ATA_EH_SOFTRESET | ATA_EH_HARDRESET,
252 ATA_EH_PERDEV_MASK = ATA_EH_REVALIDATE,
253
254 /* ata_eh_info->flags */
255 ATA_EHI_HOTPLUGGED = (1 << 0), /* could have been hotplugged */
256
257 ATA_EHI_DID_RESET = (1 << 16), /* already reset this port */
258
259 /* max repeat if error condition is still set after ->error_handler */
260 ATA_EH_MAX_REPEAT = 5,
261
262 /* how hard are we gonna try to probe/recover devices */
263 ATA_PROBE_MAX_TRIES = 3,
264 ATA_EH_RESET_TRIES = 3,
265 ATA_EH_DEV_TRIES = 3,
266
267 /* Drive spinup time (time from power-on to the first D2H FIS)
268 * in msecs - 8s currently. Failing to get ready in this time
269 * isn't critical. It will result in reset failure for
270 * controllers which can't wait for the first D2H FIS. libata
271 * will retry, so it just has to be long enough to spin up
272 * most devices.
273 */
274 ATA_SPINUP_WAIT = 8000,
214}; 275};
215 276
216enum hsm_task_states { 277enum hsm_task_states {
217 HSM_ST_UNKNOWN, 278 HSM_ST_UNKNOWN, /* state unknown */
 218 HSM_ST_IDLE, 279 HSM_ST_IDLE, /* no command ongoing */
 219 HSM_ST_POLL, 280 HSM_ST, /* (waiting for the device to) transfer data */
 220 HSM_ST_TMOUT, 281 HSM_ST_LAST, /* (waiting for the device to) complete command */
 221 HSM_ST, 282 HSM_ST_ERR, /* error */
 222 HSM_ST_LAST, 283 HSM_ST_FIRST, /* (waiting for the device to)
 223 HSM_ST_LAST_POLL, 284 write CDB or first data block */
224 HSM_ST_ERR,
225}; 285};
226 286
227enum ata_completion_errors { 287enum ata_completion_errors {
@@ -244,9 +304,9 @@ struct ata_queued_cmd;
244 304
245/* typedefs */ 305/* typedefs */
246typedef void (*ata_qc_cb_t) (struct ata_queued_cmd *qc); 306typedef void (*ata_qc_cb_t) (struct ata_queued_cmd *qc);
247typedef void (*ata_probeinit_fn_t)(struct ata_port *); 307typedef int (*ata_prereset_fn_t)(struct ata_port *ap);
248typedef int (*ata_reset_fn_t)(struct ata_port *, int, unsigned int *); 308typedef int (*ata_reset_fn_t)(struct ata_port *ap, unsigned int *classes);
249typedef void (*ata_postreset_fn_t)(struct ata_port *ap, unsigned int *); 309typedef void (*ata_postreset_fn_t)(struct ata_port *ap, unsigned int *classes);
250 310
251struct ata_ioports { 311struct ata_ioports {
252 unsigned long cmd_addr; 312 unsigned long cmd_addr;
@@ -297,7 +357,8 @@ struct ata_host_set {
297 unsigned long flags; 357 unsigned long flags;
 298 int simplex_claimed; /* Keep separate in case we 358 int simplex_claimed; /* Keep separate in case we
299 ever need to do this locked */ 359 ever need to do this locked */
300 struct ata_port * ports[0]; 360 struct ata_host_set *next; /* for legacy mode */
361 struct ata_port *ports[0];
301}; 362};
302 363
303struct ata_queued_cmd { 364struct ata_queued_cmd {
@@ -336,7 +397,7 @@ struct ata_queued_cmd {
336 struct scatterlist *__sg; 397 struct scatterlist *__sg;
337 398
338 unsigned int err_mask; 399 unsigned int err_mask;
339 400 struct ata_taskfile result_tf;
340 ata_qc_cb_t complete_fn; 401 ata_qc_cb_t complete_fn;
341 402
342 void *private_data; 403 void *private_data;
@@ -348,12 +409,26 @@ struct ata_host_stats {
348 unsigned long rw_reqbuf; 409 unsigned long rw_reqbuf;
349}; 410};
350 411
412struct ata_ering_entry {
413 int is_io;
414 unsigned int err_mask;
415 u64 timestamp;
416};
417
418struct ata_ering {
419 int cursor;
420 struct ata_ering_entry ring[ATA_ERING_SIZE];
421};
422
351struct ata_device { 423struct ata_device {
352 u64 n_sectors; /* size of device, if ATA */ 424 struct ata_port *ap;
425 unsigned int devno; /* 0 or 1 */
353 unsigned long flags; /* ATA_DFLAG_xxx */ 426 unsigned long flags; /* ATA_DFLAG_xxx */
427 struct scsi_device *sdev; /* attached SCSI device */
 428 /* n_sectors is used as CLEAR_OFFSET, read comment above CLEAR_OFFSET */
429 u64 n_sectors; /* size of device, if ATA */
354 unsigned int class; /* ATA_DEV_xxx */ 430 unsigned int class; /* ATA_DEV_xxx */
355 unsigned int devno; /* 0 or 1 */ 431 u16 id[ATA_ID_WORDS]; /* IDENTIFY xxx DEVICE data */
356 u16 *id; /* IDENTIFY xxx DEVICE data */
357 u8 pio_mode; 432 u8 pio_mode;
358 u8 dma_mode; 433 u8 dma_mode;
359 u8 xfer_mode; 434 u8 xfer_mode;
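
struct ata_ering is a fixed-size circular log of recent command failures, letting EH spot patterns such as repeated timeouts. A minimal sketch of recording one entry, assuming the cursor simply advances modulo ATA_ERING_SIZE (the in-tree helper in libata-eh.c may differ in detail):

static void my_ering_record(struct ata_ering *ering, int is_io,
			    unsigned int err_mask)
{
	struct ata_ering_entry *ent;

	ering->cursor = (ering->cursor + 1) % ATA_ERING_SIZE;
	ent = &ering->ring[ering->cursor];

	ent->is_io = is_io;
	ent->err_mask = err_mask;
	ent->timestamp = get_jiffies_64();
}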
@@ -373,11 +448,42 @@ struct ata_device {
373 u16 cylinders; /* Number of cylinders */ 448 u16 cylinders; /* Number of cylinders */
374 u16 heads; /* Number of heads */ 449 u16 heads; /* Number of heads */
375 u16 sectors; /* Number of sectors per track */ 450 u16 sectors; /* Number of sectors per track */
451
452 /* error history */
453 struct ata_ering ering;
454};
455
456/* Offset into struct ata_device. Fields above it are maintained
 457 * across device init. Fields below are zeroed.
458 */
459#define ATA_DEVICE_CLEAR_OFFSET offsetof(struct ata_device, n_sectors)
460
461struct ata_eh_info {
462 struct ata_device *dev; /* offending device */
463 u32 serror; /* SError from LLDD */
464 unsigned int err_mask; /* port-wide err_mask */
465 unsigned int action; /* ATA_EH_* action mask */
466 unsigned int dev_action[ATA_MAX_DEVICES]; /* dev EH action */
467 unsigned int flags; /* ATA_EHI_* flags */
468
469 unsigned long hotplug_timestamp;
470 unsigned int probe_mask;
471
472 char desc[ATA_EH_DESC_LEN];
473 int desc_len;
474};
475
476struct ata_eh_context {
477 struct ata_eh_info i;
478 int tries[ATA_MAX_DEVICES];
479 unsigned int classes[ATA_MAX_DEVICES];
480 unsigned int did_probe_mask;
376}; 481};
377 482
378struct ata_port { 483struct ata_port {
379 struct Scsi_Host *host; /* our co-allocated scsi host */ 484 struct Scsi_Host *host; /* our co-allocated scsi host */
380 const struct ata_port_operations *ops; 485 const struct ata_port_operations *ops;
486 spinlock_t *lock;
381 unsigned long flags; /* ATA_FLAG_xxx */ 487 unsigned long flags; /* ATA_FLAG_xxx */
382 unsigned int id; /* unique id req'd by scsi midlyr */ 488 unsigned int id; /* unique id req'd by scsi midlyr */
383 unsigned int port_no; /* unique port #; from zero */ 489 unsigned int port_no; /* unique port #; from zero */
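
ATA_DEVICE_CLEAR_OFFSET splits struct ata_device in two: fields before n_sectors (ap, devno, flags, sdev) survive re-probing, everything from n_sectors on is wiped. A sketch of the intended use (hypothetical helper name):

static void my_dev_clear(struct ata_device *dev)
{
	/* zero the re-probeable part; keep ap/devno/flags/sdev */
	memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
	       sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
}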
@@ -397,26 +503,40 @@ struct ata_port {
397 unsigned int mwdma_mask; 503 unsigned int mwdma_mask;
398 unsigned int udma_mask; 504 unsigned int udma_mask;
399 unsigned int cbl; /* cable type; ATA_CBL_xxx */ 505 unsigned int cbl; /* cable type; ATA_CBL_xxx */
506 unsigned int hw_sata_spd_limit;
507 unsigned int sata_spd_limit; /* SATA PHY speed limit */
508
509 /* record runtime error info, protected by host_set lock */
510 struct ata_eh_info eh_info;
511 /* EH context owned by EH */
512 struct ata_eh_context eh_context;
400 513
401 struct ata_device device[ATA_MAX_DEVICES]; 514 struct ata_device device[ATA_MAX_DEVICES];
402 515
403 struct ata_queued_cmd qcmd[ATA_MAX_QUEUE]; 516 struct ata_queued_cmd qcmd[ATA_MAX_QUEUE];
404 unsigned long qactive; 517 unsigned long qc_allocated;
518 unsigned int qc_active;
519
405 unsigned int active_tag; 520 unsigned int active_tag;
521 u32 sactive;
406 522
407 struct ata_host_stats stats; 523 struct ata_host_stats stats;
408 struct ata_host_set *host_set; 524 struct ata_host_set *host_set;
409 struct device *dev; 525 struct device *dev;
410 526
411 struct work_struct port_task; 527 struct work_struct port_task;
528 struct work_struct hotplug_task;
529 struct work_struct scsi_rescan_task;
412 530
413 unsigned int hsm_task_state; 531 unsigned int hsm_task_state;
414 unsigned long pio_task_timeout;
415 532
416 u32 msg_enable; 533 u32 msg_enable;
417 struct list_head eh_done_q; 534 struct list_head eh_done_q;
535 wait_queue_head_t eh_wait_q;
418 536
419 void *private_data; 537 void *private_data;
538
539 u8 sector_buf[ATA_SECT_SIZE]; /* owned by EH */
420}; 540};
421 541
422struct ata_port_operations { 542struct ata_port_operations {
@@ -438,7 +558,6 @@ struct ata_port_operations {
438 558
439 void (*phy_reset) (struct ata_port *ap); /* obsolete */ 559 void (*phy_reset) (struct ata_port *ap); /* obsolete */
440 void (*set_mode) (struct ata_port *ap); 560 void (*set_mode) (struct ata_port *ap);
441 int (*probe_reset) (struct ata_port *ap, unsigned int *classes);
442 561
443 void (*post_set_mode) (struct ata_port *ap); 562 void (*post_set_mode) (struct ata_port *ap);
444 563
@@ -447,10 +566,20 @@ struct ata_port_operations {
447 void (*bmdma_setup) (struct ata_queued_cmd *qc); 566 void (*bmdma_setup) (struct ata_queued_cmd *qc);
448 void (*bmdma_start) (struct ata_queued_cmd *qc); 567 void (*bmdma_start) (struct ata_queued_cmd *qc);
449 568
569 void (*data_xfer) (struct ata_device *, unsigned char *, unsigned int, int);
570
450 void (*qc_prep) (struct ata_queued_cmd *qc); 571 void (*qc_prep) (struct ata_queued_cmd *qc);
451 unsigned int (*qc_issue) (struct ata_queued_cmd *qc); 572 unsigned int (*qc_issue) (struct ata_queued_cmd *qc);
452 573
453 void (*eng_timeout) (struct ata_port *ap); 574 /* Error handlers. ->error_handler overrides ->eng_timeout and
575 * indicates that new-style EH is in place.
576 */
577 void (*eng_timeout) (struct ata_port *ap); /* obsolete */
578
579 void (*freeze) (struct ata_port *ap);
580 void (*thaw) (struct ata_port *ap);
581 void (*error_handler) (struct ata_port *ap);
582 void (*post_internal_cmd) (struct ata_queued_cmd *qc);
454 583
455 irqreturn_t (*irq_handler)(int, void *, struct pt_regs *); 584 irqreturn_t (*irq_handler)(int, void *, struct pt_regs *);
456 void (*irq_clear) (struct ata_port *); 585 void (*irq_clear) (struct ata_port *);
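
A driver opts into new-style EH simply by filling in ->error_handler (plus, usually, ->freeze/->thaw and ->post_internal_cmd); ->eng_timeout is then ignored. A sketch of how a bmdma-style LLDD might wire these up, using the ata_bmdma_* helpers exported later in this diff (the my_ prefix marks a placeholder):

static const struct ata_port_operations my_port_ops = {
	/* ... taskfile, bmdma and qc hooks elided ... */
	.data_xfer		= ata_pio_data_xfer,

	.freeze			= ata_bmdma_freeze,
	.thaw			= ata_bmdma_thaw,
	.error_handler		= ata_bmdma_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,

	.irq_handler		= ata_interrupt,
	.irq_clear		= ata_bmdma_irq_clear,
};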
@@ -492,22 +621,22 @@ struct ata_timing {
492 621
493#define FIT(v,vmin,vmax) max_t(short,min_t(short,v,vmax),vmin) 622#define FIT(v,vmin,vmax) max_t(short,min_t(short,v,vmax),vmin)
494 623
624extern const unsigned long sata_deb_timing_boot[];
625extern const unsigned long sata_deb_timing_eh[];
626extern const unsigned long sata_deb_timing_before_fsrst[];
627
495extern void ata_port_probe(struct ata_port *); 628extern void ata_port_probe(struct ata_port *);
496extern void __sata_phy_reset(struct ata_port *ap); 629extern void __sata_phy_reset(struct ata_port *ap);
497extern void sata_phy_reset(struct ata_port *ap); 630extern void sata_phy_reset(struct ata_port *ap);
498extern void ata_bus_reset(struct ata_port *ap); 631extern void ata_bus_reset(struct ata_port *ap);
499extern int ata_drive_probe_reset(struct ata_port *ap, 632extern int sata_set_spd(struct ata_port *ap);
500 ata_probeinit_fn_t probeinit, 633extern int sata_phy_debounce(struct ata_port *ap, const unsigned long *param);
501 ata_reset_fn_t softreset, ata_reset_fn_t hardreset, 634extern int sata_phy_resume(struct ata_port *ap, const unsigned long *param);
502 ata_postreset_fn_t postreset, unsigned int *classes); 635extern int ata_std_prereset(struct ata_port *ap);
503extern void ata_std_probeinit(struct ata_port *ap); 636extern int ata_std_softreset(struct ata_port *ap, unsigned int *classes);
504extern int ata_std_softreset(struct ata_port *ap, int verbose, 637extern int sata_std_hardreset(struct ata_port *ap, unsigned int *class);
505 unsigned int *classes);
506extern int sata_std_hardreset(struct ata_port *ap, int verbose,
507 unsigned int *class);
508extern void ata_std_postreset(struct ata_port *ap, unsigned int *classes); 638extern void ata_std_postreset(struct ata_port *ap, unsigned int *classes);
509extern int ata_dev_revalidate(struct ata_port *ap, struct ata_device *dev, 639extern int ata_dev_revalidate(struct ata_device *dev, int post_reset);
510 int post_reset);
511extern void ata_port_disable(struct ata_port *); 640extern void ata_port_disable(struct ata_port *);
512extern void ata_std_ports(struct ata_ioports *ioaddr); 641extern void ata_std_ports(struct ata_ioports *ioaddr);
513#ifdef CONFIG_PCI 642#ifdef CONFIG_PCI
@@ -519,24 +648,32 @@ extern int ata_pci_device_resume(struct pci_dev *pdev);
519extern int ata_pci_clear_simplex(struct pci_dev *pdev); 648extern int ata_pci_clear_simplex(struct pci_dev *pdev);
520#endif /* CONFIG_PCI */ 649#endif /* CONFIG_PCI */
521extern int ata_device_add(const struct ata_probe_ent *ent); 650extern int ata_device_add(const struct ata_probe_ent *ent);
651extern void ata_port_detach(struct ata_port *ap);
522extern void ata_host_set_remove(struct ata_host_set *host_set); 652extern void ata_host_set_remove(struct ata_host_set *host_set);
523extern int ata_scsi_detect(struct scsi_host_template *sht); 653extern int ata_scsi_detect(struct scsi_host_template *sht);
524extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg); 654extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
525extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)); 655extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
526extern void ata_eh_qc_complete(struct ata_queued_cmd *qc);
527extern void ata_eh_qc_retry(struct ata_queued_cmd *qc);
528extern int ata_scsi_release(struct Scsi_Host *host); 656extern int ata_scsi_release(struct Scsi_Host *host);
529extern unsigned int ata_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc); 657extern unsigned int ata_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc);
658extern int sata_scr_valid(struct ata_port *ap);
659extern int sata_scr_read(struct ata_port *ap, int reg, u32 *val);
660extern int sata_scr_write(struct ata_port *ap, int reg, u32 val);
661extern int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val);
662extern int ata_port_online(struct ata_port *ap);
663extern int ata_port_offline(struct ata_port *ap);
530extern int ata_scsi_device_resume(struct scsi_device *); 664extern int ata_scsi_device_resume(struct scsi_device *);
531extern int ata_scsi_device_suspend(struct scsi_device *, pm_message_t state); 665extern int ata_scsi_device_suspend(struct scsi_device *, pm_message_t state);
532extern int ata_device_resume(struct ata_port *, struct ata_device *); 666extern int ata_device_resume(struct ata_device *);
533extern int ata_device_suspend(struct ata_port *, struct ata_device *, pm_message_t state); 667extern int ata_device_suspend(struct ata_device *, pm_message_t state);
534extern int ata_ratelimit(void); 668extern int ata_ratelimit(void);
535extern unsigned int ata_busy_sleep(struct ata_port *ap, 669extern unsigned int ata_busy_sleep(struct ata_port *ap,
536 unsigned long timeout_pat, 670 unsigned long timeout_pat,
537 unsigned long timeout); 671 unsigned long timeout);
538extern void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), 672extern void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *),
539 void *data, unsigned long delay); 673 void *data, unsigned long delay);
674extern u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
675 unsigned long interval_msec,
676 unsigned long timeout_msec);
540 677
541/* 678/*
542 * Default driver ops implementations 679 * Default driver ops implementations
@@ -550,11 +687,16 @@ extern void ata_std_dev_select (struct ata_port *ap, unsigned int device);
550extern u8 ata_check_status(struct ata_port *ap); 687extern u8 ata_check_status(struct ata_port *ap);
551extern u8 ata_altstatus(struct ata_port *ap); 688extern u8 ata_altstatus(struct ata_port *ap);
552extern void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf); 689extern void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf);
553extern int ata_std_probe_reset(struct ata_port *ap, unsigned int *classes);
554extern int ata_port_start (struct ata_port *ap); 690extern int ata_port_start (struct ata_port *ap);
555extern void ata_port_stop (struct ata_port *ap); 691extern void ata_port_stop (struct ata_port *ap);
556extern void ata_host_stop (struct ata_host_set *host_set); 692extern void ata_host_stop (struct ata_host_set *host_set);
557extern irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs); 693extern irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs);
694extern void ata_mmio_data_xfer(struct ata_device *adev, unsigned char *buf,
695 unsigned int buflen, int write_data);
696extern void ata_pio_data_xfer(struct ata_device *adev, unsigned char *buf,
697 unsigned int buflen, int write_data);
698extern void ata_pio_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
699 unsigned int buflen, int write_data);
558extern void ata_qc_prep(struct ata_queued_cmd *qc); 700extern void ata_qc_prep(struct ata_queued_cmd *qc);
559extern void ata_noop_qc_prep(struct ata_queued_cmd *qc); 701extern void ata_noop_qc_prep(struct ata_queued_cmd *qc);
560extern unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc); 702extern unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc);
@@ -572,17 +714,29 @@ extern void ata_bmdma_start (struct ata_queued_cmd *qc);
572extern void ata_bmdma_stop(struct ata_queued_cmd *qc); 714extern void ata_bmdma_stop(struct ata_queued_cmd *qc);
573extern u8 ata_bmdma_status(struct ata_port *ap); 715extern u8 ata_bmdma_status(struct ata_port *ap);
574extern void ata_bmdma_irq_clear(struct ata_port *ap); 716extern void ata_bmdma_irq_clear(struct ata_port *ap);
575extern void __ata_qc_complete(struct ata_queued_cmd *qc); 717extern void ata_bmdma_freeze(struct ata_port *ap);
576extern void ata_eng_timeout(struct ata_port *ap); 718extern void ata_bmdma_thaw(struct ata_port *ap);
577extern void ata_scsi_simulate(struct ata_port *ap, struct ata_device *dev, 719extern void ata_bmdma_drive_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
578 struct scsi_cmnd *cmd, 720 ata_reset_fn_t softreset,
721 ata_reset_fn_t hardreset,
722 ata_postreset_fn_t postreset);
723extern void ata_bmdma_error_handler(struct ata_port *ap);
724extern void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc);
725extern int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
726 u8 status, int in_wq);
727extern void ata_qc_complete(struct ata_queued_cmd *qc);
728extern int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
729 void (*finish_qc)(struct ata_queued_cmd *));
730extern void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd,
579 void (*done)(struct scsi_cmnd *)); 731 void (*done)(struct scsi_cmnd *));
580extern int ata_std_bios_param(struct scsi_device *sdev, 732extern int ata_std_bios_param(struct scsi_device *sdev,
581 struct block_device *bdev, 733 struct block_device *bdev,
582 sector_t capacity, int geom[]); 734 sector_t capacity, int geom[]);
583extern int ata_scsi_slave_config(struct scsi_device *sdev); 735extern int ata_scsi_slave_config(struct scsi_device *sdev);
584extern struct ata_device *ata_dev_pair(struct ata_port *ap, 736extern void ata_scsi_slave_destroy(struct scsi_device *sdev);
585 struct ata_device *adev); 737extern int ata_scsi_change_queue_depth(struct scsi_device *sdev,
738 int queue_depth);
739extern struct ata_device *ata_dev_pair(struct ata_device *adev);
586 740
587/* 741/*
588 * Timing helpers 742 * Timing helpers
@@ -628,7 +782,64 @@ extern int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bit
628extern unsigned long ata_pci_default_filter(const struct ata_port *, struct ata_device *, unsigned long); 782extern unsigned long ata_pci_default_filter(const struct ata_port *, struct ata_device *, unsigned long);
629#endif /* CONFIG_PCI */ 783#endif /* CONFIG_PCI */
630 784
785/*
786 * EH
787 */
788extern void ata_eng_timeout(struct ata_port *ap);
789
790extern void ata_port_schedule_eh(struct ata_port *ap);
791extern int ata_port_abort(struct ata_port *ap);
792extern int ata_port_freeze(struct ata_port *ap);
793
794extern void ata_eh_freeze_port(struct ata_port *ap);
795extern void ata_eh_thaw_port(struct ata_port *ap);
796
797extern void ata_eh_qc_complete(struct ata_queued_cmd *qc);
798extern void ata_eh_qc_retry(struct ata_queued_cmd *qc);
799
800extern void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
801 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
802 ata_postreset_fn_t postreset);
803
804/*
805 * printk helpers
806 */
807#define ata_port_printk(ap, lv, fmt, args...) \
808 printk(lv"ata%u: "fmt, (ap)->id , ##args)
809
810#define ata_dev_printk(dev, lv, fmt, args...) \
811 printk(lv"ata%u.%02u: "fmt, (dev)->ap->id, (dev)->devno , ##args)
812
813/*
814 * ata_eh_info helpers
815 */
816#define ata_ehi_push_desc(ehi, fmt, args...) do { \
817 (ehi)->desc_len += scnprintf((ehi)->desc + (ehi)->desc_len, \
818 ATA_EH_DESC_LEN - (ehi)->desc_len, \
819 fmt , ##args); \
820} while (0)
821
822#define ata_ehi_clear_desc(ehi) do { \
823 (ehi)->desc[0] = '\0'; \
824 (ehi)->desc_len = 0; \
825} while (0)
826
827static inline void ata_ehi_hotplugged(struct ata_eh_info *ehi)
828{
829 if (ehi->flags & ATA_EHI_HOTPLUGGED)
830 return;
831
832 ehi->flags |= ATA_EHI_HOTPLUGGED;
833 ehi->hotplug_timestamp = jiffies;
631 834
835 ehi->err_mask |= AC_ERR_ATA_BUS;
836 ehi->action |= ATA_EH_SOFTRESET;
837 ehi->probe_mask |= (1 << ATA_MAX_DEVICES) - 1;
838}
839
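
Together, the printk and ata_eh_info helpers above give an LLDD interrupt handler a cheap way to describe a failure and hand it to new-style EH. A hedged sketch (hypothetical handler; AC_ERR_HSM is just an example error class, and ata_port_freeze() is declared below):

static void my_error_intr(struct ata_port *ap, u32 irq_stat)
{
	struct ata_eh_info *ehi = &ap->eh_info;

	ata_ehi_clear_desc(ehi);
	ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);

	ehi->err_mask |= AC_ERR_HSM;
	ehi->action |= ATA_EH_SOFTRESET;

	ata_port_freeze(ap);	/* aborts in-flight qcs and schedules EH */
}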
840/*
841 * qc helpers
842 */
632static inline int 843static inline int
633ata_sg_is_last(struct scatterlist *sg, struct ata_queued_cmd *qc) 844ata_sg_is_last(struct scatterlist *sg, struct ata_queued_cmd *qc)
634{ 845{
@@ -671,14 +882,39 @@ static inline unsigned int ata_tag_valid(unsigned int tag)
671 return (tag < ATA_MAX_QUEUE) ? 1 : 0; 882 return (tag < ATA_MAX_QUEUE) ? 1 : 0;
672} 883}
673 884
674static inline unsigned int ata_class_present(unsigned int class) 885static inline unsigned int ata_tag_internal(unsigned int tag)
886{
887 return tag == ATA_MAX_QUEUE - 1;
888}
889
890static inline unsigned int ata_class_enabled(unsigned int class)
675{ 891{
676 return class == ATA_DEV_ATA || class == ATA_DEV_ATAPI; 892 return class == ATA_DEV_ATA || class == ATA_DEV_ATAPI;
677} 893}
678 894
679static inline unsigned int ata_dev_present(const struct ata_device *dev) 895static inline unsigned int ata_class_disabled(unsigned int class)
680{ 896{
681 return ata_class_present(dev->class); 897 return class == ATA_DEV_ATA_UNSUP || class == ATA_DEV_ATAPI_UNSUP;
898}
899
900static inline unsigned int ata_class_absent(unsigned int class)
901{
902 return !ata_class_enabled(class) && !ata_class_disabled(class);
903}
904
905static inline unsigned int ata_dev_enabled(const struct ata_device *dev)
906{
907 return ata_class_enabled(dev->class);
908}
909
910static inline unsigned int ata_dev_disabled(const struct ata_device *dev)
911{
912 return ata_class_disabled(dev->class);
913}
914
915static inline unsigned int ata_dev_absent(const struct ata_device *dev)
916{
917 return ata_class_absent(dev->class);
682} 918}
683 919
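
Device state is now tri-state - enabled, disabled (probed but unusable), or absent - so loops over a port's devices typically filter with ata_dev_enabled(). A sketch:

static void my_visit_enabled_devs(struct ata_port *ap)
{
	int i;

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];

		if (!ata_dev_enabled(dev))
			continue;	/* skip disabled/absent devices */

		/* ... operate on dev ... */
	}
}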
684static inline u8 ata_chk_status(struct ata_port *ap) 920static inline u8 ata_chk_status(struct ata_port *ap)
@@ -759,20 +995,35 @@ static inline void ata_qc_set_polling(struct ata_queued_cmd *qc)
759 qc->tf.ctl |= ATA_NIEN; 995 qc->tf.ctl |= ATA_NIEN;
760} 996}
761 997
762static inline struct ata_queued_cmd *ata_qc_from_tag (struct ata_port *ap, 998static inline struct ata_queued_cmd *__ata_qc_from_tag(struct ata_port *ap,
763 unsigned int tag) 999 unsigned int tag)
764{ 1000{
765 if (likely(ata_tag_valid(tag))) 1001 if (likely(ata_tag_valid(tag)))
766 return &ap->qcmd[tag]; 1002 return &ap->qcmd[tag];
767 return NULL; 1003 return NULL;
768} 1004}
769 1005
770static inline void ata_tf_init(struct ata_port *ap, struct ata_taskfile *tf, unsigned int device) 1006static inline struct ata_queued_cmd *ata_qc_from_tag(struct ata_port *ap,
1007 unsigned int tag)
1008{
1009 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
1010
1011 if (unlikely(!qc) || !ap->ops->error_handler)
1012 return qc;
1013
1014 if ((qc->flags & (ATA_QCFLAG_ACTIVE |
1015 ATA_QCFLAG_FAILED)) == ATA_QCFLAG_ACTIVE)
1016 return qc;
1017
1018 return NULL;
1019}
1020
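
Note the split above: __ata_qc_from_tag() is the raw table lookup, while ata_qc_from_tag() additionally returns NULL for commands that new-style EH has claimed (ATA_QCFLAG_FAILED set), so hot paths cannot race with the error handler. A sketch of the usual interrupt-side use (hypothetical wrapper):

static void my_port_intr(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;

	/* NULL here means either no active command or EH owns the qc */
	qc = ata_qc_from_tag(ap, ap->active_tag);
	if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING))
		ata_host_intr(ap, qc);
}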
1021static inline void ata_tf_init(struct ata_device *dev, struct ata_taskfile *tf)
771{ 1022{
772 memset(tf, 0, sizeof(*tf)); 1023 memset(tf, 0, sizeof(*tf));
773 1024
774 tf->ctl = ap->ctl; 1025 tf->ctl = dev->ap->ctl;
775 if (device == 0) 1026 if (dev->devno == 0)
776 tf->device = ATA_DEVICE_OBS; 1027 tf->device = ATA_DEVICE_OBS;
777 else 1028 else
778 tf->device = ATA_DEVICE_OBS | ATA_DEV1; 1029 tf->device = ATA_DEVICE_OBS | ATA_DEV1;
@@ -787,26 +1038,11 @@ static inline void ata_qc_reinit(struct ata_queued_cmd *qc)
787 qc->nbytes = qc->curbytes = 0; 1038 qc->nbytes = qc->curbytes = 0;
788 qc->err_mask = 0; 1039 qc->err_mask = 0;
789 1040
790 ata_tf_init(qc->ap, &qc->tf, qc->dev->devno); 1041 ata_tf_init(qc->dev, &qc->tf);
791}
792
793/**
794 * ata_qc_complete - Complete an active ATA command
795 * @qc: Command to complete
796 * @err_mask: ATA Status register contents
797 *
798 * Indicate to the mid and upper layers that an ATA
799 * command has completed, with either an ok or not-ok status.
800 *
801 * LOCKING:
802 * spin_lock_irqsave(host_set lock)
803 */
804static inline void ata_qc_complete(struct ata_queued_cmd *qc)
805{
806 if (unlikely(qc->flags & ATA_QCFLAG_EH_SCHEDULED))
807 return;
808 1042
809 __ata_qc_complete(qc); 1043 /* init result_tf such that it indicates normal completion */
1044 qc->result_tf.command = ATA_DRDY;
1045 qc->result_tf.feature = 0;
810} 1046}
811 1047
812/** 1048/**
@@ -885,28 +1121,6 @@ static inline u8 ata_irq_ack(struct ata_port *ap, unsigned int chk_drq)
885 return status; 1121 return status;
886} 1122}
887 1123
888static inline u32 scr_read(struct ata_port *ap, unsigned int reg)
889{
890 return ap->ops->scr_read(ap, reg);
891}
892
893static inline void scr_write(struct ata_port *ap, unsigned int reg, u32 val)
894{
895 ap->ops->scr_write(ap, reg, val);
896}
897
898static inline void scr_write_flush(struct ata_port *ap, unsigned int reg,
899 u32 val)
900{
901 ap->ops->scr_write(ap, reg, val);
902 (void) ap->ops->scr_read(ap, reg);
903}
904
905static inline unsigned int sata_dev_present(struct ata_port *ap)
906{
907 return ((scr_read(ap, SCR_STATUS) & 0xf) == 0x3) ? 1 : 0;
908}
909
910static inline int ata_try_flush_cache(const struct ata_device *dev) 1124static inline int ata_try_flush_cache(const struct ata_device *dev)
911{ 1125{
912 return ata_id_wcache_enabled(dev->id) || 1126 return ata_id_wcache_enabled(dev->id) ||
@@ -916,7 +1130,7 @@ static inline int ata_try_flush_cache(const struct ata_device *dev)
916 1130
917static inline unsigned int ac_err_mask(u8 status) 1131static inline unsigned int ac_err_mask(u8 status)
918{ 1132{
919 if (status & ATA_BUSY) 1133 if (status & (ATA_BUSY | ATA_DRQ))
920 return AC_ERR_HSM; 1134 return AC_ERR_HSM;
921 if (status & (ATA_ERR | ATA_DF)) 1135 if (status & (ATA_ERR | ATA_DF))
922 return AC_ERR_DEV; 1136 return AC_ERR_DEV;
@@ -944,4 +1158,9 @@ static inline void ata_pad_free(struct ata_port *ap, struct device *dev)
944 dma_free_coherent(dev, ATA_DMA_PAD_BUF_SZ, ap->pad, ap->pad_dma); 1158 dma_free_coherent(dev, ATA_DMA_PAD_BUF_SZ, ap->pad, ap->pad_dma);
945} 1159}
946 1160
1161static inline struct ata_port *ata_shost_to_port(struct Scsi_Host *host)
1162{
1163 return (struct ata_port *) &host->hostdata[0];
1164}
1165
947#endif /* __LINUX_LIBATA_H__ */ 1166#endif /* __LINUX_LIBATA_H__ */
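
ata_shost_to_port() gives SCSI-midlayer callbacks a way back to the ata_port embedded in the Scsi_Host's hostdata. A hedged sketch of use in a slave_config hook (hypothetical; the queue-depth call assumes the 2.6-era scsi_adjust_queue_depth() API):

static int my_slave_config(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);

	if (ap->flags & ATA_FLAG_NCQ)
		scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG,
					ATA_MAX_QUEUE - 1);
	return 0;
}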
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index cde701c13c77..c2fd2d19938b 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -1196,8 +1196,12 @@
1196#define PCI_DEVICE_ID_NVIDIA_NVENET_15 0x0373 1196#define PCI_DEVICE_ID_NVIDIA_NVENET_15 0x0373
1197#define PCI_DEVICE_ID_NVIDIA_NVENET_16 0x03E5 1197#define PCI_DEVICE_ID_NVIDIA_NVENET_16 0x03E5
1198#define PCI_DEVICE_ID_NVIDIA_NVENET_17 0x03E6 1198#define PCI_DEVICE_ID_NVIDIA_NVENET_17 0x03E6
1199#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA 0x03E7
1200#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_IDE 0x03EC
1199#define PCI_DEVICE_ID_NVIDIA_NVENET_18 0x03EE 1201#define PCI_DEVICE_ID_NVIDIA_NVENET_18 0x03EE
1200#define PCI_DEVICE_ID_NVIDIA_NVENET_19 0x03EF 1202#define PCI_DEVICE_ID_NVIDIA_NVENET_19 0x03EF
1203#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2 0x03F6
1204#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3 0x03F7
1201#define PCI_DEVICE_ID_NVIDIA_NVENET_20 0x0450 1205#define PCI_DEVICE_ID_NVIDIA_NVENET_20 0x0450
1202#define PCI_DEVICE_ID_NVIDIA_NVENET_21 0x0451 1206#define PCI_DEVICE_ID_NVIDIA_NVENET_21 0x0451
1203#define PCI_DEVICE_ID_NVIDIA_NVENET_22 0x0452 1207#define PCI_DEVICE_ID_NVIDIA_NVENET_22 0x0452
@@ -1255,6 +1259,7 @@
1255#define PCI_DEVICE_ID_VIA_PX8X0_0 0x0259 1259#define PCI_DEVICE_ID_VIA_PX8X0_0 0x0259
1256#define PCI_DEVICE_ID_VIA_3269_0 0x0269 1260#define PCI_DEVICE_ID_VIA_3269_0 0x0269
1257#define PCI_DEVICE_ID_VIA_K8T800PRO_0 0x0282 1261#define PCI_DEVICE_ID_VIA_K8T800PRO_0 0x0282
1262#define PCI_DEVICE_ID_VIA_3296_0 0x0296
1258#define PCI_DEVICE_ID_VIA_8363_0 0x0305 1263#define PCI_DEVICE_ID_VIA_8363_0 0x0305
1259#define PCI_DEVICE_ID_VIA_P4M800CE 0x0314 1264#define PCI_DEVICE_ID_VIA_P4M800CE 0x0314
1260#define PCI_DEVICE_ID_VIA_8371_0 0x0391 1265#define PCI_DEVICE_ID_VIA_8371_0 0x0391
@@ -1262,6 +1267,7 @@
1262#define PCI_DEVICE_ID_VIA_82C561 0x0561 1267#define PCI_DEVICE_ID_VIA_82C561 0x0561
1263#define PCI_DEVICE_ID_VIA_82C586_1 0x0571 1268#define PCI_DEVICE_ID_VIA_82C586_1 0x0571
1264#define PCI_DEVICE_ID_VIA_82C576 0x0576 1269#define PCI_DEVICE_ID_VIA_82C576 0x0576
1270#define PCI_DEVICE_ID_VIA_SATA_EIDE 0x0581
1265#define PCI_DEVICE_ID_VIA_82C586_0 0x0586 1271#define PCI_DEVICE_ID_VIA_82C586_0 0x0586
1266#define PCI_DEVICE_ID_VIA_82C596 0x0596 1272#define PCI_DEVICE_ID_VIA_82C596 0x0596
1267#define PCI_DEVICE_ID_VIA_82C597_0 0x0597 1273#define PCI_DEVICE_ID_VIA_82C597_0 0x0597
@@ -1302,10 +1308,11 @@
1302#define PCI_DEVICE_ID_VIA_8783_0 0x3208 1308#define PCI_DEVICE_ID_VIA_8783_0 0x3208
1303#define PCI_DEVICE_ID_VIA_8237 0x3227 1309#define PCI_DEVICE_ID_VIA_8237 0x3227
1304#define PCI_DEVICE_ID_VIA_8251 0x3287 1310#define PCI_DEVICE_ID_VIA_8251 0x3287
1305#define PCI_DEVICE_ID_VIA_3296_0 0x0296 1311#define PCI_DEVICE_ID_VIA_8237A 0x3337
1306#define PCI_DEVICE_ID_VIA_8231 0x8231 1312#define PCI_DEVICE_ID_VIA_8231 0x8231
1307#define PCI_DEVICE_ID_VIA_8231_4 0x8235 1313#define PCI_DEVICE_ID_VIA_8231_4 0x8235
1308#define PCI_DEVICE_ID_VIA_8365_1 0x8305 1314#define PCI_DEVICE_ID_VIA_8365_1 0x8305
1315#define PCI_DEVICE_ID_VIA_CX700 0x8324
1309#define PCI_DEVICE_ID_VIA_8371_1 0x8391 1316#define PCI_DEVICE_ID_VIA_8371_1 0x8391
1310#define PCI_DEVICE_ID_VIA_82C598_1 0x8598 1317#define PCI_DEVICE_ID_VIA_82C598_1 0x8598
1311#define PCI_DEVICE_ID_VIA_838X_1 0xB188 1318#define PCI_DEVICE_ID_VIA_838X_1 0xB188
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index b0caabec1bd1..e46cd404bd7d 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -145,6 +145,7 @@ extern struct scsi_cmnd *scsi_get_command(struct scsi_device *, gfp_t);
145extern void scsi_put_command(struct scsi_cmnd *); 145extern void scsi_put_command(struct scsi_cmnd *);
146extern void scsi_io_completion(struct scsi_cmnd *, unsigned int, unsigned int); 146extern void scsi_io_completion(struct scsi_cmnd *, unsigned int, unsigned int);
147extern void scsi_finish_command(struct scsi_cmnd *cmd); 147extern void scsi_finish_command(struct scsi_cmnd *cmd);
148extern void scsi_req_abort_cmd(struct scsi_cmnd *cmd);
148 149
149extern void *scsi_kmap_atomic_sg(struct scatterlist *sg, int sg_count, 150extern void *scsi_kmap_atomic_sg(struct scatterlist *sg, int sg_count,
150 size_t *offset, size_t *len); 151 size_t *offset, size_t *len);
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index de6ce541a046..a42efd6e4be8 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -472,6 +472,7 @@ struct Scsi_Host {
472 */ 472 */
473 unsigned int host_busy; /* commands actually active on low-level */ 473 unsigned int host_busy; /* commands actually active on low-level */
474 unsigned int host_failed; /* commands that failed. */ 474 unsigned int host_failed; /* commands that failed. */
475 unsigned int host_eh_scheduled; /* EH scheduled without command */
475 476
476 unsigned short host_no; /* Used for IOCTL_GET_IDLUN, /proc/scsi et al. */ 477 unsigned short host_no; /* Used for IOCTL_GET_IDLUN, /proc/scsi et al. */
477 int resetting; /* if set, it means that last_reset is a valid value */ 478 int resetting; /* if set, it means that last_reset is a valid value */