-rw-r--r--  drivers/ide/pci/amd74xx.c          |    7
-rw-r--r--  drivers/scsi/Makefile              |    2
-rw-r--r--  drivers/scsi/ahci.c                |  436
-rw-r--r--  drivers/scsi/ata_piix.c            |   28
-rw-r--r--  drivers/scsi/libata-bmdma.c        |  143
-rw-r--r--  drivers/scsi/libata-core.c         | 2528
-rw-r--r--  drivers/scsi/libata-eh.c           | 1561
-rw-r--r--  drivers/scsi/libata-scsi.c         |  408
-rw-r--r--  drivers/scsi/libata.h              |   24
-rw-r--r--  drivers/scsi/pdc_adma.c            |   10
-rw-r--r--  drivers/scsi/sata_mv.c             |   70
-rw-r--r--  drivers/scsi/sata_nv.c             |   13
-rw-r--r--  drivers/scsi/sata_promise.c        |   39
-rw-r--r--  drivers/scsi/sata_qstor.c          |   14
-rw-r--r--  drivers/scsi/sata_sil.c            |   66
-rw-r--r--  drivers/scsi/sata_sil24.c          |  615
-rw-r--r--  drivers/scsi/sata_sis.c            |    3
-rw-r--r--  drivers/scsi/sata_svw.c            |    5
-rw-r--r--  drivers/scsi/sata_sx4.c            |   20
-rw-r--r--  drivers/scsi/sata_uli.c            |    3
-rw-r--r--  drivers/scsi/sata_via.c            |    3
-rw-r--r--  drivers/scsi/sata_vsc.c            |   16
-rw-r--r--  drivers/scsi/scsi.c                |   18
-rw-r--r--  drivers/scsi/scsi_error.c          |   24
-rw-r--r--  drivers/scsi/scsi_lib.c            |    2
-rw-r--r--  drivers/scsi/scsi_transport_api.h  |    6
-rw-r--r--  include/linux/ata.h                |   34
-rw-r--r--  include/linux/libata.h             |  386
-rw-r--r--  include/linux/pci_ids.h            |    4
-rw-r--r--  include/scsi/scsi_cmnd.h           |    1
-rw-r--r--  include/scsi/scsi_host.h           |    1
31 files changed, 4766 insertions, 1724 deletions
diff --git a/drivers/ide/pci/amd74xx.c b/drivers/ide/pci/amd74xx.c
index b22ee5462318..6e9dbf4d8077 100644
--- a/drivers/ide/pci/amd74xx.c
+++ b/drivers/ide/pci/amd74xx.c
@@ -74,6 +74,7 @@ static struct amd_ide_chip {
 	{ PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_IDE,	0x50, AMD_UDMA_133 },
 	{ PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_IDE,	0x50, AMD_UDMA_133 },
 	{ PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE,	0x50, AMD_UDMA_133 },
+	{ PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_IDE,	0x50, AMD_UDMA_133 },
 	{ PCI_DEVICE_ID_AMD_CS5536_IDE,			0x40, AMD_UDMA_100 },
 	{ 0 }
 };
@@ -488,7 +489,8 @@ static ide_pci_device_t amd74xx_chipsets[] __devinitdata = {
 	/* 14 */ DECLARE_NV_DEV("NFORCE-MCP04"),
 	/* 15 */ DECLARE_NV_DEV("NFORCE-MCP51"),
 	/* 16 */ DECLARE_NV_DEV("NFORCE-MCP55"),
-	/* 17 */ DECLARE_AMD_DEV("AMD5536"),
+	/* 17 */ DECLARE_NV_DEV("NFORCE-MCP61"),
+	/* 18 */ DECLARE_AMD_DEV("AMD5536"),
 };
 
 static int __devinit amd74xx_probe(struct pci_dev *dev, const struct pci_device_id *id)
@@ -525,7 +527,8 @@ static struct pci_device_id amd74xx_pci_tbl[] = {
 	{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 14 },
 	{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 15 },
 	{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 16 },
-	{ PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 17 },
+	{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 17 },
+	{ PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 18 },
 	{ 0, },
 };
 MODULE_DEVICE_TABLE(pci, amd74xx_pci_tbl);
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 81803a16f986..669ff6b99c4f 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -164,7 +164,7 @@ ncr53c8xx-flags-$(CONFIG_SCSI_ZALON) \
 CFLAGS_ncr53c8xx.o	:= $(ncr53c8xx-flags-y) $(ncr53c8xx-flags-m)
 zalon7xx-objs		:= zalon.o ncr53c8xx.o
 NCR_Q720_mod-objs	:= NCR_Q720.o ncr53c8xx.o
-libata-objs		:= libata-core.o libata-scsi.o libata-bmdma.o
+libata-objs		:= libata-core.o libata-scsi.o libata-bmdma.o libata-eh.o
 oktagon_esp_mod-objs	:= oktagon_esp.o oktagon_io.o
 
 # Files generated that shall be removed upon make clean
diff --git a/drivers/scsi/ahci.c b/drivers/scsi/ahci.c
index b4f8fb1d628b..45fd71d80128 100644
--- a/drivers/scsi/ahci.c
+++ b/drivers/scsi/ahci.c
@@ -48,7 +48,7 @@
 #include <asm/io.h>
 
 #define DRV_NAME	"ahci"
-#define DRV_VERSION	"1.2"
+#define DRV_VERSION	"1.3"
 
 
 enum {
@@ -56,12 +56,15 @@ enum {
 	AHCI_MAX_SG		= 168, /* hardware max is 64K */
 	AHCI_DMA_BOUNDARY	= 0xffffffff,
 	AHCI_USE_CLUSTERING	= 0,
-	AHCI_CMD_SLOT_SZ	= 32 * 32,
+	AHCI_MAX_CMDS		= 32,
+	AHCI_CMD_SZ		= 32,
+	AHCI_CMD_SLOT_SZ	= AHCI_MAX_CMDS * AHCI_CMD_SZ,
 	AHCI_RX_FIS_SZ		= 256,
-	AHCI_CMD_TBL_HDR	= 0x80,
 	AHCI_CMD_TBL_CDB	= 0x40,
-	AHCI_CMD_TBL_SZ		= AHCI_CMD_TBL_HDR + (AHCI_MAX_SG * 16),
-	AHCI_PORT_PRIV_DMA_SZ	= AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_SZ +
+	AHCI_CMD_TBL_HDR_SZ	= 0x80,
+	AHCI_CMD_TBL_SZ		= AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16),
+	AHCI_CMD_TBL_AR_SZ	= AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS,
+	AHCI_PORT_PRIV_DMA_SZ	= AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ +
 				  AHCI_RX_FIS_SZ,
 	AHCI_IRQ_ON_SG		= (1 << 31),
 	AHCI_CMD_ATAPI		= (1 << 5),
@@ -71,8 +74,10 @@ enum {
 	AHCI_CMD_CLR_BUSY	= (1 << 10),
 
 	RX_FIS_D2H_REG		= 0x40,	/* offset of D2H Register FIS data */
+	RX_FIS_UNK		= 0x60,	/* offset of Unknown FIS data */
 
 	board_ahci		= 0,
+	board_ahci_vt8251	= 1,
 
 	/* global controller registers */
 	HOST_CAP		= 0x00, /* host capabilities */
@@ -87,8 +92,9 @@ enum {
 	HOST_AHCI_EN		= (1 << 31), /* AHCI enabled */
 
 	/* HOST_CAP bits */
-	HOST_CAP_64		= (1 << 31), /* PCI DAC (64-bit DMA) support */
 	HOST_CAP_CLO		= (1 << 24), /* Command List Override support */
+	HOST_CAP_NCQ		= (1 << 30), /* Native Command Queueing */
+	HOST_CAP_64		= (1 << 31), /* PCI DAC (64-bit DMA) support */
 
 	/* registers for each SATA port */
 	PORT_LST_ADDR		= 0x00, /* command list DMA addr */
@@ -127,15 +133,16 @@ enum {
 	PORT_IRQ_PIOS_FIS	= (1 << 1), /* PIO Setup FIS rx'd */
 	PORT_IRQ_D2H_REG_FIS	= (1 << 0), /* D2H Register FIS rx'd */
 
-	PORT_IRQ_FATAL		= PORT_IRQ_TF_ERR |
-				  PORT_IRQ_HBUS_ERR |
-				  PORT_IRQ_HBUS_DATA_ERR |
-				  PORT_IRQ_IF_ERR,
-	DEF_PORT_IRQ		= PORT_IRQ_FATAL | PORT_IRQ_PHYRDY |
-				  PORT_IRQ_CONNECT | PORT_IRQ_SG_DONE |
-				  PORT_IRQ_UNK_FIS | PORT_IRQ_SDB_FIS |
-				  PORT_IRQ_DMAS_FIS | PORT_IRQ_PIOS_FIS |
-				  PORT_IRQ_D2H_REG_FIS,
+	PORT_IRQ_FREEZE		= PORT_IRQ_HBUS_ERR |
+				  PORT_IRQ_IF_ERR |
+				  PORT_IRQ_CONNECT |
+				  PORT_IRQ_UNK_FIS,
+	PORT_IRQ_ERROR		= PORT_IRQ_FREEZE |
+				  PORT_IRQ_TF_ERR |
+				  PORT_IRQ_HBUS_DATA_ERR,
+	DEF_PORT_IRQ		= PORT_IRQ_ERROR | PORT_IRQ_SG_DONE |
+				  PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS |
+				  PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS,
 
 	/* PORT_CMD bits */
 	PORT_CMD_ATAPI		= (1 << 24), /* Device is ATAPI */
@@ -153,6 +160,9 @@ enum {
 
 	/* hpriv->flags bits */
 	AHCI_FLAG_MSI		= (1 << 0),
+
+	/* ap->flags bits */
+	AHCI_FLAG_RESET_NEEDS_CLO = (1 << 24),
 };
 
 struct ahci_cmd_hdr {
@@ -181,7 +191,6 @@ struct ahci_port_priv {
 	dma_addr_t		cmd_slot_dma;
 	void			*cmd_tbl;
 	dma_addr_t		cmd_tbl_dma;
-	struct ahci_sg		*cmd_tbl_sg;
 	void			*rx_fis;
 	dma_addr_t		rx_fis_dma;
 };
@@ -193,13 +202,15 @@ static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
 static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *regs);
 static int ahci_probe_reset(struct ata_port *ap, unsigned int *classes);
 static void ahci_irq_clear(struct ata_port *ap);
-static void ahci_eng_timeout(struct ata_port *ap);
 static int ahci_port_start(struct ata_port *ap);
 static void ahci_port_stop(struct ata_port *ap);
 static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
 static void ahci_qc_prep(struct ata_queued_cmd *qc);
 static u8 ahci_check_status(struct ata_port *ap);
-static inline int ahci_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc);
+static void ahci_freeze(struct ata_port *ap);
+static void ahci_thaw(struct ata_port *ap);
+static void ahci_error_handler(struct ata_port *ap);
+static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
 static void ahci_remove_one (struct pci_dev *pdev);
 
 static struct scsi_host_template ahci_sht = {
@@ -207,7 +218,8 @@ static struct scsi_host_template ahci_sht = {
 	.name			= DRV_NAME,
 	.ioctl			= ata_scsi_ioctl,
 	.queuecommand		= ata_scsi_queuecmd,
-	.can_queue		= ATA_DEF_QUEUE,
+	.change_queue_depth	= ata_scsi_change_queue_depth,
+	.can_queue		= AHCI_MAX_CMDS - 1,
 	.this_id		= ATA_SHT_THIS_ID,
 	.sg_tablesize		= AHCI_MAX_SG,
 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
@@ -233,14 +245,18 @@ static const struct ata_port_operations ahci_ops = {
 	.qc_prep		= ahci_qc_prep,
 	.qc_issue		= ahci_qc_issue,
 
-	.eng_timeout		= ahci_eng_timeout,
-
 	.irq_handler		= ahci_interrupt,
 	.irq_clear		= ahci_irq_clear,
 
 	.scr_read		= ahci_scr_read,
 	.scr_write		= ahci_scr_write,
 
+	.freeze			= ahci_freeze,
+	.thaw			= ahci_thaw,
+
+	.error_handler		= ahci_error_handler,
+	.post_internal_cmd	= ahci_post_internal_cmd,
+
 	.port_start		= ahci_port_start,
 	.port_stop		= ahci_port_stop,
 };
@@ -255,6 +271,16 @@ static const struct ata_port_info ahci_port_info[] = {
 		.udma_mask	= 0x7f, /* udma0-6 ; FIXME */
 		.port_ops	= &ahci_ops,
 	},
+	/* board_ahci_vt8251 */
+	{
+		.sht		= &ahci_sht,
+		.host_flags	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+				  ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
+				  AHCI_FLAG_RESET_NEEDS_CLO,
+		.pio_mask	= 0x1f, /* pio0-4 */
+		.udma_mask	= 0x7f, /* udma0-6 ; FIXME */
+		.port_ops	= &ahci_ops,
+	},
 };
 
 static const struct pci_device_id ahci_pci_tbl[] = {
@@ -296,6 +322,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
 	  board_ahci }, /* ATI SB600 non-raid */
 	{ PCI_VENDOR_ID_ATI, 0x4381, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
 	  board_ahci }, /* ATI SB600 raid */
+	{ PCI_VENDOR_ID_VIA, 0x3349, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+	  board_ahci_vt8251 }, /* VIA VT8251 */
 	{ }	/* terminate list */
 };
 
@@ -374,8 +402,6 @@ static int ahci_port_start(struct ata_port *ap)
 	pp->cmd_tbl = mem;
 	pp->cmd_tbl_dma = mem_dma;
 
-	pp->cmd_tbl_sg = mem + AHCI_CMD_TBL_HDR;
-
 	ap->private_data = pp;
 
 	if (hpriv->cap & HOST_CAP_64)
@@ -508,46 +534,60 @@ static unsigned int ahci_dev_classify(struct ata_port *ap)
 	return ata_dev_classify(&tf);
 }
 
-static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, u32 opts)
+static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
+			       u32 opts)
 {
-	pp->cmd_slot[0].opts = cpu_to_le32(opts);
-	pp->cmd_slot[0].status = 0;
-	pp->cmd_slot[0].tbl_addr = cpu_to_le32(pp->cmd_tbl_dma & 0xffffffff);
-	pp->cmd_slot[0].tbl_addr_hi = cpu_to_le32((pp->cmd_tbl_dma >> 16) >> 16);
+	dma_addr_t cmd_tbl_dma;
+
+	cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ;
+
+	pp->cmd_slot[tag].opts = cpu_to_le32(opts);
+	pp->cmd_slot[tag].status = 0;
+	pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff);
+	pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
 }
 
-static int ahci_poll_register(void __iomem *reg, u32 mask, u32 val,
-			      unsigned long interval_msec,
-			      unsigned long timeout_msec)
+static int ahci_clo(struct ata_port *ap)
 {
-	unsigned long timeout;
+	void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
+	struct ahci_host_priv *hpriv = ap->host_set->private_data;
 	u32 tmp;
 
-	timeout = jiffies + (timeout_msec * HZ) / 1000;
-	do {
-		tmp = readl(reg);
-		if ((tmp & mask) == val)
-			return 0;
-		msleep(interval_msec);
-	} while (time_before(jiffies, timeout));
+	if (!(hpriv->cap & HOST_CAP_CLO))
+		return -EOPNOTSUPP;
+
+	tmp = readl(port_mmio + PORT_CMD);
+	tmp |= PORT_CMD_CLO;
+	writel(tmp, port_mmio + PORT_CMD);
+
+	tmp = ata_wait_register(port_mmio + PORT_CMD,
+				PORT_CMD_CLO, PORT_CMD_CLO, 1, 500);
+	if (tmp & PORT_CMD_CLO)
+		return -EIO;
 
-	return -1;
+	return 0;
 }
 
-static int ahci_softreset(struct ata_port *ap, int verbose, unsigned int *class)
+static int ahci_softreset(struct ata_port *ap, unsigned int *class)
 {
-	struct ahci_host_priv *hpriv = ap->host_set->private_data;
 	struct ahci_port_priv *pp = ap->private_data;
 	void __iomem *mmio = ap->host_set->mmio_base;
 	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
 	const u32 cmd_fis_len = 5; /* five dwords */
 	const char *reason = NULL;
 	struct ata_taskfile tf;
+	u32 tmp;
 	u8 *fis;
 	int rc;
 
 	DPRINTK("ENTER\n");
 
+	if (ata_port_offline(ap)) {
+		DPRINTK("PHY reports no device\n");
+		*class = ATA_DEV_NONE;
+		return 0;
+	}
+
 	/* prepare for SRST (AHCI-1.1 10.4.1) */
 	rc = ahci_stop_engine(ap);
 	if (rc) {
@@ -558,23 +598,13 @@ static int ahci_softreset(struct ata_port *ap, int verbose, unsigned int *class)
 	/* check BUSY/DRQ, perform Command List Override if necessary */
 	ahci_tf_read(ap, &tf);
 	if (tf.command & (ATA_BUSY | ATA_DRQ)) {
-		u32 tmp;
+		rc = ahci_clo(ap);
 
-		if (!(hpriv->cap & HOST_CAP_CLO)) {
-			rc = -EIO;
-			reason = "port busy but no CLO";
+		if (rc == -EOPNOTSUPP) {
+			reason = "port busy but CLO unavailable";
 			goto fail_restart;
-		}
-
-		tmp = readl(port_mmio + PORT_CMD);
-		tmp |= PORT_CMD_CLO;
-		writel(tmp, port_mmio + PORT_CMD);
-		readl(port_mmio + PORT_CMD); /* flush */
-
-		if (ahci_poll_register(port_mmio + PORT_CMD, PORT_CMD_CLO, 0x0,
-				       1, 500)) {
-			rc = -EIO;
-			reason = "CLO failed";
+		} else if (rc) {
+			reason = "port busy but CLO failed";
 			goto fail_restart;
 		}
 	}
@@ -582,20 +612,21 @@ static int ahci_softreset(struct ata_port *ap, int verbose, unsigned int *class)
 	/* restart engine */
 	ahci_start_engine(ap);
 
-	ata_tf_init(ap, &tf, 0);
+	ata_tf_init(ap->device, &tf);
 	fis = pp->cmd_tbl;
 
 	/* issue the first D2H Register FIS */
-	ahci_fill_cmd_slot(pp, cmd_fis_len | AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY);
+	ahci_fill_cmd_slot(pp, 0,
+			   cmd_fis_len | AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY);
 
 	tf.ctl |= ATA_SRST;
 	ata_tf_to_fis(&tf, fis, 0);
 	fis[1] &= ~(1 << 7);	/* turn off Command FIS bit */
 
 	writel(1, port_mmio + PORT_CMD_ISSUE);
-	readl(port_mmio + PORT_CMD_ISSUE);	/* flush */
 
-	if (ahci_poll_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x0, 1, 500)) {
+	tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1, 1, 500);
+	if (tmp & 0x1) {
 		rc = -EIO;
 		reason = "1st FIS failed";
 		goto fail;
@@ -605,7 +636,7 @@ static int ahci_softreset(struct ata_port *ap, int verbose, unsigned int *class)
 	msleep(1);
 
 	/* issue the second D2H Register FIS */
-	ahci_fill_cmd_slot(pp, cmd_fis_len);
+	ahci_fill_cmd_slot(pp, 0, cmd_fis_len);
 
 	tf.ctl &= ~ATA_SRST;
 	ata_tf_to_fis(&tf, fis, 0);
@@ -625,7 +656,7 @@ static int ahci_softreset(struct ata_port *ap, int verbose, unsigned int *class)
 	msleep(150);
 
 	*class = ATA_DEV_NONE;
-	if (sata_dev_present(ap)) {
+	if (ata_port_online(ap)) {
 		if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
 			rc = -EIO;
 			reason = "device not ready";
@@ -640,25 +671,21 @@ static int ahci_softreset(struct ata_port *ap, int verbose, unsigned int *class)
  fail_restart:
 	ahci_start_engine(ap);
  fail:
-	if (verbose)
-		printk(KERN_ERR "ata%u: softreset failed (%s)\n",
-		       ap->id, reason);
-	else
-		DPRINTK("EXIT, rc=%d reason=\"%s\"\n", rc, reason);
+	ata_port_printk(ap, KERN_ERR, "softreset failed (%s)\n", reason);
 	return rc;
 }
 
-static int ahci_hardreset(struct ata_port *ap, int verbose, unsigned int *class)
+static int ahci_hardreset(struct ata_port *ap, unsigned int *class)
 {
 	int rc;
 
 	DPRINTK("ENTER\n");
 
 	ahci_stop_engine(ap);
-	rc = sata_std_hardreset(ap, verbose, class);
+	rc = sata_std_hardreset(ap, class);
 	ahci_start_engine(ap);
 
-	if (rc == 0)
+	if (rc == 0 && ata_port_online(ap))
 		*class = ahci_dev_classify(ap);
 	if (*class == ATA_DEV_UNKNOWN)
 		*class = ATA_DEV_NONE;
@@ -688,6 +715,12 @@ static void ahci_postreset(struct ata_port *ap, unsigned int *class)
 
 static int ahci_probe_reset(struct ata_port *ap, unsigned int *classes)
 {
+	if ((ap->flags & AHCI_FLAG_RESET_NEEDS_CLO) &&
+	    (ata_busy_wait(ap, ATA_BUSY, 1000) & ATA_BUSY)) {
+		/* ATA_BUSY hasn't cleared, so send a CLO */
+		ahci_clo(ap);
+	}
+
 	return ata_drive_probe_reset(ap, ata_std_probeinit,
 				     ahci_softreset, ahci_hardreset,
 				     ahci_postreset, classes);
@@ -708,9 +741,8 @@ static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
 	ata_tf_from_fis(d2h_fis, tf);
 }
 
-static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc)
+static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
 {
-	struct ahci_port_priv *pp = qc->ap->private_data;
 	struct scatterlist *sg;
 	struct ahci_sg *ahci_sg;
 	unsigned int n_sg = 0;
@@ -720,7 +752,7 @@ static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc)
 	/*
 	 * Next, the S/G list.
 	 */
-	ahci_sg = pp->cmd_tbl_sg;
+	ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
 	ata_for_each_sg(sg, qc) {
 		dma_addr_t addr = sg_dma_address(sg);
 		u32 sg_len = sg_dma_len(sg);
@@ -741,6 +773,7 @@ static void ahci_qc_prep(struct ata_queued_cmd *qc)
 	struct ata_port *ap = qc->ap;
 	struct ahci_port_priv *pp = ap->private_data;
 	int is_atapi = is_atapi_taskfile(&qc->tf);
+	void *cmd_tbl;
 	u32 opts;
 	const u32 cmd_fis_len = 5; /* five dwords */
 	unsigned int n_elem;
@@ -749,16 +782,17 @@ static void ahci_qc_prep(struct ata_queued_cmd *qc)
 	 * Fill in command table information.  First, the header,
 	 * a SATA Register - Host to Device command FIS.
 	 */
-	ata_tf_to_fis(&qc->tf, pp->cmd_tbl, 0);
+	cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;
+
+	ata_tf_to_fis(&qc->tf, cmd_tbl, 0);
 	if (is_atapi) {
-		memset(pp->cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
-		memcpy(pp->cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb,
-		       qc->dev->cdb_len);
+		memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
+		memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
 	}
 
 	n_elem = 0;
 	if (qc->flags & ATA_QCFLAG_DMAMAP)
-		n_elem = ahci_fill_sg(qc);
+		n_elem = ahci_fill_sg(qc, cmd_tbl);
 
 	/*
 	 * Fill in command slot information.
@@ -769,112 +803,123 @@ static void ahci_qc_prep(struct ata_queued_cmd *qc)
 	if (is_atapi)
 		opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
 
-	ahci_fill_cmd_slot(pp, opts);
+	ahci_fill_cmd_slot(pp, qc->tag, opts);
 }
 
-static void ahci_restart_port(struct ata_port *ap, u32 irq_stat)
+static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
 {
-	void __iomem *mmio = ap->host_set->mmio_base;
-	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
-	u32 tmp;
+	struct ahci_port_priv *pp = ap->private_data;
+	struct ata_eh_info *ehi = &ap->eh_info;
+	unsigned int err_mask = 0, action = 0;
+	struct ata_queued_cmd *qc;
+	u32 serror;
 
-	if ((ap->device[0].class != ATA_DEV_ATAPI) ||
-	    ((irq_stat & PORT_IRQ_TF_ERR) == 0))
-		printk(KERN_WARNING "ata%u: port reset, "
-		       "p_is %x is %x pis %x cmd %x tf %x ss %x se %x\n",
-			ap->id,
-			irq_stat,
-			readl(mmio + HOST_IRQ_STAT),
-			readl(port_mmio + PORT_IRQ_STAT),
-			readl(port_mmio + PORT_CMD),
-			readl(port_mmio + PORT_TFDATA),
-			readl(port_mmio + PORT_SCR_STAT),
-			readl(port_mmio + PORT_SCR_ERR));
-
-	/* stop DMA */
-	ahci_stop_engine(ap);
+	ata_ehi_clear_desc(ehi);
 
-	/* clear SATA phy error, if any */
-	tmp = readl(port_mmio + PORT_SCR_ERR);
-	writel(tmp, port_mmio + PORT_SCR_ERR);
+	/* AHCI needs SError cleared; otherwise, it might lock up */
+	serror = ahci_scr_read(ap, SCR_ERROR);
+	ahci_scr_write(ap, SCR_ERROR, serror);
 
-	/* if DRQ/BSY is set, device needs to be reset.
-	 * if so, issue COMRESET
-	 */
-	tmp = readl(port_mmio + PORT_TFDATA);
-	if (tmp & (ATA_BUSY | ATA_DRQ)) {
-		writel(0x301, port_mmio + PORT_SCR_CTL);
-		readl(port_mmio + PORT_SCR_CTL); /* flush */
-		udelay(10);
-		writel(0x300, port_mmio + PORT_SCR_CTL);
-		readl(port_mmio + PORT_SCR_CTL); /* flush */
-	}
+	/* analyze @irq_stat */
+	ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);
+
+	if (irq_stat & PORT_IRQ_TF_ERR)
+		err_mask |= AC_ERR_DEV;
+
+	if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) {
+		err_mask |= AC_ERR_HOST_BUS;
+		action |= ATA_EH_SOFTRESET;
+	}
 
-	/* re-start DMA */
-	ahci_start_engine(ap);
-}
+	if (irq_stat & PORT_IRQ_IF_ERR) {
+		err_mask |= AC_ERR_ATA_BUS;
+		action |= ATA_EH_SOFTRESET;
+		ata_ehi_push_desc(ehi, ", interface fatal error");
+	}
 
-static void ahci_eng_timeout(struct ata_port *ap)
-{
-	struct ata_host_set *host_set = ap->host_set;
-	void __iomem *mmio = host_set->mmio_base;
-	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
-	struct ata_queued_cmd *qc;
-	unsigned long flags;
+	if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) {
+		err_mask |= AC_ERR_ATA_BUS;
+		action |= ATA_EH_SOFTRESET;
+		ata_ehi_push_desc(ehi, ", %s", irq_stat & PORT_IRQ_CONNECT ?
+				  "connection status changed" : "PHY RDY changed");
+	}
 
-	printk(KERN_WARNING "ata%u: handling error/timeout\n", ap->id);
+	if (irq_stat & PORT_IRQ_UNK_FIS) {
+		u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK);
 
-	spin_lock_irqsave(&host_set->lock, flags);
+		err_mask |= AC_ERR_HSM;
+		action |= ATA_EH_SOFTRESET;
+		ata_ehi_push_desc(ehi, ", unknown FIS %08x %08x %08x %08x",
+				  unk[0], unk[1], unk[2], unk[3]);
+	}
 
-	ahci_restart_port(ap, readl(port_mmio + PORT_IRQ_STAT));
-	qc = ata_qc_from_tag(ap, ap->active_tag);
-	qc->err_mask |= AC_ERR_TIMEOUT;
+	/* okay, let's hand over to EH */
+	ehi->serror |= serror;
+	ehi->action |= action;
 
-	spin_unlock_irqrestore(&host_set->lock, flags);
+	qc = ata_qc_from_tag(ap, ap->active_tag);
+	if (qc)
+		qc->err_mask |= err_mask;
+	else
+		ehi->err_mask |= err_mask;
 
-	ata_eh_qc_complete(qc);
+	if (irq_stat & PORT_IRQ_FREEZE)
+		ata_port_freeze(ap);
+	else
+		ata_port_abort(ap);
 }
 
-static inline int ahci_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
+static void ahci_host_intr(struct ata_port *ap)
 {
 	void __iomem *mmio = ap->host_set->mmio_base;
 	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
-	u32 status, serr, ci;
-
-	serr = readl(port_mmio + PORT_SCR_ERR);
-	writel(serr, port_mmio + PORT_SCR_ERR);
+	struct ata_eh_info *ehi = &ap->eh_info;
+	u32 status, qc_active;
+	int rc;
 
 	status = readl(port_mmio + PORT_IRQ_STAT);
 	writel(status, port_mmio + PORT_IRQ_STAT);
 
-	ci = readl(port_mmio + PORT_CMD_ISSUE);
-	if (likely((ci & 0x1) == 0)) {
-		if (qc) {
-			WARN_ON(qc->err_mask);
-			ata_qc_complete(qc);
-			qc = NULL;
-		}
+	if (unlikely(status & PORT_IRQ_ERROR)) {
+		ahci_error_intr(ap, status);
+		return;
 	}
 
-	if (status & PORT_IRQ_FATAL) {
-		unsigned int err_mask;
-		if (status & PORT_IRQ_TF_ERR)
-			err_mask = AC_ERR_DEV;
-		else if (status & PORT_IRQ_IF_ERR)
-			err_mask = AC_ERR_ATA_BUS;
-		else
-			err_mask = AC_ERR_HOST_BUS;
-
-		/* command processing has stopped due to error; restart */
-		ahci_restart_port(ap, status);
-
-		if (qc) {
-			qc->err_mask |= err_mask;
-			ata_qc_complete(qc);
-		}
+	if (ap->sactive)
+		qc_active = readl(port_mmio + PORT_SCR_ACT);
+	else
+		qc_active = readl(port_mmio + PORT_CMD_ISSUE);
+
+	rc = ata_qc_complete_multiple(ap, qc_active, NULL);
+	if (rc > 0)
+		return;
+	if (rc < 0) {
+		ehi->err_mask |= AC_ERR_HSM;
+		ehi->action |= ATA_EH_SOFTRESET;
+		ata_port_freeze(ap);
+		return;
+	}
+
+	/* hmmm... a spurious interupt */
+
+	/* some devices send D2H reg with I bit set during NCQ command phase */
+	if (ap->sactive && status & PORT_IRQ_D2H_REG_FIS)
+		return;
+
+	/* ignore interim PIO setup fis interrupts */
+	if (ata_tag_valid(ap->active_tag)) {
+		struct ata_queued_cmd *qc =
+			ata_qc_from_tag(ap, ap->active_tag);
+
+		if (qc && qc->tf.protocol == ATA_PROT_PIO &&
+		    (status & PORT_IRQ_PIOS_FIS))
+			return;
 	}
 
-	return 1;
+	if (ata_ratelimit())
+		ata_port_printk(ap, KERN_INFO, "spurious interrupt "
+				"(irq_stat 0x%x active_tag %d sactive 0x%x)\n",
+				status, ap->active_tag, ap->sactive);
 }
 
 static void ahci_irq_clear(struct ata_port *ap)
@@ -882,7 +927,7 @@ static void ahci_irq_clear(struct ata_port *ap)
 	/* TODO */
 }
 
-static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
+static irqreturn_t ahci_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
 {
 	struct ata_host_set *host_set = dev_instance;
 	struct ahci_host_priv *hpriv;
@@ -911,14 +956,7 @@ static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *
 
 		ap = host_set->ports[i];
 		if (ap) {
-			struct ata_queued_cmd *qc;
-			qc = ata_qc_from_tag(ap, ap->active_tag);
-			if (!ahci_host_intr(ap, qc))
-				if (ata_ratelimit())
-					dev_printk(KERN_WARNING, host_set->dev,
-					  "unhandled interrupt on port %u\n",
-					  i);
-
+			ahci_host_intr(ap);
 			VPRINTK("port %u\n", i);
 		} else {
 			VPRINTK("port %u (no irq)\n", i);
@@ -935,7 +973,7 @@ static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *
 		handled = 1;
 	}
 
-	spin_unlock(&host_set->lock); 
+	spin_unlock(&host_set->lock);
 
 	VPRINTK("EXIT\n");
 
@@ -947,12 +985,64 @@ static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
 	struct ata_port *ap = qc->ap;
 	void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
 
-	writel(1, port_mmio + PORT_CMD_ISSUE);
+	if (qc->tf.protocol == ATA_PROT_NCQ)
+		writel(1 << qc->tag, port_mmio + PORT_SCR_ACT);
+	writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE);
 	readl(port_mmio + PORT_CMD_ISSUE);	/* flush */
 
 	return 0;
 }
 
+static void ahci_freeze(struct ata_port *ap)
+{
+	void __iomem *mmio = ap->host_set->mmio_base;
+	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
+
+	/* turn IRQ off */
+	writel(0, port_mmio + PORT_IRQ_MASK);
+}
+
+static void ahci_thaw(struct ata_port *ap)
+{
+	void __iomem *mmio = ap->host_set->mmio_base;
+	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
+	u32 tmp;
+
+	/* clear IRQ */
+	tmp = readl(port_mmio + PORT_IRQ_STAT);
+	writel(tmp, port_mmio + PORT_IRQ_STAT);
+	writel(1 << ap->id, mmio + HOST_IRQ_STAT);
+
+	/* turn IRQ back on */
+	writel(DEF_PORT_IRQ, port_mmio + PORT_IRQ_MASK);
+}
+
+static void ahci_error_handler(struct ata_port *ap)
+{
+	if (!(ap->flags & ATA_FLAG_FROZEN)) {
+		/* restart engine */
+		ahci_stop_engine(ap);
+		ahci_start_engine(ap);
+	}
+
+	/* perform recovery */
+	ata_do_eh(ap, ahci_softreset, ahci_hardreset, ahci_postreset);
+}
+
+static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+
+	if (qc->flags & ATA_QCFLAG_FAILED)
+		qc->err_mask |= AC_ERR_OTHER;
+
+	if (qc->err_mask) {
+		/* make DMA engine forget about the failed command */
+		ahci_stop_engine(ap);
+		ahci_start_engine(ap);
+	}
+}
+
 static void ahci_setup_port(struct ata_ioports *port, unsigned long base,
 			    unsigned int port_idx)
 {
@@ -1097,9 +1187,6 @@ static int ahci_host_init(struct ata_probe_ent *probe_ent)
 		writel(tmp, port_mmio + PORT_IRQ_STAT);
 
 		writel(1 << i, mmio + HOST_IRQ_STAT);
-
-		/* set irq mask (enables interrupts) */
-		writel(DEF_PORT_IRQ, port_mmio + PORT_IRQ_MASK);
 	}
 
 	tmp = readl(mmio + HOST_CTL);
@@ -1197,6 +1284,8 @@ static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	VPRINTK("ENTER\n");
 
+	WARN_ON(ATA_MAX_QUEUE > AHCI_MAX_CMDS);
+
 	if (!printed_version++)
 		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
 
@@ -1264,6 +1353,9 @@ static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (rc)
 		goto err_out_hpriv;
 
+	if (hpriv->cap & HOST_CAP_NCQ)
+		probe_ent->host_flags |= ATA_FLAG_NCQ;
+
 	ahci_print_info(probe_ent);
 
 	/* FIXME: check ata_device_add return value */
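
A note on the layout change running through the ahci.c hunks above: the driver
moves from a single command slot to a per-tag command-table array, which is
what makes NCQ issue possible. A minimal sketch of the addressing, using only
the enums and fields introduced in this patch (the helper name itself is
hypothetical, for illustration):

	/* Each of the AHCI_MAX_CMDS (32) slots owns a fixed-size command
	 * table; table N lives at cmd_tbl + N * AHCI_CMD_TBL_SZ, exactly
	 * as ahci_qc_prep() and ahci_fill_cmd_slot() compute above. */
	static void *ahci_cmd_tbl_for_tag(struct ahci_port_priv *pp,
					  unsigned int tag)
	{
		return pp->cmd_tbl + tag * AHCI_CMD_TBL_SZ;
	}
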
diff --git a/drivers/scsi/ata_piix.c b/drivers/scsi/ata_piix.c
index 6dc88149f9f1..ad41dfd33e06 100644
--- a/drivers/scsi/ata_piix.c
+++ b/drivers/scsi/ata_piix.c
@@ -93,7 +93,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME	"ata_piix"
-#define DRV_VERSION	"1.05"
+#define DRV_VERSION	"1.10"
 
 enum {
 	PIIX_IOCFG		= 0x54, /* IDE I/O configuration register */
@@ -159,6 +159,7 @@ static const struct pci_device_id piix_pci_tbl[] = {
 	{ 0x8086, 0x7111, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix4_pata },
 	{ 0x8086, 0x24db, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_pata },
 	{ 0x8086, 0x25a2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_pata },
+	{ 0x8086, 0x27df, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_pata },
 #endif
 
 	/* NOTE: The following PCI ids must be kept in sync with the
@@ -227,6 +228,7 @@ static const struct ata_port_operations piix_pata_ops = {
 	.port_disable		= ata_port_disable,
 	.set_piomode		= piix_set_piomode,
 	.set_dmamode		= piix_set_dmamode,
+	.mode_filter		= ata_pci_default_filter,
 
 	.tf_load		= ata_tf_load,
 	.tf_read		= ata_tf_read,
@@ -242,8 +244,12 @@ static const struct ata_port_operations piix_pata_ops = {
 	.bmdma_status		= ata_bmdma_status,
 	.qc_prep		= ata_qc_prep,
 	.qc_issue		= ata_qc_issue_prot,
+	.data_xfer		= ata_pio_data_xfer,
 
-	.eng_timeout		= ata_eng_timeout,
+	.freeze			= ata_bmdma_freeze,
+	.thaw			= ata_bmdma_thaw,
+	.error_handler		= ata_bmdma_error_handler,
+	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
 
 	.irq_handler		= ata_interrupt,
 	.irq_clear		= ata_bmdma_irq_clear,
@@ -270,8 +276,12 @@ static const struct ata_port_operations piix_sata_ops = {
 	.bmdma_status		= ata_bmdma_status,
 	.qc_prep		= ata_qc_prep,
 	.qc_issue		= ata_qc_issue_prot,
+	.data_xfer		= ata_pio_data_xfer,
 
-	.eng_timeout		= ata_eng_timeout,
+	.freeze			= ata_bmdma_freeze,
+	.thaw			= ata_bmdma_thaw,
+	.error_handler		= ata_bmdma_error_handler,
+	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
 
 	.irq_handler		= ata_interrupt,
 	.irq_clear		= ata_bmdma_irq_clear,
@@ -484,7 +494,7 @@ static int piix_pata_probe_reset(struct ata_port *ap, unsigned int *classes)
 	struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
 
 	if (!pci_test_config_bits(pdev, &piix_enable_bits[ap->hard_port_no])) {
-		printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id);
+		ata_port_printk(ap, KERN_INFO, "port disabled. ignoring.\n");
 		return 0;
 	}
 
@@ -565,7 +575,7 @@ static unsigned int piix_sata_probe (struct ata_port *ap)
 static int piix_sata_probe_reset(struct ata_port *ap, unsigned int *classes)
 {
 	if (!piix_sata_probe(ap)) {
-		printk(KERN_INFO "ata%u: SATA port has no device.\n", ap->id);
+		ata_port_printk(ap, KERN_INFO, "SATA port has no device.\n");
 		return 0;
 	}
 
@@ -760,15 +770,15 @@ static int __devinit piix_check_450nx_errata(struct pci_dev *ata_dev)
 		pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
 		pci_read_config_word(pdev, 0x41, &cfg);
 		/* Only on the original revision: IDE DMA can hang */
-		if(rev == 0x00)
+		if (rev == 0x00)
 			no_piix_dma = 1;
 		/* On all revisions below 5 PXB bus lock must be disabled for IDE */
-		else if(cfg & (1<<14) && rev < 5)
+		else if (cfg & (1<<14) && rev < 5)
 			no_piix_dma = 2;
 	}
-	if(no_piix_dma)
+	if (no_piix_dma)
 		dev_printk(KERN_WARNING, &ata_dev->dev, "450NX errata present, disabling IDE DMA.\n");
-	if(no_piix_dma == 2)
+	if (no_piix_dma == 2)
 		dev_printk(KERN_WARNING, &ata_dev->dev, "A BIOS update may resolve this.\n");
 	return no_piix_dma;
 }
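
The two ata_piix ops tables above show the whole conversion recipe for a
BMDMA driver: drop ->eng_timeout and wire in the four new EH hooks. A hedged
sketch of the minimal block (the struct name is hypothetical; the hooks are
the stock ones this patch adds to libata-bmdma.c below):

	static const struct ata_port_operations example_bmdma_ops = {
		/* ... taskfile, exec, and bmdma hooks as before ... */
		.freeze			= ata_bmdma_freeze,
		.thaw			= ata_bmdma_thaw,
		.error_handler		= ata_bmdma_error_handler,
		.post_internal_cmd	= ata_bmdma_post_internal_cmd,
		/* ->eng_timeout is gone; timeouts now go through EH */
	};
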
diff --git a/drivers/scsi/libata-bmdma.c b/drivers/scsi/libata-bmdma.c
index 835dff0bafdc..6d30d2c52960 100644
--- a/drivers/scsi/libata-bmdma.c
+++ b/drivers/scsi/libata-bmdma.c
@@ -652,6 +652,149 @@ void ata_bmdma_stop(struct ata_queued_cmd *qc)
 	ata_altstatus(ap);        /* dummy read */
 }
 
+/**
+ *	ata_bmdma_freeze - Freeze BMDMA controller port
+ *	@ap: port to freeze
+ *
+ *	Freeze BMDMA controller port.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+void ata_bmdma_freeze(struct ata_port *ap)
+{
+	struct ata_ioports *ioaddr = &ap->ioaddr;
+
+	ap->ctl |= ATA_NIEN;
+	ap->last_ctl = ap->ctl;
+
+	if (ap->flags & ATA_FLAG_MMIO)
+		writeb(ap->ctl, (void __iomem *)ioaddr->ctl_addr);
+	else
+		outb(ap->ctl, ioaddr->ctl_addr);
+}
+
+/**
+ *	ata_bmdma_thaw - Thaw BMDMA controller port
+ *	@ap: port to thaw
+ *
+ *	Thaw BMDMA controller port.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+void ata_bmdma_thaw(struct ata_port *ap)
+{
+	/* clear & re-enable interrupts */
+	ata_chk_status(ap);
+	ap->ops->irq_clear(ap);
+	if (ap->ioaddr.ctl_addr)	/* FIXME: hack. create a hook instead */
+		ata_irq_on(ap);
+}
+
+/**
+ *	ata_bmdma_drive_eh - Perform EH with given methods for BMDMA controller
+ *	@ap: port to handle error for
+ *	@softreset: softreset method (can be NULL)
+ *	@hardreset: hardreset method (can be NULL)
+ *	@postreset: postreset method (can be NULL)
+ *
+ *	Handle error for ATA BMDMA controller.  It can handle both
+ *	PATA and SATA controllers.  Many controllers should be able to
+ *	use this EH as-is or with some added handling before and
+ *	after.
+ *
+ *	This function is intended to be used for constructing
+ *	->error_handler callback by low level drivers.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep)
+ */
+void ata_bmdma_drive_eh(struct ata_port *ap, ata_reset_fn_t softreset,
+			ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
+{
+	struct ata_host_set *host_set = ap->host_set;
+	struct ata_eh_context *ehc = &ap->eh_context;
+	struct ata_queued_cmd *qc;
+	unsigned long flags;
+	int thaw = 0;
+
+	qc = __ata_qc_from_tag(ap, ap->active_tag);
+	if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
+		qc = NULL;
+
+	/* reset PIO HSM and stop DMA engine */
+	spin_lock_irqsave(&host_set->lock, flags);
+
+	ap->hsm_task_state = HSM_ST_IDLE;
+
+	if (qc && (qc->tf.protocol == ATA_PROT_DMA ||
+		   qc->tf.protocol == ATA_PROT_ATAPI_DMA)) {
+		u8 host_stat;
+
+		host_stat = ata_bmdma_status(ap);
+
+		ata_ehi_push_desc(&ehc->i, "BMDMA stat 0x%x", host_stat);
+
+		/* BMDMA controllers indicate host bus error by
+		 * setting DMA_ERR bit and timing out.  As it wasn't
+		 * really a timeout event, adjust error mask and
+		 * cancel frozen state.
+		 */
+		if (qc->err_mask == AC_ERR_TIMEOUT && host_stat & ATA_DMA_ERR) {
+			qc->err_mask = AC_ERR_HOST_BUS;
+			thaw = 1;
+		}
+
+		ap->ops->bmdma_stop(qc);
+	}
+
+	ata_altstatus(ap);
+	ata_chk_status(ap);
+	ap->ops->irq_clear(ap);
+
+	spin_unlock_irqrestore(&host_set->lock, flags);
+
+	if (thaw)
+		ata_eh_thaw_port(ap);
+
+	/* PIO and DMA engines have been stopped, perform recovery */
+	ata_do_eh(ap, softreset, hardreset, postreset);
+}
+
+/**
+ *	ata_bmdma_error_handler - Stock error handler for BMDMA controller
+ *	@ap: port to handle error for
+ *
+ *	Stock error handler for BMDMA controller.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep)
+ */
+void ata_bmdma_error_handler(struct ata_port *ap)
+{
+	ata_reset_fn_t hardreset;
+
+	hardreset = NULL;
+	if (sata_scr_valid(ap))
+		hardreset = sata_std_hardreset;
+
+	ata_bmdma_drive_eh(ap, ata_std_softreset, hardreset, ata_std_postreset);
+}
+
+/**
+ *	ata_bmdma_post_internal_cmd - Stock post_internal_cmd for
+ *				      BMDMA controller
+ *	@qc: internal command to clean up
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep)
+ */
+void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc)
+{
+	ata_bmdma_stop(qc);
+}
+
 #ifdef CONFIG_PCI
 static struct ata_probe_ent *
 ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
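
As the ata_bmdma_drive_eh() kernel-doc above says, the helper is meant as a
building block: a driver with controller-specific resets keeps the stock BMDMA
recovery flow and only swaps the reset methods. A sketch under that reading
(the my_softreset/my_hardreset names are hypothetical):

	static void my_error_handler(struct ata_port *ap)
	{
		/* same shape as ata_bmdma_error_handler() above, but with
		 * driver-specific reset methods plugged in */
		ata_bmdma_drive_eh(ap, my_softreset, my_hardreset,
				   ata_std_postreset);
	}
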
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index fa476e7e0a48..074a46e5bbdd 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -61,14 +61,10 @@
 
 #include "libata.h"
 
-static unsigned int ata_dev_init_params(struct ata_port *ap,
-					struct ata_device *dev,
-					u16 heads,
-					u16 sectors);
-static void ata_set_mode(struct ata_port *ap);
-static unsigned int ata_dev_set_xfermode(struct ata_port *ap,
-					 struct ata_device *dev);
-static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev);
+static unsigned int ata_dev_init_params(struct ata_device *dev,
+					u16 heads, u16 sectors);
+static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
+static void ata_dev_xfermask(struct ata_device *dev);
 
 static unsigned int ata_unique_id = 1;
 static struct workqueue_struct *ata_wq;
@@ -77,6 +73,10 @@ int atapi_enabled = 1;
 module_param(atapi_enabled, int, 0444);
 MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
 
+int atapi_dmadir = 0;
+module_param(atapi_dmadir, int, 0444);
+MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
+
 int libata_fua = 0;
 module_param_named(fua, libata_fua, int, 0444);
 MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
@@ -397,11 +397,22 @@ static const char *ata_mode_string(unsigned int xfer_mask)
 	return "<n/a>";
 }
 
-static void ata_dev_disable(struct ata_port *ap, struct ata_device *dev)
+static const char *sata_spd_string(unsigned int spd)
+{
+	static const char * const spd_str[] = {
+		"1.5 Gbps",
+		"3.0 Gbps",
+	};
+
+	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
+		return "<unknown>";
+	return spd_str[spd - 1];
+}
+
+void ata_dev_disable(struct ata_device *dev)
 {
-	if (ata_dev_present(dev)) {
-		printk(KERN_WARNING "ata%u: dev %u disabled\n",
-		       ap->id, dev->devno);
+	if (ata_dev_enabled(dev)) {
+		ata_dev_printk(dev, KERN_WARNING, "disabled\n");
 		dev->class++;
 	}
 }
@@ -943,15 +954,14 @@ void ata_qc_complete_internal(struct ata_queued_cmd *qc)
 {
 	struct completion *waiting = qc->private_data;
 
-	qc->ap->ops->tf_read(qc->ap, &qc->tf);
 	complete(waiting);
 }
 
 /**
  *	ata_exec_internal - execute libata internal command
- *	@ap: Port to which the command is sent
  *	@dev: Device to which the command is sent
  *	@tf: Taskfile registers for the command and the result
+ *	@cdb: CDB for packet command
  *	@dma_dir: Data tranfer direction of the command
  *	@buf: Data buffer of the command
  *	@buflen: Length of data buffer
@@ -966,23 +976,62 @@ void ata_qc_complete_internal(struct ata_queued_cmd *qc)
  *	None.  Should be called with kernel context, might sleep.
  */
 
-static unsigned
-ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
-		  struct ata_taskfile *tf,
-		  int dma_dir, void *buf, unsigned int buflen)
+unsigned ata_exec_internal(struct ata_device *dev,
+			   struct ata_taskfile *tf, const u8 *cdb,
+			   int dma_dir, void *buf, unsigned int buflen)
 {
+	struct ata_port *ap = dev->ap;
 	u8 command = tf->command;
 	struct ata_queued_cmd *qc;
+	unsigned int tag, preempted_tag;
+	u32 preempted_sactive, preempted_qc_active;
 	DECLARE_COMPLETION(wait);
 	unsigned long flags;
 	unsigned int err_mask;
+	int rc;
 
 	spin_lock_irqsave(&ap->host_set->lock, flags);
 
-	qc = ata_qc_new_init(ap, dev);
-	BUG_ON(qc == NULL);
+	/* no internal command while frozen */
+	if (ap->flags & ATA_FLAG_FROZEN) {
+		spin_unlock_irqrestore(&ap->host_set->lock, flags);
+		return AC_ERR_SYSTEM;
+	}
+
+	/* initialize internal qc */
+
+	/* XXX: Tag 0 is used for drivers with legacy EH as some
+	 * drivers choke if any other tag is given.  This breaks
+	 * ata_tag_internal() test for those drivers.  Don't use new
+	 * EH stuff without converting to it.
+	 */
+	if (ap->ops->error_handler)
+		tag = ATA_TAG_INTERNAL;
+	else
+		tag = 0;
+
+	if (test_and_set_bit(tag, &ap->qc_allocated))
+		BUG();
+	qc = __ata_qc_from_tag(ap, tag);
+
+	qc->tag = tag;
+	qc->scsicmd = NULL;
+	qc->ap = ap;
+	qc->dev = dev;
+	ata_qc_reinit(qc);
+
+	preempted_tag = ap->active_tag;
+	preempted_sactive = ap->sactive;
+	preempted_qc_active = ap->qc_active;
+	ap->active_tag = ATA_TAG_POISON;
+	ap->sactive = 0;
+	ap->qc_active = 0;
 
+	/* prepare & issue qc */
 	qc->tf = *tf;
+	if (cdb)
+		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
+	qc->flags |= ATA_QCFLAG_RESULT_TF;
 	qc->dma_dir = dma_dir;
 	if (dma_dir != DMA_NONE) {
 		ata_sg_init_one(qc, buf, buflen);
@@ -996,31 +1045,53 @@ ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
 
 	spin_unlock_irqrestore(&ap->host_set->lock, flags);
 
-	if (!wait_for_completion_timeout(&wait, ATA_TMOUT_INTERNAL)) {
-		ata_port_flush_task(ap);
+	rc = wait_for_completion_timeout(&wait, ATA_TMOUT_INTERNAL);
 
+	ata_port_flush_task(ap);
+
+	if (!rc) {
 		spin_lock_irqsave(&ap->host_set->lock, flags);
 
 		/* We're racing with irq here.  If we lose, the
 		 * following test prevents us from completing the qc
-		 * again.  If completion irq occurs after here but
-		 * before the caller cleans up, it will result in a
-		 * spurious interrupt.  We can live with that.
+		 * twice.  If we win, the port is frozen and will be
+		 * cleaned up by ->post_internal_cmd().
 		 */
 		if (qc->flags & ATA_QCFLAG_ACTIVE) {
-			qc->err_mask = AC_ERR_TIMEOUT;
-			ata_qc_complete(qc);
-			printk(KERN_WARNING "ata%u: qc timeout (cmd 0x%x)\n",
-			       ap->id, command);
+			qc->err_mask |= AC_ERR_TIMEOUT;
+
+			if (ap->ops->error_handler)
+				ata_port_freeze(ap);
+			else
+				ata_qc_complete(qc);
+
+			ata_dev_printk(dev, KERN_WARNING,
+				       "qc timeout (cmd 0x%x)\n", command);
 		}
 
 		spin_unlock_irqrestore(&ap->host_set->lock, flags);
 	}
 
-	*tf = qc->tf;
+	/* do post_internal_cmd */
+	if (ap->ops->post_internal_cmd)
+		ap->ops->post_internal_cmd(qc);
+
+	if (qc->flags & ATA_QCFLAG_FAILED && !qc->err_mask) {
+		ata_dev_printk(dev, KERN_WARNING, "zero err_mask for failed "
+			       "internal command, assuming AC_ERR_OTHER\n");
+		qc->err_mask |= AC_ERR_OTHER;
+	}
+
+	/* finish up */
+	spin_lock_irqsave(&ap->host_set->lock, flags);
+
+	*tf = qc->result_tf;
 	err_mask = qc->err_mask;
 
 	ata_qc_free(qc);
+	ap->active_tag = preempted_tag;
+	ap->sactive = preempted_sactive;
+	ap->qc_active = preempted_qc_active;
 
 	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
 	 * Until those drivers are fixed, we detect the condition
@@ -1033,11 +1104,13 @@ ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
 	 *
 	 * Kill the following code as soon as those drivers are fixed.
 	 */
-	if (ap->flags & ATA_FLAG_PORT_DISABLED) {
+	if (ap->flags & ATA_FLAG_DISABLED) {
 		err_mask |= AC_ERR_SYSTEM;
 		ata_port_probe(ap);
 	}
 
+	spin_unlock_irqrestore(&ap->host_set->lock, flags);
+
 	return err_mask;
 }
 
@@ -1076,11 +1149,10 @@ unsigned int ata_pio_need_iordy(const struct ata_device *adev)
 
 /**
  *	ata_dev_read_id - Read ID data from the specified device
- *	@ap: port on which target device resides
  *	@dev: target device
  *	@p_class: pointer to class of the target device (may be changed)
  *	@post_reset: is this read ID post-reset?
- *	@p_id: read IDENTIFY page (newly allocated)
+ *	@id: buffer to read IDENTIFY data into
  *
  *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
  *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
@@ -1093,13 +1165,13 @@ unsigned int ata_pio_need_iordy(const struct ata_device *adev)
  *	RETURNS:
  *	0 on success, -errno otherwise.
  */
-static int ata_dev_read_id(struct ata_port *ap, struct ata_device *dev,
-			   unsigned int *p_class, int post_reset, u16 **p_id)
+static int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
+			   int post_reset, u16 *id)
 {
+	struct ata_port *ap = dev->ap;
 	unsigned int class = *p_class;
 	struct ata_taskfile tf;
 	unsigned int err_mask = 0;
-	u16 *id;
 	const char *reason;
 	int rc;
 
@@ -1107,15 +1179,8 @@ static int ata_dev_read_id(struct ata_port *ap, struct ata_device *dev,
 
 	ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
 
-	id = kmalloc(sizeof(id[0]) * ATA_ID_WORDS, GFP_KERNEL);
-	if (id == NULL) {
-		rc = -ENOMEM;
-		reason = "out of memory";
-		goto err_out;
-	}
-
  retry:
-	ata_tf_init(ap, &tf, dev->devno);
+	ata_tf_init(dev, &tf);
 
 	switch (class) {
 	case ATA_DEV_ATA:
@@ -1132,7 +1197,7 @@ static int ata_dev_read_id(struct ata_port *ap, struct ata_device *dev,
1132 1197
1133 tf.protocol = ATA_PROT_PIO; 1198 tf.protocol = ATA_PROT_PIO;
1134 1199
1135 err_mask = ata_exec_internal(ap, dev, &tf, DMA_FROM_DEVICE, 1200 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
1136 id, sizeof(id[0]) * ATA_ID_WORDS); 1201 id, sizeof(id[0]) * ATA_ID_WORDS);
1137 if (err_mask) { 1202 if (err_mask) {
1138 rc = -EIO; 1203 rc = -EIO;
@@ -1159,7 +1224,7 @@ static int ata_dev_read_id(struct ata_port *ap, struct ata_device *dev,
1159 * Some drives were very specific about that exact sequence. 1224 * Some drives were very specific about that exact sequence.
1160 */ 1225 */
1161 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) { 1226 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
1162 err_mask = ata_dev_init_params(ap, dev, id[3], id[6]); 1227 err_mask = ata_dev_init_params(dev, id[3], id[6]);
1163 if (err_mask) { 1228 if (err_mask) {
1164 rc = -EIO; 1229 rc = -EIO;
1165 reason = "INIT_DEV_PARAMS failed"; 1230 reason = "INIT_DEV_PARAMS failed";
@@ -1175,25 +1240,44 @@ static int ata_dev_read_id(struct ata_port *ap, struct ata_device *dev,
1175 } 1240 }
1176 1241
1177 *p_class = class; 1242 *p_class = class;
1178 *p_id = id; 1243
1179 return 0; 1244 return 0;
1180 1245
1181 err_out: 1246 err_out:
1182 printk(KERN_WARNING "ata%u: dev %u failed to IDENTIFY (%s)\n", 1247 ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
1183 ap->id, dev->devno, reason); 1248 "(%s, err_mask=0x%x)\n", reason, err_mask);
1184 kfree(id);
1185 return rc; 1249 return rc;
1186} 1250}
1187 1251
1188static inline u8 ata_dev_knobble(const struct ata_port *ap, 1252static inline u8 ata_dev_knobble(struct ata_device *dev)
1189 struct ata_device *dev) 1253{
1254 return ((dev->ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
1255}
1256
1257static void ata_dev_config_ncq(struct ata_device *dev,
1258 char *desc, size_t desc_sz)
1190{ 1259{
1191 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id))); 1260 struct ata_port *ap = dev->ap;
1261 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
1262
1263 if (!ata_id_has_ncq(dev->id)) {
1264 desc[0] = '\0';
1265 return;
1266 }
1267
1268 if (ap->flags & ATA_FLAG_NCQ) {
1269 hdepth = min(ap->host->can_queue, ATA_MAX_QUEUE - 1);
1270 dev->flags |= ATA_DFLAG_NCQ;
1271 }
1272
1273 if (hdepth >= ddepth)
1274 snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
1275 else
1276 snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
1192} 1277}
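
The new ata_dev_config_ncq() boils down to clamping the drive's advertised queue depth against the host's. A minimal standalone sketch of that arithmetic, assuming libata's 32-slot tag space for ATA_MAX_QUEUE and made-up inputs in main():

#include <stdio.h>

#define ATA_MAX_QUEUE 32                /* assumed: libata's tag space */

/* stand-ins for ap->host->can_queue and ata_id_queue_depth(dev->id) */
static void ncq_desc(char *buf, size_t sz, int can_queue, int ddepth)
{
	int hdepth = can_queue < ATA_MAX_QUEUE - 1 ? can_queue
						   : ATA_MAX_QUEUE - 1;
	if (hdepth >= ddepth)
		snprintf(buf, sz, "NCQ (depth %d)", ddepth);
	else
		snprintf(buf, sz, "NCQ (depth %d/%d)", hdepth, ddepth);
}

int main(void)
{
	char buf[20];
	ncq_desc(buf, sizeof(buf), 31, 32);	/* host caps the drive */
	printf("%s\n", buf);			/* "NCQ (depth 31/32)" */
	return 0;
}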
1193 1278
1194/** 1279/**
1195 * ata_dev_configure - Configure the specified ATA/ATAPI device 1280 * ata_dev_configure - Configure the specified ATA/ATAPI device
1196 * @ap: Port on which target device resides
1197 * @dev: Target device to configure 1281 * @dev: Target device to configure
1198 * @print_info: Enable device info printout 1282 * @print_info: Enable device info printout
1199 * 1283 *
@@ -1206,14 +1290,14 @@ static inline u8 ata_dev_knobble(const struct ata_port *ap,
1206 * RETURNS: 1290 * RETURNS:
1207 * 0 on success, -errno otherwise 1291 * 0 on success, -errno otherwise
1208 */ 1292 */
1209static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev, 1293static int ata_dev_configure(struct ata_device *dev, int print_info)
1210 int print_info)
1211{ 1294{
1295 struct ata_port *ap = dev->ap;
1212 const u16 *id = dev->id; 1296 const u16 *id = dev->id;
1213 unsigned int xfer_mask; 1297 unsigned int xfer_mask;
1214 int i, rc; 1298 int i, rc;
1215 1299
1216 if (!ata_dev_present(dev)) { 1300 if (!ata_dev_enabled(dev)) {
1217 DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n", 1301 DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n",
1218 ap->id, dev->devno); 1302 ap->id, dev->devno);
1219 return 0; 1303 return 0;
@@ -1223,13 +1307,13 @@ static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
1223 1307
1224 /* print device capabilities */ 1308 /* print device capabilities */
1225 if (print_info) 1309 if (print_info)
1226 printk(KERN_DEBUG "ata%u: dev %u cfg 49:%04x 82:%04x 83:%04x " 1310 ata_dev_printk(dev, KERN_DEBUG, "cfg 49:%04x 82:%04x 83:%04x "
1227 "84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n", 1311 "84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n",
1228 ap->id, dev->devno, id[49], id[82], id[83], 1312 id[49], id[82], id[83], id[84],
1229 id[84], id[85], id[86], id[87], id[88]); 1313 id[85], id[86], id[87], id[88]);
1230 1314
1231 /* initialize to-be-configured parameters */ 1315 /* initialize to-be-configured parameters */
1232 dev->flags = 0; 1316 dev->flags &= ~ATA_DFLAG_CFG_MASK;
1233 dev->max_sectors = 0; 1317 dev->max_sectors = 0;
1234 dev->cdb_len = 0; 1318 dev->cdb_len = 0;
1235 dev->n_sectors = 0; 1319 dev->n_sectors = 0;
@@ -1252,6 +1336,7 @@ static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
1252 1336
1253 if (ata_id_has_lba(id)) { 1337 if (ata_id_has_lba(id)) {
1254 const char *lba_desc; 1338 const char *lba_desc;
1339 char ncq_desc[20];
1255 1340
1256 lba_desc = "LBA"; 1341 lba_desc = "LBA";
1257 dev->flags |= ATA_DFLAG_LBA; 1342 dev->flags |= ATA_DFLAG_LBA;
@@ -1260,15 +1345,17 @@ static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
1260 lba_desc = "LBA48"; 1345 lba_desc = "LBA48";
1261 } 1346 }
1262 1347
1348 /* config NCQ */
1349 ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
1350
1263 /* print device info to dmesg */ 1351 /* print device info to dmesg */
1264 if (print_info) 1352 if (print_info)
1265 printk(KERN_INFO "ata%u: dev %u ATA-%d, " 1353 ata_dev_printk(dev, KERN_INFO, "ATA-%d, "
1266 "max %s, %Lu sectors: %s\n", 1354 "max %s, %Lu sectors: %s %s\n",
1267 ap->id, dev->devno, 1355 ata_id_major_version(id),
1268 ata_id_major_version(id), 1356 ata_mode_string(xfer_mask),
1269 ata_mode_string(xfer_mask), 1357 (unsigned long long)dev->n_sectors,
1270 (unsigned long long)dev->n_sectors, 1358 lba_desc, ncq_desc);
1271 lba_desc);
1272 } else { 1359 } else {
1273 /* CHS */ 1360 /* CHS */
1274 1361
@@ -1286,13 +1373,18 @@ static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
1286 1373
1287 /* print device info to dmesg */ 1374 /* print device info to dmesg */
1288 if (print_info) 1375 if (print_info)
1289 printk(KERN_INFO "ata%u: dev %u ATA-%d, " 1376 ata_dev_printk(dev, KERN_INFO, "ATA-%d, "
1290 "max %s, %Lu sectors: CHS %u/%u/%u\n", 1377 "max %s, %Lu sectors: CHS %u/%u/%u\n",
1291 ap->id, dev->devno, 1378 ata_id_major_version(id),
1292 ata_id_major_version(id), 1379 ata_mode_string(xfer_mask),
1293 ata_mode_string(xfer_mask), 1380 (unsigned long long)dev->n_sectors,
1294 (unsigned long long)dev->n_sectors, 1381 dev->cylinders, dev->heads, dev->sectors);
1295 dev->cylinders, dev->heads, dev->sectors); 1382 }
1383
1384 if (dev->id[59] & 0x100) {
1385 dev->multi_count = dev->id[59] & 0xff;
1386 DPRINTK("ata%u: dev %u multi count %u\n",
1387 ap->id, dev->devno, dev->multi_count);
1296 } 1388 }
1297 1389
1298 dev->cdb_len = 16; 1390 dev->cdb_len = 16;
@@ -1300,18 +1392,27 @@ static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
1300 1392
1301 /* ATAPI-specific feature tests */ 1393 /* ATAPI-specific feature tests */
1302 else if (dev->class == ATA_DEV_ATAPI) { 1394 else if (dev->class == ATA_DEV_ATAPI) {
1395 char *cdb_intr_string = "";
1396
1303 rc = atapi_cdb_len(id); 1397 rc = atapi_cdb_len(id);
1304 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) { 1398 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
1305 printk(KERN_WARNING "ata%u: unsupported CDB len\n", ap->id); 1399 ata_dev_printk(dev, KERN_WARNING,
1400 "unsupported CDB len\n");
1306 rc = -EINVAL; 1401 rc = -EINVAL;
1307 goto err_out_nosup; 1402 goto err_out_nosup;
1308 } 1403 }
1309 dev->cdb_len = (unsigned int) rc; 1404 dev->cdb_len = (unsigned int) rc;
1310 1405
1406 if (ata_id_cdb_intr(dev->id)) {
1407 dev->flags |= ATA_DFLAG_CDB_INTR;
1408 cdb_intr_string = ", CDB intr";
1409 }
1410
1311 /* print device info to dmesg */ 1411 /* print device info to dmesg */
1312 if (print_info) 1412 if (print_info)
1313 printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n", 1413 ata_dev_printk(dev, KERN_INFO, "ATAPI, max %s%s\n",
1314 ap->id, dev->devno, ata_mode_string(xfer_mask)); 1414 ata_mode_string(xfer_mask),
1415 cdb_intr_string);
1315 } 1416 }
1316 1417
1317 ap->host->max_cmd_len = 0; 1418 ap->host->max_cmd_len = 0;
@@ -1321,10 +1422,10 @@ static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
1321 ap->device[i].cdb_len); 1422 ap->device[i].cdb_len);
1322 1423
1323 /* limit bridge transfers to udma5, 200 sectors */ 1424 /* limit bridge transfers to udma5, 200 sectors */
1324 if (ata_dev_knobble(ap, dev)) { 1425 if (ata_dev_knobble(dev)) {
1325 if (print_info) 1426 if (print_info)
1326 printk(KERN_INFO "ata%u(%u): applying bridge limits\n", 1427 ata_dev_printk(dev, KERN_INFO,
1327 ap->id, dev->devno); 1428 "applying bridge limits\n");
1328 dev->udma_mask &= ATA_UDMA5; 1429 dev->udma_mask &= ATA_UDMA5;
1329 dev->max_sectors = ATA_MAX_SECTORS; 1430 dev->max_sectors = ATA_MAX_SECTORS;
1330 } 1431 }
@@ -1352,16 +1453,24 @@ err_out_nosup:
1352 * PCI/etc. bus probe sem. 1453 * PCI/etc. bus probe sem.
1353 * 1454 *
1354 * RETURNS: 1455 * RETURNS:
1355 * Zero on success, non-zero on error. 1456 * Zero on success, negative errno otherwise.
1356 */ 1457 */
1357 1458
1358static int ata_bus_probe(struct ata_port *ap) 1459static int ata_bus_probe(struct ata_port *ap)
1359{ 1460{
1360 unsigned int classes[ATA_MAX_DEVICES]; 1461 unsigned int classes[ATA_MAX_DEVICES];
1361 unsigned int i, rc, found = 0; 1462 int tries[ATA_MAX_DEVICES];
1463 int i, rc, down_xfermask;
1464 struct ata_device *dev;
1362 1465
1363 ata_port_probe(ap); 1466 ata_port_probe(ap);
1364 1467
1468 for (i = 0; i < ATA_MAX_DEVICES; i++)
1469 tries[i] = ATA_PROBE_MAX_TRIES;
1470
1471 retry:
1472 down_xfermask = 0;
1473
1365 /* reset and determine device classes */ 1474 /* reset and determine device classes */
1366 for (i = 0; i < ATA_MAX_DEVICES; i++) 1475 for (i = 0; i < ATA_MAX_DEVICES; i++)
1367 classes[i] = ATA_DEV_UNKNOWN; 1476 classes[i] = ATA_DEV_UNKNOWN;
@@ -1369,15 +1478,18 @@ static int ata_bus_probe(struct ata_port *ap)
1369 if (ap->ops->probe_reset) { 1478 if (ap->ops->probe_reset) {
1370 rc = ap->ops->probe_reset(ap, classes); 1479 rc = ap->ops->probe_reset(ap, classes);
1371 if (rc) { 1480 if (rc) {
1372 printk("ata%u: reset failed (errno=%d)\n", ap->id, rc); 1481 ata_port_printk(ap, KERN_ERR,
1482 "reset failed (errno=%d)\n", rc);
1373 return rc; 1483 return rc;
1374 } 1484 }
1375 } else { 1485 } else {
1376 ap->ops->phy_reset(ap); 1486 ap->ops->phy_reset(ap);
1377 1487
1378 if (!(ap->flags & ATA_FLAG_PORT_DISABLED)) 1488 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1379 for (i = 0; i < ATA_MAX_DEVICES; i++) 1489 if (!(ap->flags & ATA_FLAG_DISABLED))
1380 classes[i] = ap->device[i].class; 1490 classes[i] = ap->device[i].class;
1491 ap->device[i].class = ATA_DEV_UNKNOWN;
1492 }
1381 1493
1382 ata_port_probe(ap); 1494 ata_port_probe(ap);
1383 } 1495 }
@@ -1386,45 +1498,69 @@ static int ata_bus_probe(struct ata_port *ap)
1386 if (classes[i] == ATA_DEV_UNKNOWN) 1498 if (classes[i] == ATA_DEV_UNKNOWN)
1387 classes[i] = ATA_DEV_NONE; 1499 classes[i] = ATA_DEV_NONE;
1388 1500
 1501	/* after the reset the device is in PIO 0 and the controller
 1502	 state is undefined; record the mode */
1503
1504 for (i = 0; i < ATA_MAX_DEVICES; i++)
1505 ap->device[i].pio_mode = XFER_PIO_0;
1506
1389 /* read IDENTIFY page and configure devices */ 1507 /* read IDENTIFY page and configure devices */
1390 for (i = 0; i < ATA_MAX_DEVICES; i++) { 1508 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1391 struct ata_device *dev = &ap->device[i]; 1509 dev = &ap->device[i];
1392 1510
1393 dev->class = classes[i]; 1511 if (tries[i])
1512 dev->class = classes[i];
1394 1513
1395 if (!ata_dev_present(dev)) 1514 if (!ata_dev_enabled(dev))
1396 continue; 1515 continue;
1397 1516
1398 WARN_ON(dev->id != NULL); 1517 rc = ata_dev_read_id(dev, &dev->class, 1, dev->id);
1399 if (ata_dev_read_id(ap, dev, &dev->class, 1, &dev->id)) { 1518 if (rc)
1400 dev->class = ATA_DEV_NONE; 1519 goto fail;
1401 continue;
1402 }
1403 1520
1404 if (ata_dev_configure(ap, dev, 1)) { 1521 rc = ata_dev_configure(dev, 1);
1405 ata_dev_disable(ap, dev); 1522 if (rc)
1406 continue; 1523 goto fail;
1407 } 1524 }
1408 1525
1409 found = 1; 1526 /* configure transfer mode */
1527 rc = ata_set_mode(ap, &dev);
1528 if (rc) {
1529 down_xfermask = 1;
1530 goto fail;
1410 } 1531 }
1411 1532
1412 if (!found) 1533 for (i = 0; i < ATA_MAX_DEVICES; i++)
1413 goto err_out_disable; 1534 if (ata_dev_enabled(&ap->device[i]))
1535 return 0;
1414 1536
1415 if (ap->ops->set_mode) 1537 /* no device present, disable port */
1416 ap->ops->set_mode(ap); 1538 ata_port_disable(ap);
1417 else 1539 ap->ops->port_disable(ap);
1418 ata_set_mode(ap); 1540 return -ENODEV;
1419 1541
1420 if (ap->flags & ATA_FLAG_PORT_DISABLED) 1542 fail:
1421 goto err_out_disable; 1543 switch (rc) {
1544 case -EINVAL:
1545 case -ENODEV:
1546 tries[dev->devno] = 0;
1547 break;
1548 case -EIO:
1549 sata_down_spd_limit(ap);
1550 /* fall through */
1551 default:
1552 tries[dev->devno]--;
1553 if (down_xfermask &&
1554 ata_down_xfermask_limit(dev, tries[dev->devno] == 1))
1555 tries[dev->devno] = 0;
1556 }
1422 1557
1423 return 0; 1558 if (!tries[dev->devno]) {
1559 ata_down_xfermask_limit(dev, 1);
1560 ata_dev_disable(dev);
1561 }
1424 1562
1425err_out_disable: 1563 goto retry;
1426 ap->ops->port_disable(ap);
1427 return -1;
1428} 1564}
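
The rewritten probe loop above replaces the old found/err_out_disable flow with per-device retry budgets: -EINVAL and -ENODEV give up on the device immediately, -EIO first lowers the SATA link speed, and everything else just burns one try (with the transfer mode lowered on the next-to-last). A toy sketch of only that decision table, with ATA_PROBE_MAX_TRIES assumed to be small:

#include <errno.h>
#include <stdio.h>

#define ATA_PROBE_MAX_TRIES 3           /* assumed for illustration */

/* remaining tries after one probe failure with error 'rc' */
static int account_failure(int rc, int tries)
{
	switch (rc) {
	case -EINVAL:
	case -ENODEV:
		return 0;		/* hopeless: disable the device */
	case -EIO:
		/* the real code calls sata_down_spd_limit() here */
		/* fall through */
	default:
		return tries - 1;	/* consume one try */
	}
}

int main(void)
{
	int tries = ATA_PROBE_MAX_TRIES;
	tries = account_failure(-EIO, tries);
	printf("tries left: %d\n", tries);	/* 2 */
	return 0;
}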
1429 1565
1430/** 1566/**
@@ -1440,7 +1576,7 @@ err_out_disable:
1440 1576
1441void ata_port_probe(struct ata_port *ap) 1577void ata_port_probe(struct ata_port *ap)
1442{ 1578{
1443 ap->flags &= ~ATA_FLAG_PORT_DISABLED; 1579 ap->flags &= ~ATA_FLAG_DISABLED;
1444} 1580}
1445 1581
1446/** 1582/**
@@ -1454,27 +1590,21 @@ void ata_port_probe(struct ata_port *ap)
1454 */ 1590 */
1455static void sata_print_link_status(struct ata_port *ap) 1591static void sata_print_link_status(struct ata_port *ap)
1456{ 1592{
1457 u32 sstatus, tmp; 1593 u32 sstatus, scontrol, tmp;
1458 const char *speed;
1459 1594
1460 if (!ap->ops->scr_read) 1595 if (sata_scr_read(ap, SCR_STATUS, &sstatus))
1461 return; 1596 return;
1597 sata_scr_read(ap, SCR_CONTROL, &scontrol);
1462 1598
1463 sstatus = scr_read(ap, SCR_STATUS); 1599 if (ata_port_online(ap)) {
1464
1465 if (sata_dev_present(ap)) {
1466 tmp = (sstatus >> 4) & 0xf; 1600 tmp = (sstatus >> 4) & 0xf;
1467 if (tmp & (1 << 0)) 1601 ata_port_printk(ap, KERN_INFO,
1468 speed = "1.5"; 1602 "SATA link up %s (SStatus %X SControl %X)\n",
1469 else if (tmp & (1 << 1)) 1603 sata_spd_string(tmp), sstatus, scontrol);
1470 speed = "3.0";
1471 else
1472 speed = "<unknown>";
1473 printk(KERN_INFO "ata%u: SATA link up %s Gbps (SStatus %X)\n",
1474 ap->id, speed, sstatus);
1475 } else { 1604 } else {
1476 printk(KERN_INFO "ata%u: SATA link down (SStatus %X)\n", 1605 ata_port_printk(ap, KERN_INFO,
1477 ap->id, sstatus); 1606 "SATA link down (SStatus %X SControl %X)\n",
1607 sstatus, scontrol);
1478 } 1608 }
1479} 1609}
1480 1610
@@ -1497,17 +1627,18 @@ void __sata_phy_reset(struct ata_port *ap)
1497 1627
1498 if (ap->flags & ATA_FLAG_SATA_RESET) { 1628 if (ap->flags & ATA_FLAG_SATA_RESET) {
1499 /* issue phy wake/reset */ 1629 /* issue phy wake/reset */
1500 scr_write_flush(ap, SCR_CONTROL, 0x301); 1630 sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
1501 /* Couldn't find anything in SATA I/II specs, but 1631 /* Couldn't find anything in SATA I/II specs, but
1502 * AHCI-1.1 10.4.2 says at least 1 ms. */ 1632 * AHCI-1.1 10.4.2 says at least 1 ms. */
1503 mdelay(1); 1633 mdelay(1);
1504 } 1634 }
1505 scr_write_flush(ap, SCR_CONTROL, 0x300); /* phy wake/clear reset */ 1635 /* phy wake/clear reset */
1636 sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
1506 1637
1507 /* wait for phy to become ready, if necessary */ 1638 /* wait for phy to become ready, if necessary */
1508 do { 1639 do {
1509 msleep(200); 1640 msleep(200);
1510 sstatus = scr_read(ap, SCR_STATUS); 1641 sata_scr_read(ap, SCR_STATUS, &sstatus);
1511 if ((sstatus & 0xf) != 1) 1642 if ((sstatus & 0xf) != 1)
1512 break; 1643 break;
1513 } while (time_before(jiffies, timeout)); 1644 } while (time_before(jiffies, timeout));
@@ -1516,12 +1647,12 @@ void __sata_phy_reset(struct ata_port *ap)
1516 sata_print_link_status(ap); 1647 sata_print_link_status(ap);
1517 1648
1518 /* TODO: phy layer with polling, timeouts, etc. */ 1649 /* TODO: phy layer with polling, timeouts, etc. */
1519 if (sata_dev_present(ap)) 1650 if (!ata_port_offline(ap))
1520 ata_port_probe(ap); 1651 ata_port_probe(ap);
1521 else 1652 else
1522 ata_port_disable(ap); 1653 ata_port_disable(ap);
1523 1654
1524 if (ap->flags & ATA_FLAG_PORT_DISABLED) 1655 if (ap->flags & ATA_FLAG_DISABLED)
1525 return; 1656 return;
1526 1657
1527 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) { 1658 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
@@ -1546,24 +1677,24 @@ void __sata_phy_reset(struct ata_port *ap)
1546void sata_phy_reset(struct ata_port *ap) 1677void sata_phy_reset(struct ata_port *ap)
1547{ 1678{
1548 __sata_phy_reset(ap); 1679 __sata_phy_reset(ap);
1549 if (ap->flags & ATA_FLAG_PORT_DISABLED) 1680 if (ap->flags & ATA_FLAG_DISABLED)
1550 return; 1681 return;
1551 ata_bus_reset(ap); 1682 ata_bus_reset(ap);
1552} 1683}
1553 1684
1554/** 1685/**
1555 * ata_dev_pair - return other device on cable 1686 * ata_dev_pair - return other device on cable
1556 * @ap: port
1557 * @adev: device 1687 * @adev: device
1558 * 1688 *
 1559 * Obtain the other device on the same cable, or NULL if 1689 * Obtain the other device on the same cable, or NULL if
 1560 * none is present. 1690 * none is present.
1561 */ 1691 */
1562 1692
1563struct ata_device *ata_dev_pair(struct ata_port *ap, struct ata_device *adev) 1693struct ata_device *ata_dev_pair(struct ata_device *adev)
1564{ 1694{
1695 struct ata_port *ap = adev->ap;
1565 struct ata_device *pair = &ap->device[1 - adev->devno]; 1696 struct ata_device *pair = &ap->device[1 - adev->devno];
1566 if (!ata_dev_present(pair)) 1697 if (!ata_dev_enabled(pair))
1567 return NULL; 1698 return NULL;
1568 return pair; 1699 return pair;
1569} 1700}
@@ -1585,7 +1716,122 @@ void ata_port_disable(struct ata_port *ap)
1585{ 1716{
1586 ap->device[0].class = ATA_DEV_NONE; 1717 ap->device[0].class = ATA_DEV_NONE;
1587 ap->device[1].class = ATA_DEV_NONE; 1718 ap->device[1].class = ATA_DEV_NONE;
1588 ap->flags |= ATA_FLAG_PORT_DISABLED; 1719 ap->flags |= ATA_FLAG_DISABLED;
1720}
1721
1722/**
1723 * sata_down_spd_limit - adjust SATA spd limit downward
1724 * @ap: Port to adjust SATA spd limit for
1725 *
1726 * Adjust SATA spd limit of @ap downward. Note that this
1727 * function only adjusts the limit. The change must be applied
1728 * using sata_set_spd().
1729 *
1730 * LOCKING:
1731 * Inherited from caller.
1732 *
1733 * RETURNS:
1734 * 0 on success, negative errno on failure
1735 */
1736int sata_down_spd_limit(struct ata_port *ap)
1737{
1738 u32 sstatus, spd, mask;
1739 int rc, highbit;
1740
1741 rc = sata_scr_read(ap, SCR_STATUS, &sstatus);
1742 if (rc)
1743 return rc;
1744
1745 mask = ap->sata_spd_limit;
1746 if (mask <= 1)
1747 return -EINVAL;
1748 highbit = fls(mask) - 1;
1749 mask &= ~(1 << highbit);
1750
1751 spd = (sstatus >> 4) & 0xf;
1752 if (spd <= 1)
1753 return -EINVAL;
1754 spd--;
1755 mask &= (1 << spd) - 1;
1756 if (!mask)
1757 return -EINVAL;
1758
1759 ap->sata_spd_limit = mask;
1760
1761 ata_port_printk(ap, KERN_WARNING, "limiting SATA link speed to %s\n",
1762 sata_spd_string(fls(mask)));
1763
1764 return 0;
1765}
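
sata_down_spd_limit() treats ap->sata_spd_limit as a bitmask of allowed link generations (bit 0 = 1.5 Gbps, bit 1 = 3.0 Gbps) and clears the highest allowed bit, additionally clamping below the speed SStatus currently reports. The bit-twiddling reproduced standalone, with fls() replaced by a portable loop:

#include <stdio.h>

/* portable stand-in for the kernel's fls(): 1-based index of highest set bit */
static int fls_(unsigned int x)
{
	int i = 0;
	while (x) { i++; x >>= 1; }
	return i;
}

/* bit 0 = gen1 (1.5 Gbps), bit 1 = gen2 (3.0 Gbps) */
static int down_spd_limit(unsigned int *limit, unsigned int cur_spd)
{
	unsigned int mask = *limit;
	if (mask <= 1 || cur_spd <= 1)
		return -1;			/* nothing left to drop to */
	mask &= ~(1u << (fls_(mask) - 1));	/* drop fastest allowed gen */
	mask &= (1u << (cur_spd - 1)) - 1;	/* stay below current speed */
	if (!mask)
		return -1;
	*limit = mask;
	return 0;
}

int main(void)
{
	unsigned int limit = 0x3;		/* gen1 | gen2 allowed */
	if (!down_spd_limit(&limit, 2))		/* link currently at gen2 */
		printf("new limit mask: 0x%x\n", limit);  /* 0x1: gen1 only */
	return 0;
}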
1766
1767static int __sata_set_spd_needed(struct ata_port *ap, u32 *scontrol)
1768{
1769 u32 spd, limit;
1770
1771 if (ap->sata_spd_limit == UINT_MAX)
1772 limit = 0;
1773 else
1774 limit = fls(ap->sata_spd_limit);
1775
1776 spd = (*scontrol >> 4) & 0xf;
1777 *scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);
1778
1779 return spd != limit;
1780}
1781
1782/**
1783 * sata_set_spd_needed - is SATA spd configuration needed
1784 * @ap: Port in question
1785 *
1786 * Test whether the spd limit in SControl matches
1787 * @ap->sata_spd_limit. This function is used to determine
1788 * whether hardreset is necessary to apply SATA spd
1789 * configuration.
1790 *
1791 * LOCKING:
1792 * Inherited from caller.
1793 *
1794 * RETURNS:
1795 * 1 if SATA spd configuration is needed, 0 otherwise.
1796 */
1797int sata_set_spd_needed(struct ata_port *ap)
1798{
1799 u32 scontrol;
1800
1801 if (sata_scr_read(ap, SCR_CONTROL, &scontrol))
1802 return 0;
1803
1804 return __sata_set_spd_needed(ap, &scontrol);
1805}
1806
1807/**
1808 * sata_set_spd - set SATA spd according to spd limit
1809 * @ap: Port to set SATA spd for
1810 *
1811 * Set SATA spd of @ap according to sata_spd_limit.
1812 *
1813 * LOCKING:
1814 * Inherited from caller.
1815 *
1816 * RETURNS:
1817 * 0 if spd doesn't need to be changed, 1 if spd has been
1818 * changed. Negative errno if SCR registers are inaccessible.
1819 */
1820int sata_set_spd(struct ata_port *ap)
1821{
1822 u32 scontrol;
1823 int rc;
1824
1825 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
1826 return rc;
1827
1828 if (!__sata_set_spd_needed(ap, &scontrol))
1829 return 0;
1830
1831 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
1832 return rc;
1833
1834 return 1;
1589} 1835}
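
__sata_set_spd_needed() and sata_std_hardreset() below both do read-modify-write cycles on SControl's 4-bit SPD field (bits 7:4, 0 meaning no restriction). The field update in isolation:

#include <stdio.h>

/* write 'limit' into SControl bits 7:4; 0 lifts the restriction */
static unsigned int scontrol_set_spd(unsigned int scontrol, unsigned int limit)
{
	return (scontrol & ~0xf0u) | ((limit & 0xfu) << 4);
}

int main(void)
{
	unsigned int scontrol = 0x300;		/* IPM/DET as written by the patch */
	scontrol = scontrol_set_spd(scontrol, 1);	/* cap at gen1 */
	printf("SControl: 0x%x\n", scontrol);	/* 0x310 */
	return 0;
}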
1590 1836
1591/* 1837/*
@@ -1736,151 +1982,196 @@ int ata_timing_compute(struct ata_device *adev, unsigned short speed,
1736 return 0; 1982 return 0;
1737} 1983}
1738 1984
1739static int ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev) 1985/**
1986 * ata_down_xfermask_limit - adjust dev xfer masks downward
1987 * @dev: Device to adjust xfer masks
1988 * @force_pio0: Force PIO0
1989 *
1990 * Adjust xfer masks of @dev downward. Note that this function
1991 * does not apply the change. Invoking ata_set_mode() afterwards
1992 * will apply the limit.
1993 *
1994 * LOCKING:
1995 * Inherited from caller.
1996 *
1997 * RETURNS:
1998 * 0 on success, negative errno on failure
1999 */
2000int ata_down_xfermask_limit(struct ata_device *dev, int force_pio0)
2001{
2002 unsigned long xfer_mask;
2003 int highbit;
2004
2005 xfer_mask = ata_pack_xfermask(dev->pio_mask, dev->mwdma_mask,
2006 dev->udma_mask);
2007
2008 if (!xfer_mask)
2009 goto fail;
2010 /* don't gear down to MWDMA from UDMA, go directly to PIO */
2011 if (xfer_mask & ATA_MASK_UDMA)
2012 xfer_mask &= ~ATA_MASK_MWDMA;
2013
2014 highbit = fls(xfer_mask) - 1;
2015 xfer_mask &= ~(1 << highbit);
2016 if (force_pio0)
2017 xfer_mask &= 1 << ATA_SHIFT_PIO;
2018 if (!xfer_mask)
2019 goto fail;
2020
2021 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
2022 &dev->udma_mask);
2023
2024 ata_dev_printk(dev, KERN_WARNING, "limiting speed to %s\n",
2025 ata_mode_string(xfer_mask));
2026
2027 return 0;
2028
2029 fail:
2030 return -EINVAL;
2031}
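
ata_down_xfermask_limit() operates on libata's packed transfer mask (PIO bits lowest, MWDMA above them, UDMA above those; the shift values below are assumed from <linux/ata.h> of this era). One gear-down step, standalone:

#include <stdio.h>

/* assumed layout of the packed mask, per <linux/ata.h> */
#define SHIFT_PIO	0
#define SHIFT_MWDMA	5
#define SHIFT_UDMA	8
#define MASK_MWDMA	(0x07u << SHIFT_MWDMA)
#define MASK_UDMA	(0xffu << SHIFT_UDMA)

static int fls_(unsigned int x) { int i = 0; while (x) { i++; x >>= 1; } return i; }

/* one gear-down step over the packed pio|mwdma|udma mask */
static int down_xfermask(unsigned int *xfer_mask, int force_pio0)
{
	unsigned int m = *xfer_mask;
	if (!m)
		return -1;
	if (m & MASK_UDMA)			/* never gear UDMA down to MWDMA */
		m &= ~MASK_MWDMA;
	m &= ~(1u << (fls_(m) - 1));		/* drop the fastest remaining mode */
	if (force_pio0)
		m &= 1u << SHIFT_PIO;		/* keep PIO0 only */
	if (!m)
		return -1;
	*xfer_mask = m;
	return 0;
}

int main(void)
{
	unsigned int m = (1u << (SHIFT_UDMA + 5)) | 0x1f;	/* UDMA5 + PIO0-4 */
	down_xfermask(&m, 0);
	printf("mask: 0x%x\n", m);		/* 0x1f: UDMA5 dropped */
	return 0;
}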
2032
2033static int ata_dev_set_mode(struct ata_device *dev)
1740{ 2034{
1741 unsigned int err_mask; 2035 unsigned int err_mask;
1742 int rc; 2036 int rc;
1743 2037
2038 dev->flags &= ~ATA_DFLAG_PIO;
1744 if (dev->xfer_shift == ATA_SHIFT_PIO) 2039 if (dev->xfer_shift == ATA_SHIFT_PIO)
1745 dev->flags |= ATA_DFLAG_PIO; 2040 dev->flags |= ATA_DFLAG_PIO;
1746 2041
1747 err_mask = ata_dev_set_xfermode(ap, dev); 2042 err_mask = ata_dev_set_xfermode(dev);
1748 if (err_mask) { 2043 if (err_mask) {
1749 printk(KERN_ERR 2044 ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
1750 "ata%u: failed to set xfermode (err_mask=0x%x)\n", 2045 "(err_mask=0x%x)\n", err_mask);
1751 ap->id, err_mask);
1752 return -EIO; 2046 return -EIO;
1753 } 2047 }
1754 2048
1755 rc = ata_dev_revalidate(ap, dev, 0); 2049 rc = ata_dev_revalidate(dev, 0);
1756 if (rc) { 2050 if (rc)
1757 printk(KERN_ERR
1758 "ata%u: failed to revalidate after set xfermode\n",
1759 ap->id);
1760 return rc; 2051 return rc;
1761 }
1762 2052
1763 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n", 2053 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
1764 dev->xfer_shift, (int)dev->xfer_mode); 2054 dev->xfer_shift, (int)dev->xfer_mode);
1765 2055
1766 printk(KERN_INFO "ata%u: dev %u configured for %s\n", 2056 ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
1767 ap->id, dev->devno, 2057 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
1768 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
1769 return 0;
1770}
1771
1772static int ata_host_set_pio(struct ata_port *ap)
1773{
1774 int i;
1775
1776 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1777 struct ata_device *dev = &ap->device[i];
1778
1779 if (!ata_dev_present(dev))
1780 continue;
1781
1782 if (!dev->pio_mode) {
1783 printk(KERN_WARNING "ata%u: no PIO support for device %d.\n", ap->id, i);
1784 return -1;
1785 }
1786
1787 dev->xfer_mode = dev->pio_mode;
1788 dev->xfer_shift = ATA_SHIFT_PIO;
1789 if (ap->ops->set_piomode)
1790 ap->ops->set_piomode(ap, dev);
1791 }
1792
1793 return 0; 2058 return 0;
1794} 2059}
1795 2060
1796static void ata_host_set_dma(struct ata_port *ap)
1797{
1798 int i;
1799
1800 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1801 struct ata_device *dev = &ap->device[i];
1802
1803 if (!ata_dev_present(dev) || !dev->dma_mode)
1804 continue;
1805
1806 dev->xfer_mode = dev->dma_mode;
1807 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
1808 if (ap->ops->set_dmamode)
1809 ap->ops->set_dmamode(ap, dev);
1810 }
1811}
1812
1813/** 2061/**
1814 * ata_set_mode - Program timings and issue SET FEATURES - XFER 2062 * ata_set_mode - Program timings and issue SET FEATURES - XFER
1815 * @ap: port on which timings will be programmed 2063 * @ap: port on which timings will be programmed
 2064 * @r_failed_dev: out parameter for failed device
1816 * 2065 *
1817 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). 2066 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
2067 * ata_set_mode() fails, pointer to the failing device is
2068 * returned in @r_failed_dev.
1818 * 2069 *
1819 * LOCKING: 2070 * LOCKING:
1820 * PCI/etc. bus probe sem. 2071 * PCI/etc. bus probe sem.
2072 *
2073 * RETURNS:
2074 * 0 on success, negative errno otherwise
1821 */ 2075 */
1822static void ata_set_mode(struct ata_port *ap) 2076int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
1823{ 2077{
1824 int i, rc, used_dma = 0; 2078 struct ata_device *dev;
2079 int i, rc = 0, used_dma = 0, found = 0;
2080
2081 /* has private set_mode? */
2082 if (ap->ops->set_mode) {
2083 /* FIXME: make ->set_mode handle no device case and
2084 * return error code and failing device on failure.
2085 */
2086 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2087 if (ata_dev_enabled(&ap->device[i])) {
2088 ap->ops->set_mode(ap);
2089 break;
2090 }
2091 }
2092 return 0;
2093 }
1825 2094
1826 /* step 1: calculate xfer_mask */ 2095 /* step 1: calculate xfer_mask */
1827 for (i = 0; i < ATA_MAX_DEVICES; i++) { 2096 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1828 struct ata_device *dev = &ap->device[i];
1829 unsigned int pio_mask, dma_mask; 2097 unsigned int pio_mask, dma_mask;
1830 2098
1831 if (!ata_dev_present(dev)) 2099 dev = &ap->device[i];
1832 continue;
1833 2100
1834 ata_dev_xfermask(ap, dev); 2101 if (!ata_dev_enabled(dev))
2102 continue;
1835 2103
1836 /* TODO: let LLDD filter dev->*_mask here */ 2104 ata_dev_xfermask(dev);
1837 2105
1838 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0); 2106 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
1839 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask); 2107 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
1840 dev->pio_mode = ata_xfer_mask2mode(pio_mask); 2108 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
1841 dev->dma_mode = ata_xfer_mask2mode(dma_mask); 2109 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
1842 2110
2111 found = 1;
1843 if (dev->dma_mode) 2112 if (dev->dma_mode)
1844 used_dma = 1; 2113 used_dma = 1;
1845 } 2114 }
2115 if (!found)
2116 goto out;
1846 2117
1847 /* step 2: always set host PIO timings */ 2118 /* step 2: always set host PIO timings */
1848 rc = ata_host_set_pio(ap); 2119 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1849 if (rc) 2120 dev = &ap->device[i];
1850 goto err_out; 2121 if (!ata_dev_enabled(dev))
2122 continue;
2123
2124 if (!dev->pio_mode) {
2125 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
2126 rc = -EINVAL;
2127 goto out;
2128 }
2129
2130 dev->xfer_mode = dev->pio_mode;
2131 dev->xfer_shift = ATA_SHIFT_PIO;
2132 if (ap->ops->set_piomode)
2133 ap->ops->set_piomode(ap, dev);
2134 }
1851 2135
1852 /* step 3: set host DMA timings */ 2136 /* step 3: set host DMA timings */
1853 ata_host_set_dma(ap); 2137 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2138 dev = &ap->device[i];
2139
2140 if (!ata_dev_enabled(dev) || !dev->dma_mode)
2141 continue;
2142
2143 dev->xfer_mode = dev->dma_mode;
2144 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
2145 if (ap->ops->set_dmamode)
2146 ap->ops->set_dmamode(ap, dev);
2147 }
1854 2148
1855 /* step 4: update devices' xfer mode */ 2149 /* step 4: update devices' xfer mode */
1856 for (i = 0; i < ATA_MAX_DEVICES; i++) { 2150 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1857 struct ata_device *dev = &ap->device[i]; 2151 dev = &ap->device[i];
1858 2152
1859 if (!ata_dev_present(dev)) 2153 if (!ata_dev_enabled(dev))
1860 continue; 2154 continue;
1861 2155
1862 if (ata_dev_set_mode(ap, dev)) 2156 rc = ata_dev_set_mode(dev);
1863 goto err_out; 2157 if (rc)
2158 goto out;
1864 } 2159 }
1865 2160
1866 /* 2161 /* Record simplex status. If we selected DMA then the other
1867 * Record simplex status. If we selected DMA then the other 2162 * host channels are not permitted to do so.
1868 * host channels are not permitted to do so.
1869 */ 2163 */
1870
1871 if (used_dma && (ap->host_set->flags & ATA_HOST_SIMPLEX)) 2164 if (used_dma && (ap->host_set->flags & ATA_HOST_SIMPLEX))
1872 ap->host_set->simplex_claimed = 1; 2165 ap->host_set->simplex_claimed = 1;
1873 2166
 1874 /* 2167 /* step 5: chip specific finalisation */
1875 * Chip specific finalisation
1876 */
1877 if (ap->ops->post_set_mode) 2168 if (ap->ops->post_set_mode)
1878 ap->ops->post_set_mode(ap); 2169 ap->ops->post_set_mode(ap);
1879 2170
1880 return; 2171 out:
1881 2172 if (rc)
1882err_out: 2173 *r_failed_dev = dev;
1883 ata_port_disable(ap); 2174 return rc;
1884} 2175}
1885 2176
1886/** 2177/**
@@ -1930,8 +2221,8 @@ unsigned int ata_busy_sleep (struct ata_port *ap,
1930 } 2221 }
1931 2222
1932 if (status & ATA_BUSY) 2223 if (status & ATA_BUSY)
1933 printk(KERN_WARNING "ata%u is slow to respond, " 2224 ata_port_printk(ap, KERN_WARNING,
1934 "please be patient\n", ap->id); 2225 "port is slow to respond, please be patient\n");
1935 2226
1936 timeout = timer_start + tmout; 2227 timeout = timer_start + tmout;
1937 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) { 2228 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
@@ -1940,8 +2231,8 @@ unsigned int ata_busy_sleep (struct ata_port *ap,
1940 } 2231 }
1941 2232
1942 if (status & ATA_BUSY) { 2233 if (status & ATA_BUSY) {
1943 printk(KERN_ERR "ata%u failed to respond (%lu secs)\n", 2234 ata_port_printk(ap, KERN_ERR, "port failed to respond "
1944 ap->id, tmout / HZ); 2235 "(%lu secs)\n", tmout / HZ);
1945 return 1; 2236 return 1;
1946 } 2237 }
1947 2238
@@ -2033,8 +2324,10 @@ static unsigned int ata_bus_softreset(struct ata_port *ap,
2033 * the bus shows 0xFF because the odd clown forgets the D7 2324 * the bus shows 0xFF because the odd clown forgets the D7
2034 * pulldown resistor. 2325 * pulldown resistor.
2035 */ 2326 */
2036 if (ata_check_status(ap) == 0xFF) 2327 if (ata_check_status(ap) == 0xFF) {
2328 ata_port_printk(ap, KERN_ERR, "SRST failed (status 0xFF)\n");
2037 return AC_ERR_OTHER; 2329 return AC_ERR_OTHER;
2330 }
2038 2331
2039 ata_bus_post_reset(ap, devmask); 2332 ata_bus_post_reset(ap, devmask);
2040 2333
@@ -2058,7 +2351,7 @@ static unsigned int ata_bus_softreset(struct ata_port *ap,
2058 * Obtains host_set lock. 2351 * Obtains host_set lock.
2059 * 2352 *
2060 * SIDE EFFECTS: 2353 * SIDE EFFECTS:
2061 * Sets ATA_FLAG_PORT_DISABLED if bus reset fails. 2354 * Sets ATA_FLAG_DISABLED if bus reset fails.
2062 */ 2355 */
2063 2356
2064void ata_bus_reset(struct ata_port *ap) 2357void ata_bus_reset(struct ata_port *ap)
@@ -2126,7 +2419,7 @@ void ata_bus_reset(struct ata_port *ap)
2126 return; 2419 return;
2127 2420
2128err_out: 2421err_out:
2129 printk(KERN_ERR "ata%u: disabling port\n", ap->id); 2422 ata_port_printk(ap, KERN_ERR, "disabling port\n");
2130 ap->ops->port_disable(ap); 2423 ap->ops->port_disable(ap);
2131 2424
2132 DPRINTK("EXIT\n"); 2425 DPRINTK("EXIT\n");
@@ -2135,19 +2428,27 @@ err_out:
2135static int sata_phy_resume(struct ata_port *ap) 2428static int sata_phy_resume(struct ata_port *ap)
2136{ 2429{
2137 unsigned long timeout = jiffies + (HZ * 5); 2430 unsigned long timeout = jiffies + (HZ * 5);
2138 u32 sstatus; 2431 u32 scontrol, sstatus;
2432 int rc;
2433
2434 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2435 return rc;
2436
2437 scontrol = (scontrol & 0x0f0) | 0x300;
2139 2438
2140 scr_write_flush(ap, SCR_CONTROL, 0x300); 2439 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2440 return rc;
2141 2441
2142 /* Wait for phy to become ready, if necessary. */ 2442 /* Wait for phy to become ready, if necessary. */
2143 do { 2443 do {
2144 msleep(200); 2444 msleep(200);
2145 sstatus = scr_read(ap, SCR_STATUS); 2445 if ((rc = sata_scr_read(ap, SCR_STATUS, &sstatus)))
2446 return rc;
2146 if ((sstatus & 0xf) != 1) 2447 if ((sstatus & 0xf) != 1)
2147 return 0; 2448 return 0;
2148 } while (time_before(jiffies, timeout)); 2449 } while (time_before(jiffies, timeout));
2149 2450
2150 return -1; 2451 return -EBUSY;
2151} 2452}
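
sata_phy_resume() now preserves the SPD nibble while rewriting the rest: per the SATA spec SControl's low twelve bits split into DET (3:0), SPD (7:4) and IPM (11:8), so (scontrol & 0x0f0) | 0x300 keeps the speed limit, clears DET and disables partial/slumber. Decoding it, with a made-up prior register value:

#include <stdio.h>

int main(void)
{
	unsigned int scontrol = (0x345 & 0x0f0) | 0x300;	/* 0x345 is made up */
	printf("DET=%u SPD=%u IPM=%u\n",
	       scontrol & 0xf,			/* 0: no reset requested */
	       (scontrol >> 4) & 0xf,		/* 4: speed limit preserved */
	       (scontrol >> 8) & 0xf);		/* 3: partial/slumber disabled */
	return 0;
}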
2152 2453
2153/** 2454/**
@@ -2165,17 +2466,25 @@ static int sata_phy_resume(struct ata_port *ap)
2165 */ 2466 */
2166void ata_std_probeinit(struct ata_port *ap) 2467void ata_std_probeinit(struct ata_port *ap)
2167{ 2468{
2168 if ((ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read) { 2469 u32 scontrol;
2169 sata_phy_resume(ap); 2470
2170 if (sata_dev_present(ap)) 2471 /* resume link */
2171 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT); 2472 sata_phy_resume(ap);
2473
2474 /* init sata_spd_limit to the current value */
2475 if (sata_scr_read(ap, SCR_CONTROL, &scontrol) == 0) {
2476 int spd = (scontrol >> 4) & 0xf;
2477 ap->sata_spd_limit &= (1 << spd) - 1;
2172 } 2478 }
2479
2480 /* wait for device */
2481 if (ata_port_online(ap))
2482 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2173} 2483}
2174 2484
2175/** 2485/**
2176 * ata_std_softreset - reset host port via ATA SRST 2486 * ata_std_softreset - reset host port via ATA SRST
2177 * @ap: port to reset 2487 * @ap: port to reset
2178 * @verbose: fail verbosely
2179 * @classes: resulting classes of attached devices 2488 * @classes: resulting classes of attached devices
2180 * 2489 *
2181 * Reset host port using ATA SRST. This function is to be used 2490 * Reset host port using ATA SRST. This function is to be used
@@ -2187,7 +2496,7 @@ void ata_std_probeinit(struct ata_port *ap)
2187 * RETURNS: 2496 * RETURNS:
2188 * 0 on success, -errno otherwise. 2497 * 0 on success, -errno otherwise.
2189 */ 2498 */
2190int ata_std_softreset(struct ata_port *ap, int verbose, unsigned int *classes) 2499int ata_std_softreset(struct ata_port *ap, unsigned int *classes)
2191{ 2500{
2192 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS; 2501 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2193 unsigned int devmask = 0, err_mask; 2502 unsigned int devmask = 0, err_mask;
@@ -2195,7 +2504,7 @@ int ata_std_softreset(struct ata_port *ap, int verbose, unsigned int *classes)
2195 2504
2196 DPRINTK("ENTER\n"); 2505 DPRINTK("ENTER\n");
2197 2506
2198 if (ap->ops->scr_read && !sata_dev_present(ap)) { 2507 if (ata_port_offline(ap)) {
2199 classes[0] = ATA_DEV_NONE; 2508 classes[0] = ATA_DEV_NONE;
2200 goto out; 2509 goto out;
2201 } 2510 }
@@ -2213,11 +2522,7 @@ int ata_std_softreset(struct ata_port *ap, int verbose, unsigned int *classes)
2213 DPRINTK("about to softreset, devmask=%x\n", devmask); 2522 DPRINTK("about to softreset, devmask=%x\n", devmask);
2214 err_mask = ata_bus_softreset(ap, devmask); 2523 err_mask = ata_bus_softreset(ap, devmask);
2215 if (err_mask) { 2524 if (err_mask) {
2216 if (verbose) 2525 ata_port_printk(ap, KERN_ERR, "SRST failed (err_mask=0x%x)\n",
2217 printk(KERN_ERR "ata%u: SRST failed (err_mask=0x%x)\n",
2218 ap->id, err_mask);
2219 else
2220 DPRINTK("EXIT, softreset failed (err_mask=0x%x)\n",
2221 err_mask); 2526 err_mask);
2222 return -EIO; 2527 return -EIO;
2223 } 2528 }
@@ -2235,7 +2540,6 @@ int ata_std_softreset(struct ata_port *ap, int verbose, unsigned int *classes)
2235/** 2540/**
2236 * sata_std_hardreset - reset host port via SATA phy reset 2541 * sata_std_hardreset - reset host port via SATA phy reset
2237 * @ap: port to reset 2542 * @ap: port to reset
2238 * @verbose: fail verbosely
2239 * @class: resulting class of attached device 2543 * @class: resulting class of attached device
2240 * 2544 *
2241 * SATA phy-reset host port using DET bits of SControl register. 2545 * SATA phy-reset host port using DET bits of SControl register.
@@ -2248,35 +2552,57 @@ int ata_std_softreset(struct ata_port *ap, int verbose, unsigned int *classes)
2248 * RETURNS: 2552 * RETURNS:
2249 * 0 on success, -errno otherwise. 2553 * 0 on success, -errno otherwise.
2250 */ 2554 */
2251int sata_std_hardreset(struct ata_port *ap, int verbose, unsigned int *class) 2555int sata_std_hardreset(struct ata_port *ap, unsigned int *class)
2252{ 2556{
2557 u32 scontrol;
2558 int rc;
2559
2253 DPRINTK("ENTER\n"); 2560 DPRINTK("ENTER\n");
2254 2561
2255 /* Issue phy wake/reset */ 2562 if (sata_set_spd_needed(ap)) {
2256 scr_write_flush(ap, SCR_CONTROL, 0x301); 2563 /* SATA spec says nothing about how to reconfigure
2564 * spd. To be on the safe side, turn off phy during
2565 * reconfiguration. This works for at least ICH7 AHCI
2566 * and Sil3124.
2567 */
2568 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2569 return rc;
2570
2571 scontrol = (scontrol & 0x0f0) | 0x302;
2257 2572
2258 /* 2573 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2259 * Couldn't find anything in SATA I/II specs, but AHCI-1.1 2574 return rc;
2575
2576 sata_set_spd(ap);
2577 }
2578
2579 /* issue phy wake/reset */
2580 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2581 return rc;
2582
2583 scontrol = (scontrol & 0x0f0) | 0x301;
2584
2585 if ((rc = sata_scr_write_flush(ap, SCR_CONTROL, scontrol)))
2586 return rc;
2587
2588 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
2260 * 10.4.2 says at least 1 ms. 2589 * 10.4.2 says at least 1 ms.
2261 */ 2590 */
2262 msleep(1); 2591 msleep(1);
2263 2592
2264 /* Bring phy back */ 2593 /* bring phy back */
2265 sata_phy_resume(ap); 2594 sata_phy_resume(ap);
2266 2595
2267 /* TODO: phy layer with polling, timeouts, etc. */ 2596 /* TODO: phy layer with polling, timeouts, etc. */
2268 if (!sata_dev_present(ap)) { 2597 if (ata_port_offline(ap)) {
2269 *class = ATA_DEV_NONE; 2598 *class = ATA_DEV_NONE;
2270 DPRINTK("EXIT, link offline\n"); 2599 DPRINTK("EXIT, link offline\n");
2271 return 0; 2600 return 0;
2272 } 2601 }
2273 2602
2274 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) { 2603 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
2275 if (verbose) 2604 ata_port_printk(ap, KERN_ERR,
2276 printk(KERN_ERR "ata%u: COMRESET failed " 2605 "COMRESET failed (device not ready)\n");
2277 "(device not ready)\n", ap->id);
2278 else
2279 DPRINTK("EXIT, device not ready\n");
2280 return -EIO; 2606 return -EIO;
2281 } 2607 }
2282 2608
@@ -2305,19 +2631,23 @@ int sata_std_hardreset(struct ata_port *ap, int verbose, unsigned int *class)
2305 */ 2631 */
2306void ata_std_postreset(struct ata_port *ap, unsigned int *classes) 2632void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
2307{ 2633{
2308 DPRINTK("ENTER\n"); 2634 u32 serror;
2309 2635
2310 /* set cable type if it isn't already set */ 2636 DPRINTK("ENTER\n");
2311 if (ap->cbl == ATA_CBL_NONE && ap->flags & ATA_FLAG_SATA)
2312 ap->cbl = ATA_CBL_SATA;
2313 2637
2314 /* print link status */ 2638 /* print link status */
2315 if (ap->cbl == ATA_CBL_SATA) 2639 sata_print_link_status(ap);
2316 sata_print_link_status(ap); 2640
2641 /* clear SError */
2642 if (sata_scr_read(ap, SCR_ERROR, &serror) == 0)
2643 sata_scr_write(ap, SCR_ERROR, serror);
2317 2644
2318 /* re-enable interrupts */ 2645 /* re-enable interrupts */
2319 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */ 2646 if (!ap->ops->error_handler) {
2320 ata_irq_on(ap); 2647 /* FIXME: hack. create a hook instead */
2648 if (ap->ioaddr.ctl_addr)
2649 ata_irq_on(ap);
2650 }
2321 2651
2322 /* is double-select really necessary? */ 2652 /* is double-select really necessary? */
2323 if (classes[0] != ATA_DEV_NONE) 2653 if (classes[0] != ATA_DEV_NONE)
@@ -2360,7 +2690,7 @@ int ata_std_probe_reset(struct ata_port *ap, unsigned int *classes)
2360 ata_reset_fn_t hardreset; 2690 ata_reset_fn_t hardreset;
2361 2691
2362 hardreset = NULL; 2692 hardreset = NULL;
2363 if (ap->flags & ATA_FLAG_SATA && ap->ops->scr_read) 2693 if (sata_scr_valid(ap))
2364 hardreset = sata_std_hardreset; 2694 hardreset = sata_std_hardreset;
2365 2695
2366 return ata_drive_probe_reset(ap, ata_std_probeinit, 2696 return ata_drive_probe_reset(ap, ata_std_probeinit,
@@ -2368,16 +2698,15 @@ int ata_std_probe_reset(struct ata_port *ap, unsigned int *classes)
2368 ata_std_postreset, classes); 2698 ata_std_postreset, classes);
2369} 2699}
2370 2700
2371static int do_probe_reset(struct ata_port *ap, ata_reset_fn_t reset, 2701int ata_do_reset(struct ata_port *ap, ata_reset_fn_t reset,
2372 ata_postreset_fn_t postreset, 2702 unsigned int *classes)
2373 unsigned int *classes)
2374{ 2703{
2375 int i, rc; 2704 int i, rc;
2376 2705
2377 for (i = 0; i < ATA_MAX_DEVICES; i++) 2706 for (i = 0; i < ATA_MAX_DEVICES; i++)
2378 classes[i] = ATA_DEV_UNKNOWN; 2707 classes[i] = ATA_DEV_UNKNOWN;
2379 2708
2380 rc = reset(ap, 0, classes); 2709 rc = reset(ap, classes);
2381 if (rc) 2710 if (rc)
2382 return rc; 2711 return rc;
2383 2712
@@ -2394,10 +2723,7 @@ static int do_probe_reset(struct ata_port *ap, ata_reset_fn_t reset,
2394 if (classes[i] == ATA_DEV_UNKNOWN) 2723 if (classes[i] == ATA_DEV_UNKNOWN)
2395 classes[i] = ATA_DEV_NONE; 2724 classes[i] = ATA_DEV_NONE;
2396 2725
2397 if (postreset) 2726 return 0;
2398 postreset(ap, classes);
2399
2400 return classes[0] != ATA_DEV_UNKNOWN ? 0 : -ENODEV;
2401} 2727}
2402 2728
2403/** 2729/**
@@ -2421,8 +2747,6 @@ static int do_probe_reset(struct ata_port *ap, ata_reset_fn_t reset,
2421 * - If classification is supported, fill classes[] with 2747 * - If classification is supported, fill classes[] with
2422 * recognized class codes. 2748 * recognized class codes.
2423 * - If classification is not supported, leave classes[] alone. 2749 * - If classification is not supported, leave classes[] alone.
2424 * - If verbose is non-zero, print error message on failure;
2425 * otherwise, shut up.
2426 * 2750 *
2427 * LOCKING: 2751 * LOCKING:
2428 * Kernel thread context (may sleep) 2752 * Kernel thread context (may sleep)
@@ -2438,31 +2762,63 @@ int ata_drive_probe_reset(struct ata_port *ap, ata_probeinit_fn_t probeinit,
2438{ 2762{
2439 int rc = -EINVAL; 2763 int rc = -EINVAL;
2440 2764
2765 ata_eh_freeze_port(ap);
2766
2441 if (probeinit) 2767 if (probeinit)
2442 probeinit(ap); 2768 probeinit(ap);
2443 2769
2444 if (softreset) { 2770 if (softreset && !sata_set_spd_needed(ap)) {
2445 rc = do_probe_reset(ap, softreset, postreset, classes); 2771 rc = ata_do_reset(ap, softreset, classes);
2446 if (rc == 0) 2772 if (rc == 0 && classes[0] != ATA_DEV_UNKNOWN)
2447 return 0; 2773 goto done;
2774 ata_port_printk(ap, KERN_INFO, "softreset failed, "
2775 "will try hardreset in 5 secs\n");
2776 ssleep(5);
2448 } 2777 }
2449 2778
2450 if (!hardreset) 2779 if (!hardreset)
2451 return rc; 2780 goto done;
2452 2781
2453 rc = do_probe_reset(ap, hardreset, postreset, classes); 2782 while (1) {
2454 if (rc == 0 || rc != -ENODEV) 2783 rc = ata_do_reset(ap, hardreset, classes);
2455 return rc; 2784 if (rc == 0) {
2785 if (classes[0] != ATA_DEV_UNKNOWN)
2786 goto done;
2787 break;
2788 }
2789
2790 if (sata_down_spd_limit(ap))
2791 goto done;
2456 2792
2457 if (softreset) 2793 ata_port_printk(ap, KERN_INFO, "hardreset failed, "
2458 rc = do_probe_reset(ap, softreset, postreset, classes); 2794 "will retry in 5 secs\n");
2795 ssleep(5);
2796 }
2797
2798 if (softreset) {
2799 ata_port_printk(ap, KERN_INFO,
2800 "hardreset succeeded without classification, "
2801 "will retry softreset in 5 secs\n");
2802 ssleep(5);
2459 2803
2804 rc = ata_do_reset(ap, softreset, classes);
2805 }
2806
2807 done:
2808 if (rc == 0) {
2809 if (postreset)
2810 postreset(ap, classes);
2811
2812 ata_eh_thaw_port(ap);
2813
2814 if (classes[0] == ATA_DEV_UNKNOWN)
2815 rc = -ENODEV;
2816 }
2460 return rc; 2817 return rc;
2461} 2818}
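
The restructured ata_drive_probe_reset() is a fallback ladder: one softreset attempt (skipped when a pending speed change forces hardreset), a hardreset loop that steps the link speed down between failures, and a final classifying softreset when hardreset came back without classification. A toy model of just that control flow, with the reset methods stubbed:

#include <stdio.h>

enum { DEV_UNKNOWN, DEV_ATA, DEV_NONE };

/* stubs standing in for the real reset methods */
static int softreset(int *class) { *class = DEV_ATA; return 0; }
static int hardreset(int *class) { *class = DEV_UNKNOWN; return 0; }

int main(void)
{
	int class = DEV_UNKNOWN;

	if (softreset(&class) == 0 && class != DEV_UNKNOWN)
		goto done;

	while (1) {
		if (hardreset(&class) == 0) {
			if (class != DEV_UNKNOWN)
				goto done;
			break;		/* link up but unclassified */
		}
		/* the real code lowers the link speed here and retries */
	}

	softreset(&class);		/* classify after successful hardreset */
done:
	printf("class=%d\n", class);
	return 0;
}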
2462 2819
2463/** 2820/**
2464 * ata_dev_same_device - Determine whether new ID matches configured device 2821 * ata_dev_same_device - Determine whether new ID matches configured device
2465 * @ap: port on which the device to compare against resides
2466 * @dev: device to compare against 2822 * @dev: device to compare against
2467 * @new_class: class of the new device 2823 * @new_class: class of the new device
2468 * @new_id: IDENTIFY page of the new device 2824 * @new_id: IDENTIFY page of the new device
@@ -2477,17 +2833,16 @@ int ata_drive_probe_reset(struct ata_port *ap, ata_probeinit_fn_t probeinit,
2477 * RETURNS: 2833 * RETURNS:
2478 * 1 if @dev matches @new_class and @new_id, 0 otherwise. 2834 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
2479 */ 2835 */
2480static int ata_dev_same_device(struct ata_port *ap, struct ata_device *dev, 2836static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
2481 unsigned int new_class, const u16 *new_id) 2837 const u16 *new_id)
2482{ 2838{
2483 const u16 *old_id = dev->id; 2839 const u16 *old_id = dev->id;
2484 unsigned char model[2][41], serial[2][21]; 2840 unsigned char model[2][41], serial[2][21];
2485 u64 new_n_sectors; 2841 u64 new_n_sectors;
2486 2842
2487 if (dev->class != new_class) { 2843 if (dev->class != new_class) {
2488 printk(KERN_INFO 2844 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
2489 "ata%u: dev %u class mismatch %d != %d\n", 2845 dev->class, new_class);
2490 ap->id, dev->devno, dev->class, new_class);
2491 return 0; 2846 return 0;
2492 } 2847 }
2493 2848
@@ -2498,24 +2853,22 @@ static int ata_dev_same_device(struct ata_port *ap, struct ata_device *dev,
2498 new_n_sectors = ata_id_n_sectors(new_id); 2853 new_n_sectors = ata_id_n_sectors(new_id);
2499 2854
2500 if (strcmp(model[0], model[1])) { 2855 if (strcmp(model[0], model[1])) {
2501 printk(KERN_INFO 2856 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
2502 "ata%u: dev %u model number mismatch '%s' != '%s'\n", 2857 "'%s' != '%s'\n", model[0], model[1]);
2503 ap->id, dev->devno, model[0], model[1]);
2504 return 0; 2858 return 0;
2505 } 2859 }
2506 2860
2507 if (strcmp(serial[0], serial[1])) { 2861 if (strcmp(serial[0], serial[1])) {
2508 printk(KERN_INFO 2862 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
2509 "ata%u: dev %u serial number mismatch '%s' != '%s'\n", 2863 "'%s' != '%s'\n", serial[0], serial[1]);
2510 ap->id, dev->devno, serial[0], serial[1]);
2511 return 0; 2864 return 0;
2512 } 2865 }
2513 2866
2514 if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) { 2867 if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) {
2515 printk(KERN_INFO 2868 ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
2516 "ata%u: dev %u n_sectors mismatch %llu != %llu\n", 2869 "%llu != %llu\n",
2517 ap->id, dev->devno, (unsigned long long)dev->n_sectors, 2870 (unsigned long long)dev->n_sectors,
2518 (unsigned long long)new_n_sectors); 2871 (unsigned long long)new_n_sectors);
2519 return 0; 2872 return 0;
2520 } 2873 }
2521 2874
@@ -2524,7 +2877,6 @@ static int ata_dev_same_device(struct ata_port *ap, struct ata_device *dev,
2524 2877
2525/** 2878/**
2526 * ata_dev_revalidate - Revalidate ATA device 2879 * ata_dev_revalidate - Revalidate ATA device
2527 * @ap: port on which the device to revalidate resides
2528 * @dev: device to revalidate 2880 * @dev: device to revalidate
2529 * @post_reset: is this revalidation after reset? 2881 * @post_reset: is this revalidation after reset?
2530 * 2882 *
@@ -2537,40 +2889,37 @@ static int ata_dev_same_device(struct ata_port *ap, struct ata_device *dev,
2537 * RETURNS: 2889 * RETURNS:
2538 * 0 on success, negative errno otherwise 2890 * 0 on success, negative errno otherwise
2539 */ 2891 */
2540int ata_dev_revalidate(struct ata_port *ap, struct ata_device *dev, 2892int ata_dev_revalidate(struct ata_device *dev, int post_reset)
2541 int post_reset)
2542{ 2893{
2543 unsigned int class; 2894 unsigned int class = dev->class;
2544 u16 *id; 2895 u16 *id = (void *)dev->ap->sector_buf;
2545 int rc; 2896 int rc;
2546 2897
2547 if (!ata_dev_present(dev)) 2898 if (!ata_dev_enabled(dev)) {
2548 return -ENODEV; 2899 rc = -ENODEV;
2549 2900 goto fail;
2550 class = dev->class; 2901 }
2551 id = NULL;
2552 2902
2553 /* allocate & read ID data */ 2903 /* read ID data */
2554 rc = ata_dev_read_id(ap, dev, &class, post_reset, &id); 2904 rc = ata_dev_read_id(dev, &class, post_reset, id);
2555 if (rc) 2905 if (rc)
2556 goto fail; 2906 goto fail;
2557 2907
2558 /* is the device still there? */ 2908 /* is the device still there? */
2559 if (!ata_dev_same_device(ap, dev, class, id)) { 2909 if (!ata_dev_same_device(dev, class, id)) {
2560 rc = -ENODEV; 2910 rc = -ENODEV;
2561 goto fail; 2911 goto fail;
2562 } 2912 }
2563 2913
2564 kfree(dev->id); 2914 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
2565 dev->id = id;
2566 2915
2567 /* configure device according to the new ID */ 2916 /* configure device according to the new ID */
2568 return ata_dev_configure(ap, dev, 0); 2917 rc = ata_dev_configure(dev, 0);
2918 if (rc == 0)
2919 return 0;
2569 2920
2570 fail: 2921 fail:
2571 printk(KERN_ERR "ata%u: dev %u revalidation failed (errno=%d)\n", 2922 ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
2572 ap->id, dev->devno, rc);
2573 kfree(id);
2574 return rc; 2923 return rc;
2575} 2924}
2576 2925
@@ -2646,7 +2995,6 @@ static int ata_dma_blacklisted(const struct ata_device *dev)
2646 2995
2647/** 2996/**
2648 * ata_dev_xfermask - Compute supported xfermask of the given device 2997 * ata_dev_xfermask - Compute supported xfermask of the given device
2649 * @ap: Port on which the device to compute xfermask for resides
2650 * @dev: Device to compute xfermask for 2998 * @dev: Device to compute xfermask for
2651 * 2999 *
2652 * Compute supported xfermask of @dev and store it in 3000 * Compute supported xfermask of @dev and store it in
@@ -2661,49 +3009,61 @@ static int ata_dma_blacklisted(const struct ata_device *dev)
2661 * LOCKING: 3009 * LOCKING:
2662 * None. 3010 * None.
2663 */ 3011 */
2664static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev) 3012static void ata_dev_xfermask(struct ata_device *dev)
2665{ 3013{
3014 struct ata_port *ap = dev->ap;
2666 struct ata_host_set *hs = ap->host_set; 3015 struct ata_host_set *hs = ap->host_set;
2667 unsigned long xfer_mask; 3016 unsigned long xfer_mask;
2668 int i; 3017 int i;
2669 3018
2670 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask, 3019 xfer_mask = ata_pack_xfermask(ap->pio_mask,
2671 ap->udma_mask); 3020 ap->mwdma_mask, ap->udma_mask);
3021
3022 /* Apply cable rule here. Don't apply it early because when
3023 * we handle hot plug the cable type can itself change.
3024 */
3025 if (ap->cbl == ATA_CBL_PATA40)
3026 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
2672 3027
2673 /* FIXME: Use port-wide xfermask for now */ 3028 /* FIXME: Use port-wide xfermask for now */
2674 for (i = 0; i < ATA_MAX_DEVICES; i++) { 3029 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2675 struct ata_device *d = &ap->device[i]; 3030 struct ata_device *d = &ap->device[i];
2676 if (!ata_dev_present(d)) 3031
3032 if (ata_dev_absent(d))
3033 continue;
3034
3035 if (ata_dev_disabled(d)) {
3036 /* to avoid violating device selection timing */
3037 xfer_mask &= ata_pack_xfermask(d->pio_mask,
3038 UINT_MAX, UINT_MAX);
2677 continue; 3039 continue;
2678 xfer_mask &= ata_pack_xfermask(d->pio_mask, d->mwdma_mask, 3040 }
2679 d->udma_mask); 3041
3042 xfer_mask &= ata_pack_xfermask(d->pio_mask,
3043 d->mwdma_mask, d->udma_mask);
2680 xfer_mask &= ata_id_xfermask(d->id); 3044 xfer_mask &= ata_id_xfermask(d->id);
2681 if (ata_dma_blacklisted(d)) 3045 if (ata_dma_blacklisted(d))
2682 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); 3046 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
2683 /* Apply cable rule here. Don't apply it early because when
2684 we handle hot plug the cable type can itself change */
2685 if (ap->cbl == ATA_CBL_PATA40)
2686 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
2687 } 3047 }
2688 3048
2689 if (ata_dma_blacklisted(dev)) 3049 if (ata_dma_blacklisted(dev))
2690 printk(KERN_WARNING "ata%u: dev %u is on DMA blacklist, " 3050 ata_dev_printk(dev, KERN_WARNING,
2691 "disabling DMA\n", ap->id, dev->devno); 3051 "device is on DMA blacklist, disabling DMA\n");
2692 3052
2693 if (hs->flags & ATA_HOST_SIMPLEX) { 3053 if (hs->flags & ATA_HOST_SIMPLEX) {
2694 if (hs->simplex_claimed) 3054 if (hs->simplex_claimed)
2695 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); 3055 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
2696 } 3056 }
3057
2697 if (ap->ops->mode_filter) 3058 if (ap->ops->mode_filter)
2698 xfer_mask = ap->ops->mode_filter(ap, dev, xfer_mask); 3059 xfer_mask = ap->ops->mode_filter(ap, dev, xfer_mask);
2699 3060
2700 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask, 3061 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
2701 &dev->udma_mask); 3062 &dev->mwdma_mask, &dev->udma_mask);
2702} 3063}
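
ata_dev_xfermask() is mostly intersections of packed masks built by ata_pack_xfermask(); the packing itself is plain shifts (field positions assumed as in the earlier sketch), and the 40-wire cable rule is one masked clear:

#include <stdio.h>

#define SHIFT_PIO	0
#define SHIFT_MWDMA	5
#define SHIFT_UDMA	8

static unsigned long pack(unsigned int pio, unsigned int mwdma, unsigned int udma)
{
	return ((unsigned long)pio << SHIFT_PIO) |
	       ((unsigned long)mwdma << SHIFT_MWDMA) |
	       ((unsigned long)udma << SHIFT_UDMA);
}

int main(void)
{
	unsigned long xfer_mask = pack(0x1f, 0x7, 0x3f);	/* up to UDMA5 */
	/* a 40-wire cable knocks out UDMA > 2, as in the patch's cable rule */
	xfer_mask &= ~(0xF8ul << SHIFT_UDMA);
	printf("udma after cable rule: 0x%lx\n",
	       (xfer_mask >> SHIFT_UDMA) & 0xff);		/* 0x7: UDMA0-2 */
	return 0;
}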
2703 3064
2704/** 3065/**
2705 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command 3066 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
2706 * @ap: Port associated with device @dev
2707 * @dev: Device to which command will be sent 3067 * @dev: Device to which command will be sent
2708 * 3068 *
2709 * Issue SET FEATURES - XFER MODE command to device @dev 3069 * Issue SET FEATURES - XFER MODE command to device @dev
@@ -2716,8 +3076,7 @@ static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev)
2716 * 0 on success, AC_ERR_* mask otherwise. 3076 * 0 on success, AC_ERR_* mask otherwise.
2717 */ 3077 */
2718 3078
2719static unsigned int ata_dev_set_xfermode(struct ata_port *ap, 3079static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
2720 struct ata_device *dev)
2721{ 3080{
2722 struct ata_taskfile tf; 3081 struct ata_taskfile tf;
2723 unsigned int err_mask; 3082 unsigned int err_mask;
@@ -2725,14 +3084,14 @@ static unsigned int ata_dev_set_xfermode(struct ata_port *ap,
2725 /* set up set-features taskfile */ 3084 /* set up set-features taskfile */
2726 DPRINTK("set features - xfer mode\n"); 3085 DPRINTK("set features - xfer mode\n");
2727 3086
2728 ata_tf_init(ap, &tf, dev->devno); 3087 ata_tf_init(dev, &tf);
2729 tf.command = ATA_CMD_SET_FEATURES; 3088 tf.command = ATA_CMD_SET_FEATURES;
2730 tf.feature = SETFEATURES_XFER; 3089 tf.feature = SETFEATURES_XFER;
2731 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 3090 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2732 tf.protocol = ATA_PROT_NODATA; 3091 tf.protocol = ATA_PROT_NODATA;
2733 tf.nsect = dev->xfer_mode; 3092 tf.nsect = dev->xfer_mode;
2734 3093
2735 err_mask = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0); 3094 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
2736 3095
2737 DPRINTK("EXIT, err_mask=%x\n", err_mask); 3096 DPRINTK("EXIT, err_mask=%x\n", err_mask);
2738 return err_mask; 3097 return err_mask;
@@ -2740,7 +3099,6 @@ static unsigned int ata_dev_set_xfermode(struct ata_port *ap,
2740 3099
2741/** 3100/**
2742 * ata_dev_init_params - Issue INIT DEV PARAMS command 3101 * ata_dev_init_params - Issue INIT DEV PARAMS command
2743 * @ap: Port associated with device @dev
2744 * @dev: Device to which command will be sent 3102 * @dev: Device to which command will be sent
2745 * @heads: Number of heads (taskfile parameter) 3103 * @heads: Number of heads (taskfile parameter)
2746 * @sectors: Number of sectors (taskfile parameter) 3104 * @sectors: Number of sectors (taskfile parameter)
@@ -2751,11 +3109,8 @@ static unsigned int ata_dev_set_xfermode(struct ata_port *ap,
2751 * RETURNS: 3109 * RETURNS:
2752 * 0 on success, AC_ERR_* mask otherwise. 3110 * 0 on success, AC_ERR_* mask otherwise.
2753 */ 3111 */
2754 3112static unsigned int ata_dev_init_params(struct ata_device *dev,
2755static unsigned int ata_dev_init_params(struct ata_port *ap, 3113 u16 heads, u16 sectors)
2756 struct ata_device *dev,
2757 u16 heads,
2758 u16 sectors)
2759{ 3114{
2760 struct ata_taskfile tf; 3115 struct ata_taskfile tf;
2761 unsigned int err_mask; 3116 unsigned int err_mask;
@@ -2767,14 +3122,14 @@ static unsigned int ata_dev_init_params(struct ata_port *ap,
2767 /* set up init dev params taskfile */ 3122 /* set up init dev params taskfile */
2768 DPRINTK("init dev params \n"); 3123 DPRINTK("init dev params \n");
2769 3124
2770 ata_tf_init(ap, &tf, dev->devno); 3125 ata_tf_init(dev, &tf);
2771 tf.command = ATA_CMD_INIT_DEV_PARAMS; 3126 tf.command = ATA_CMD_INIT_DEV_PARAMS;
2772 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 3127 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2773 tf.protocol = ATA_PROT_NODATA; 3128 tf.protocol = ATA_PROT_NODATA;
2774 tf.nsect = sectors; 3129 tf.nsect = sectors;
2775 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */ 3130 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
2776 3131
2777 err_mask = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0); 3132 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
2778 3133
2779 DPRINTK("EXIT, err_mask=%x\n", err_mask); 3134 DPRINTK("EXIT, err_mask=%x\n", err_mask);
2780 return err_mask; 3135 return err_mask;
@@ -2912,6 +3267,15 @@ int ata_check_atapi_dma(struct ata_queued_cmd *qc)
2912 if (ap->ops->check_atapi_dma) 3267 if (ap->ops->check_atapi_dma)
2913 rc = ap->ops->check_atapi_dma(qc); 3268 rc = ap->ops->check_atapi_dma(qc);
2914 3269
3270 /* We don't support polling DMA.
3271 * Use PIO if the LLDD handles only interrupts in
3272 * the HSM_ST_LAST state and the ATAPI device
3273 * generates CDB interrupts.
3274 */
3275 if ((ap->flags & ATA_FLAG_PIO_POLLING) &&
3276 (qc->dev->flags & ATA_DFLAG_CDB_INTR))
3277 rc = 1;
3278
2915 return rc; 3279 return rc;
2916} 3280}
2917/** 3281/**
@@ -3140,134 +3504,6 @@ skip_map:
3140} 3504}
3141 3505
3142/** 3506/**
3143 * ata_poll_qc_complete - turn irq back on and finish qc
3144 * @qc: Command to complete
3145 * @err_mask: ATA status register content
3146 *
3147 * LOCKING:
3148 * None. (grabs host lock)
3149 */
3150
3151void ata_poll_qc_complete(struct ata_queued_cmd *qc)
3152{
3153 struct ata_port *ap = qc->ap;
3154 unsigned long flags;
3155
3156 spin_lock_irqsave(&ap->host_set->lock, flags);
3157 ap->flags &= ~ATA_FLAG_NOINTR;
3158 ata_irq_on(ap);
3159 ata_qc_complete(qc);
3160 spin_unlock_irqrestore(&ap->host_set->lock, flags);
3161}
3162
3163/**
3164 * ata_pio_poll - poll using PIO, depending on current state
3165 * @ap: the target ata_port
3166 *
3167 * LOCKING:
3168 * None. (executing in kernel thread context)
3169 *
3170 * RETURNS:
3171 * timeout value to use
3172 */
3173
3174static unsigned long ata_pio_poll(struct ata_port *ap)
3175{
3176 struct ata_queued_cmd *qc;
3177 u8 status;
3178 unsigned int poll_state = HSM_ST_UNKNOWN;
3179 unsigned int reg_state = HSM_ST_UNKNOWN;
3180
3181 qc = ata_qc_from_tag(ap, ap->active_tag);
3182 WARN_ON(qc == NULL);
3183
3184 switch (ap->hsm_task_state) {
3185 case HSM_ST:
3186 case HSM_ST_POLL:
3187 poll_state = HSM_ST_POLL;
3188 reg_state = HSM_ST;
3189 break;
3190 case HSM_ST_LAST:
3191 case HSM_ST_LAST_POLL:
3192 poll_state = HSM_ST_LAST_POLL;
3193 reg_state = HSM_ST_LAST;
3194 break;
3195 default:
3196 BUG();
3197 break;
3198 }
3199
3200 status = ata_chk_status(ap);
3201 if (status & ATA_BUSY) {
3202 if (time_after(jiffies, ap->pio_task_timeout)) {
3203 qc->err_mask |= AC_ERR_TIMEOUT;
3204 ap->hsm_task_state = HSM_ST_TMOUT;
3205 return 0;
3206 }
3207 ap->hsm_task_state = poll_state;
3208 return ATA_SHORT_PAUSE;
3209 }
3210
3211 ap->hsm_task_state = reg_state;
3212 return 0;
3213}
3214
3215/**
3216 * ata_pio_complete - check if drive is busy or idle
3217 * @ap: the target ata_port
3218 *
3219 * LOCKING:
3220 * None. (executing in kernel thread context)
3221 *
3222 * RETURNS:
3223 * Non-zero if qc completed, zero otherwise.
3224 */
3225
3226static int ata_pio_complete (struct ata_port *ap)
3227{
3228 struct ata_queued_cmd *qc;
3229 u8 drv_stat;
3230
3231 /*
3232 * This is purely heuristic. This is a fast path. Sometimes when
3233 * we enter, BSY will be cleared in a chk-status or two. If not,
3234 * the drive is probably seeking or something. Snooze for a couple
3235 * msecs, then chk-status again. If still busy, fall back to
3236 * HSM_ST_POLL state.
3237 */
3238 drv_stat = ata_busy_wait(ap, ATA_BUSY, 10);
3239 if (drv_stat & ATA_BUSY) {
3240 msleep(2);
3241 drv_stat = ata_busy_wait(ap, ATA_BUSY, 10);
3242 if (drv_stat & ATA_BUSY) {
3243 ap->hsm_task_state = HSM_ST_LAST_POLL;
3244 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
3245 return 0;
3246 }
3247 }
3248
3249 qc = ata_qc_from_tag(ap, ap->active_tag);
3250 WARN_ON(qc == NULL);
3251
3252 drv_stat = ata_wait_idle(ap);
3253 if (!ata_ok(drv_stat)) {
3254 qc->err_mask |= __ac_err_mask(drv_stat);
3255 ap->hsm_task_state = HSM_ST_ERR;
3256 return 0;
3257 }
3258
3259 ap->hsm_task_state = HSM_ST_IDLE;
3260
3261 WARN_ON(qc->err_mask);
3262 ata_poll_qc_complete(qc);
3263
3264 /* another command may start at this point */
3265
3266 return 1;
3267}
3268
3269
3270/**
3271 * swap_buf_le16 - swap halves of 16-bit words in place 3507 * swap_buf_le16 - swap halves of 16-bit words in place
3272 * @buf: Buffer to swap 3508 * @buf: Buffer to swap
3273 * @buf_words: Number of 16-bit words in buffer. 3509 * @buf_words: Number of 16-bit words in buffer.
@@ -3291,7 +3527,7 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
3291 3527
3292/** 3528/**
3293 * ata_mmio_data_xfer - Transfer data by MMIO 3529 * ata_mmio_data_xfer - Transfer data by MMIO
3294 * @ap: port to read/write 3530 * @adev: device for this I/O
3295 * @buf: data buffer 3531 * @buf: data buffer
3296 * @buflen: buffer length 3532 * @buflen: buffer length
3297 * @write_data: read/write 3533 * @write_data: read/write
@@ -3302,9 +3538,10 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
3302 * Inherited from caller. 3538 * Inherited from caller.
3303 */ 3539 */
3304 3540
3305static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf, 3541void ata_mmio_data_xfer(struct ata_device *adev, unsigned char *buf,
3306 unsigned int buflen, int write_data) 3542 unsigned int buflen, int write_data)
3307{ 3543{
3544 struct ata_port *ap = adev->ap;
3308 unsigned int i; 3545 unsigned int i;
3309 unsigned int words = buflen >> 1; 3546 unsigned int words = buflen >> 1;
3310 u16 *buf16 = (u16 *) buf; 3547 u16 *buf16 = (u16 *) buf;
@@ -3336,7 +3573,7 @@ static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
3336 3573
3337/** 3574/**
3338 * ata_pio_data_xfer - Transfer data by PIO 3575 * ata_pio_data_xfer - Transfer data by PIO
3339 * @ap: port to read/write 3576 * @adev: device to target
3340 * @buf: data buffer 3577 * @buf: data buffer
3341 * @buflen: buffer length 3578 * @buflen: buffer length
3342 * @write_data: read/write 3579 * @write_data: read/write
@@ -3347,9 +3584,10 @@ static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
3347 * Inherited from caller. 3584 * Inherited from caller.
3348 */ 3585 */
3349 3586
3350static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf, 3587void ata_pio_data_xfer(struct ata_device *adev, unsigned char *buf,
3351 unsigned int buflen, int write_data) 3588 unsigned int buflen, int write_data)
3352{ 3589{
3590 struct ata_port *ap = adev->ap;
3353 unsigned int words = buflen >> 1; 3591 unsigned int words = buflen >> 1;
3354 3592
3355 /* Transfer multiple of 2 bytes */ 3593 /* Transfer multiple of 2 bytes */
@@ -3374,39 +3612,6 @@ static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf,
3374} 3612}
3375 3613
3376/** 3614/**
3377 * ata_data_xfer - Transfer data from/to the data register.
3378 * @ap: port to read/write
3379 * @buf: data buffer
3380 * @buflen: buffer length
3381 * @do_write: read/write
3382 *
3383 * Transfer data from/to the device data register.
3384 *
3385 * LOCKING:
3386 * Inherited from caller.
3387 */
3388
3389static void ata_data_xfer(struct ata_port *ap, unsigned char *buf,
3390 unsigned int buflen, int do_write)
3391{
3392 /* Make the crap hardware pay the costs not the good stuff */
3393 if (unlikely(ap->flags & ATA_FLAG_IRQ_MASK)) {
3394 unsigned long flags;
3395 local_irq_save(flags);
3396 if (ap->flags & ATA_FLAG_MMIO)
3397 ata_mmio_data_xfer(ap, buf, buflen, do_write);
3398 else
3399 ata_pio_data_xfer(ap, buf, buflen, do_write);
3400 local_irq_restore(flags);
3401 } else {
3402 if (ap->flags & ATA_FLAG_MMIO)
3403 ata_mmio_data_xfer(ap, buf, buflen, do_write);
3404 else
3405 ata_pio_data_xfer(ap, buf, buflen, do_write);
3406 }
3407}
3408
3409/**
3410 * ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data. 3615 * ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
3411 * @qc: Command on going 3616 * @qc: Command on going
3412 * 3617 *
@@ -3435,7 +3640,24 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
3435 page = nth_page(page, (offset >> PAGE_SHIFT)); 3640 page = nth_page(page, (offset >> PAGE_SHIFT));
3436 offset %= PAGE_SIZE; 3641 offset %= PAGE_SIZE;
3437 3642
3438 buf = kmap(page) + offset; 3643 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3644
3645 if (PageHighMem(page)) {
3646 unsigned long flags;
3647
3648 /* FIXME: use a bounce buffer */
3649 local_irq_save(flags);
3650 buf = kmap_atomic(page, KM_IRQ0);
3651
3652 /* do the actual data transfer */
3653 ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);
3654
3655 kunmap_atomic(buf, KM_IRQ0);
3656 local_irq_restore(flags);
3657 } else {
3658 buf = page_address(page);
3659 ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);
3660 }
3439 3661
3440 qc->cursect++; 3662 qc->cursect++;
3441 qc->cursg_ofs++; 3663 qc->cursg_ofs++;
@@ -3444,14 +3666,68 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
3444 qc->cursg++; 3666 qc->cursg++;
3445 qc->cursg_ofs = 0; 3667 qc->cursg_ofs = 0;
3446 } 3668 }
3669}
3447 3670
3448 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read"); 3671/**
3672 * ata_pio_sectors - Transfer one or many 512-byte sectors.
3673 * @qc: Command on going
3674 *
3675 * Transfer one or many ATA_SECT_SIZE of data from/to the
3676 * ATA device for the DRQ request.
3677 *
3678 * LOCKING:
3679 * Inherited from caller.
3680 */
3681
3682static void ata_pio_sectors(struct ata_queued_cmd *qc)
3683{
3684 if (is_multi_taskfile(&qc->tf)) {
3685 /* READ/WRITE MULTIPLE */
3686 unsigned int nsect;
3687
3688 WARN_ON(qc->dev->multi_count == 0);
3689
3690 nsect = min(qc->nsect - qc->cursect, qc->dev->multi_count);
3691 while (nsect--)
3692 ata_pio_sector(qc);
3693 } else
3694 ata_pio_sector(qc);
3695}
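
The chunking rule above is worth seeing in isolation: for READ/WRITE MULTIPLE each DRQ event moves up to multi_count sectors, the tail block being whatever remains. A minimal userspace sketch (names illustrative, not libata's):

#include <stdio.h>

/* Model of the ata_pio_sectors() rule: each DRQ event transfers
 * min(sectors remaining, multi_count) sectors for READ/WRITE
 * MULTIPLE, exactly one otherwise. */
static unsigned int drq_chunk(unsigned int remaining, unsigned int multi_count)
{
	return remaining < multi_count ? remaining : multi_count;
}

int main(void)
{
	unsigned int nsect = 10, cursect = 0, multi = 4;

	while (cursect < nsect) {
		unsigned int n = drq_chunk(nsect - cursect, multi);
		cursect += n;
		printf("DRQ block: %u sectors, %u/%u done\n", n, cursect, nsect);
	}
	/* prints blocks of 4, 4, 2 */
	return 0;
}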
3696
3697/**
3698 * atapi_send_cdb - Write CDB bytes to hardware
3699 * @ap: Port to which ATAPI device is attached.
3700 * @qc: Taskfile currently active
3701 *
3702 * When device has indicated its readiness to accept
3703 * a CDB, this function is called. Send the CDB.
3704 *
3705 * LOCKING:
3706 * caller.
3707 */
3449 3708
3450 /* do the actual data transfer */ 3709static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
3451 do_write = (qc->tf.flags & ATA_TFLAG_WRITE); 3710{
3452 ata_data_xfer(ap, buf, ATA_SECT_SIZE, do_write); 3711 /* send SCSI cdb */
3712 DPRINTK("send cdb\n");
3713 WARN_ON(qc->dev->cdb_len < 12);
3453 3714
3454 kunmap(page); 3715 ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
3716 ata_altstatus(ap); /* flush */
3717
3718 switch (qc->tf.protocol) {
3719 case ATA_PROT_ATAPI:
3720 ap->hsm_task_state = HSM_ST;
3721 break;
3722 case ATA_PROT_ATAPI_NODATA:
3723 ap->hsm_task_state = HSM_ST_LAST;
3724 break;
3725 case ATA_PROT_ATAPI_DMA:
3726 ap->hsm_task_state = HSM_ST_LAST;
3727 /* initiate bmdma */
3728 ap->ops->bmdma_start(qc);
3729 break;
3730 }
3455} 3731}
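
After the CDB goes out, the next HSM state depends only on the ATAPI protocol variant. A compilable sketch of that mapping (enum names are illustrative):

#include <stdio.h>

enum proto { PROT_ATAPI, PROT_ATAPI_NODATA, PROT_ATAPI_DMA };
enum hsm_state { HSM_ST_X, HSM_ST_LAST_X };

/* PIO ATAPI expects more DRQ data blocks (HSM_ST); NODATA and DMA
 * wait for command completion (HSM_ST_LAST), DMA after the bmdma
 * engine has been kicked. */
static enum hsm_state state_after_cdb(enum proto p)
{
	switch (p) {
	case PROT_ATAPI:
		return HSM_ST_X;
	case PROT_ATAPI_NODATA:
	case PROT_ATAPI_DMA:
		return HSM_ST_LAST_X;
	}
	return HSM_ST_LAST_X;
}

int main(void)
{
	printf("%d %d %d\n", state_after_cdb(PROT_ATAPI),
	       state_after_cdb(PROT_ATAPI_NODATA),
	       state_after_cdb(PROT_ATAPI_DMA));	/* 0 1 1 */
	return 0;
}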
3456 3732
3457/** 3733/**
@@ -3492,11 +3768,11 @@ next_sg:
3492 unsigned int i; 3768 unsigned int i;
3493 3769
3494 if (words) /* warning if bytes > 1 */ 3770 if (words) /* warning if bytes > 1 */
3495 printk(KERN_WARNING "ata%u: %u bytes trailing data\n", 3771 ata_dev_printk(qc->dev, KERN_WARNING,
3496 ap->id, bytes); 3772 "%u bytes trailing data\n", bytes);
3497 3773
3498 for (i = 0; i < words; i++) 3774 for (i = 0; i < words; i++)
3499 ata_data_xfer(ap, (unsigned char*)pad_buf, 2, do_write); 3775 ap->ops->data_xfer(qc->dev, (unsigned char*)pad_buf, 2, do_write);
3500 3776
3501 ap->hsm_task_state = HSM_ST_LAST; 3777 ap->hsm_task_state = HSM_ST_LAST;
3502 return; 3778 return;
@@ -3517,7 +3793,24 @@ next_sg:
3517 /* don't cross page boundaries */ 3793 /* don't cross page boundaries */
3518 count = min(count, (unsigned int)PAGE_SIZE - offset); 3794 count = min(count, (unsigned int)PAGE_SIZE - offset);
3519 3795
3520 buf = kmap(page) + offset; 3796 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3797
3798 if (PageHighMem(page)) {
3799 unsigned long flags;
3800
3801 /* FIXME: use bounce buffer */
3802 local_irq_save(flags);
3803 buf = kmap_atomic(page, KM_IRQ0);
3804
3805 /* do the actual data transfer */
3806 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
3807
3808 kunmap_atomic(buf, KM_IRQ0);
3809 local_irq_restore(flags);
3810 } else {
3811 buf = page_address(page);
3812 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
3813 }
3521 3814
3522 bytes -= count; 3815 bytes -= count;
3523 qc->curbytes += count; 3816 qc->curbytes += count;
@@ -3528,13 +3821,6 @@ next_sg:
3528 qc->cursg_ofs = 0; 3821 qc->cursg_ofs = 0;
3529 } 3822 }
3530 3823
3531 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3532
3533 /* do the actual data transfer */
3534 ata_data_xfer(ap, buf, count, do_write);
3535
3536 kunmap(page);
3537
3538 if (bytes) 3824 if (bytes)
3539 goto next_sg; 3825 goto next_sg;
3540} 3826}
@@ -3556,10 +3842,16 @@ static void atapi_pio_bytes(struct ata_queued_cmd *qc)
3556 unsigned int ireason, bc_lo, bc_hi, bytes; 3842 unsigned int ireason, bc_lo, bc_hi, bytes;
3557 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0; 3843 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
3558 3844
3559 ap->ops->tf_read(ap, &qc->tf); 3845 /* Abuse qc->result_tf for temp storage of intermediate TF
3560 ireason = qc->tf.nsect; 3846 * here to save some kernel stack usage.
3561 bc_lo = qc->tf.lbam; 3847 * For normal completion, qc->result_tf is not relevant. For
3562 bc_hi = qc->tf.lbah; 3848 * error, qc->result_tf is later overwritten by ata_qc_complete().
3849 * So, the correctness of qc->result_tf is not affected.
3850 */
3851 ap->ops->tf_read(ap, &qc->result_tf);
3852 ireason = qc->result_tf.nsect;
3853 bc_lo = qc->result_tf.lbam;
3854 bc_hi = qc->result_tf.lbah;
3563 bytes = (bc_hi << 8) | bc_lo; 3855 bytes = (bc_hi << 8) | bc_lo;
3564 3856
3565 /* shall be cleared to zero, indicating xfer of data */ 3857 /* shall be cleared to zero, indicating xfer of data */
@@ -3571,307 +3863,366 @@ static void atapi_pio_bytes(struct ata_queued_cmd *qc)
3571 if (do_write != i_write) 3863 if (do_write != i_write)
3572 goto err_out; 3864 goto err_out;
3573 3865
 3866 VPRINTK("ata%u: xferring %d bytes\n", ap->id, bytes);
3867
3574 __atapi_pio_bytes(qc, bytes); 3868 __atapi_pio_bytes(qc, bytes);
3575 3869
3576 return; 3870 return;
3577 3871
3578err_out: 3872err_out:
3579 printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n", 3873 ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
3580 ap->id, dev->devno);
3581 qc->err_mask |= AC_ERR_HSM; 3874 qc->err_mask |= AC_ERR_HSM;
3582 ap->hsm_task_state = HSM_ST_ERR; 3875 ap->hsm_task_state = HSM_ST_ERR;
3583} 3876}
3584 3877
3585/** 3878/**
3586 * ata_pio_block - start PIO on a block 3879 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
3587 * @ap: the target ata_port 3880 * @ap: the target ata_port
3881 * @qc: qc on going
3588 * 3882 *
3589 * LOCKING: 3883 * RETURNS:
3590 * None. (executing in kernel thread context) 3884 * 1 if ok in workqueue, 0 otherwise.
3591 */ 3885 */
3592 3886
3593static void ata_pio_block(struct ata_port *ap) 3887static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
3594{ 3888{
3595 struct ata_queued_cmd *qc; 3889 if (qc->tf.flags & ATA_TFLAG_POLLING)
3596 u8 status; 3890 return 1;
3597
3598 /*
3599 * This is purely heuristic. This is a fast path.
3600 * Sometimes when we enter, BSY will be cleared in
3601 * a chk-status or two. If not, the drive is probably seeking
3602 * or something. Snooze for a couple msecs, then
3603 * chk-status again. If still busy, fall back to
3604 * HSM_ST_POLL state.
3605 */
3606 status = ata_busy_wait(ap, ATA_BUSY, 5);
3607 if (status & ATA_BUSY) {
3608 msleep(2);
3609 status = ata_busy_wait(ap, ATA_BUSY, 10);
3610 if (status & ATA_BUSY) {
3611 ap->hsm_task_state = HSM_ST_POLL;
3612 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
3613 return;
3614 }
3615 }
3616
3617 qc = ata_qc_from_tag(ap, ap->active_tag);
3618 WARN_ON(qc == NULL);
3619
3620 /* check error */
3621 if (status & (ATA_ERR | ATA_DF)) {
3622 qc->err_mask |= AC_ERR_DEV;
3623 ap->hsm_task_state = HSM_ST_ERR;
3624 return;
3625 }
3626 3891
3627 /* transfer data if any */ 3892 if (ap->hsm_task_state == HSM_ST_FIRST) {
3628 if (is_atapi_taskfile(&qc->tf)) { 3893 if (qc->tf.protocol == ATA_PROT_PIO &&
3629 /* DRQ=0 means no more data to transfer */ 3894 (qc->tf.flags & ATA_TFLAG_WRITE))
3630 if ((status & ATA_DRQ) == 0) { 3895 return 1;
3631 ap->hsm_task_state = HSM_ST_LAST;
3632 return;
3633 }
3634 3896
3635 atapi_pio_bytes(qc); 3897 if (is_atapi_taskfile(&qc->tf) &&
3636 } else { 3898 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
3637 /* handle BSY=0, DRQ=0 as error */ 3899 return 1;
3638 if ((status & ATA_DRQ) == 0) {
3639 qc->err_mask |= AC_ERR_HSM;
3640 ap->hsm_task_state = HSM_ST_ERR;
3641 return;
3642 }
3643
3644 ata_pio_sector(qc);
3645 } 3900 }
3646 3901
3647 ata_altstatus(ap); /* flush */ 3902 return 0;
3648}
3649
3650static void ata_pio_error(struct ata_port *ap)
3651{
3652 struct ata_queued_cmd *qc;
3653
3654 qc = ata_qc_from_tag(ap, ap->active_tag);
3655 WARN_ON(qc == NULL);
3656
3657 if (qc->tf.command != ATA_CMD_PACKET)
3658 printk(KERN_WARNING "ata%u: PIO error\n", ap->id);
3659
3660 /* make sure qc->err_mask is available to
3661 * know what's wrong and recover
3662 */
3663 WARN_ON(qc->err_mask == 0);
3664
3665 ap->hsm_task_state = HSM_ST_IDLE;
3666
3667 ata_poll_qc_complete(qc);
3668} 3903}
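
The predicate distilled: polling commands always run in the workqueue; at HSM_ST_FIRST, PIO writes and ATAPI devices without CDB interrupts must also start there, because the host has to push the first bytes itself. A standalone sketch with illustrative flag values:

#include <stdio.h>

#define TFLAG_POLLING	(1u << 0)	/* illustrative bit values */
#define TFLAG_WRITE	(1u << 1)
#define DFLAG_CDB_INTR	(1u << 0)

enum proto { PROT_PIO, PROT_ATAPI, PROT_DMA };

static int ok_in_wq(unsigned int tf_flags, enum proto proto,
		    unsigned int dev_flags, int at_first)
{
	if (tf_flags & TFLAG_POLLING)
		return 1;
	if (at_first) {
		/* first PIO data block for a write is pushed by the host */
		if (proto == PROT_PIO && (tf_flags & TFLAG_WRITE))
			return 1;
		/* CDB must be pushed when the device won't interrupt for it */
		if (proto == PROT_ATAPI && !(dev_flags & DFLAG_CDB_INTR))
			return 1;
	}
	return 0;
}

int main(void)
{
	printf("%d\n", ok_in_wq(0, PROT_PIO, 0, 1));		/* 0: PIO read, irq driven */
	printf("%d\n", ok_in_wq(TFLAG_WRITE, PROT_PIO, 0, 1));	/* 1: first write block */
	printf("%d\n", ok_in_wq(0, PROT_ATAPI, DFLAG_CDB_INTR, 1)); /* 0: device pulls CDB */
	return 0;
}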
3669 3904
3670static void ata_pio_task(void *_data) 3905/**
3906 * ata_hsm_qc_complete - finish a qc running on standard HSM
3907 * @qc: Command to complete
3908 * @in_wq: 1 if called from workqueue, 0 otherwise
3909 *
3910 * Finish @qc which is running on standard HSM.
3911 *
3912 * LOCKING:
3913 * If @in_wq is zero, spin_lock_irqsave(host_set lock).
3914 * Otherwise, none on entry and grabs host lock.
3915 */
3916static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
3671{ 3917{
3672 struct ata_port *ap = _data; 3918 struct ata_port *ap = qc->ap;
3673 unsigned long timeout; 3919 unsigned long flags;
3674 int qc_completed;
3675
3676fsm_start:
3677 timeout = 0;
3678 qc_completed = 0;
3679
3680 switch (ap->hsm_task_state) {
3681 case HSM_ST_IDLE:
3682 return;
3683
3684 case HSM_ST:
3685 ata_pio_block(ap);
3686 break;
3687 3920
3688 case HSM_ST_LAST: 3921 if (ap->ops->error_handler) {
3689 qc_completed = ata_pio_complete(ap); 3922 if (in_wq) {
3690 break; 3923 spin_lock_irqsave(&ap->host_set->lock, flags);
3691 3924
3692 case HSM_ST_POLL: 3925 /* EH might have kicked in while host_set lock
3693 case HSM_ST_LAST_POLL: 3926 * is released.
3694 timeout = ata_pio_poll(ap); 3927 */
3695 break; 3928 qc = ata_qc_from_tag(ap, qc->tag);
3929 if (qc) {
3930 if (likely(!(qc->err_mask & AC_ERR_HSM))) {
3931 ata_irq_on(ap);
3932 ata_qc_complete(qc);
3933 } else
3934 ata_port_freeze(ap);
3935 }
3696 3936
3697 case HSM_ST_TMOUT: 3937 spin_unlock_irqrestore(&ap->host_set->lock, flags);
3698 case HSM_ST_ERR: 3938 } else {
3699 ata_pio_error(ap); 3939 if (likely(!(qc->err_mask & AC_ERR_HSM)))
3700 return; 3940 ata_qc_complete(qc);
3941 else
3942 ata_port_freeze(ap);
3943 }
3944 } else {
3945 if (in_wq) {
3946 spin_lock_irqsave(&ap->host_set->lock, flags);
3947 ata_irq_on(ap);
3948 ata_qc_complete(qc);
3949 spin_unlock_irqrestore(&ap->host_set->lock, flags);
3950 } else
3951 ata_qc_complete(qc);
3701 } 3952 }
3702 3953
3703 if (timeout) 3954 ata_altstatus(ap); /* flush */
3704 ata_port_queue_task(ap, ata_pio_task, ap, timeout);
3705 else if (!qc_completed)
3706 goto fsm_start;
3707} 3955}
3708 3956
3709/** 3957/**
3710 * atapi_packet_task - Write CDB bytes to hardware 3958 * ata_hsm_move - move the HSM to the next state.
3711 * @_data: Port to which ATAPI device is attached. 3959 * @ap: the target ata_port
3712 * 3960 * @qc: qc on going
3713 * When device has indicated its readiness to accept 3961 * @status: current device status
3714 * a CDB, this function is called. Send the CDB. 3962 * @in_wq: 1 if called from workqueue, 0 otherwise
3715 * If DMA is to be performed, exit immediately.
3716 * Otherwise, we are in polling mode, so poll
3717 * status under operation succeeds or fails.
3718 * 3963 *
3719 * LOCKING: 3964 * RETURNS:
3720 * Kernel thread context (may sleep) 3965 * 1 when the next status poll is needed, 0 otherwise.
3721 */ 3966 */
3722 3967
3723static void atapi_packet_task(void *_data) 3968static int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
3969 u8 status, int in_wq)
3724{ 3970{
3725 struct ata_port *ap = _data; 3971 unsigned long flags = 0;
3726 struct ata_queued_cmd *qc; 3972 int poll_next;
3727 u8 status;
3728 3973
3729 qc = ata_qc_from_tag(ap, ap->active_tag); 3974 WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
3730 WARN_ON(qc == NULL);
3731 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
3732 3975
3733 /* sleep-wait for BSY to clear */ 3976 /* Make sure ata_qc_issue_prot() does not throw things
3734 DPRINTK("busy wait\n"); 3977 * like DMA polling into the workqueue. Notice that
3735 if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB)) { 3978 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
3736 qc->err_mask |= AC_ERR_TIMEOUT; 3979 */
3737 goto err_out; 3980 WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
3738 }
3739 3981
3740 /* make sure DRQ is set */ 3982fsm_start:
3741 status = ata_chk_status(ap); 3983 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
3742 if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ) { 3984 ap->id, qc->tf.protocol, ap->hsm_task_state, status);
3743 qc->err_mask |= AC_ERR_HSM;
3744 goto err_out;
3745 }
3746 3985
3747 /* send SCSI cdb */ 3986 switch (ap->hsm_task_state) {
3748 DPRINTK("send cdb\n"); 3987 case HSM_ST_FIRST:
3749 WARN_ON(qc->dev->cdb_len < 12); 3988 /* Send first data block or PACKET CDB */
3750 3989
3751 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA || 3990 /* If polling, we will stay in the work queue after
3752 qc->tf.protocol == ATA_PROT_ATAPI_NODATA) { 3991 * sending the data. Otherwise, interrupt handler
3753 unsigned long flags; 3992 * takes over after sending the data.
3993 */
3994 poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
3995
3996 /* check device status */
3997 if (unlikely((status & ATA_DRQ) == 0)) {
3998 /* handle BSY=0, DRQ=0 as error */
3999 if (likely(status & (ATA_ERR | ATA_DF)))
4000 /* device stops HSM for abort/error */
4001 qc->err_mask |= AC_ERR_DEV;
4002 else
4003 /* HSM violation. Let EH handle this */
4004 qc->err_mask |= AC_ERR_HSM;
3754 4005
3755 /* Once we're done issuing command and kicking bmdma, 4006 ap->hsm_task_state = HSM_ST_ERR;
3756 * irq handler takes over. To not lose irq, we need 4007 goto fsm_start;
3757 * to clear NOINTR flag before sending cdb, but 4008 }
3758 * interrupt handler shouldn't be invoked before we're 4009
3759 * finished. Hence, the following locking. 4010 /* Device should not ask for data transfer (DRQ=1)
4011 * when it finds something wrong.
4012 * We ignore DRQ here and stop the HSM by
4013 * changing hsm_task_state to HSM_ST_ERR and
4014 * let the EH abort the command or reset the device.
3760 */ 4015 */
3761 spin_lock_irqsave(&ap->host_set->lock, flags); 4016 if (unlikely(status & (ATA_ERR | ATA_DF))) {
3762 ap->flags &= ~ATA_FLAG_NOINTR; 4017 printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n",
3763 ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1); 4018 ap->id, status);
3764 ata_altstatus(ap); /* flush */ 4019 qc->err_mask |= AC_ERR_HSM;
4020 ap->hsm_task_state = HSM_ST_ERR;
4021 goto fsm_start;
4022 }
3765 4023
3766 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA) 4024 /* Send the CDB (atapi) or the first data block (ata pio out).
3767 ap->ops->bmdma_start(qc); /* initiate bmdma */ 4025 * During the state transition, interrupt handler shouldn't
3768 spin_unlock_irqrestore(&ap->host_set->lock, flags); 4026 * be invoked before the data transfer is complete and
3769 } else { 4027 * hsm_task_state is changed. Hence, the following locking.
3770 ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1); 4028 */
3771 ata_altstatus(ap); /* flush */ 4029 if (in_wq)
4030 spin_lock_irqsave(&ap->host_set->lock, flags);
3772 4031
3773 /* PIO commands are handled by polling */ 4032 if (qc->tf.protocol == ATA_PROT_PIO) {
3774 ap->hsm_task_state = HSM_ST; 4033 /* PIO data out protocol.
3775 ata_port_queue_task(ap, ata_pio_task, ap, 0); 4034 * send first data block.
3776 } 4035 */
3777 4036
3778 return; 4037 /* ata_pio_sectors() might change the state
4038 * to HSM_ST_LAST. so, the state is changed here
4039 * before ata_pio_sectors().
4040 */
4041 ap->hsm_task_state = HSM_ST;
4042 ata_pio_sectors(qc);
4043 ata_altstatus(ap); /* flush */
4044 } else
4045 /* send CDB */
4046 atapi_send_cdb(ap, qc);
4047
4048 if (in_wq)
4049 spin_unlock_irqrestore(&ap->host_set->lock, flags);
4050
4051 /* if polling, ata_pio_task() handles the rest.
4052 * otherwise, interrupt handler takes over from here.
4053 */
4054 break;
3779 4055
3780err_out: 4056 case HSM_ST:
3781 ata_poll_qc_complete(qc); 4057 /* complete command or read/write the data register */
3782} 4058 if (qc->tf.protocol == ATA_PROT_ATAPI) {
4059 /* ATAPI PIO protocol */
4060 if ((status & ATA_DRQ) == 0) {
4061 /* No more data to transfer or device error.
4062 * Device error will be tagged in HSM_ST_LAST.
4063 */
4064 ap->hsm_task_state = HSM_ST_LAST;
4065 goto fsm_start;
4066 }
3783 4067
3784/** 4068 /* Device should not ask for data transfer (DRQ=1)
3785 * ata_qc_timeout - Handle timeout of queued command 4069 * when it finds something wrong.
3786 * @qc: Command that timed out 4070 * We ignore DRQ here and stop the HSM by
3787 * 4071 * changing hsm_task_state to HSM_ST_ERR and
3788 * Some part of the kernel (currently, only the SCSI layer) 4072 * let the EH abort the command or reset the device.
3789 * has noticed that the active command on port @ap has not 4073 */
3790 * completed after a specified length of time. Handle this 4074 if (unlikely(status & (ATA_ERR | ATA_DF))) {
3791 * condition by disabling DMA (if necessary) and completing 4075 printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n",
3792 * transactions, with error if necessary. 4076 ap->id, status);
3793 * 4077 qc->err_mask |= AC_ERR_HSM;
3794 * This also handles the case of the "lost interrupt", where 4078 ap->hsm_task_state = HSM_ST_ERR;
3795 * for some reason (possibly hardware bug, possibly driver bug) 4079 goto fsm_start;
3796 * an interrupt was not delivered to the driver, even though the 4080 }
3797 * transaction completed successfully.
3798 *
3799 * LOCKING:
3800 * Inherited from SCSI layer (none, can sleep)
3801 */
3802 4081
3803static void ata_qc_timeout(struct ata_queued_cmd *qc) 4082 atapi_pio_bytes(qc);
3804{
3805 struct ata_port *ap = qc->ap;
3806 struct ata_host_set *host_set = ap->host_set;
3807 u8 host_stat = 0, drv_stat;
3808 unsigned long flags;
3809 4083
3810 DPRINTK("ENTER\n"); 4084 if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
4085 /* bad ireason reported by device */
4086 goto fsm_start;
3811 4087
3812 ap->hsm_task_state = HSM_ST_IDLE; 4088 } else {
4089 /* ATA PIO protocol */
4090 if (unlikely((status & ATA_DRQ) == 0)) {
4091 /* handle BSY=0, DRQ=0 as error */
4092 if (likely(status & (ATA_ERR | ATA_DF)))
4093 /* device stops HSM for abort/error */
4094 qc->err_mask |= AC_ERR_DEV;
4095 else
4096 /* HSM violation. Let EH handle this */
4097 qc->err_mask |= AC_ERR_HSM;
4098
4099 ap->hsm_task_state = HSM_ST_ERR;
4100 goto fsm_start;
4101 }
3813 4102
3814 spin_lock_irqsave(&host_set->lock, flags); 4103 /* For PIO reads, some devices may ask for
 4104 * data transfer (DRQ=1) along with ERR=1.
4105 * We respect DRQ here and transfer one
4106 * block of junk data before changing the
4107 * hsm_task_state to HSM_ST_ERR.
4108 *
4109 * For PIO writes, ERR=1 DRQ=1 doesn't make
4110 * sense since the data block has been
4111 * transferred to the device.
4112 */
4113 if (unlikely(status & (ATA_ERR | ATA_DF))) {
 4114 /* data might be corrupted */
4115 qc->err_mask |= AC_ERR_DEV;
4116
4117 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
4118 ata_pio_sectors(qc);
4119 ata_altstatus(ap);
4120 status = ata_wait_idle(ap);
4121 }
4122
4123 if (status & (ATA_BUSY | ATA_DRQ))
4124 qc->err_mask |= AC_ERR_HSM;
4125
4126 /* ata_pio_sectors() might change the
4127 * state to HSM_ST_LAST. so, the state
4128 * is changed after ata_pio_sectors().
4129 */
4130 ap->hsm_task_state = HSM_ST_ERR;
4131 goto fsm_start;
4132 }
3815 4133
3816 switch (qc->tf.protocol) { 4134 ata_pio_sectors(qc);
3817 4135
3818 case ATA_PROT_DMA: 4136 if (ap->hsm_task_state == HSM_ST_LAST &&
3819 case ATA_PROT_ATAPI_DMA: 4137 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
3820 host_stat = ap->ops->bmdma_status(ap); 4138 /* all data read */
4139 ata_altstatus(ap);
4140 status = ata_wait_idle(ap);
4141 goto fsm_start;
4142 }
4143 }
3821 4144
3822 /* before we do anything else, clear DMA-Start bit */ 4145 ata_altstatus(ap); /* flush */
3823 ap->ops->bmdma_stop(qc); 4146 poll_next = 1;
4147 break;
3824 4148
3825 /* fall through */ 4149 case HSM_ST_LAST:
4150 if (unlikely(!ata_ok(status))) {
4151 qc->err_mask |= __ac_err_mask(status);
4152 ap->hsm_task_state = HSM_ST_ERR;
4153 goto fsm_start;
4154 }
3826 4155
3827 default: 4156 /* no more data to transfer */
3828 ata_altstatus(ap); 4157 DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
3829 drv_stat = ata_chk_status(ap); 4158 ap->id, qc->dev->devno, status);
3830 4159
3831 /* ack bmdma irq events */ 4160 WARN_ON(qc->err_mask);
3832 ap->ops->irq_clear(ap);
3833 4161
3834 printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x host_stat 0x%x\n", 4162 ap->hsm_task_state = HSM_ST_IDLE;
3835 ap->id, qc->tf.command, drv_stat, host_stat);
3836 4163
3837 /* complete taskfile transaction */ 4164 /* complete taskfile transaction */
3838 qc->err_mask |= ac_err_mask(drv_stat); 4165 ata_hsm_qc_complete(qc, in_wq);
4166
4167 poll_next = 0;
3839 break; 4168 break;
3840 }
3841 4169
3842 spin_unlock_irqrestore(&host_set->lock, flags); 4170 case HSM_ST_ERR:
4171 /* make sure qc->err_mask is available to
4172 * know what's wrong and recover
4173 */
4174 WARN_ON(qc->err_mask == 0);
3843 4175
3844 ata_eh_qc_complete(qc); 4176 ap->hsm_task_state = HSM_ST_IDLE;
3845 4177
3846 DPRINTK("EXIT\n"); 4178 /* complete taskfile transaction */
3847} 4179 ata_hsm_qc_complete(qc, in_wq);
3848 4180
3849/** 4181 poll_next = 0;
3850 * ata_eng_timeout - Handle timeout of queued command 4182 break;
3851 * @ap: Port on which timed-out command is active 4183 default:
3852 * 4184 poll_next = 0;
3853 * Some part of the kernel (currently, only the SCSI layer) 4185 BUG();
3854 * has noticed that the active command on port @ap has not 4186 }
3855 * completed after a specified length of time. Handle this
3856 * condition by disabling DMA (if necessary) and completing
3857 * transactions, with error if necessary.
3858 *
3859 * This also handles the case of the "lost interrupt", where
3860 * for some reason (possibly hardware bug, possibly driver bug)
3861 * an interrupt was not delivered to the driver, even though the
3862 * transaction completed successfully.
3863 *
3864 * LOCKING:
3865 * Inherited from SCSI layer (none, can sleep)
3866 */
3867 4187
3868void ata_eng_timeout(struct ata_port *ap) 4188 return poll_next;
4189}
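
One property of ata_hsm_move() worth stating compactly is its return value: the caller keeps polling only while more DRQ events are expected. A sketch of just that rule, with the states reduced to the four above:

#include <stdio.h>

enum hsm { ST_FIRST, ST, ST_LAST, ST_ERR };

/* Distilled poll_next rule from the switch above: HSM_ST always asks
 * for another poll, HSM_ST_FIRST only when the qc runs in polling
 * mode, and the terminal states never do. */
static int poll_next_for(enum hsm state, int polling)
{
	switch (state) {
	case ST_FIRST:
		return polling;
	case ST:
		return 1;
	case ST_LAST:
	case ST_ERR:
		return 0;
	}
	return 0;
}

int main(void)
{
	printf("%d %d %d %d\n",
	       poll_next_for(ST_FIRST, 1),	/* 1 */
	       poll_next_for(ST, 0),		/* 1 */
	       poll_next_for(ST_LAST, 1),	/* 0 */
	       poll_next_for(ST_ERR, 0));	/* 0 */
	return 0;
}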
4190
4191static void ata_pio_task(void *_data)
3869{ 4192{
3870 DPRINTK("ENTER\n"); 4193 struct ata_queued_cmd *qc = _data;
4194 struct ata_port *ap = qc->ap;
4195 u8 status;
4196 int poll_next;
3871 4197
3872 ata_qc_timeout(ata_qc_from_tag(ap, ap->active_tag)); 4198fsm_start:
4199 WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
3873 4200
3874 DPRINTK("EXIT\n"); 4201 /*
4202 * This is purely heuristic. This is a fast path.
4203 * Sometimes when we enter, BSY will be cleared in
4204 * a chk-status or two. If not, the drive is probably seeking
4205 * or something. Snooze for a couple msecs, then
4206 * chk-status again. If still busy, queue delayed work.
4207 */
4208 status = ata_busy_wait(ap, ATA_BUSY, 5);
4209 if (status & ATA_BUSY) {
4210 msleep(2);
4211 status = ata_busy_wait(ap, ATA_BUSY, 10);
4212 if (status & ATA_BUSY) {
4213 ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
4214 return;
4215 }
4216 }
4217
4218 /* move the HSM */
4219 poll_next = ata_hsm_move(ap, qc, status, 1);
4220
4221 /* another command or interrupt handler
4222 * may be running at this point.
4223 */
4224 if (poll_next)
4225 goto fsm_start;
3875} 4226}
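
The busy-wait heuristic above, pulled out on its own: check once, snooze about 2 ms, check again, and only then hand the work back to the queue with a delay rather than spinning. A userspace model where read_status() and requeue_delayed() are hypothetical stand-ins for ata_busy_wait() and ata_port_queue_task():

#include <stdio.h>
#include <unistd.h>

#define ATA_BUSY 0x80

/* Fake device: reports BSY for the first few polls, then ready. */
static unsigned char read_status(void)
{
	static int polls;
	return ++polls < 3 ? ATA_BUSY : 0x50;
}

static void requeue_delayed(void)
{
	printf("requeue after ATA_SHORT_PAUSE\n");
}

/* Returns 1 when the HSM should be moved now, 0 after requeueing. */
static int pace(void)
{
	unsigned char status = read_status();

	if (status & ATA_BUSY) {
		usleep(2000);			/* msleep(2) in the kernel */
		status = read_status();
		if (status & ATA_BUSY) {
			requeue_delayed();	/* don't monopolize the CPU */
			return 0;
		}
	}
	printf("status 0x%02x: move HSM\n", status);
	return 1;
}

int main(void)
{
	while (!pace())
		;				/* simulates the requeued task */
	return 0;
}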
3876 4227
3877/** 4228/**
@@ -3888,9 +4239,14 @@ static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
3888 struct ata_queued_cmd *qc = NULL; 4239 struct ata_queued_cmd *qc = NULL;
3889 unsigned int i; 4240 unsigned int i;
3890 4241
3891 for (i = 0; i < ATA_MAX_QUEUE; i++) 4242 /* no command while frozen */
3892 if (!test_and_set_bit(i, &ap->qactive)) { 4243 if (unlikely(ap->flags & ATA_FLAG_FROZEN))
3893 qc = ata_qc_from_tag(ap, i); 4244 return NULL;
4245
 4246 /* the last tag is reserved for the internal command. */
4247 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
4248 if (!test_and_set_bit(i, &ap->qc_allocated)) {
4249 qc = __ata_qc_from_tag(ap, i);
3894 break; 4250 break;
3895 } 4251 }
3896 4252
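
A minimal model of the qc_allocated bitmap may help: regular commands scan tags 0..ATA_MAX_QUEUE-2 and the last tag stays free for ata_exec_internal(). The kernel uses atomic test_and_set_bit(); plain C is shown here, so this sketch is not SMP-safe:

#include <stdio.h>

#define MAX_QUEUE 32		/* tag MAX_QUEUE-1 reserved for internal cmds */

static unsigned long qc_allocated;

static int qc_alloc_tag(void)
{
	int i;

	for (i = 0; i < MAX_QUEUE - 1; i++)
		if (!(qc_allocated & (1UL << i))) {
			qc_allocated |= 1UL << i;
			return i;
		}
	return -1;		/* all regular tags in flight */
}

static void qc_free_tag(int tag)
{
	qc_allocated &= ~(1UL << tag);
}

int main(void)
{
	int a = qc_alloc_tag(), b = qc_alloc_tag();

	qc_free_tag(a);
	printf("%d %d %d\n", a, b, qc_alloc_tag());	/* 0 1 0 */
	return 0;
}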
@@ -3902,16 +4258,15 @@ static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
3902 4258
3903/** 4259/**
3904 * ata_qc_new_init - Request an available ATA command, and initialize it 4260 * ata_qc_new_init - Request an available ATA command, and initialize it
3905 * @ap: Port associated with device @dev
3906 * @dev: Device from whom we request an available command structure 4261 * @dev: Device from whom we request an available command structure
3907 * 4262 *
3908 * LOCKING: 4263 * LOCKING:
3909 * None. 4264 * None.
3910 */ 4265 */
3911 4266
3912struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap, 4267struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
3913 struct ata_device *dev)
3914{ 4268{
4269 struct ata_port *ap = dev->ap;
3915 struct ata_queued_cmd *qc; 4270 struct ata_queued_cmd *qc;
3916 4271
3917 qc = ata_qc_new(ap); 4272 qc = ata_qc_new(ap);
@@ -3946,36 +4301,153 @@ void ata_qc_free(struct ata_queued_cmd *qc)
3946 qc->flags = 0; 4301 qc->flags = 0;
3947 tag = qc->tag; 4302 tag = qc->tag;
3948 if (likely(ata_tag_valid(tag))) { 4303 if (likely(ata_tag_valid(tag))) {
3949 if (tag == ap->active_tag)
3950 ap->active_tag = ATA_TAG_POISON;
3951 qc->tag = ATA_TAG_POISON; 4304 qc->tag = ATA_TAG_POISON;
3952 clear_bit(tag, &ap->qactive); 4305 clear_bit(tag, &ap->qc_allocated);
3953 } 4306 }
3954} 4307}
3955 4308
3956void __ata_qc_complete(struct ata_queued_cmd *qc) 4309void __ata_qc_complete(struct ata_queued_cmd *qc)
3957{ 4310{
4311 struct ata_port *ap = qc->ap;
4312
3958 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ 4313 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
3959 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE)); 4314 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
3960 4315
3961 if (likely(qc->flags & ATA_QCFLAG_DMAMAP)) 4316 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
3962 ata_sg_clean(qc); 4317 ata_sg_clean(qc);
3963 4318
4319 /* command should be marked inactive atomically with qc completion */
4320 if (qc->tf.protocol == ATA_PROT_NCQ)
4321 ap->sactive &= ~(1 << qc->tag);
4322 else
4323 ap->active_tag = ATA_TAG_POISON;
4324
3964 /* atapi: mark qc as inactive to prevent the interrupt handler 4325 /* atapi: mark qc as inactive to prevent the interrupt handler
3965 * from completing the command twice later, before the error handler 4326 * from completing the command twice later, before the error handler
3966 * is called. (when rc != 0 and atapi request sense is needed) 4327 * is called. (when rc != 0 and atapi request sense is needed)
3967 */ 4328 */
3968 qc->flags &= ~ATA_QCFLAG_ACTIVE; 4329 qc->flags &= ~ATA_QCFLAG_ACTIVE;
4330 ap->qc_active &= ~(1 << qc->tag);
3969 4331
3970 /* call completion callback */ 4332 /* call completion callback */
3971 qc->complete_fn(qc); 4333 qc->complete_fn(qc);
3972} 4334}
3973 4335
4336/**
4337 * ata_qc_complete - Complete an active ATA command
4338 * @qc: Command to complete
4340 *
4341 * Indicate to the mid and upper layers that an ATA
4342 * command has completed, with either an ok or not-ok status.
4343 *
4344 * LOCKING:
4345 * spin_lock_irqsave(host_set lock)
4346 */
4347void ata_qc_complete(struct ata_queued_cmd *qc)
4348{
4349 struct ata_port *ap = qc->ap;
4350
4351 /* XXX: New EH and old EH use different mechanisms to
4352 * synchronize EH with regular execution path.
4353 *
4354 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
4355 * Normal execution path is responsible for not accessing a
4356 * failed qc. libata core enforces the rule by returning NULL
4357 * from ata_qc_from_tag() for failed qcs.
4358 *
4359 * Old EH depends on ata_qc_complete() nullifying completion
4360 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
4361 * not synchronize with interrupt handler. Only PIO task is
4362 * taken care of.
4363 */
4364 if (ap->ops->error_handler) {
4365 WARN_ON(ap->flags & ATA_FLAG_FROZEN);
4366
4367 if (unlikely(qc->err_mask))
4368 qc->flags |= ATA_QCFLAG_FAILED;
4369
4370 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
4371 if (!ata_tag_internal(qc->tag)) {
4372 /* always fill result TF for failed qc */
4373 ap->ops->tf_read(ap, &qc->result_tf);
4374 ata_qc_schedule_eh(qc);
4375 return;
4376 }
4377 }
4378
4379 /* read result TF if requested */
4380 if (qc->flags & ATA_QCFLAG_RESULT_TF)
4381 ap->ops->tf_read(ap, &qc->result_tf);
4382
4383 __ata_qc_complete(qc);
4384 } else {
4385 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
4386 return;
4387
4388 /* read result TF if failed or requested */
4389 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
4390 ap->ops->tf_read(ap, &qc->result_tf);
4391
4392 __ata_qc_complete(qc);
4393 }
4394}
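
The new-EH routing above reduces to a small decision: any error marks the qc failed, and failed non-internal qcs are diverted to the error handler instead of completing inline. A sketch of just that decision (names illustrative):

#include <stdio.h>

enum route { ROUTE_COMPLETE, ROUTE_EH };

static enum route route_qc(unsigned int err_mask, int internal_tag)
{
	if (err_mask && !internal_tag)
		return ROUTE_EH;	/* EH reads the result TF and recovers */
	return ROUTE_COMPLETE;		/* internal cmds report errors directly */
}

int main(void)
{
	printf("%d %d %d\n",
	       route_qc(0, 0),		/* 0: normal completion */
	       route_qc(0x04, 0),	/* 1: scheduled for EH */
	       route_qc(0x04, 1));	/* 0: internal, completes with err_mask */
	return 0;
}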
4395
4396/**
4397 * ata_qc_complete_multiple - Complete multiple qcs successfully
4398 * @ap: port in question
4399 * @qc_active: new qc_active mask
4400 * @finish_qc: LLDD callback invoked before completing a qc
4401 *
 4402 * Complete in-flight commands. This function is meant to be
 4403 * called from a low-level driver's interrupt routine to complete
 4404 * requests normally. ap->qc_active and @qc_active are compared
4405 * and commands are completed accordingly.
4406 *
4407 * LOCKING:
4408 * spin_lock_irqsave(host_set lock)
4409 *
4410 * RETURNS:
4411 * Number of completed commands on success, -errno otherwise.
4412 */
4413int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
4414 void (*finish_qc)(struct ata_queued_cmd *))
4415{
4416 int nr_done = 0;
4417 u32 done_mask;
4418 int i;
4419
4420 done_mask = ap->qc_active ^ qc_active;
4421
4422 if (unlikely(done_mask & qc_active)) {
4423 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
4424 "(%08x->%08x)\n", ap->qc_active, qc_active);
4425 return -EINVAL;
4426 }
4427
4428 for (i = 0; i < ATA_MAX_QUEUE; i++) {
4429 struct ata_queued_cmd *qc;
4430
4431 if (!(done_mask & (1 << i)))
4432 continue;
4433
4434 if ((qc = ata_qc_from_tag(ap, i))) {
4435 if (finish_qc)
4436 finish_qc(qc);
4437 ata_qc_complete(qc);
4438 nr_done++;
4439 }
4440 }
4441
4442 return nr_done;
4443}
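
The mask arithmetic above is easy to check by hand. XOR of the old and new qc_active yields the tags that changed state; a bit set in done_mask and still set in the new mask would mean a command appeared from nowhere, which is the illegal transition the function rejects:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t old_active = 0x2d;	/* tags 0,2,3,5 in flight */
	uint32_t new_active = 0x09;	/* tags 0,3 still running */
	uint32_t done_mask = old_active ^ new_active;

	printf("done_mask = 0x%02x\n", done_mask);	/* 0x24: tags 2 and 5 */
	printf("illegal transition: %s\n",
	       (done_mask & new_active) ? "yes" : "no");	/* no */
	return 0;
}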
4444
3974static inline int ata_should_dma_map(struct ata_queued_cmd *qc) 4445static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
3975{ 4446{
3976 struct ata_port *ap = qc->ap; 4447 struct ata_port *ap = qc->ap;
3977 4448
3978 switch (qc->tf.protocol) { 4449 switch (qc->tf.protocol) {
4450 case ATA_PROT_NCQ:
3979 case ATA_PROT_DMA: 4451 case ATA_PROT_DMA:
3980 case ATA_PROT_ATAPI_DMA: 4452 case ATA_PROT_ATAPI_DMA:
3981 return 1; 4453 return 1;
@@ -4010,8 +4482,22 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
4010{ 4482{
4011 struct ata_port *ap = qc->ap; 4483 struct ata_port *ap = qc->ap;
4012 4484
4013 qc->ap->active_tag = qc->tag; 4485 /* Make sure only one non-NCQ command is outstanding. The
4486 * check is skipped for old EH because it reuses active qc to
4487 * request ATAPI sense.
4488 */
4489 WARN_ON(ap->ops->error_handler && ata_tag_valid(ap->active_tag));
4490
4491 if (qc->tf.protocol == ATA_PROT_NCQ) {
4492 WARN_ON(ap->sactive & (1 << qc->tag));
4493 ap->sactive |= 1 << qc->tag;
4494 } else {
4495 WARN_ON(ap->sactive);
4496 ap->active_tag = qc->tag;
4497 }
4498
4014 qc->flags |= ATA_QCFLAG_ACTIVE; 4499 qc->flags |= ATA_QCFLAG_ACTIVE;
4500 ap->qc_active |= 1 << qc->tag;
4015 4501
4016 if (ata_should_dma_map(qc)) { 4502 if (ata_should_dma_map(qc)) {
4017 if (qc->flags & ATA_QCFLAG_SG) { 4503 if (qc->flags & ATA_QCFLAG_SG) {
@@ -4061,43 +4547,105 @@ unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
4061{ 4547{
4062 struct ata_port *ap = qc->ap; 4548 struct ata_port *ap = qc->ap;
4063 4549
4550 /* Use polling pio if the LLD doesn't handle
 4551 * interrupt-driven PIO and the ATAPI CDB interrupt.
4552 */
4553 if (ap->flags & ATA_FLAG_PIO_POLLING) {
4554 switch (qc->tf.protocol) {
4555 case ATA_PROT_PIO:
4556 case ATA_PROT_ATAPI:
4557 case ATA_PROT_ATAPI_NODATA:
4558 qc->tf.flags |= ATA_TFLAG_POLLING;
4559 break;
4560 case ATA_PROT_ATAPI_DMA:
4561 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
4562 /* see ata_check_atapi_dma() */
4563 BUG();
4564 break;
4565 default:
4566 break;
4567 }
4568 }
4569
4570 /* select the device */
4064 ata_dev_select(ap, qc->dev->devno, 1, 0); 4571 ata_dev_select(ap, qc->dev->devno, 1, 0);
4065 4572
4573 /* start the command */
4066 switch (qc->tf.protocol) { 4574 switch (qc->tf.protocol) {
4067 case ATA_PROT_NODATA: 4575 case ATA_PROT_NODATA:
4576 if (qc->tf.flags & ATA_TFLAG_POLLING)
4577 ata_qc_set_polling(qc);
4578
4068 ata_tf_to_host(ap, &qc->tf); 4579 ata_tf_to_host(ap, &qc->tf);
4580 ap->hsm_task_state = HSM_ST_LAST;
4581
4582 if (qc->tf.flags & ATA_TFLAG_POLLING)
4583 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4584
4069 break; 4585 break;
4070 4586
4071 case ATA_PROT_DMA: 4587 case ATA_PROT_DMA:
4588 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
4589
4072 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */ 4590 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4073 ap->ops->bmdma_setup(qc); /* set up bmdma */ 4591 ap->ops->bmdma_setup(qc); /* set up bmdma */
4074 ap->ops->bmdma_start(qc); /* initiate bmdma */ 4592 ap->ops->bmdma_start(qc); /* initiate bmdma */
4593 ap->hsm_task_state = HSM_ST_LAST;
4075 break; 4594 break;
4076 4595
4077 case ATA_PROT_PIO: /* load tf registers, initiate polling pio */ 4596 case ATA_PROT_PIO:
4078 ata_qc_set_polling(qc); 4597 if (qc->tf.flags & ATA_TFLAG_POLLING)
4079 ata_tf_to_host(ap, &qc->tf); 4598 ata_qc_set_polling(qc);
4080 ap->hsm_task_state = HSM_ST;
4081 ata_port_queue_task(ap, ata_pio_task, ap, 0);
4082 break;
4083 4599
4084 case ATA_PROT_ATAPI:
4085 ata_qc_set_polling(qc);
4086 ata_tf_to_host(ap, &qc->tf); 4600 ata_tf_to_host(ap, &qc->tf);
4087 ata_port_queue_task(ap, atapi_packet_task, ap, 0); 4601
4602 if (qc->tf.flags & ATA_TFLAG_WRITE) {
4603 /* PIO data out protocol */
4604 ap->hsm_task_state = HSM_ST_FIRST;
4605 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4606
4607 /* always send first data block using
4608 * the ata_pio_task() codepath.
4609 */
4610 } else {
4611 /* PIO data in protocol */
4612 ap->hsm_task_state = HSM_ST;
4613
4614 if (qc->tf.flags & ATA_TFLAG_POLLING)
4615 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4616
4617 /* if polling, ata_pio_task() handles the rest.
4618 * otherwise, interrupt handler takes over from here.
4619 */
4620 }
4621
4088 break; 4622 break;
4089 4623
4624 case ATA_PROT_ATAPI:
4090 case ATA_PROT_ATAPI_NODATA: 4625 case ATA_PROT_ATAPI_NODATA:
4091 ap->flags |= ATA_FLAG_NOINTR; 4626 if (qc->tf.flags & ATA_TFLAG_POLLING)
4627 ata_qc_set_polling(qc);
4628
4092 ata_tf_to_host(ap, &qc->tf); 4629 ata_tf_to_host(ap, &qc->tf);
4093 ata_port_queue_task(ap, atapi_packet_task, ap, 0); 4630
4631 ap->hsm_task_state = HSM_ST_FIRST;
4632
4633 /* send cdb by polling if no cdb interrupt */
4634 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
4635 (qc->tf.flags & ATA_TFLAG_POLLING))
4636 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4094 break; 4637 break;
4095 4638
4096 case ATA_PROT_ATAPI_DMA: 4639 case ATA_PROT_ATAPI_DMA:
4097 ap->flags |= ATA_FLAG_NOINTR; 4640 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
4641
4098 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */ 4642 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4099 ap->ops->bmdma_setup(qc); /* set up bmdma */ 4643 ap->ops->bmdma_setup(qc); /* set up bmdma */
4100 ata_port_queue_task(ap, atapi_packet_task, ap, 0); 4644 ap->hsm_task_state = HSM_ST_FIRST;
4645
4646 /* send cdb by polling if no cdb interrupt */
4647 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4648 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4101 break; 4649 break;
4102 4650
4103 default: 4651 default:
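
Collapsing the dispatch above into a single predicate: who drives the first HSM step, the workqueue or the interrupt handler? A sketch, under the stated simplification that PIO reads and writes are separate enum values (libata keys off ATA_TFLAG_WRITE instead):

#include <stdio.h>

enum proto { NODATA, DMA, PIO_RD, PIO_WR, ATAPI, ATAPI_NODATA, ATAPI_DMA };

/* 1 = ata_pio_task() is queued right after issue;
 * 0 = the interrupt handler takes over from here. */
static int queued_after_issue(enum proto p, int polling, int cdb_intr)
{
	switch (p) {
	case NODATA:
		return polling;
	case DMA:
		return 0;		/* polling DMA is rejected (WARN_ON above) */
	case PIO_WR:
		return 1;		/* first data block always from the wq */
	case PIO_RD:
		return polling;
	case ATAPI:
	case ATAPI_NODATA:
		return !cdb_intr || polling;
	case ATAPI_DMA:
		return !cdb_intr;	/* CDB pushed by polling if needed */
	}
	return 0;
}

int main(void)
{
	printf("%d %d %d\n",
	       queued_after_issue(PIO_WR, 0, 0),	/* 1 */
	       queued_after_issue(ATAPI, 0, 1),		/* 0 */
	       queued_after_issue(ATAPI_DMA, 0, 0));	/* 1 */
	return 0;
}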
@@ -4127,52 +4675,66 @@ unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
4127inline unsigned int ata_host_intr (struct ata_port *ap, 4675inline unsigned int ata_host_intr (struct ata_port *ap,
4128 struct ata_queued_cmd *qc) 4676 struct ata_queued_cmd *qc)
4129{ 4677{
4130 u8 status, host_stat; 4678 u8 status, host_stat = 0;
4131 4679
4132 switch (qc->tf.protocol) { 4680 VPRINTK("ata%u: protocol %d task_state %d\n",
4133 4681 ap->id, qc->tf.protocol, ap->hsm_task_state);
4134 case ATA_PROT_DMA:
4135 case ATA_PROT_ATAPI_DMA:
4136 case ATA_PROT_ATAPI:
4137 /* check status of DMA engine */
4138 host_stat = ap->ops->bmdma_status(ap);
4139 VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
4140
4141 /* if it's not our irq... */
4142 if (!(host_stat & ATA_DMA_INTR))
4143 goto idle_irq;
4144
4145 /* before we do anything else, clear DMA-Start bit */
4146 ap->ops->bmdma_stop(qc);
4147 4682
4148 /* fall through */ 4683 /* Check whether we are expecting interrupt in this state */
4149 4684 switch (ap->hsm_task_state) {
4150 case ATA_PROT_ATAPI_NODATA: 4685 case HSM_ST_FIRST:
4151 case ATA_PROT_NODATA: 4686 /* Some pre-ATAPI-4 devices assert INTRQ
4152 /* check altstatus */ 4687 * at this state when ready to receive CDB.
4153 status = ata_altstatus(ap); 4688 */
4154 if (status & ATA_BUSY)
4155 goto idle_irq;
4156 4689
 4157 /* check main status, clearing INTRQ */ 4690 /* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
4158 status = ata_chk_status(ap); 4691 * The flag was turned on only for atapi devices.
4159 if (unlikely(status & ATA_BUSY)) 4692 * No need to check is_atapi_taskfile(&qc->tf) again.
4693 */
4694 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4160 goto idle_irq; 4695 goto idle_irq;
4161 DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
4162 ap->id, qc->tf.protocol, status);
4163
4164 /* ack bmdma irq events */
4165 ap->ops->irq_clear(ap);
4166
4167 /* complete taskfile transaction */
4168 qc->err_mask |= ac_err_mask(status);
4169 ata_qc_complete(qc);
4170 break; 4696 break;
4171 4697 case HSM_ST_LAST:
4698 if (qc->tf.protocol == ATA_PROT_DMA ||
4699 qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
4700 /* check status of DMA engine */
4701 host_stat = ap->ops->bmdma_status(ap);
4702 VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
4703
4704 /* if it's not our irq... */
4705 if (!(host_stat & ATA_DMA_INTR))
4706 goto idle_irq;
4707
4708 /* before we do anything else, clear DMA-Start bit */
4709 ap->ops->bmdma_stop(qc);
4710
4711 if (unlikely(host_stat & ATA_DMA_ERR)) {
4712 /* error when transfering data to/from memory */
4713 qc->err_mask |= AC_ERR_HOST_BUS;
4714 ap->hsm_task_state = HSM_ST_ERR;
4715 }
4716 }
4717 break;
4718 case HSM_ST:
4719 break;
4172 default: 4720 default:
4173 goto idle_irq; 4721 goto idle_irq;
4174 } 4722 }
4175 4723
4724 /* check altstatus */
4725 status = ata_altstatus(ap);
4726 if (status & ATA_BUSY)
4727 goto idle_irq;
4728
4729 /* check main status, clearing INTRQ */
4730 status = ata_chk_status(ap);
4731 if (unlikely(status & ATA_BUSY))
4732 goto idle_irq;
4733
4734 /* ack bmdma irq events */
4735 ap->ops->irq_clear(ap);
4736
4737 ata_hsm_move(ap, qc, status, 0);
4176 return 1; /* irq handled */ 4738 return 1; /* irq handled */
4177 4739
4178idle_irq: 4740idle_irq:
@@ -4181,7 +4743,7 @@ idle_irq:
4181#ifdef ATA_IRQ_TRAP 4743#ifdef ATA_IRQ_TRAP
4182 if ((ap->stats.idle_irq % 1000) == 0) { 4744 if ((ap->stats.idle_irq % 1000) == 0) {
4183 ata_irq_ack(ap, 0); /* debug trap */ 4745 ata_irq_ack(ap, 0); /* debug trap */
4184 printk(KERN_WARNING "ata%d: irq trap\n", ap->id); 4746 ata_port_printk(ap, KERN_WARNING, "irq trap\n");
4185 return 1; 4747 return 1;
4186 } 4748 }
4187#endif 4749#endif
@@ -4219,11 +4781,11 @@ irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
4219 4781
4220 ap = host_set->ports[i]; 4782 ap = host_set->ports[i];
4221 if (ap && 4783 if (ap &&
4222 !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) { 4784 !(ap->flags & ATA_FLAG_DISABLED)) {
4223 struct ata_queued_cmd *qc; 4785 struct ata_queued_cmd *qc;
4224 4786
4225 qc = ata_qc_from_tag(ap, ap->active_tag); 4787 qc = ata_qc_from_tag(ap, ap->active_tag);
4226 if (qc && (!(qc->tf.ctl & ATA_NIEN)) && 4788 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
4227 (qc->flags & ATA_QCFLAG_ACTIVE)) 4789 (qc->flags & ATA_QCFLAG_ACTIVE))
4228 handled |= ata_host_intr(ap, qc); 4790 handled |= ata_host_intr(ap, qc);
4229 } 4791 }
@@ -4234,32 +4796,168 @@ irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
4234 return IRQ_RETVAL(handled); 4796 return IRQ_RETVAL(handled);
4235} 4797}
4236 4798
4799/**
4800 * sata_scr_valid - test whether SCRs are accessible
4801 * @ap: ATA port to test SCR accessibility for
4802 *
4803 * Test whether SCRs are accessible for @ap.
4804 *
4805 * LOCKING:
4806 * None.
4807 *
4808 * RETURNS:
4809 * 1 if SCRs are accessible, 0 otherwise.
4810 */
4811int sata_scr_valid(struct ata_port *ap)
4812{
4813 return ap->cbl == ATA_CBL_SATA && ap->ops->scr_read;
4814}
4815
4816/**
4817 * sata_scr_read - read SCR register of the specified port
4818 * @ap: ATA port to read SCR for
4819 * @reg: SCR to read
4820 * @val: Place to store read value
4821 *
4822 * Read SCR register @reg of @ap into *@val. This function is
4823 * guaranteed to succeed if the cable type of the port is SATA
4824 * and the port implements ->scr_read.
4825 *
4826 * LOCKING:
4827 * None.
4828 *
4829 * RETURNS:
4830 * 0 on success, negative errno on failure.
4831 */
4832int sata_scr_read(struct ata_port *ap, int reg, u32 *val)
4833{
4834 if (sata_scr_valid(ap)) {
4835 *val = ap->ops->scr_read(ap, reg);
4836 return 0;
4837 }
4838 return -EOPNOTSUPP;
4839}
4840
4841/**
4842 * sata_scr_write - write SCR register of the specified port
4843 * @ap: ATA port to write SCR for
4844 * @reg: SCR to write
4845 * @val: value to write
4846 *
4847 * Write @val to SCR register @reg of @ap. This function is
4848 * guaranteed to succeed if the cable type of the port is SATA
4849 * and the port implements ->scr_read.
4850 *
4851 * LOCKING:
4852 * None.
4853 *
4854 * RETURNS:
4855 * 0 on success, negative errno on failure.
4856 */
4857int sata_scr_write(struct ata_port *ap, int reg, u32 val)
4858{
4859 if (sata_scr_valid(ap)) {
4860 ap->ops->scr_write(ap, reg, val);
4861 return 0;
4862 }
4863 return -EOPNOTSUPP;
4864}
4865
4866/**
4867 * sata_scr_write_flush - write SCR register of the specified port and flush
4868 * @ap: ATA port to write SCR for
4869 * @reg: SCR to write
4870 * @val: value to write
4871 *
4872 * This function is identical to sata_scr_write() except that this
 4873 * function performs a flush after writing to the register.
4874 *
4875 * LOCKING:
4876 * None.
4877 *
4878 * RETURNS:
4879 * 0 on success, negative errno on failure.
4880 */
4881int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val)
4882{
4883 if (sata_scr_valid(ap)) {
4884 ap->ops->scr_write(ap, reg, val);
4885 ap->ops->scr_read(ap, reg);
4886 return 0;
4887 }
4888 return -EOPNOTSUPP;
4889}
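
The four accessors share one shape: validate first, then touch the ops table, so PATA callers get -EOPNOTSUPP instead of a NULL method call. A self-contained userspace model of the pattern (the ops structure here is illustrative, not libata's):

#include <stdio.h>
#include <errno.h>
#include <stdint.h>

struct port_ops {
	uint32_t (*scr_read)(int reg);
};

struct port {
	int is_sata;
	const struct port_ops *ops;
};

static uint32_t fake_scr_read(int reg)
{
	(void)reg;
	return 0x113;		/* DET=3: device present, phy up */
}

static int scr_valid(const struct port *p)
{
	return p->is_sata && p->ops && p->ops->scr_read;
}

static int scr_read(const struct port *p, int reg, uint32_t *val)
{
	if (!scr_valid(p))
		return -EOPNOTSUPP;
	*val = p->ops->scr_read(reg);
	return 0;
}

int main(void)
{
	static const struct port_ops ops = { fake_scr_read };
	struct port sata = { 1, &ops }, pata = { 0, NULL };
	uint32_t v = 0;

	printf("sata: %d (0x%x)\n", scr_read(&sata, 0, &v), v);
	printf("pata: %d\n", scr_read(&pata, 0, &v));	/* -EOPNOTSUPP */
	return 0;
}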
4890
4891/**
4892 * ata_port_online - test whether the given port is online
4893 * @ap: ATA port to test
4894 *
4895 * Test whether @ap is online. Note that this function returns 0
4896 * if online status of @ap cannot be obtained, so
4897 * ata_port_online(ap) != !ata_port_offline(ap).
4898 *
4899 * LOCKING:
4900 * None.
4901 *
4902 * RETURNS:
4903 * 1 if the port online status is available and online.
4904 */
4905int ata_port_online(struct ata_port *ap)
4906{
4907 u32 sstatus;
4908
4909 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) == 0x3)
4910 return 1;
4911 return 0;
4912}
4913
4914/**
4915 * ata_port_offline - test whether the given port is offline
4916 * @ap: ATA port to test
4917 *
4918 * Test whether @ap is offline. Note that this function returns
4919 * 0 if offline status of @ap cannot be obtained, so
4920 * ata_port_online(ap) != !ata_port_offline(ap).
4921 *
4922 * LOCKING:
4923 * None.
4924 *
4925 * RETURNS:
4926 * 1 if the port offline status is available and offline.
4927 */
4928int ata_port_offline(struct ata_port *ap)
4929{
4930 u32 sstatus;
4931
4932 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) != 0x3)
4933 return 1;
4934 return 0;
4935}
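
The doubled predicates are deliberate: when SStatus cannot be read, both return 0, so ata_port_online() is not the negation of ata_port_offline(). A small demonstration, modeling an unreadable SCR as a negative value:

#include <stdio.h>

/* sstatus < 0 models a port whose SCRs cannot be read */
static int port_online(int sstatus)
{
	return sstatus >= 0 && (sstatus & 0xf) == 0x3;
}

static int port_offline(int sstatus)
{
	return sstatus >= 0 && (sstatus & 0xf) != 0x3;
}

int main(void)
{
	int cases[] = { 0x113, 0x000, -1 };
	int i;

	for (i = 0; i < 3; i++)
		printf("sstatus=%d online=%d offline=%d\n", cases[i],
		       port_online(cases[i]), port_offline(cases[i]));
	/* the -1 row prints 0 for both, hence online(ap) != !offline(ap) */
	return 0;
}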
4237 4936
4238/* 4937/*
4239 * Execute a 'simple' command, that only consists of the opcode 'cmd' itself, 4938 * Execute a 'simple' command, that only consists of the opcode 'cmd' itself,
4240 * without filling any other registers 4939 * without filling any other registers
4241 */ 4940 */
4242static int ata_do_simple_cmd(struct ata_port *ap, struct ata_device *dev, 4941static int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
4243 u8 cmd)
4244{ 4942{
4245 struct ata_taskfile tf; 4943 struct ata_taskfile tf;
4246 int err; 4944 int err;
4247 4945
4248 ata_tf_init(ap, &tf, dev->devno); 4946 ata_tf_init(dev, &tf);
4249 4947
4250 tf.command = cmd; 4948 tf.command = cmd;
4251 tf.flags |= ATA_TFLAG_DEVICE; 4949 tf.flags |= ATA_TFLAG_DEVICE;
4252 tf.protocol = ATA_PROT_NODATA; 4950 tf.protocol = ATA_PROT_NODATA;
4253 4951
4254 err = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0); 4952 err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
4255 if (err) 4953 if (err)
4256 printk(KERN_ERR "%s: ata command failed: %d\n", 4954 ata_dev_printk(dev, KERN_ERR, "%s: ata command failed: %d\n",
4257 __FUNCTION__, err); 4955 __FUNCTION__, err);
4258 4956
4259 return err; 4957 return err;
4260} 4958}
4261 4959
4262static int ata_flush_cache(struct ata_port *ap, struct ata_device *dev) 4960static int ata_flush_cache(struct ata_device *dev)
4263{ 4961{
4264 u8 cmd; 4962 u8 cmd;
4265 4963
@@ -4271,22 +4969,21 @@ static int ata_flush_cache(struct ata_port *ap, struct ata_device *dev)
4271 else 4969 else
4272 cmd = ATA_CMD_FLUSH; 4970 cmd = ATA_CMD_FLUSH;
4273 4971
4274 return ata_do_simple_cmd(ap, dev, cmd); 4972 return ata_do_simple_cmd(dev, cmd);
4275} 4973}
4276 4974
4277static int ata_standby_drive(struct ata_port *ap, struct ata_device *dev) 4975static int ata_standby_drive(struct ata_device *dev)
4278{ 4976{
4279 return ata_do_simple_cmd(ap, dev, ATA_CMD_STANDBYNOW1); 4977 return ata_do_simple_cmd(dev, ATA_CMD_STANDBYNOW1);
4280} 4978}
4281 4979
4282static int ata_start_drive(struct ata_port *ap, struct ata_device *dev) 4980static int ata_start_drive(struct ata_device *dev)
4283{ 4981{
4284 return ata_do_simple_cmd(ap, dev, ATA_CMD_IDLEIMMEDIATE); 4982 return ata_do_simple_cmd(dev, ATA_CMD_IDLEIMMEDIATE);
4285} 4983}
4286 4984
4287/** 4985/**
 4288 * ata_device_resume - wake up a previously suspended device 4986 * ata_device_resume - wake up a previously suspended device
4289 * @ap: port the device is connected to
4290 * @dev: the device to resume 4987 * @dev: the device to resume
4291 * 4988 *
4292 * Kick the drive back into action, by sending it an idle immediate 4989 * Kick the drive back into action, by sending it an idle immediate
@@ -4294,38 +4991,43 @@ static int ata_start_drive(struct ata_port *ap, struct ata_device *dev)
4294 * and host. 4991 * and host.
4295 * 4992 *
4296 */ 4993 */
4297int ata_device_resume(struct ata_port *ap, struct ata_device *dev) 4994int ata_device_resume(struct ata_device *dev)
4298{ 4995{
4996 struct ata_port *ap = dev->ap;
4997
4299 if (ap->flags & ATA_FLAG_SUSPENDED) { 4998 if (ap->flags & ATA_FLAG_SUSPENDED) {
4999 struct ata_device *failed_dev;
4300 ap->flags &= ~ATA_FLAG_SUSPENDED; 5000 ap->flags &= ~ATA_FLAG_SUSPENDED;
4301 ata_set_mode(ap); 5001 while (ata_set_mode(ap, &failed_dev))
5002 ata_dev_disable(failed_dev);
4302 } 5003 }
4303 if (!ata_dev_present(dev)) 5004 if (!ata_dev_enabled(dev))
4304 return 0; 5005 return 0;
4305 if (dev->class == ATA_DEV_ATA) 5006 if (dev->class == ATA_DEV_ATA)
4306 ata_start_drive(ap, dev); 5007 ata_start_drive(dev);
4307 5008
4308 return 0; 5009 return 0;
4309} 5010}
4310 5011
4311/** 5012/**
4312 * ata_device_suspend - prepare a device for suspend 5013 * ata_device_suspend - prepare a device for suspend
4313 * @ap: port the device is connected to
4314 * @dev: the device to suspend 5014 * @dev: the device to suspend
4315 * @state: target power management state 5015 * @state: target power management state
4316 * 5016 *
4317 * Flush the cache on the drive, if appropriate, then issue a 5017 * Flush the cache on the drive, if appropriate, then issue a
4318 * standbynow command. 5018 * standbynow command.
4319 */ 5019 */
4320int ata_device_suspend(struct ata_port *ap, struct ata_device *dev, pm_message_t state) 5020int ata_device_suspend(struct ata_device *dev, pm_message_t state)
4321{ 5021{
4322 if (!ata_dev_present(dev)) 5022 struct ata_port *ap = dev->ap;
5023
5024 if (!ata_dev_enabled(dev))
4323 return 0; 5025 return 0;
4324 if (dev->class == ATA_DEV_ATA) 5026 if (dev->class == ATA_DEV_ATA)
4325 ata_flush_cache(ap, dev); 5027 ata_flush_cache(dev);
4326 5028
4327 if (state.event != PM_EVENT_FREEZE) 5029 if (state.event != PM_EVENT_FREEZE)
4328 ata_standby_drive(ap, dev); 5030 ata_standby_drive(dev);
4329 ap->flags |= ATA_FLAG_SUSPENDED; 5031 ap->flags |= ATA_FLAG_SUSPENDED;
4330 return 0; 5032 return 0;
4331} 5033}
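/*
 * [Editor's sketch, not part of the patch] Expected pairing of the two
 * helpers above across a sleep transition.  PMSG_SUSPEND comes from
 * <linux/pm.h>; the wrapper names are hypothetical.
 */
static int my_suspend_one(struct ata_device *dev)
{
	/* flushes the write cache, then spins the drive down */
	return ata_device_suspend(dev, PMSG_SUSPEND);
}

static int my_resume_one(struct ata_device *dev)
{
	/* reconfigures transfer modes and issues IDLE IMMEDIATE */
	return ata_device_resume(dev);
}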
@@ -4439,7 +5141,7 @@ static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
4439 host->unique_id = ata_unique_id++; 5141 host->unique_id = ata_unique_id++;
4440 host->max_cmd_len = 12; 5142 host->max_cmd_len = 12;
4441 5143
4442 ap->flags = ATA_FLAG_PORT_DISABLED; 5144 ap->flags = ATA_FLAG_DISABLED;
4443 ap->id = host->unique_id; 5145 ap->id = host->unique_id;
4444 ap->host = host; 5146 ap->host = host;
4445 ap->ctl = ATA_DEVCTL_OBS; 5147 ap->ctl = ATA_DEVCTL_OBS;
@@ -4453,15 +5155,21 @@ static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
4453 ap->udma_mask = ent->udma_mask; 5155 ap->udma_mask = ent->udma_mask;
4454 ap->flags |= ent->host_flags; 5156 ap->flags |= ent->host_flags;
4455 ap->ops = ent->port_ops; 5157 ap->ops = ent->port_ops;
4456 ap->cbl = ATA_CBL_NONE; 5158 ap->sata_spd_limit = UINT_MAX;
4457 ap->active_tag = ATA_TAG_POISON; 5159 ap->active_tag = ATA_TAG_POISON;
4458 ap->last_ctl = 0xFF; 5160 ap->last_ctl = 0xFF;
4459 5161
4460 INIT_WORK(&ap->port_task, NULL, NULL); 5162 INIT_WORK(&ap->port_task, NULL, NULL);
4461 INIT_LIST_HEAD(&ap->eh_done_q); 5163 INIT_LIST_HEAD(&ap->eh_done_q);
4462 5164
5165 /* set cable type */
5166 ap->cbl = ATA_CBL_NONE;
5167 if (ap->flags & ATA_FLAG_SATA)
5168 ap->cbl = ATA_CBL_SATA;
5169
4463 for (i = 0; i < ATA_MAX_DEVICES; i++) { 5170 for (i = 0; i < ATA_MAX_DEVICES; i++) {
4464 struct ata_device *dev = &ap->device[i]; 5171 struct ata_device *dev = &ap->device[i];
5172 dev->ap = ap;
4465 dev->devno = i; 5173 dev->devno = i;
4466 dev->pio_mask = UINT_MAX; 5174 dev->pio_mask = UINT_MAX;
4467 dev->mwdma_mask = UINT_MAX; 5175 dev->mwdma_mask = UINT_MAX;
@@ -4514,7 +5222,7 @@ static struct ata_port * ata_host_add(const struct ata_probe_ent *ent,
4514 5222
4515 host->transportt = &ata_scsi_transport_template; 5223 host->transportt = &ata_scsi_transport_template;
4516 5224
4517 ap = (struct ata_port *) &host->hostdata[0]; 5225 ap = ata_shost_to_port(host);
4518 5226
4519 ata_host_init(ap, host, host_set, ent, port_no); 5227 ata_host_init(ap, host, host_set, ent, port_no);
4520 5228
@@ -4585,18 +5293,18 @@ int ata_device_add(const struct ata_probe_ent *ent)
4585 (ap->pio_mask << ATA_SHIFT_PIO); 5293 (ap->pio_mask << ATA_SHIFT_PIO);
4586 5294
4587 /* print per-port info to dmesg */ 5295 /* print per-port info to dmesg */
4588 printk(KERN_INFO "ata%u: %cATA max %s cmd 0x%lX ctl 0x%lX " 5296 ata_port_printk(ap, KERN_INFO, "%cATA max %s cmd 0x%lX "
4589 "bmdma 0x%lX irq %lu\n", 5297 "ctl 0x%lX bmdma 0x%lX irq %lu\n",
4590 ap->id, 5298 ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
4591 ap->flags & ATA_FLAG_SATA ? 'S' : 'P', 5299 ata_mode_string(xfer_mode_mask),
4592 ata_mode_string(xfer_mode_mask), 5300 ap->ioaddr.cmd_addr,
4593 ap->ioaddr.cmd_addr, 5301 ap->ioaddr.ctl_addr,
4594 ap->ioaddr.ctl_addr, 5302 ap->ioaddr.bmdma_addr,
4595 ap->ioaddr.bmdma_addr, 5303 ent->irq);
4596 ent->irq);
4597 5304
4598 ata_chk_status(ap); 5305 ata_chk_status(ap);
4599 host_set->ops->irq_clear(ap); 5306 host_set->ops->irq_clear(ap);
5307 ata_eh_freeze_port(ap); /* freeze port before requesting IRQ */
4600 count++; 5308 count++;
4601 } 5309 }
4602 5310
@@ -4631,8 +5339,7 @@ int ata_device_add(const struct ata_probe_ent *ent)
4631 5339
4632 rc = scsi_add_host(ap->host, dev); 5340 rc = scsi_add_host(ap->host, dev);
4633 if (rc) { 5341 if (rc) {
4634 printk(KERN_ERR "ata%u: scsi_add_host failed\n", 5342 ata_port_printk(ap, KERN_ERR, "scsi_add_host failed\n");
4635 ap->id);
4636 /* FIXME: do something useful here */ 5343 /* FIXME: do something useful here */
4637 /* FIXME: handle unconditional calls to 5344 /* FIXME: handle unconditional calls to
4638 * scsi_scan_host and ata_host_remove, below, 5345 * scsi_scan_host and ata_host_remove, below,
@@ -4727,15 +5434,12 @@ void ata_host_set_remove(struct ata_host_set *host_set)
4727 5434
4728int ata_scsi_release(struct Scsi_Host *host) 5435int ata_scsi_release(struct Scsi_Host *host)
4729{ 5436{
4730 struct ata_port *ap = (struct ata_port *) &host->hostdata[0]; 5437 struct ata_port *ap = ata_shost_to_port(host);
4731 int i;
4732 5438
4733 DPRINTK("ENTER\n"); 5439 DPRINTK("ENTER\n");
4734 5440
4735 ap->ops->port_disable(ap); 5441 ap->ops->port_disable(ap);
4736 ata_host_remove(ap, 0); 5442 ata_host_remove(ap, 0);
4737 for (i = 0; i < ATA_MAX_DEVICES; i++)
4738 kfree(ap->device[i].id);
4739 5443
4740 DPRINTK("EXIT\n"); 5444 DPRINTK("EXIT\n");
4741 return 1; 5445 return 1;
@@ -4894,6 +5598,52 @@ int ata_ratelimit(void)
4894 return rc; 5598 return rc;
4895} 5599}
4896 5600
5601/**
5602 * ata_wait_register - wait until register value changes
5603 * @reg: IO-mapped register
5604 * @mask: Mask to apply to read register value
5605 * @val: Wait condition
5606 * @interval_msec: polling interval in milliseconds
5607 * @timeout_msec: timeout in milliseconds
5608 *
5609 * Waiting for some bits of a register to change is a common
5610 * operation for ATA controllers. This function reads the 32-bit LE
5611 * IO-mapped register @reg and tests for the following condition.
5612 *
5613 * (*@reg & @mask) != @val
5614 *
5615 * If the condition is met, it returns; otherwise, the process is
5616 * repeated after @interval_msec until timeout.
5617 *
5618 * LOCKING:
5619 * Kernel thread context (may sleep)
5620 *
5621 * RETURNS:
5622 * The final register value.
5623 */
5624u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
5625 unsigned long interval_msec,
5626 unsigned long timeout_msec)
5627{
5628 unsigned long timeout;
5629 u32 tmp;
5630
5631 tmp = ioread32(reg);
5632
5633 /* Calculate timeout _after_ the first read to make sure
5634 * preceding writes reach the controller before starting to
5635 * eat away the timeout.
5636 */
5637 timeout = jiffies + (timeout_msec * HZ) / 1000;
5638
5639 while ((tmp & mask) == val && time_before(jiffies, timeout)) {
5640 msleep(interval_msec);
5641 tmp = ioread32(reg);
5642 }
5643
5644 return tmp;
5645}
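/*
 * [Editor's sketch, not part of the patch] Typical use of
 * ata_wait_register(): poll until a ready bit appears, checking every
 * 10ms and giving up after 500ms.  MY_PORT_STAT and MY_STAT_RDY are
 * assumed, controller-specific definitions.
 */
static int my_wait_ctrl_ready(void __iomem *mmio)
{
	u32 tmp;

	/* wait while (reg & MY_STAT_RDY) == 0, i.e. until RDY sets */
	tmp = ata_wait_register(mmio + MY_PORT_STAT, MY_STAT_RDY, 0,
				10, 500);
	return (tmp & MY_STAT_RDY) ? 0 : -EBUSY;
}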
5646
4897/* 5647/*
4898 * libata is essentially a library of internal helper functions for 5648 * libata is essentially a library of internal helper functions for
4899 * low-level ATA host controller drivers. As such, the API/ABI is 5649 * low-level ATA host controller drivers. As such, the API/ABI is
@@ -4907,9 +5657,9 @@ EXPORT_SYMBOL_GPL(ata_device_add);
4907EXPORT_SYMBOL_GPL(ata_host_set_remove); 5657EXPORT_SYMBOL_GPL(ata_host_set_remove);
4908EXPORT_SYMBOL_GPL(ata_sg_init); 5658EXPORT_SYMBOL_GPL(ata_sg_init);
4909EXPORT_SYMBOL_GPL(ata_sg_init_one); 5659EXPORT_SYMBOL_GPL(ata_sg_init_one);
4910EXPORT_SYMBOL_GPL(__ata_qc_complete); 5660EXPORT_SYMBOL_GPL(ata_qc_complete);
5661EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
4911EXPORT_SYMBOL_GPL(ata_qc_issue_prot); 5662EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
4912EXPORT_SYMBOL_GPL(ata_eng_timeout);
4913EXPORT_SYMBOL_GPL(ata_tf_load); 5663EXPORT_SYMBOL_GPL(ata_tf_load);
4914EXPORT_SYMBOL_GPL(ata_tf_read); 5664EXPORT_SYMBOL_GPL(ata_tf_read);
4915EXPORT_SYMBOL_GPL(ata_noop_dev_select); 5665EXPORT_SYMBOL_GPL(ata_noop_dev_select);
@@ -4923,6 +5673,8 @@ EXPORT_SYMBOL_GPL(ata_port_start);
4923EXPORT_SYMBOL_GPL(ata_port_stop); 5673EXPORT_SYMBOL_GPL(ata_port_stop);
4924EXPORT_SYMBOL_GPL(ata_host_stop); 5674EXPORT_SYMBOL_GPL(ata_host_stop);
4925EXPORT_SYMBOL_GPL(ata_interrupt); 5675EXPORT_SYMBOL_GPL(ata_interrupt);
5676EXPORT_SYMBOL_GPL(ata_mmio_data_xfer);
5677EXPORT_SYMBOL_GPL(ata_pio_data_xfer);
4926EXPORT_SYMBOL_GPL(ata_qc_prep); 5678EXPORT_SYMBOL_GPL(ata_qc_prep);
4927EXPORT_SYMBOL_GPL(ata_noop_qc_prep); 5679EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
4928EXPORT_SYMBOL_GPL(ata_bmdma_setup); 5680EXPORT_SYMBOL_GPL(ata_bmdma_setup);
@@ -4930,7 +5682,13 @@ EXPORT_SYMBOL_GPL(ata_bmdma_start);
4930EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear); 5682EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
4931EXPORT_SYMBOL_GPL(ata_bmdma_status); 5683EXPORT_SYMBOL_GPL(ata_bmdma_status);
4932EXPORT_SYMBOL_GPL(ata_bmdma_stop); 5684EXPORT_SYMBOL_GPL(ata_bmdma_stop);
5685EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
5686EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
5687EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
5688EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
5689EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
4933EXPORT_SYMBOL_GPL(ata_port_probe); 5690EXPORT_SYMBOL_GPL(ata_port_probe);
5691EXPORT_SYMBOL_GPL(sata_set_spd);
4934EXPORT_SYMBOL_GPL(sata_phy_reset); 5692EXPORT_SYMBOL_GPL(sata_phy_reset);
4935EXPORT_SYMBOL_GPL(__sata_phy_reset); 5693EXPORT_SYMBOL_GPL(__sata_phy_reset);
4936EXPORT_SYMBOL_GPL(ata_bus_reset); 5694EXPORT_SYMBOL_GPL(ata_bus_reset);
@@ -4945,18 +5703,24 @@ EXPORT_SYMBOL_GPL(ata_dev_classify);
4945EXPORT_SYMBOL_GPL(ata_dev_pair); 5703EXPORT_SYMBOL_GPL(ata_dev_pair);
4946EXPORT_SYMBOL_GPL(ata_port_disable); 5704EXPORT_SYMBOL_GPL(ata_port_disable);
4947EXPORT_SYMBOL_GPL(ata_ratelimit); 5705EXPORT_SYMBOL_GPL(ata_ratelimit);
5706EXPORT_SYMBOL_GPL(ata_wait_register);
4948EXPORT_SYMBOL_GPL(ata_busy_sleep); 5707EXPORT_SYMBOL_GPL(ata_busy_sleep);
4949EXPORT_SYMBOL_GPL(ata_port_queue_task); 5708EXPORT_SYMBOL_GPL(ata_port_queue_task);
4950EXPORT_SYMBOL_GPL(ata_scsi_ioctl); 5709EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
4951EXPORT_SYMBOL_GPL(ata_scsi_queuecmd); 5710EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
4952EXPORT_SYMBOL_GPL(ata_scsi_slave_config); 5711EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
5712EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
4953EXPORT_SYMBOL_GPL(ata_scsi_release); 5713EXPORT_SYMBOL_GPL(ata_scsi_release);
4954EXPORT_SYMBOL_GPL(ata_host_intr); 5714EXPORT_SYMBOL_GPL(ata_host_intr);
5715EXPORT_SYMBOL_GPL(sata_scr_valid);
5716EXPORT_SYMBOL_GPL(sata_scr_read);
5717EXPORT_SYMBOL_GPL(sata_scr_write);
5718EXPORT_SYMBOL_GPL(sata_scr_write_flush);
5719EXPORT_SYMBOL_GPL(ata_port_online);
5720EXPORT_SYMBOL_GPL(ata_port_offline);
4955EXPORT_SYMBOL_GPL(ata_id_string); 5721EXPORT_SYMBOL_GPL(ata_id_string);
4956EXPORT_SYMBOL_GPL(ata_id_c_string); 5722EXPORT_SYMBOL_GPL(ata_id_c_string);
4957EXPORT_SYMBOL_GPL(ata_scsi_simulate); 5723EXPORT_SYMBOL_GPL(ata_scsi_simulate);
4958EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
4959EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
4960 5724
4961EXPORT_SYMBOL_GPL(ata_pio_need_iordy); 5725EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
4962EXPORT_SYMBOL_GPL(ata_timing_compute); 5726EXPORT_SYMBOL_GPL(ata_timing_compute);
@@ -4978,3 +5742,13 @@ EXPORT_SYMBOL_GPL(ata_device_suspend);
4978EXPORT_SYMBOL_GPL(ata_device_resume); 5742EXPORT_SYMBOL_GPL(ata_device_resume);
4979EXPORT_SYMBOL_GPL(ata_scsi_device_suspend); 5743EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
4980EXPORT_SYMBOL_GPL(ata_scsi_device_resume); 5744EXPORT_SYMBOL_GPL(ata_scsi_device_resume);
5745
5746EXPORT_SYMBOL_GPL(ata_eng_timeout);
5747EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
5748EXPORT_SYMBOL_GPL(ata_port_abort);
5749EXPORT_SYMBOL_GPL(ata_port_freeze);
5750EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
5751EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
5752EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
5753EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
5754EXPORT_SYMBOL_GPL(ata_do_eh);
diff --git a/drivers/scsi/libata-eh.c b/drivers/scsi/libata-eh.c
new file mode 100644
index 000000000000..71b45ad2c124
--- /dev/null
+++ b/drivers/scsi/libata-eh.c
@@ -0,0 +1,1561 @@
1/*
2 * libata-eh.c - libata error handling
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2006 Tejun Heo <htejun@gmail.com>
9 *
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License as
13 * published by the Free Software Foundation; either version 2, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; see the file COPYING. If not, write to
23 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
24 * USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
33 */
34
35#include <linux/config.h>
36#include <linux/kernel.h>
37#include <scsi/scsi.h>
38#include <scsi/scsi_host.h>
39#include <scsi/scsi_eh.h>
40#include <scsi/scsi_device.h>
41#include <scsi/scsi_cmnd.h>
42#include "scsi_transport_api.h"
43
44#include <linux/libata.h>
45
46#include "libata.h"
47
48static void __ata_port_freeze(struct ata_port *ap);
49
50static void ata_ering_record(struct ata_ering *ering, int is_io,
51 unsigned int err_mask)
52{
53 struct ata_ering_entry *ent;
54
55 WARN_ON(!err_mask);
56
57 ering->cursor++;
58 ering->cursor %= ATA_ERING_SIZE;
59
60 ent = &ering->ring[ering->cursor];
61 ent->is_io = is_io;
62 ent->err_mask = err_mask;
63 ent->timestamp = get_jiffies_64();
64}
65
66static struct ata_ering_entry * ata_ering_top(struct ata_ering *ering)
67{
68 struct ata_ering_entry *ent = &ering->ring[ering->cursor];
69 if (!ent->err_mask)
70 return NULL;
71 return ent;
72}
73
74static int ata_ering_map(struct ata_ering *ering,
75 int (*map_fn)(struct ata_ering_entry *, void *),
76 void *arg)
77{
78 int idx, rc = 0;
79 struct ata_ering_entry *ent;
80
81 idx = ering->cursor;
82 do {
83 ent = &ering->ring[idx];
84 if (!ent->err_mask)
85 break;
86 rc = map_fn(ent, arg);
87 if (rc)
88 break;
89 idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE;
90 } while (idx != ering->cursor);
91
92 return rc;
93}
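/*
 * [Editor's sketch, not part of the patch] ata_ering_map() walks the
 * ring newest-first and stops early if the callback returns non-zero.
 * A minimal counting callback, in the same style as
 * speed_down_needed_cb() further down in this file:
 */
static int my_count_timeouts_cb(struct ata_ering_entry *ent, void *arg)
{
	int *count = arg;

	if (ent->err_mask & AC_ERR_TIMEOUT)
		(*count)++;
	return 0;		/* keep walking the whole ring */
}
/* usage: ata_ering_map(&dev->ering, my_count_timeouts_cb, &count); */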
94
95/**
96 * ata_scsi_timed_out - SCSI layer time out callback
97 * @cmd: timed out SCSI command
98 *
99 * Handles SCSI layer timeout. We race with normal completion of
100 * the qc for @cmd. If the qc is already gone, we lose and let
101 * the scsi command finish (EH_HANDLED). Otherwise, the qc has
102 * timed out and EH should be invoked. Prevent ata_qc_complete()
103 * from finishing it by setting EH_SCHEDULED and return
104 * EH_NOT_HANDLED.
105 *
106 * TODO: kill this function once old EH is gone.
107 *
108 * LOCKING:
109 * Called from timer context
110 *
111 * RETURNS:
112 * EH_HANDLED or EH_NOT_HANDLED
113 */
114enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
115{
116 struct Scsi_Host *host = cmd->device->host;
117 struct ata_port *ap = ata_shost_to_port(host);
118 unsigned long flags;
119 struct ata_queued_cmd *qc;
120 enum scsi_eh_timer_return ret;
121
122 DPRINTK("ENTER\n");
123
124 if (ap->ops->error_handler) {
125 ret = EH_NOT_HANDLED;
126 goto out;
127 }
128
129 ret = EH_HANDLED;
130 spin_lock_irqsave(&ap->host_set->lock, flags);
131 qc = ata_qc_from_tag(ap, ap->active_tag);
132 if (qc) {
133 WARN_ON(qc->scsicmd != cmd);
134 qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
135 qc->err_mask |= AC_ERR_TIMEOUT;
136 ret = EH_NOT_HANDLED;
137 }
138 spin_unlock_irqrestore(&ap->host_set->lock, flags);
139
140 out:
141 DPRINTK("EXIT, ret=%d\n", ret);
142 return ret;
143}
144
145/**
146 * ata_scsi_error - SCSI layer error handler callback
147 * @host: SCSI host on which error occurred
148 *
149 * Handles SCSI-layer-thrown error events.
150 *
151 * LOCKING:
152 * Inherited from SCSI layer (none, can sleep)
153 *
154 * RETURNS:
155 * Zero.
156 */
157void ata_scsi_error(struct Scsi_Host *host)
158{
159 struct ata_port *ap = ata_shost_to_port(host);
160 spinlock_t *hs_lock = &ap->host_set->lock;
161 int i, repeat_cnt = ATA_EH_MAX_REPEAT;
162 unsigned long flags;
163
164 DPRINTK("ENTER\n");
165
166 /* synchronize with port task */
167 ata_port_flush_task(ap);
168
169 /* synchronize with host_set lock and sort out timeouts */
170
171 /* For new EH, all qcs are finished in one of three ways -
172 * normal completion, error completion, and SCSI timeout.
173 * Both completions can race against SCSI timeout. When normal
174 * completion wins, the qc never reaches EH. When error
175 * completion wins, the qc has ATA_QCFLAG_FAILED set.
176 *
177 * When SCSI timeout wins, things are a bit more complex.
178 * Normal or error completion can occur after the timeout but
179 * before this point. In such cases, both types of
180 * completions are honored. A scmd is determined to have
181 * timed out iff its associated qc is active and not failed.
182 */
183 if (ap->ops->error_handler) {
184 struct scsi_cmnd *scmd, *tmp;
185 int nr_timedout = 0;
186
187 spin_lock_irqsave(hs_lock, flags);
188
189 list_for_each_entry_safe(scmd, tmp, &host->eh_cmd_q, eh_entry) {
190 struct ata_queued_cmd *qc;
191
192 for (i = 0; i < ATA_MAX_QUEUE; i++) {
193 qc = __ata_qc_from_tag(ap, i);
194 if (qc->flags & ATA_QCFLAG_ACTIVE &&
195 qc->scsicmd == scmd)
196 break;
197 }
198
199 if (i < ATA_MAX_QUEUE) {
200 /* the scmd has an associated qc */
201 if (!(qc->flags & ATA_QCFLAG_FAILED)) {
202 /* which hasn't failed yet, timeout */
203 qc->err_mask |= AC_ERR_TIMEOUT;
204 qc->flags |= ATA_QCFLAG_FAILED;
205 nr_timedout++;
206 }
207 } else {
208 /* Normal completion occurred after
209 * SCSI timeout but before this point.
210 * Successfully complete it.
211 */
212 scmd->retries = scmd->allowed;
213 scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
214 }
215 }
216
217 /* If we have timed out qcs, they belong to EH from
218 * this point but the state of the controller is
219 * unknown. Freeze the port to make sure the IRQ
220 * handler doesn't diddle with those qcs. This must
221 * be done atomically w.r.t. setting QCFLAG_FAILED.
222 */
223 if (nr_timedout)
224 __ata_port_freeze(ap);
225
226 spin_unlock_irqrestore(hs_lock, flags);
227 } else
228 spin_unlock_wait(hs_lock);
229
230 repeat:
231 /* invoke error handler */
232 if (ap->ops->error_handler) {
233 /* fetch & clear EH info */
234 spin_lock_irqsave(hs_lock, flags);
235
236 memset(&ap->eh_context, 0, sizeof(ap->eh_context));
237 ap->eh_context.i = ap->eh_info;
238 memset(&ap->eh_info, 0, sizeof(ap->eh_info));
239
240 ap->flags &= ~ATA_FLAG_EH_PENDING;
241
242 spin_unlock_irqrestore(hs_lock, flags);
243
244 /* invoke EH */
245 ap->ops->error_handler(ap);
246
247 /* An exception might have happened after ->error_handler
248 * recovered the port but before this point. Repeat
249 * EH in such a case.
250 */
251 spin_lock_irqsave(hs_lock, flags);
252
253 if (ap->flags & ATA_FLAG_EH_PENDING) {
254 if (--repeat_cnt) {
255 ata_port_printk(ap, KERN_INFO,
256 "EH pending after completion, "
257 "repeating EH (cnt=%d)\n", repeat_cnt);
258 spin_unlock_irqrestore(hs_lock, flags);
259 goto repeat;
260 }
261 ata_port_printk(ap, KERN_ERR, "EH pending after %d "
262 "tries, giving up\n", ATA_EH_MAX_REPEAT);
263 }
264
265 /* this run is complete, make sure EH info is clear */
266 memset(&ap->eh_info, 0, sizeof(ap->eh_info));
267
268 /* Clear host_eh_scheduled while holding hs_lock such
269 * that if exception occurs after this point but
270 * before EH completion, SCSI midlayer will
271 * re-initiate EH.
272 */
273 host->host_eh_scheduled = 0;
274
275 spin_unlock_irqrestore(hs_lock, flags);
276 } else {
277 WARN_ON(ata_qc_from_tag(ap, ap->active_tag) == NULL);
278 ap->ops->eng_timeout(ap);
279 }
280
281 /* finish or retry handled scmd's and clean up */
282 WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q));
283
284 scsi_eh_flush_done_q(&ap->eh_done_q);
285
286 /* clean up */
287 spin_lock_irqsave(hs_lock, flags);
288
289 if (ap->flags & ATA_FLAG_RECOVERED)
290 ata_port_printk(ap, KERN_INFO, "EH complete\n");
291 ap->flags &= ~ATA_FLAG_RECOVERED;
292
293 spin_unlock_irqrestore(hs_lock, flags);
294
295 DPRINTK("EXIT\n");
296}
297
298/**
299 * ata_qc_timeout - Handle timeout of queued command
300 * @qc: Command that timed out
301 *
302 * Some part of the kernel (currently, only the SCSI layer)
303 * has noticed that the active command on port @ap has not
304 * completed after a specified length of time. Handle this
305 * condition by disabling DMA (if necessary) and completing
306 * transactions, with error if necessary.
307 *
308 * This also handles the case of the "lost interrupt", where
309 * for some reason (possibly hardware bug, possibly driver bug)
310 * an interrupt was not delivered to the driver, even though the
311 * transaction completed successfully.
312 *
313 * TODO: kill this function once old EH is gone.
314 *
315 * LOCKING:
316 * Inherited from SCSI layer (none, can sleep)
317 */
318static void ata_qc_timeout(struct ata_queued_cmd *qc)
319{
320 struct ata_port *ap = qc->ap;
321 struct ata_host_set *host_set = ap->host_set;
322 u8 host_stat = 0, drv_stat;
323 unsigned long flags;
324
325 DPRINTK("ENTER\n");
326
327 ap->hsm_task_state = HSM_ST_IDLE;
328
329 spin_lock_irqsave(&host_set->lock, flags);
330
331 switch (qc->tf.protocol) {
332
333 case ATA_PROT_DMA:
334 case ATA_PROT_ATAPI_DMA:
335 host_stat = ap->ops->bmdma_status(ap);
336
337 /* before we do anything else, clear DMA-Start bit */
338 ap->ops->bmdma_stop(qc);
339
340 /* fall through */
341
342 default:
343 ata_altstatus(ap);
344 drv_stat = ata_chk_status(ap);
345
346 /* ack bmdma irq events */
347 ap->ops->irq_clear(ap);
348
349 ata_dev_printk(qc->dev, KERN_ERR, "command 0x%x timeout, "
350 "stat 0x%x host_stat 0x%x\n",
351 qc->tf.command, drv_stat, host_stat);
352
353 /* complete taskfile transaction */
354 qc->err_mask |= AC_ERR_TIMEOUT;
355 break;
356 }
357
358 spin_unlock_irqrestore(&host_set->lock, flags);
359
360 ata_eh_qc_complete(qc);
361
362 DPRINTK("EXIT\n");
363}
364
365/**
366 * ata_eng_timeout - Handle timeout of queued command
367 * @ap: Port on which timed-out command is active
368 *
369 * Some part of the kernel (currently, only the SCSI layer)
370 * has noticed that the active command on port @ap has not
371 * completed after a specified length of time. Handle this
372 * condition by disabling DMA (if necessary) and completing
373 * transactions, with error if necessary.
374 *
375 * This also handles the case of the "lost interrupt", where
376 * for some reason (possibly hardware bug, possibly driver bug)
377 * an interrupt was not delivered to the driver, even though the
378 * transaction completed successfully.
379 *
380 * TODO: kill this function once old EH is gone.
381 *
382 * LOCKING:
383 * Inherited from SCSI layer (none, can sleep)
384 */
385void ata_eng_timeout(struct ata_port *ap)
386{
387 DPRINTK("ENTER\n");
388
389 ata_qc_timeout(ata_qc_from_tag(ap, ap->active_tag));
390
391 DPRINTK("EXIT\n");
392}
393
394/**
395 * ata_qc_schedule_eh - schedule qc for error handling
396 * @qc: command to schedule error handling for
397 *
398 * Schedule error handling for @qc. EH will kick in as soon as
399 * other commands are drained.
400 *
401 * LOCKING:
402 * spin_lock_irqsave(host_set lock)
403 */
404void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
405{
406 struct ata_port *ap = qc->ap;
407
408 WARN_ON(!ap->ops->error_handler);
409
410 qc->flags |= ATA_QCFLAG_FAILED;
411 qc->ap->flags |= ATA_FLAG_EH_PENDING;
412
413 /* The following will fail if timeout has already expired.
414 * ata_scsi_error() takes care of such scmds on EH entry.
415 * Note that ATA_QCFLAG_FAILED is unconditionally set after
416 * this function completes.
417 */
418 scsi_req_abort_cmd(qc->scsicmd);
419}
420
421/**
422 * ata_port_schedule_eh - schedule error handling without a qc
423 * @ap: ATA port to schedule EH for
424 *
425 * Schedule error handling for @ap. EH will kick in as soon as
426 * all commands are drained.
427 *
428 * LOCKING:
429 * spin_lock_irqsave(host_set lock)
430 */
431void ata_port_schedule_eh(struct ata_port *ap)
432{
433 WARN_ON(!ap->ops->error_handler);
434
435 ap->flags |= ATA_FLAG_EH_PENDING;
436 scsi_schedule_eh(ap->host);
437
438 DPRINTK("port EH scheduled\n");
439}
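/*
 * [Editor's sketch, not part of the patch] An async event with no qc
 * to blame (say, a PHY status change) can still kick port-wide EH.
 * Caller is assumed to hold the host_set lock, as the LOCKING note
 * above requires.
 */
static void my_handle_phy_event(struct ata_port *ap)
{
	ap->eh_info.action |= ATA_EH_HARDRESET;	/* ask EH to reset the link */
	ata_port_schedule_eh(ap);
}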
440
441/**
442 * ata_port_abort - abort all qc's on the port
443 * @ap: ATA port to abort qc's for
444 *
445 * Abort all active qc's of @ap and schedule EH.
446 *
447 * LOCKING:
448 * spin_lock_irqsave(host_set lock)
449 *
450 * RETURNS:
451 * Number of aborted qc's.
452 */
453int ata_port_abort(struct ata_port *ap)
454{
455 int tag, nr_aborted = 0;
456
457 WARN_ON(!ap->ops->error_handler);
458
459 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
460 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
461
462 if (qc) {
463 qc->flags |= ATA_QCFLAG_FAILED;
464 ata_qc_complete(qc);
465 nr_aborted++;
466 }
467 }
468
469 if (!nr_aborted)
470 ata_port_schedule_eh(ap);
471
472 return nr_aborted;
473}
474
475/**
476 * __ata_port_freeze - freeze port
477 * @ap: ATA port to freeze
478 *
479 * This function is called when HSM violation or some other
480 * condition disrupts normal operation of the port. Frozen port
481 * is not allowed to perform any operation until the port is
482 * thawed, which usually follows a successful reset.
483 *
484 * ap->ops->freeze() callback can be used for freezing the port
485 * hardware-wise (e.g. mask interrupt and stop DMA engine). If a
486 * port cannot be frozen hardware-wise, the interrupt handler
487 * must ack and clear interrupts unconditionally while the port
488 * is frozen.
489 *
490 * LOCKING:
491 * spin_lock_irqsave(host_set lock)
492 */
493static void __ata_port_freeze(struct ata_port *ap)
494{
495 WARN_ON(!ap->ops->error_handler);
496
497 if (ap->ops->freeze)
498 ap->ops->freeze(ap);
499
500 ap->flags |= ATA_FLAG_FROZEN;
501
502 DPRINTK("ata%u port frozen\n", ap->id);
503}
504
505/**
506 * ata_port_freeze - abort & freeze port
507 * @ap: ATA port to freeze
508 *
509 * Abort and freeze @ap.
510 *
511 * LOCKING:
512 * spin_lock_irqsave(host_set lock)
513 *
514 * RETURNS:
515 * Number of aborted commands.
516 */
517int ata_port_freeze(struct ata_port *ap)
518{
519 int nr_aborted;
520
521 WARN_ON(!ap->ops->error_handler);
522
523 nr_aborted = ata_port_abort(ap);
524 __ata_port_freeze(ap);
525
526 return nr_aborted;
527}
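/*
 * [Editor's sketch, not part of the patch] Typical use of
 * ata_port_freeze() from a hypothetical LLDD interrupt handler that
 * has just seen a fatal condition.  Called under the host_set lock;
 * the eh_info fields are those used elsewhere in this file.
 */
static void my_handle_fatal_irq(struct ata_port *ap)
{
	ap->eh_info.err_mask |= AC_ERR_HSM;	/* record what went wrong */
	ap->eh_info.action |= ATA_EH_SOFTRESET;
	ata_port_freeze(ap);	/* abort qcs, block the IRQ path */
}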
528
529/**
530 * ata_eh_freeze_port - EH helper to freeze port
531 * @ap: ATA port to freeze
532 *
533 * Freeze @ap.
534 *
535 * LOCKING:
536 * None.
537 */
538void ata_eh_freeze_port(struct ata_port *ap)
539{
540 unsigned long flags;
541
542 if (!ap->ops->error_handler)
543 return;
544
545 spin_lock_irqsave(&ap->host_set->lock, flags);
546 __ata_port_freeze(ap);
547 spin_unlock_irqrestore(&ap->host_set->lock, flags);
548}
549
550/**
551 * ata_eh_thaw_port - EH helper to thaw port
552 * @ap: ATA port to thaw
553 *
554 * Thaw frozen port @ap.
555 *
556 * LOCKING:
557 * None.
558 */
559void ata_eh_thaw_port(struct ata_port *ap)
560{
561 unsigned long flags;
562
563 if (!ap->ops->error_handler)
564 return;
565
566 spin_lock_irqsave(&ap->host_set->lock, flags);
567
568 ap->flags &= ~ATA_FLAG_FROZEN;
569
570 if (ap->ops->thaw)
571 ap->ops->thaw(ap);
572
573 spin_unlock_irqrestore(&ap->host_set->lock, flags);
574
575 DPRINTK("ata%u port thawed\n", ap->id);
576}
577
578static void ata_eh_scsidone(struct scsi_cmnd *scmd)
579{
580 /* nada */
581}
582
583static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
584{
585 struct ata_port *ap = qc->ap;
586 struct scsi_cmnd *scmd = qc->scsicmd;
587 unsigned long flags;
588
589 spin_lock_irqsave(&ap->host_set->lock, flags);
590 qc->scsidone = ata_eh_scsidone;
591 __ata_qc_complete(qc);
592 WARN_ON(ata_tag_valid(qc->tag));
593 spin_unlock_irqrestore(&ap->host_set->lock, flags);
594
595 scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
596}
597
598/**
599 * ata_eh_qc_complete - Complete an active ATA command from EH
600 * @qc: Command to complete
601 *
602 * Indicate to the mid and upper layers that an ATA command has
603 * completed. To be used from EH.
604 */
605void ata_eh_qc_complete(struct ata_queued_cmd *qc)
606{
607 struct scsi_cmnd *scmd = qc->scsicmd;
608 scmd->retries = scmd->allowed;
609 __ata_eh_qc_complete(qc);
610}
611
612/**
613 * ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
614 * @qc: Command to retry
615 *
616 * Indicate to the mid and upper layers that an ATA command
617 * should be retried. To be used from EH.
618 *
619 * SCSI midlayer limits the number of retries to scmd->allowed.
620 * scmd->retries is decremented for commands which get retried
621 * due to unrelated failures (qc->err_mask is zero).
622 */
623void ata_eh_qc_retry(struct ata_queued_cmd *qc)
624{
625 struct scsi_cmnd *scmd = qc->scsicmd;
626 if (!qc->err_mask && scmd->retries)
627 scmd->retries--;
628 __ata_eh_qc_complete(qc);
629}
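/*
 * [Editor's sketch, not part of the patch] A simplified consumer of
 * the two helpers above; the patch's own ata_eh_finish(), later in
 * this file, applies a finer-grained policy.
 */
static void my_finish_failed_qc(struct ata_queued_cmd *qc)
{
	if (qc->err_mask)
		ata_eh_qc_retry(qc);	/* retries are capped by scmd->allowed */
	else
		ata_eh_qc_complete(qc);	/* recovered without a device error */
}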
630
631/**
632 * ata_eh_about_to_do - about to perform eh_action
633 * @ap: target ATA port
634 * @action: action about to be performed
635 *
636 * Called just before performing EH actions to clear related bits
637 * in @ap->eh_info such that EH actions are not unnecessarily
638 * repeated.
639 *
640 * LOCKING:
641 * None.
642 */
643static void ata_eh_about_to_do(struct ata_port *ap, unsigned int action)
644{
645 unsigned long flags;
646
647 spin_lock_irqsave(&ap->host_set->lock, flags);
648 ap->eh_info.action &= ~action;
649 ap->flags |= ATA_FLAG_RECOVERED;
650 spin_unlock_irqrestore(&ap->host_set->lock, flags);
651}
652
653/**
654 * ata_err_string - convert err_mask to descriptive string
655 * @err_mask: error mask to convert to string
656 *
657 * Convert @err_mask to descriptive string. Errors are
658 * prioritized according to severity and only the most severe
659 * error is reported.
660 *
661 * LOCKING:
662 * None.
663 *
664 * RETURNS:
665 * Descriptive string for @err_mask
666 */
667static const char * ata_err_string(unsigned int err_mask)
668{
669 if (err_mask & AC_ERR_HOST_BUS)
670 return "host bus error";
671 if (err_mask & AC_ERR_ATA_BUS)
672 return "ATA bus error";
673 if (err_mask & AC_ERR_TIMEOUT)
674 return "timeout";
675 if (err_mask & AC_ERR_HSM)
676 return "HSM violation";
677 if (err_mask & AC_ERR_SYSTEM)
678 return "internal error";
679 if (err_mask & AC_ERR_MEDIA)
680 return "media error";
681 if (err_mask & AC_ERR_INVALID)
682 return "invalid argument";
683 if (err_mask & AC_ERR_DEV)
684 return "device error";
685 return "unknown error";
686}
687
688/**
689 * ata_read_log_page - read a specific log page
690 * @dev: target device
691 * @page: page to read
692 * @buf: buffer to store read page
693 * @sectors: number of sectors to read
694 *
695 * Read log page using READ_LOG_EXT command.
696 *
697 * LOCKING:
698 * Kernel thread context (may sleep).
699 *
700 * RETURNS:
701 * 0 on success, AC_ERR_* mask otherwise.
702 */
703static unsigned int ata_read_log_page(struct ata_device *dev,
704 u8 page, void *buf, unsigned int sectors)
705{
706 struct ata_taskfile tf;
707 unsigned int err_mask;
708
709 DPRINTK("read log page - page %d\n", page);
710
711 ata_tf_init(dev, &tf);
712 tf.command = ATA_CMD_READ_LOG_EXT;
713 tf.lbal = page;
714 tf.nsect = sectors;
715 tf.hob_nsect = sectors >> 8;
716 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;
717 tf.protocol = ATA_PROT_PIO;
718
719 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
720 buf, sectors * ATA_SECT_SIZE);
721
722 DPRINTK("EXIT, err_mask=%x\n", err_mask);
723 return err_mask;
724}
725
726/**
727 * ata_eh_read_log_10h - Read log page 10h for NCQ error details
728 * @dev: Device to read log page 10h from
729 * @tag: Resulting tag of the failed command
730 * @tf: Resulting taskfile registers of the failed command
731 *
732 * Read log page 10h to obtain NCQ error details and clear error
733 * condition.
734 *
735 * LOCKING:
736 * Kernel thread context (may sleep).
737 *
738 * RETURNS:
739 * 0 on success, -errno otherwise.
740 */
741static int ata_eh_read_log_10h(struct ata_device *dev,
742 int *tag, struct ata_taskfile *tf)
743{
744 u8 *buf = dev->ap->sector_buf;
745 unsigned int err_mask;
746 u8 csum;
747 int i;
748
749 err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, buf, 1);
750 if (err_mask)
751 return -EIO;
752
753 csum = 0;
754 for (i = 0; i < ATA_SECT_SIZE; i++)
755 csum += buf[i];
756 if (csum)
757 ata_dev_printk(dev, KERN_WARNING,
758 "invalid checksum 0x%x on log page 10h\n", csum);
759
760 if (buf[0] & 0x80)
761 return -ENOENT;
762
763 *tag = buf[0] & 0x1f;
764
765 tf->command = buf[2];
766 tf->feature = buf[3];
767 tf->lbal = buf[4];
768 tf->lbam = buf[5];
769 tf->lbah = buf[6];
770 tf->device = buf[7];
771 tf->hob_lbal = buf[8];
772 tf->hob_lbam = buf[9];
773 tf->hob_lbah = buf[10];
774 tf->nsect = buf[12];
775 tf->hob_nsect = buf[13];
776
777 return 0;
778}
779
780/**
781 * atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
782 * @dev: device to perform REQUEST_SENSE to
783 * @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
784 *
785 * Perform ATAPI REQUEST_SENSE after the device reported CHECK
786 * SENSE. This function is an EH helper.
787 *
788 * LOCKING:
789 * Kernel thread context (may sleep).
790 *
791 * RETURNS:
792 * 0 on success, AC_ERR_* mask on failure
793 */
794static unsigned int atapi_eh_request_sense(struct ata_device *dev,
795 unsigned char *sense_buf)
796{
797 struct ata_port *ap = dev->ap;
798 struct ata_taskfile tf;
799 u8 cdb[ATAPI_CDB_LEN];
800
801 DPRINTK("ATAPI request sense\n");
802
803 ata_tf_init(dev, &tf);
804
805 /* FIXME: is this needed? */
806 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
807
808 /* XXX: why tf_read here? */
809 ap->ops->tf_read(ap, &tf);
810
811 /* fill these in, for the case where they are -not- overwritten */
812 sense_buf[0] = 0x70;
813 sense_buf[2] = tf.feature >> 4;
814
815 memset(cdb, 0, ATAPI_CDB_LEN);
816 cdb[0] = REQUEST_SENSE;
817 cdb[4] = SCSI_SENSE_BUFFERSIZE;
818
819 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
820 tf.command = ATA_CMD_PACKET;
821
822 /* is it pointless to prefer PIO for "safety reasons"? */
823 if (ap->flags & ATA_FLAG_PIO_DMA) {
824 tf.protocol = ATA_PROT_ATAPI_DMA;
825 tf.feature |= ATAPI_PKT_DMA;
826 } else {
827 tf.protocol = ATA_PROT_ATAPI;
828 tf.lbam = (8 * 1024) & 0xff;
829 tf.lbah = (8 * 1024) >> 8;
830 }
831
832 return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
833 sense_buf, SCSI_SENSE_BUFFERSIZE);
834}
835
836/**
837 * ata_eh_analyze_serror - analyze SError for a failed port
838 * @ap: ATA port to analyze SError for
839 *
840 * Analyze SError if available and further determine cause of
841 * failure.
842 *
843 * LOCKING:
844 * None.
845 */
846static void ata_eh_analyze_serror(struct ata_port *ap)
847{
848 struct ata_eh_context *ehc = &ap->eh_context;
849 u32 serror = ehc->i.serror;
850 unsigned int err_mask = 0, action = 0;
851
852 if (serror & SERR_PERSISTENT) {
853 err_mask |= AC_ERR_ATA_BUS;
854 action |= ATA_EH_HARDRESET;
855 }
856 if (serror &
857 (SERR_DATA_RECOVERED | SERR_COMM_RECOVERED | SERR_DATA)) {
858 err_mask |= AC_ERR_ATA_BUS;
859 action |= ATA_EH_SOFTRESET;
860 }
861 if (serror & SERR_PROTOCOL) {
862 err_mask |= AC_ERR_HSM;
863 action |= ATA_EH_SOFTRESET;
864 }
865 if (serror & SERR_INTERNAL) {
866 err_mask |= AC_ERR_SYSTEM;
867 action |= ATA_EH_SOFTRESET;
868 }
869 if (serror & (SERR_PHYRDY_CHG | SERR_DEV_XCHG)) {
870 err_mask |= AC_ERR_ATA_BUS;
871 action |= ATA_EH_HARDRESET;
872 }
873
874 ehc->i.err_mask |= err_mask;
875 ehc->i.action |= action;
876}
877
878/**
879 * ata_eh_analyze_ncq_error - analyze NCQ error
880 * @ap: ATA port to analyze NCQ error for
881 *
882 * Read log page 10h, determine the offending qc and acquire
883 * error status TF. For NCQ device errors, all an LLDD has to
884 * do is set AC_ERR_DEV in ehi->err_mask. This function takes
885 * care of the rest.
886 *
887 * LOCKING:
888 * Kernel thread context (may sleep).
889 */
890static void ata_eh_analyze_ncq_error(struct ata_port *ap)
891{
892 struct ata_eh_context *ehc = &ap->eh_context;
893 struct ata_device *dev = ap->device;
894 struct ata_queued_cmd *qc;
895 struct ata_taskfile tf;
896 int tag, rc;
897
898 /* if frozen, we can't do much */
899 if (ap->flags & ATA_FLAG_FROZEN)
900 return;
901
902 /* is it NCQ device error? */
903 if (!ap->sactive || !(ehc->i.err_mask & AC_ERR_DEV))
904 return;
905
906 /* has LLDD analyzed already? */
907 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
908 qc = __ata_qc_from_tag(ap, tag);
909
910 if (!(qc->flags & ATA_QCFLAG_FAILED))
911 continue;
912
913 if (qc->err_mask)
914 return;
915 }
916
917 /* okay, this error is ours */
918 rc = ata_eh_read_log_10h(dev, &tag, &tf);
919 if (rc) {
920 ata_port_printk(ap, KERN_ERR, "failed to read log page 10h "
921 "(errno=%d)\n", rc);
922 return;
923 }
924
925 if (!(ap->sactive & (1 << tag))) {
926 ata_port_printk(ap, KERN_ERR, "log page 10h reported "
927 "inactive tag %d\n", tag);
928 return;
929 }
930
931 /* we've got the perpetrator, condemn it */
932 qc = __ata_qc_from_tag(ap, tag);
933 memcpy(&qc->result_tf, &tf, sizeof(tf));
934 qc->err_mask |= AC_ERR_DEV;
935 ehc->i.err_mask &= ~AC_ERR_DEV;
936}
937
938/**
939 * ata_eh_analyze_tf - analyze taskfile of a failed qc
940 * @qc: qc to analyze
941 * @tf: Taskfile registers to analyze
942 *
943 * Analyze taskfile of @qc and further determine cause of
944 * failure. This function also requests ATAPI sense data if
945 * available.
946 *
947 * LOCKING:
948 * Kernel thread context (may sleep).
949 *
950 * RETURNS:
951 * Determined recovery action
952 */
953static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
954 const struct ata_taskfile *tf)
955{
956 unsigned int tmp, action = 0;
957 u8 stat = tf->command, err = tf->feature;
958
959 if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) {
960 qc->err_mask |= AC_ERR_HSM;
961 return ATA_EH_SOFTRESET;
962 }
963
964 if (!(qc->err_mask & AC_ERR_DEV))
965 return 0;
966
967 switch (qc->dev->class) {
968 case ATA_DEV_ATA:
969 if (err & ATA_ICRC)
970 qc->err_mask |= AC_ERR_ATA_BUS;
971 if (err & ATA_UNC)
972 qc->err_mask |= AC_ERR_MEDIA;
973 if (err & ATA_IDNF)
974 qc->err_mask |= AC_ERR_INVALID;
975 break;
976
977 case ATA_DEV_ATAPI:
978 tmp = atapi_eh_request_sense(qc->dev,
979 qc->scsicmd->sense_buffer);
980 if (!tmp) {
981 /* ATA_QCFLAG_SENSE_VALID is used to tell
982 * atapi_qc_complete() that sense data is
983 * already valid.
984 *
985 * TODO: interpret sense data and set
986 * appropriate err_mask.
987 */
988 qc->flags |= ATA_QCFLAG_SENSE_VALID;
989 } else
990 qc->err_mask |= tmp;
991 }
992
993 if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS))
994 action |= ATA_EH_SOFTRESET;
995
996 return action;
997}
998
999static int ata_eh_categorize_ering_entry(struct ata_ering_entry *ent)
1000{
1001 if (ent->err_mask & (AC_ERR_ATA_BUS | AC_ERR_TIMEOUT))
1002 return 1;
1003
1004 if (ent->is_io) {
1005 if (ent->err_mask & AC_ERR_HSM)
1006 return 1;
1007 if ((ent->err_mask &
1008 (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV)
1009 return 2;
1010 }
1011
1012 return 0;
1013}
1014
1015struct speed_down_needed_arg {
1016 u64 since;
1017 int nr_errors[3];
1018};
1019
1020static int speed_down_needed_cb(struct ata_ering_entry *ent, void *void_arg)
1021{
1022 struct speed_down_needed_arg *arg = void_arg;
1023
1024 if (ent->timestamp < arg->since)
1025 return -1;
1026
1027 arg->nr_errors[ata_eh_categorize_ering_entry(ent)]++;
1028 return 0;
1029}
1030
1031/**
1032 * ata_eh_speed_down_needed - Determine whether speed down is necessary
1033 * @dev: Device of interest
1034 *
1035 * This function examines error ring of @dev and determines
1036 * whether speed down is necessary. Speed down is necessary if
1037 * there have been more than 3 Cat-1 errors or 10 Cat-2
1038 * errors during the last 15 minutes.
1039 *
1040 * Cat-1 errors are ATA_BUS or TIMEOUT for any command, and HSM
1041 * violations for known supported commands.
1042 *
1043 * Cat-2 errors are unclassified DEV errors for known supported
1044 * commands.
1045 *
1046 * LOCKING:
1047 * Inherited from caller.
1048 *
1049 * RETURNS:
1050 * 1 if speed down is necessary, 0 otherwise
1051 */
1052static int ata_eh_speed_down_needed(struct ata_device *dev)
1053{
1054 const u64 interval = 15LLU * 60 * HZ;
1055 static const int err_limits[3] = { -1, 3, 10 };
1056 struct speed_down_needed_arg arg;
1057 struct ata_ering_entry *ent;
1058 int err_cat;
1059 u64 j64;
1060
1061 ent = ata_ering_top(&dev->ering);
1062 if (!ent)
1063 return 0;
1064
1065 err_cat = ata_eh_categorize_ering_entry(ent);
1066 if (err_cat == 0)
1067 return 0;
1068
1069 memset(&arg, 0, sizeof(arg));
1070
1071 j64 = get_jiffies_64();
1072 if (j64 >= interval)
1073 arg.since = j64 - interval;
1074 else
1075 arg.since = 0;
1076
1077 ata_ering_map(&dev->ering, speed_down_needed_cb, &arg);
1078
1079 return arg.nr_errors[err_cat] > err_limits[err_cat];
1080}
1081
1082/**
1083 * ata_eh_speed_down - record error and speed down if necessary
1084 * @dev: Failed device
1085 * @is_io: Did the device fail during normal IO?
1086 * @err_mask: err_mask of the error
1087 *
1088 * Record error and examine error history to determine whether
1089 * adjusting transmission speed is necessary. It also sets
1090 * transmission limits appropriately if such adjustment is
1091 * necessary.
1092 *
1093 * LOCKING:
1094 * Kernel thread context (may sleep).
1095 *
1096 * RETURNS:
1097 * 0 on success, -errno otherwise
1098 */
1099static int ata_eh_speed_down(struct ata_device *dev, int is_io,
1100 unsigned int err_mask)
1101{
1102 if (!err_mask)
1103 return 0;
1104
1105 /* record error and determine whether speed down is necessary */
1106 ata_ering_record(&dev->ering, is_io, err_mask);
1107
1108 if (!ata_eh_speed_down_needed(dev))
1109 return 0;
1110
1111 /* speed down SATA link speed if possible */
1112 if (sata_down_spd_limit(dev->ap) == 0)
1113 return ATA_EH_HARDRESET;
1114
1115 /* lower transfer mode */
1116 if (ata_down_xfermask_limit(dev, 0) == 0)
1117 return ATA_EH_SOFTRESET;
1118
1119 ata_dev_printk(dev, KERN_ERR,
1120 "speed down requested but no transfer mode left\n");
1121 return 0;
1122}
1123
1124/**
1125 * ata_eh_autopsy - analyze error and determine recovery action
1126 * @ap: ATA port to perform autopsy on
1127 *
1128 * Analyze why @ap failed and determine which recovery action is
1129 * needed. This function also sets more detailed AC_ERR_* values
1130 * and fills sense data for ATAPI CHECK SENSE.
1131 *
1132 * LOCKING:
1133 * Kernel thread context (may sleep).
1134 */
1135static void ata_eh_autopsy(struct ata_port *ap)
1136{
1137 struct ata_eh_context *ehc = &ap->eh_context;
1138 unsigned int action = ehc->i.action;
1139 struct ata_device *failed_dev = NULL;
1140 unsigned int all_err_mask = 0;
1141 int tag, is_io = 0;
1142 u32 serror;
1143 int rc;
1144
1145 DPRINTK("ENTER\n");
1146
1147 /* obtain and analyze SError */
1148 rc = sata_scr_read(ap, SCR_ERROR, &serror);
1149 if (rc == 0) {
1150 ehc->i.serror |= serror;
1151 ata_eh_analyze_serror(ap);
1152 } else if (rc != -EOPNOTSUPP)
1153 action |= ATA_EH_HARDRESET;
1154
1155 /* analyze NCQ failure */
1156 ata_eh_analyze_ncq_error(ap);
1157
1158 /* any real error trumps AC_ERR_OTHER */
1159 if (ehc->i.err_mask & ~AC_ERR_OTHER)
1160 ehc->i.err_mask &= ~AC_ERR_OTHER;
1161
1162 all_err_mask |= ehc->i.err_mask;
1163
1164 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1165 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
1166
1167 if (!(qc->flags & ATA_QCFLAG_FAILED))
1168 continue;
1169
1170 /* inherit upper level err_mask */
1171 qc->err_mask |= ehc->i.err_mask;
1172
1173 /* analyze TF */
1174 action |= ata_eh_analyze_tf(qc, &qc->result_tf);
1175
1176 /* DEV errors are probably spurious in case of ATA_BUS error */
1177 if (qc->err_mask & AC_ERR_ATA_BUS)
1178 qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA |
1179 AC_ERR_INVALID);
1180
1181 /* any real error trumps unknown error */
1182 if (qc->err_mask & ~AC_ERR_OTHER)
1183 qc->err_mask &= ~AC_ERR_OTHER;
1184
1185 /* SENSE_VALID trumps dev/unknown error and revalidation */
1186 if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
1187 qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);
1188 action &= ~ATA_EH_REVALIDATE;
1189 }
1190
1191 /* accumulate error info */
1192 failed_dev = qc->dev;
1193 all_err_mask |= qc->err_mask;
1194 if (qc->flags & ATA_QCFLAG_IO)
1195 is_io = 1;
1196 }
1197
1198 /* speed down iff command was in progress */
1199 if (failed_dev)
1200 action |= ata_eh_speed_down(failed_dev, is_io, all_err_mask);
1201
1202 /* enforce default EH actions */
1203 if (ap->flags & ATA_FLAG_FROZEN ||
1204 all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT))
1205 action |= ATA_EH_SOFTRESET;
1206 else if (all_err_mask)
1207 action |= ATA_EH_REVALIDATE;
1208
1209 /* record autopsy result */
1210 ehc->i.dev = failed_dev;
1211 ehc->i.action = action;
1212
1213 DPRINTK("EXIT\n");
1214}
1215
1216/**
1217 * ata_eh_report - report error handling to user
1218 * @ap: ATA port EH is going on
1219 *
1220 * Report EH to user.
1221 *
1222 * LOCKING:
1223 * None.
1224 */
1225static void ata_eh_report(struct ata_port *ap)
1226{
1227 struct ata_eh_context *ehc = &ap->eh_context;
1228 const char *frozen, *desc;
1229 int tag, nr_failed = 0;
1230
1231 desc = NULL;
1232 if (ehc->i.desc[0] != '\0')
1233 desc = ehc->i.desc;
1234
1235 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1236 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
1237
1238 if (!(qc->flags & ATA_QCFLAG_FAILED))
1239 continue;
1240 if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask)
1241 continue;
1242
1243 nr_failed++;
1244 }
1245
1246 if (!nr_failed && !ehc->i.err_mask)
1247 return;
1248
1249 frozen = "";
1250 if (ap->flags & ATA_FLAG_FROZEN)
1251 frozen = " frozen";
1252
1253 if (ehc->i.dev) {
1254 ata_dev_printk(ehc->i.dev, KERN_ERR, "exception Emask 0x%x "
1255 "SAct 0x%x SErr 0x%x action 0x%x%s\n",
1256 ehc->i.err_mask, ap->sactive, ehc->i.serror,
1257 ehc->i.action, frozen);
1258 if (desc)
1259 ata_dev_printk(ehc->i.dev, KERN_ERR, "(%s)\n", desc);
1260 } else {
1261 ata_port_printk(ap, KERN_ERR, "exception Emask 0x%x "
1262 "SAct 0x%x SErr 0x%x action 0x%x%s\n",
1263 ehc->i.err_mask, ap->sactive, ehc->i.serror,
1264 ehc->i.action, frozen);
1265 if (desc)
1266 ata_port_printk(ap, KERN_ERR, "(%s)\n", desc);
1267 }
1268
1269 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1270 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
1271
1272 if (!(qc->flags & ATA_QCFLAG_FAILED) || !qc->err_mask)
1273 continue;
1274
1275 ata_dev_printk(qc->dev, KERN_ERR, "tag %d cmd 0x%x "
1276 "Emask 0x%x stat 0x%x err 0x%x (%s)\n",
1277 qc->tag, qc->tf.command, qc->err_mask,
1278 qc->result_tf.command, qc->result_tf.feature,
1279 ata_err_string(qc->err_mask));
1280 }
1281}
1282
1283static int ata_eh_reset(struct ata_port *ap, ata_reset_fn_t softreset,
1284 ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
1285{
1286 struct ata_eh_context *ehc = &ap->eh_context;
1287 unsigned int classes[ATA_MAX_DEVICES];
1288 int tries = ATA_EH_RESET_TRIES;
1289 ata_reset_fn_t reset;
1290 int rc;
1291
1292 if (softreset && (!hardreset || (!sata_set_spd_needed(ap) &&
1293 !(ehc->i.action & ATA_EH_HARDRESET))))
1294 reset = softreset;
1295 else
1296 reset = hardreset;
1297
1298 retry:
1299 ata_port_printk(ap, KERN_INFO, "%s resetting port\n",
1300 reset == softreset ? "soft" : "hard");
1301
1302 /* reset */
1303 ata_eh_about_to_do(ap, ATA_EH_RESET_MASK);
1304 ehc->i.flags |= ATA_EHI_DID_RESET;
1305
1306 rc = ata_do_reset(ap, reset, classes);
1307
1308 if (rc && --tries) {
1309 ata_port_printk(ap, KERN_WARNING,
1310 "%sreset failed, retrying in 5 secs\n",
1311 reset == softreset ? "soft" : "hard");
1312 ssleep(5);
1313
1314 if (reset == hardreset)
1315 sata_down_spd_limit(ap);
1316 if (hardreset)
1317 reset = hardreset;
1318 goto retry;
1319 }
1320
1321 if (rc == 0) {
1322 if (postreset)
1323 postreset(ap, classes);
1324
1325 /* reset successful, schedule revalidation */
1326 ehc->i.dev = NULL;
1327 ehc->i.action &= ~ATA_EH_RESET_MASK;
1328 ehc->i.action |= ATA_EH_REVALIDATE;
1329 }
1330
1331 return rc;
1332}
1333
1334static int ata_eh_revalidate(struct ata_port *ap,
1335 struct ata_device **r_failed_dev)
1336{
1337 struct ata_eh_context *ehc = &ap->eh_context;
1338 struct ata_device *dev;
1339 int i, rc = 0;
1340
1341 DPRINTK("ENTER\n");
1342
1343 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1344 dev = &ap->device[i];
1345
1346 if (ehc->i.action & ATA_EH_REVALIDATE && ata_dev_enabled(dev) &&
1347 (!ehc->i.dev || ehc->i.dev == dev)) {
1348 if (ata_port_offline(ap)) {
1349 rc = -EIO;
1350 break;
1351 }
1352
1353 ata_eh_about_to_do(ap, ATA_EH_REVALIDATE);
1354 rc = ata_dev_revalidate(dev,
1355 ehc->i.flags & ATA_EHI_DID_RESET);
1356 if (rc)
1357 break;
1358
1359 ehc->i.action &= ~ATA_EH_REVALIDATE;
1360 }
1361 }
1362
1363 if (rc)
1364 *r_failed_dev = dev;
1365
1366 DPRINTK("EXIT\n");
1367 return rc;
1368}
1369
1370static int ata_port_nr_enabled(struct ata_port *ap)
1371{
1372 int i, cnt = 0;
1373
1374 for (i = 0; i < ATA_MAX_DEVICES; i++)
1375 if (ata_dev_enabled(&ap->device[i]))
1376 cnt++;
1377 return cnt;
1378}
1379
1380/**
1381 * ata_eh_recover - recover host port after error
1382 * @ap: host port to recover
1383 * @softreset: softreset method (can be NULL)
1384 * @hardreset: hardreset method (can be NULL)
1385 * @postreset: postreset method (can be NULL)
1386 *
1387 * This is the alpha and omega, yin and yang, heart and soul of
1388 * libata exception handling. On entry, actions required to
1389 * recover each device are recorded in eh_context. This
1390 * function executes all the operations with appropriate retries
1391 * and fallbacks to resurrect failed devices.
1392 *
1393 * LOCKING:
1394 * Kernel thread context (may sleep).
1395 *
1396 * RETURNS:
1397 * 0 on success, -errno on failure.
1398 */
1399static int ata_eh_recover(struct ata_port *ap, ata_reset_fn_t softreset,
1400 ata_reset_fn_t hardreset,
1401 ata_postreset_fn_t postreset)
1402{
1403 struct ata_eh_context *ehc = &ap->eh_context;
1404 struct ata_device *dev;
1405 int down_xfermask, i, rc;
1406
1407 DPRINTK("ENTER\n");
1408
1409 /* prep for recovery */
1410 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1411 dev = &ap->device[i];
1412
1413 ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
1414 }
1415
1416 retry:
1417 down_xfermask = 0;
1418 rc = 0;
1419
1420 /* skip EH if possible. */
1421 if (!ata_port_nr_enabled(ap) && !(ap->flags & ATA_FLAG_FROZEN))
1422 ehc->i.action = 0;
1423
1424 /* reset */
1425 if (ehc->i.action & ATA_EH_RESET_MASK) {
1426 ata_eh_freeze_port(ap);
1427
1428 rc = ata_eh_reset(ap, softreset, hardreset, postreset);
1429 if (rc) {
1430 ata_port_printk(ap, KERN_ERR,
1431 "reset failed, giving up\n");
1432 goto out;
1433 }
1434
1435 ata_eh_thaw_port(ap);
1436 }
1437
1438 /* revalidate existing devices */
1439 rc = ata_eh_revalidate(ap, &dev);
1440 if (rc)
1441 goto dev_fail;
1442
1443 /* configure transfer mode if the port has been reset */
1444 if (ehc->i.flags & ATA_EHI_DID_RESET) {
1445 rc = ata_set_mode(ap, &dev);
1446 if (rc) {
1447 down_xfermask = 1;
1448 goto dev_fail;
1449 }
1450 }
1451
1452 goto out;
1453
1454 dev_fail:
1455 switch (rc) {
1456 case -ENODEV:
1457 case -EINVAL:
1458 ehc->tries[dev->devno] = 0;
1459 break;
1460 case -EIO:
1461 sata_down_spd_limit(ap);
1462 default:
1463 ehc->tries[dev->devno]--;
1464 if (down_xfermask &&
1465 ata_down_xfermask_limit(dev, ehc->tries[dev->devno] == 1))
1466 ehc->tries[dev->devno] = 0;
1467 }
1468
1469 /* disable device if it has used up all its chances */
1470 if (ata_dev_enabled(dev) && !ehc->tries[dev->devno])
1471 ata_dev_disable(dev);
1472
1473 /* soft didn't work? be haaaaard */
1474 if (ehc->i.flags & ATA_EHI_DID_RESET)
1475 ehc->i.action |= ATA_EH_HARDRESET;
1476 else
1477 ehc->i.action |= ATA_EH_SOFTRESET;
1478
1479 if (ata_port_nr_enabled(ap)) {
1480 ata_port_printk(ap, KERN_WARNING, "failed to recover some "
1481 "devices, retrying in 5 secs\n");
1482 ssleep(5);
1483 } else {
1484 /* no device left, repeat fast */
1485 msleep(500);
1486 }
1487
1488 goto retry;
1489
1490 out:
1491 if (rc) {
1492 for (i = 0; i < ATA_MAX_DEVICES; i++)
1493 ata_dev_disable(&ap->device[i]);
1494 }
1495
1496 DPRINTK("EXIT, rc=%d\n", rc);
1497 return rc;
1498}
1499
1500/**
1501 * ata_eh_finish - finish up EH
1502 * @ap: host port to finish EH for
1503 *
1504 * Recovery is complete. Clean up EH states and retry or finish
1505 * failed qcs.
1506 *
1507 * LOCKING:
1508 * None.
1509 */
1510static void ata_eh_finish(struct ata_port *ap)
1511{
1512 int tag;
1513
1514 /* retry or finish qcs */
1515 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1516 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
1517
1518 if (!(qc->flags & ATA_QCFLAG_FAILED))
1519 continue;
1520
1521 if (qc->err_mask) {
1522 /* FIXME: Once EH migration is complete,
1523 * generate sense data in this function,
1524 * considering both err_mask and tf.
1525 */
1526 if (qc->err_mask & AC_ERR_INVALID)
1527 ata_eh_qc_complete(qc);
1528 else
1529 ata_eh_qc_retry(qc);
1530 } else {
1531 if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
1532 ata_eh_qc_complete(qc);
1533 } else {
1534 /* feed zero TF to sense generation */
1535 memset(&qc->result_tf, 0, sizeof(qc->result_tf));
1536 ata_eh_qc_retry(qc);
1537 }
1538 }
1539 }
1540}
1541
1542/**
1543 * ata_do_eh - do standard error handling
1544 * @ap: host port to handle error for
1545 * @softreset: softreset method (can be NULL)
1546 * @hardreset: hardreset method (can be NULL)
1547 * @postreset: postreset method (can be NULL)
1548 *
1549 * Perform standard error handling sequence.
1550 *
1551 * LOCKING:
1552 * Kernel thread context (may sleep).
1553 */
1554void ata_do_eh(struct ata_port *ap, ata_reset_fn_t softreset,
1555 ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
1556{
1557 ata_eh_autopsy(ap);
1558 ata_eh_report(ap);
1559 ata_eh_recover(ap, softreset, hardreset, postreset);
1560 ata_eh_finish(ap);
1561}
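/*
 * [Editor's sketch, not part of the patch] The intended driver entry
 * point: an LLDD plugs its reset methods into the standard
 * autopsy -> report -> recover -> finish sequence above.  The my_*
 * methods are assumed to match ata_reset_fn_t / ata_postreset_fn_t.
 */
static void my_error_handler(struct ata_port *ap)
{
	ata_do_eh(ap, my_softreset, my_hardreset, my_postreset);
}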
diff --git a/drivers/scsi/libata-scsi.c b/drivers/scsi/libata-scsi.c
index a0289ec3e283..9e5cb9f748e6 100644
--- a/drivers/scsi/libata-scsi.c
+++ b/drivers/scsi/libata-scsi.c
@@ -41,6 +41,7 @@
41#include <scsi/scsi_eh.h> 41#include <scsi/scsi_eh.h>
42#include <scsi/scsi_device.h> 42#include <scsi/scsi_device.h>
43#include <scsi/scsi_request.h> 43#include <scsi/scsi_request.h>
44#include <scsi/scsi_tcq.h>
44#include <scsi/scsi_transport.h> 45#include <scsi/scsi_transport.h>
45#include <linux/libata.h> 46#include <linux/libata.h>
46#include <linux/hdreg.h> 47#include <linux/hdreg.h>
@@ -53,8 +54,6 @@
53typedef unsigned int (*ata_xlat_func_t)(struct ata_queued_cmd *qc, const u8 *scsicmd); 54typedef unsigned int (*ata_xlat_func_t)(struct ata_queued_cmd *qc, const u8 *scsicmd);
54static struct ata_device * 55static struct ata_device *
55ata_scsi_find_dev(struct ata_port *ap, const struct scsi_device *scsidev); 56ata_scsi_find_dev(struct ata_port *ap, const struct scsi_device *scsidev);
56static void ata_scsi_error(struct Scsi_Host *host);
57enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd);
58 57
59#define RW_RECOVERY_MPAGE 0x1 58#define RW_RECOVERY_MPAGE 0x1
60#define RW_RECOVERY_MPAGE_LEN 12 59#define RW_RECOVERY_MPAGE_LEN 12
@@ -304,7 +303,6 @@ int ata_scsi_ioctl(struct scsi_device *scsidev, int cmd, void __user *arg)
304 303
305/** 304/**
306 * ata_scsi_qc_new - acquire new ata_queued_cmd reference 305 * ata_scsi_qc_new - acquire new ata_queued_cmd reference
307 * @ap: ATA port to which the new command is attached
308 * @dev: ATA device to which the new command is attached 306 * @dev: ATA device to which the new command is attached
309 * @cmd: SCSI command that originated this ATA command 307 * @cmd: SCSI command that originated this ATA command
310 * @done: SCSI command completion function 308 * @done: SCSI command completion function
@@ -323,14 +321,13 @@ int ata_scsi_ioctl(struct scsi_device *scsidev, int cmd, void __user *arg)
323 * RETURNS: 321 * RETURNS:
324 * Command allocated, or %NULL if none available. 322 * Command allocated, or %NULL if none available.
325 */ 323 */
326struct ata_queued_cmd *ata_scsi_qc_new(struct ata_port *ap, 324struct ata_queued_cmd *ata_scsi_qc_new(struct ata_device *dev,
327 struct ata_device *dev,
328 struct scsi_cmnd *cmd, 325 struct scsi_cmnd *cmd,
329 void (*done)(struct scsi_cmnd *)) 326 void (*done)(struct scsi_cmnd *))
330{ 327{
331 struct ata_queued_cmd *qc; 328 struct ata_queued_cmd *qc;
332 329
333 qc = ata_qc_new_init(ap, dev); 330 qc = ata_qc_new_init(dev);
334 if (qc) { 331 if (qc) {
335 qc->scsicmd = cmd; 332 qc->scsicmd = cmd;
336 qc->scsidone = done; 333 qc->scsidone = done;
@@ -397,18 +394,18 @@ void ata_dump_status(unsigned id, struct ata_taskfile *tf)
397 394
398int ata_scsi_device_resume(struct scsi_device *sdev) 395int ata_scsi_device_resume(struct scsi_device *sdev)
399{ 396{
400 struct ata_port *ap = (struct ata_port *) &sdev->host->hostdata[0]; 397 struct ata_port *ap = ata_shost_to_port(sdev->host);
401 struct ata_device *dev = &ap->device[sdev->id]; 398 struct ata_device *dev = &ap->device[sdev->id];
402 399
403 return ata_device_resume(ap, dev); 400 return ata_device_resume(dev);
404} 401}
405 402
406int ata_scsi_device_suspend(struct scsi_device *sdev, pm_message_t state) 403int ata_scsi_device_suspend(struct scsi_device *sdev, pm_message_t state)
407{ 404{
408 struct ata_port *ap = (struct ata_port *) &sdev->host->hostdata[0]; 405 struct ata_port *ap = ata_shost_to_port(sdev->host);
409 struct ata_device *dev = &ap->device[sdev->id]; 406 struct ata_device *dev = &ap->device[sdev->id];
410 407
411 return ata_device_suspend(ap, dev, state); 408 return ata_device_suspend(dev, state);
412} 409}
413 410
414/** 411/**
@@ -419,6 +416,7 @@ int ata_scsi_device_suspend(struct scsi_device *sdev, pm_message_t state)
419 * @sk: the sense key we'll fill out 416 * @sk: the sense key we'll fill out
420 * @asc: the additional sense code we'll fill out 417 * @asc: the additional sense code we'll fill out
421 * @ascq: the additional sense code qualifier we'll fill out 418 * @ascq: the additional sense code qualifier we'll fill out
419 * @verbose: be verbose
422 * 420 *
423 * Converts an ATA error into a SCSI error. Fill out pointers to 421 * Converts an ATA error into a SCSI error. Fill out pointers to
424 * SK, ASC, and ASCQ bytes for later use in fixed or descriptor 422 * SK, ASC, and ASCQ bytes for later use in fixed or descriptor
@@ -428,7 +426,7 @@ int ata_scsi_device_suspend(struct scsi_device *sdev, pm_message_t state)
428 * spin_lock_irqsave(host_set lock) 426 * spin_lock_irqsave(host_set lock)
429 */ 427 */
430void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk, u8 *asc, 428void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk, u8 *asc,
431 u8 *ascq) 429 u8 *ascq, int verbose)
432{ 430{
433 int i; 431 int i;
434 432
@@ -493,8 +491,9 @@ void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk, u8 *asc,
493 } 491 }
494 } 492 }
495 /* No immediate match */ 493 /* No immediate match */
496 printk(KERN_WARNING "ata%u: no sense translation for " 494 if (verbose)
497 "error 0x%02x\n", id, drv_err); 495 printk(KERN_WARNING "ata%u: no sense translation for "
496 "error 0x%02x\n", id, drv_err);
498 } 497 }
499 498
500 /* Fall back to interpreting status bits */ 499 /* Fall back to interpreting status bits */
@@ -507,8 +506,9 @@ void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk, u8 *asc,
507 } 506 }
508 } 507 }
509 /* No error? Undecoded? */ 508 /* No error? Undecoded? */
510 printk(KERN_WARNING "ata%u: no sense translation for status: 0x%02x\n", 509 if (verbose)
511 id, drv_stat); 510 printk(KERN_WARNING "ata%u: no sense translation for "
511 "status: 0x%02x\n", id, drv_stat);
512 512
513 /* We need a sensible error return here, which is tricky, and one 513 /* We need a sensible error return here, which is tricky, and one
514 that won't cause people to do things like return a disk wrongly */ 514 that won't cause people to do things like return a disk wrongly */
@@ -517,9 +517,10 @@ void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk, u8 *asc,
517 *ascq = 0x00; 517 *ascq = 0x00;
518 518
519 translate_done: 519 translate_done:
520 printk(KERN_ERR "ata%u: translated ATA stat/err 0x%02x/%02x to " 520 if (verbose)
521 "SCSI SK/ASC/ASCQ 0x%x/%02x/%02x\n", id, drv_stat, drv_err, 521 printk(KERN_ERR "ata%u: translated ATA stat/err 0x%02x/%02x "
522 *sk, *asc, *ascq); 522 "to SCSI SK/ASC/ASCQ 0x%x/%02x/%02x\n",
523 id, drv_stat, drv_err, *sk, *asc, *ascq);
523 return; 524 return;
524} 525}
525 526
@@ -539,27 +540,23 @@ void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk, u8 *asc,
539void ata_gen_ata_desc_sense(struct ata_queued_cmd *qc) 540void ata_gen_ata_desc_sense(struct ata_queued_cmd *qc)
540{ 541{
541 struct scsi_cmnd *cmd = qc->scsicmd; 542 struct scsi_cmnd *cmd = qc->scsicmd;
542 struct ata_taskfile *tf = &qc->tf; 543 struct ata_taskfile *tf = &qc->result_tf;
543 unsigned char *sb = cmd->sense_buffer; 544 unsigned char *sb = cmd->sense_buffer;
544 unsigned char *desc = sb + 8; 545 unsigned char *desc = sb + 8;
546 int verbose = qc->ap->ops->error_handler == NULL;
545 547
546 memset(sb, 0, SCSI_SENSE_BUFFERSIZE); 548 memset(sb, 0, SCSI_SENSE_BUFFERSIZE);
547 549
548 cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION; 550 cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
549 551
550 /* 552 /*
551 * Read the controller registers.
552 */
553 WARN_ON(qc->ap->ops->tf_read == NULL);
554 qc->ap->ops->tf_read(qc->ap, tf);
555
556 /*
557 * Use ata_to_sense_error() to map status register bits 553 * Use ata_to_sense_error() to map status register bits
558 * onto sense key, asc & ascq. 554 * onto sense key, asc & ascq.
559 */ 555 */
560 if (tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) { 556 if (qc->err_mask ||
557 tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
561 ata_to_sense_error(qc->ap->id, tf->command, tf->feature, 558 ata_to_sense_error(qc->ap->id, tf->command, tf->feature,
562 &sb[1], &sb[2], &sb[3]); 559 &sb[1], &sb[2], &sb[3], verbose);
563 sb[1] &= 0x0f; 560 sb[1] &= 0x0f;
564 } 561 }
565 562
@@ -615,26 +612,22 @@ void ata_gen_ata_desc_sense(struct ata_queued_cmd *qc)
615void ata_gen_fixed_sense(struct ata_queued_cmd *qc) 612void ata_gen_fixed_sense(struct ata_queued_cmd *qc)
616{ 613{
617 struct scsi_cmnd *cmd = qc->scsicmd; 614 struct scsi_cmnd *cmd = qc->scsicmd;
618 struct ata_taskfile *tf = &qc->tf; 615 struct ata_taskfile *tf = &qc->result_tf;
619 unsigned char *sb = cmd->sense_buffer; 616 unsigned char *sb = cmd->sense_buffer;
617 int verbose = qc->ap->ops->error_handler == NULL;
620 618
621 memset(sb, 0, SCSI_SENSE_BUFFERSIZE); 619 memset(sb, 0, SCSI_SENSE_BUFFERSIZE);
622 620
623 cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION; 621 cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
624 622
625 /* 623 /*
626 * Read the controller registers.
627 */
628 WARN_ON(qc->ap->ops->tf_read == NULL);
629 qc->ap->ops->tf_read(qc->ap, tf);
630
631 /*
632 * Use ata_to_sense_error() to map status register bits 624 * Use ata_to_sense_error() to map status register bits
633 * onto sense key, asc & ascq. 625 * onto sense key, asc & ascq.
634 */ 626 */
635 if (tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) { 627 if (qc->err_mask ||
628 tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
636 ata_to_sense_error(qc->ap->id, tf->command, tf->feature, 629 ata_to_sense_error(qc->ap->id, tf->command, tf->feature,
637 &sb[2], &sb[12], &sb[13]); 630 &sb[2], &sb[12], &sb[13], verbose);
638 sb[2] &= 0x0f; 631 sb[2] &= 0x0f;
639 } 632 }
640 633
@@ -677,7 +670,7 @@ static void ata_scsi_dev_config(struct scsi_device *sdev,
677 */ 670 */
678 max_sectors = ATA_MAX_SECTORS; 671 max_sectors = ATA_MAX_SECTORS;
679 if (dev->flags & ATA_DFLAG_LBA48) 672 if (dev->flags & ATA_DFLAG_LBA48)
680 max_sectors = 2048; 673 max_sectors = ATA_MAX_SECTORS_LBA48;
681 if (dev->max_sectors) 674 if (dev->max_sectors)
682 max_sectors = dev->max_sectors; 675 max_sectors = dev->max_sectors;
683 676
@@ -692,6 +685,14 @@ static void ata_scsi_dev_config(struct scsi_device *sdev,
692 request_queue_t *q = sdev->request_queue; 685 request_queue_t *q = sdev->request_queue;
693 blk_queue_max_hw_segments(q, q->max_hw_segments - 1); 686 blk_queue_max_hw_segments(q, q->max_hw_segments - 1);
694 } 687 }
688
689 if (dev->flags & ATA_DFLAG_NCQ) {
690 int depth;
691
692 depth = min(sdev->host->can_queue, ata_id_queue_depth(dev->id));
693 depth = min(ATA_MAX_QUEUE - 1, depth);
694 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, depth);
695 }
695} 696}
696 697
697/** 698/**
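
Note: the new NCQ branch in ata_scsi_dev_config() clamps the depth twice, once against what the SCSI host can queue and once against libata's tag space. Worked example with hypothetical values, assuming ATA_MAX_QUEUE is 32:

	/* sdev->host->can_queue = 31, IDENTIFY word 75 reports depth 32 */
	depth = min(31, 32);		/* -> 31 */
	depth = min(32 - 1, 31);	/* -> 31, one tag always held back */
	scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, 31);
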
@@ -716,7 +717,7 @@ int ata_scsi_slave_config(struct scsi_device *sdev)
716 struct ata_port *ap; 717 struct ata_port *ap;
717 struct ata_device *dev; 718 struct ata_device *dev;
718 719
719 ap = (struct ata_port *) &sdev->host->hostdata[0]; 720 ap = ata_shost_to_port(sdev->host);
720 dev = &ap->device[sdev->id]; 721 dev = &ap->device[sdev->id];
721 722
722 ata_scsi_dev_config(sdev, dev); 723 ata_scsi_dev_config(sdev, dev);
@@ -726,134 +727,40 @@ int ata_scsi_slave_config(struct scsi_device *sdev)
726} 727}
727 728
728/** 729/**
729 * ata_scsi_timed_out - SCSI layer time out callback 730 * ata_scsi_change_queue_depth - SCSI callback for queue depth config
730 * @cmd: timed out SCSI command 731 * @sdev: SCSI device to configure queue depth for
732 * @queue_depth: new queue depth
731 * 733 *
732 * Handles SCSI layer timeout. We race with normal completion of 734 * This is libata standard hostt->change_queue_depth callback.
733 * the qc for @cmd. If the qc is already gone, we lose and let 735 * SCSI will call into this callback when a user tries to set queue

734 * the scsi command finish (EH_HANDLED). Otherwise, the qc has 736 * depth via sysfs.
735 * timed out and EH should be invoked. Prevent ata_qc_complete()
736 * from finishing it by setting EH_SCHEDULED and return
737 * EH_NOT_HANDLED.
738 * 737 *
739 * LOCKING: 738 * LOCKING:
740 * Called from timer context 739 * SCSI layer (we don't care)
741 * 740 *
742 * RETURNS: 741 * RETURNS:
743 * EH_HANDLED or EH_NOT_HANDLED 742 * Newly configured queue depth.
744 */ 743 */
745enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd) 744int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth)
746{
747 struct Scsi_Host *host = cmd->device->host;
748 struct ata_port *ap = (struct ata_port *) &host->hostdata[0];
749 unsigned long flags;
750 struct ata_queued_cmd *qc;
751 enum scsi_eh_timer_return ret = EH_HANDLED;
752
753 DPRINTK("ENTER\n");
754
755 spin_lock_irqsave(&ap->host_set->lock, flags);
756 qc = ata_qc_from_tag(ap, ap->active_tag);
757 if (qc) {
758 WARN_ON(qc->scsicmd != cmd);
759 qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
760 qc->err_mask |= AC_ERR_TIMEOUT;
761 ret = EH_NOT_HANDLED;
762 }
763 spin_unlock_irqrestore(&ap->host_set->lock, flags);
764
765 DPRINTK("EXIT, ret=%d\n", ret);
766 return ret;
767}
768
769/**
770 * ata_scsi_error - SCSI layer error handler callback
771 * @host: SCSI host on which error occurred
772 *
773 * Handles SCSI-layer-thrown error events.
774 *
775 * LOCKING:
776 * Inherited from SCSI layer (none, can sleep)
777 */
778
779static void ata_scsi_error(struct Scsi_Host *host)
780{
781 struct ata_port *ap;
782 unsigned long flags;
783
784 DPRINTK("ENTER\n");
785
786 ap = (struct ata_port *) &host->hostdata[0];
787
788 spin_lock_irqsave(&ap->host_set->lock, flags);
789 WARN_ON(ap->flags & ATA_FLAG_IN_EH);
790 ap->flags |= ATA_FLAG_IN_EH;
791 WARN_ON(ata_qc_from_tag(ap, ap->active_tag) == NULL);
792 spin_unlock_irqrestore(&ap->host_set->lock, flags);
793
794 ata_port_flush_task(ap);
795
796 ap->ops->eng_timeout(ap);
797
798 WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q));
799
800 scsi_eh_flush_done_q(&ap->eh_done_q);
801
802 spin_lock_irqsave(&ap->host_set->lock, flags);
803 ap->flags &= ~ATA_FLAG_IN_EH;
804 spin_unlock_irqrestore(&ap->host_set->lock, flags);
805
806 DPRINTK("EXIT\n");
807}
808
809static void ata_eh_scsidone(struct scsi_cmnd *scmd)
810{ 745{
811 /* nada */ 746 struct ata_port *ap = ata_shost_to_port(sdev->host);
812} 747 struct ata_device *dev;
748 int max_depth;
813 749
814static void __ata_eh_qc_complete(struct ata_queued_cmd *qc) 750 if (queue_depth < 1)
815{ 751 return sdev->queue_depth;
816 struct ata_port *ap = qc->ap;
817 struct scsi_cmnd *scmd = qc->scsicmd;
818 unsigned long flags;
819 752
820 spin_lock_irqsave(&ap->host_set->lock, flags); 753 dev = ata_scsi_find_dev(ap, sdev);
821 qc->scsidone = ata_eh_scsidone; 754 if (!dev || !ata_dev_enabled(dev))
822 __ata_qc_complete(qc); 755 return sdev->queue_depth;
823 WARN_ON(ata_tag_valid(qc->tag));
824 spin_unlock_irqrestore(&ap->host_set->lock, flags);
825 756
826 scsi_eh_finish_cmd(scmd, &ap->eh_done_q); 757 max_depth = min(sdev->host->can_queue, ata_id_queue_depth(dev->id));
827} 758 max_depth = min(ATA_MAX_QUEUE - 1, max_depth);
759 if (queue_depth > max_depth)
760 queue_depth = max_depth;
828 761
829/** 762 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, queue_depth);
830 * ata_eh_qc_complete - Complete an active ATA command from EH 763 return queue_depth;
831 * @qc: Command to complete
832 *
833 * Indicate to the mid and upper layers that an ATA command has
834 * completed. To be used from EH.
835 */
836void ata_eh_qc_complete(struct ata_queued_cmd *qc)
837{
838 struct scsi_cmnd *scmd = qc->scsicmd;
839 scmd->retries = scmd->allowed;
840 __ata_eh_qc_complete(qc);
841}
842
843/**
844 * ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
845 * @qc: Command to retry
846 *
847 * Indicate to the mid and upper layers that an ATA command
848 * should be retried. To be used from EH.
849 *
850 * SCSI midlayer limits the number of retries to scmd->allowed.
851 * This function might need to adjust scmd->retries for commands
852 * which get retried due to unrelated NCQ failures.
853 */
854void ata_eh_qc_retry(struct ata_queued_cmd *qc)
855{
856 __ata_eh_qc_complete(qc);
857} 764}
858 765
859/** 766/**
@@ -891,7 +798,7 @@ static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc,
891 tf->nsect = 1; /* 1 sector, lba=0 */ 798 tf->nsect = 1; /* 1 sector, lba=0 */
892 799
893 if (qc->dev->flags & ATA_DFLAG_LBA) { 800 if (qc->dev->flags & ATA_DFLAG_LBA) {
894 qc->tf.flags |= ATA_TFLAG_LBA; 801 tf->flags |= ATA_TFLAG_LBA;
895 802
896 tf->lbah = 0x0; 803 tf->lbah = 0x0;
897 tf->lbam = 0x0; 804 tf->lbam = 0x0;
@@ -1195,6 +1102,7 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, const u8 *scsicm
1195 u64 block; 1102 u64 block;
1196 u32 n_block; 1103 u32 n_block;
1197 1104
1105 qc->flags |= ATA_QCFLAG_IO;
1198 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 1106 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1199 1107
1200 if (scsicmd[0] == WRITE_10 || scsicmd[0] == WRITE_6 || 1108 if (scsicmd[0] == WRITE_10 || scsicmd[0] == WRITE_6 ||
@@ -1241,7 +1149,36 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, const u8 *scsicm
1241 */ 1149 */
1242 goto nothing_to_do; 1150 goto nothing_to_do;
1243 1151
1244 if (dev->flags & ATA_DFLAG_LBA) { 1152 if ((dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ)) == ATA_DFLAG_NCQ) {
1153 /* yay, NCQ */
1154 if (!lba_48_ok(block, n_block))
1155 goto out_of_range;
1156
1157 tf->protocol = ATA_PROT_NCQ;
1158 tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
1159
1160 if (tf->flags & ATA_TFLAG_WRITE)
1161 tf->command = ATA_CMD_FPDMA_WRITE;
1162 else
1163 tf->command = ATA_CMD_FPDMA_READ;
1164
1165 qc->nsect = n_block;
1166
1167 tf->nsect = qc->tag << 3;
1168 tf->hob_feature = (n_block >> 8) & 0xff;
1169 tf->feature = n_block & 0xff;
1170
1171 tf->hob_lbah = (block >> 40) & 0xff;
1172 tf->hob_lbam = (block >> 32) & 0xff;
1173 tf->hob_lbal = (block >> 24) & 0xff;
1174 tf->lbah = (block >> 16) & 0xff;
1175 tf->lbam = (block >> 8) & 0xff;
1176 tf->lbal = block & 0xff;
1177
1178 tf->device = 1 << 6;
1179 if (tf->flags & ATA_TFLAG_FUA)
1180 tf->device |= 1 << 7;
1181 } else if (dev->flags & ATA_DFLAG_LBA) {
1245 tf->flags |= ATA_TFLAG_LBA; 1182 tf->flags |= ATA_TFLAG_LBA;
1246 1183
1247 if (lba_28_ok(block, n_block)) { 1184 if (lba_28_ok(block, n_block)) {
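
Note: the FPDMA (NCQ) taskfile built above repurposes registers: the command tag rides in SECTOR COUNT bits 7:3 and the transfer length moves into the FEATURE pair. Worked example with hypothetical values tag = 5, n_block = 256, block = 0x123456789A:

	/* tf->nsect   = 5 << 3 = 0x28             (tag in bits 7:3)
	 * tf->feature = 0x00, hob_feature = 0x01  (256 = 0x0100)
	 * LBA bytes low->high: 9A 78 56 34 12 00
	 * tf->device  = 1 << 6; bit 7 added only for FUA writes */
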
@@ -1356,10 +1293,8 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
1356 } 1293 }
1357 } 1294 }
1358 1295
1359 if (need_sense) { 1296 if (need_sense && !qc->ap->ops->error_handler)
1360 /* The ata_gen_..._sense routines fill in tf */ 1297 ata_dump_status(qc->ap->id, &qc->result_tf);
1361 ata_dump_status(qc->ap->id, &qc->tf);
1362 }
1363 1298
1364 qc->scsidone(cmd); 1299 qc->scsidone(cmd);
1365 1300
@@ -1367,8 +1302,40 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
1367} 1302}
1368 1303
1369/** 1304/**
1305 * ata_scmd_need_defer - Check whether we need to defer scmd
1306 * @dev: ATA device to which the command is addressed
1307 * @is_io: Is the command IO (and thus possibly NCQ)?
1308 *
1309 * NCQ and non-NCQ commands cannot run together. As the upper layer
1310 * only knows the queue depth, we are responsible for maintaining
1311 * exclusion. This function checks whether a new command can be
1312 * issued to @dev.
1313 *
1314 * LOCKING:
1315 * spin_lock_irqsave(host_set lock)
1316 *
1317 * RETURNS:
1318 * 1 if deferring is needed, 0 otherwise.
1319 */
1320static int ata_scmd_need_defer(struct ata_device *dev, int is_io)
1321{
1322 struct ata_port *ap = dev->ap;
1323
1324 if (!(dev->flags & ATA_DFLAG_NCQ))
1325 return 0;
1326
1327 if (is_io) {
1328 if (!ata_tag_valid(ap->active_tag))
1329 return 0;
1330 } else {
1331 if (!ata_tag_valid(ap->active_tag) && !ap->sactive)
1332 return 0;
1333 }
1334 return 1;
1335}
1336
1337/**
1370 * ata_scsi_translate - Translate then issue SCSI command to ATA device 1338 * ata_scsi_translate - Translate then issue SCSI command to ATA device
1371 * @ap: ATA port to which the command is addressed
1372 * @dev: ATA device to which the command is addressed 1339 * @dev: ATA device to which the command is addressed
1373 * @cmd: SCSI command to execute 1340 * @cmd: SCSI command to execute
1374 * @done: SCSI command completion function 1341 * @done: SCSI command completion function
@@ -1389,19 +1356,25 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
1389 * 1356 *
1390 * LOCKING: 1357 * LOCKING:
1391 * spin_lock_irqsave(host_set lock) 1358 * spin_lock_irqsave(host_set lock)
1359 *
1360 * RETURNS:
1361 * 0 on success, SCSI_MLQUEUE_DEVICE_BUSY if the command
1362 * needs to be deferred.
1392 */ 1363 */
1393 1364static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd,
1394static void ata_scsi_translate(struct ata_port *ap, struct ata_device *dev,
1395 struct scsi_cmnd *cmd,
1396 void (*done)(struct scsi_cmnd *), 1365 void (*done)(struct scsi_cmnd *),
1397 ata_xlat_func_t xlat_func) 1366 ata_xlat_func_t xlat_func)
1398{ 1367{
1399 struct ata_queued_cmd *qc; 1368 struct ata_queued_cmd *qc;
1400 u8 *scsicmd = cmd->cmnd; 1369 u8 *scsicmd = cmd->cmnd;
1370 int is_io = xlat_func == ata_scsi_rw_xlat;
1401 1371
1402 VPRINTK("ENTER\n"); 1372 VPRINTK("ENTER\n");
1403 1373
1404 qc = ata_scsi_qc_new(ap, dev, cmd, done); 1374 if (unlikely(ata_scmd_need_defer(dev, is_io)))
1375 goto defer;
1376
1377 qc = ata_scsi_qc_new(dev, cmd, done);
1405 if (!qc) 1378 if (!qc)
1406 goto err_mem; 1379 goto err_mem;
1407 1380
@@ -1409,8 +1382,8 @@ static void ata_scsi_translate(struct ata_port *ap, struct ata_device *dev,
1409 if (cmd->sc_data_direction == DMA_FROM_DEVICE || 1382 if (cmd->sc_data_direction == DMA_FROM_DEVICE ||
1410 cmd->sc_data_direction == DMA_TO_DEVICE) { 1383 cmd->sc_data_direction == DMA_TO_DEVICE) {
1411 if (unlikely(cmd->request_bufflen < 1)) { 1384 if (unlikely(cmd->request_bufflen < 1)) {
1412 printk(KERN_WARNING "ata%u(%u): WARNING: zero len r/w req\n", 1385 ata_dev_printk(dev, KERN_WARNING,
1413 ap->id, dev->devno); 1386 "WARNING: zero len r/w req\n");
1414 goto err_did; 1387 goto err_did;
1415 } 1388 }
1416 1389
@@ -1432,13 +1405,13 @@ static void ata_scsi_translate(struct ata_port *ap, struct ata_device *dev,
1432 ata_qc_issue(qc); 1405 ata_qc_issue(qc);
1433 1406
1434 VPRINTK("EXIT\n"); 1407 VPRINTK("EXIT\n");
1435 return; 1408 return 0;
1436 1409
1437early_finish: 1410early_finish:
1438 ata_qc_free(qc); 1411 ata_qc_free(qc);
1439 done(cmd); 1412 done(cmd);
1440 DPRINTK("EXIT - early finish (good or error)\n"); 1413 DPRINTK("EXIT - early finish (good or error)\n");
1441 return; 1414 return 0;
1442 1415
1443err_did: 1416err_did:
1444 ata_qc_free(qc); 1417 ata_qc_free(qc);
@@ -1446,7 +1419,11 @@ err_mem:
1446 cmd->result = (DID_ERROR << 16); 1419 cmd->result = (DID_ERROR << 16);
1447 done(cmd); 1420 done(cmd);
1448 DPRINTK("EXIT - internal\n"); 1421 DPRINTK("EXIT - internal\n");
1449 return; 1422 return 0;
1423
1424defer:
1425 DPRINTK("EXIT - defer\n");
1426 return SCSI_MLQUEUE_DEVICE_BUSY;
1450} 1427}
1451 1428
1452/** 1429/**
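
Note: ata_scsi_translate() now returns a value instead of queueing unconditionally; ata_scmd_need_defer() keeps NCQ and non-NCQ traffic mutually exclusive. Condensed from the code above:

	/* on an NCQ-capable device:
	 *   new cmd is IO (may go NCQ): defer while a non-NCQ command
	 *     is active (ap->active_tag valid);
	 *   new cmd is non-IO: defer while anything is in flight
	 *     (active_tag valid or ap->sactive != 0).
	 * Deferral returns SCSI_MLQUEUE_DEVICE_BUSY so the midlayer
	 * requeues; no qc is allocated on that path. */
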
@@ -1944,7 +1921,7 @@ unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf,
1944 return 0; 1921 return 0;
1945 1922
1946 dpofua = 0; 1923 dpofua = 0;
1947 if (ata_dev_supports_fua(args->id) && dev->flags & ATA_DFLAG_LBA48 && 1924 if (ata_dev_supports_fua(args->id) && (dev->flags & ATA_DFLAG_LBA48) &&
1948 (!(dev->flags & ATA_DFLAG_PIO) || dev->multi_count)) 1925 (!(dev->flags & ATA_DFLAG_PIO) || dev->multi_count))
1949 dpofua = 1 << 4; 1926 dpofua = 1 << 4;
1950 1927
@@ -2137,13 +2114,14 @@ void ata_scsi_badcmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *), u8
2137 2114
2138static void atapi_sense_complete(struct ata_queued_cmd *qc) 2115static void atapi_sense_complete(struct ata_queued_cmd *qc)
2139{ 2116{
2140 if (qc->err_mask && ((qc->err_mask & AC_ERR_DEV) == 0)) 2117 if (qc->err_mask && ((qc->err_mask & AC_ERR_DEV) == 0)) {
2141 /* FIXME: not quite right; we don't want the 2118 /* FIXME: not quite right; we don't want the
2142 * translation of taskfile registers into 2119 * translation of taskfile registers into
2143 * sense descriptors, since that's only 2120 * sense descriptors, since that's only
2144 * correct for ATA, not ATAPI 2121 * correct for ATA, not ATAPI
2145 */ 2122 */
2146 ata_gen_ata_desc_sense(qc); 2123 ata_gen_ata_desc_sense(qc);
2124 }
2147 2125
2148 qc->scsidone(qc->scsicmd); 2126 qc->scsidone(qc->scsicmd);
2149 ata_qc_free(qc); 2127 ata_qc_free(qc);
@@ -2207,21 +2185,38 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc)
2207 2185
2208 VPRINTK("ENTER, err_mask 0x%X\n", err_mask); 2186 VPRINTK("ENTER, err_mask 0x%X\n", err_mask);
2209 2187
2188 /* handle completion from new EH */
2189 if (unlikely(qc->ap->ops->error_handler &&
2190 (err_mask || qc->flags & ATA_QCFLAG_SENSE_VALID))) {
2191
2192 if (!(qc->flags & ATA_QCFLAG_SENSE_VALID)) {
2193 /* FIXME: not quite right; we don't want the
2194 * translation of taskfile registers into
2195 * sense descriptors, since that's only
2196 * correct for ATA, not ATAPI
2197 */
2198 ata_gen_ata_desc_sense(qc);
2199 }
2200
2201 qc->scsicmd->result = SAM_STAT_CHECK_CONDITION;
2202 qc->scsidone(cmd);
2203 ata_qc_free(qc);
2204 return;
2205 }
2206
2207 /* successful completion or old EH failure path */
2210 if (unlikely(err_mask & AC_ERR_DEV)) { 2208 if (unlikely(err_mask & AC_ERR_DEV)) {
2211 cmd->result = SAM_STAT_CHECK_CONDITION; 2209 cmd->result = SAM_STAT_CHECK_CONDITION;
2212 atapi_request_sense(qc); 2210 atapi_request_sense(qc);
2213 return; 2211 return;
2214 } 2212 } else if (unlikely(err_mask)) {
2215
2216 else if (unlikely(err_mask))
2217 /* FIXME: not quite right; we don't want the 2213 /* FIXME: not quite right; we don't want the
2218 * translation of taskfile registers into 2214 * translation of taskfile registers into
2219 * sense descriptors, since that's only 2215 * sense descriptors, since that's only
2220 * correct for ATA, not ATAPI 2216 * correct for ATA, not ATAPI
2221 */ 2217 */
2222 ata_gen_ata_desc_sense(qc); 2218 ata_gen_ata_desc_sense(qc);
2223 2219 } else {
2224 else {
2225 u8 *scsicmd = cmd->cmnd; 2220 u8 *scsicmd = cmd->cmnd;
2226 2221
2227 if ((scsicmd[0] == INQUIRY) && ((scsicmd[1] & 0x03) == 0)) { 2222 if ((scsicmd[0] == INQUIRY) && ((scsicmd[1] & 0x03) == 0)) {
@@ -2303,11 +2298,9 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc, const u8 *scsicmd)
2303 qc->tf.protocol = ATA_PROT_ATAPI_DMA; 2298 qc->tf.protocol = ATA_PROT_ATAPI_DMA;
2304 qc->tf.feature |= ATAPI_PKT_DMA; 2299 qc->tf.feature |= ATAPI_PKT_DMA;
2305 2300
2306#ifdef ATAPI_ENABLE_DMADIR 2301 if (atapi_dmadir && (cmd->sc_data_direction != DMA_TO_DEVICE))
2307 /* some SATA bridges need us to indicate data xfer direction */ 2302 /* some SATA bridges need us to indicate data xfer direction */
2308 if (cmd->sc_data_direction != DMA_TO_DEVICE)
2309 qc->tf.feature |= ATAPI_DMADIR; 2303 qc->tf.feature |= ATAPI_DMADIR;
2310#endif
2311 } 2304 }
2312 2305
2313 qc->nbytes = cmd->bufflen; 2306 qc->nbytes = cmd->bufflen;
@@ -2347,13 +2340,14 @@ ata_scsi_find_dev(struct ata_port *ap, const struct scsi_device *scsidev)
2347 (scsidev->lun != 0))) 2340 (scsidev->lun != 0)))
2348 return NULL; 2341 return NULL;
2349 2342
2350 if (unlikely(!ata_dev_present(dev))) 2343 if (unlikely(!ata_dev_enabled(dev)))
2351 return NULL; 2344 return NULL;
2352 2345
2353 if (!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) { 2346 if (!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) {
2354 if (unlikely(dev->class == ATA_DEV_ATAPI)) { 2347 if (unlikely(dev->class == ATA_DEV_ATAPI)) {
2355 printk(KERN_WARNING "ata%u(%u): WARNING: ATAPI is %s, device ignored.\n", 2348 ata_dev_printk(dev, KERN_WARNING,
2356 ap->id, dev->devno, atapi_enabled ? "not supported with this driver" : "disabled"); 2349 "WARNING: ATAPI is %s, device ignored.\n",
2350 atapi_enabled ? "not supported with this driver" : "disabled");
2357 return NULL; 2351 return NULL;
2358 } 2352 }
2359 } 2353 }
@@ -2414,10 +2408,15 @@ ata_scsi_pass_thru(struct ata_queued_cmd *qc, const u8 *scsicmd)
2414{ 2408{
2415 struct ata_taskfile *tf = &(qc->tf); 2409 struct ata_taskfile *tf = &(qc->tf);
2416 struct scsi_cmnd *cmd = qc->scsicmd; 2410 struct scsi_cmnd *cmd = qc->scsicmd;
2411 struct ata_device *dev = qc->dev;
2417 2412
2418 if ((tf->protocol = ata_scsi_map_proto(scsicmd[1])) == ATA_PROT_UNKNOWN) 2413 if ((tf->protocol = ata_scsi_map_proto(scsicmd[1])) == ATA_PROT_UNKNOWN)
2419 goto invalid_fld; 2414 goto invalid_fld;
2420 2415
2416 /* We may not issue DMA commands if no DMA mode is set */
2417 if (tf->protocol == ATA_PROT_DMA && dev->dma_mode == 0)
2418 goto invalid_fld;
2419
2421 if (scsicmd[1] & 0xe0) 2420 if (scsicmd[1] & 0xe0)
2422 /* PIO multi not supported yet */ 2421 /* PIO multi not supported yet */
2423 goto invalid_fld; 2422 goto invalid_fld;
@@ -2502,6 +2501,9 @@ ata_scsi_pass_thru(struct ata_queued_cmd *qc, const u8 *scsicmd)
2502 */ 2501 */
2503 qc->nsect = cmd->bufflen / ATA_SECT_SIZE; 2502 qc->nsect = cmd->bufflen / ATA_SECT_SIZE;
2504 2503
2504 /* request result TF */
2505 qc->flags |= ATA_QCFLAG_RESULT_TF;
2506
2505 return 0; 2507 return 0;
2506 2508
2507 invalid_fld: 2509 invalid_fld:
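
Note: two pass-through hardenings land here: DMA-protocol commands are refused while dev->dma_mode is still 0 (no transfer mode configured yet), and ATA_QCFLAG_RESULT_TF asks the core to snapshot the completion taskfile into qc->result_tf, which is what the sense generators earlier in this file now read. Sketch of the consumer side:

	/* qc->result_tf is only meaningful when ATA_QCFLAG_RESULT_TF
	 * was set at issue time */
	struct ata_taskfile *tf = &qc->result_tf;
	if (tf->command & (ATA_ERR | ATA_DF))
		ata_gen_ata_desc_sense(qc);	/* sense built from result_tf */
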
@@ -2578,19 +2580,24 @@ static inline void ata_scsi_dump_cdb(struct ata_port *ap,
2578#endif 2580#endif
2579} 2581}
2580 2582
2581static inline void __ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *), 2583static inline int __ata_scsi_queuecmd(struct scsi_cmnd *cmd,
2582 struct ata_port *ap, struct ata_device *dev) 2584 void (*done)(struct scsi_cmnd *),
2585 struct ata_device *dev)
2583{ 2586{
2587 int rc = 0;
2588
2584 if (dev->class == ATA_DEV_ATA) { 2589 if (dev->class == ATA_DEV_ATA) {
2585 ata_xlat_func_t xlat_func = ata_get_xlat_func(dev, 2590 ata_xlat_func_t xlat_func = ata_get_xlat_func(dev,
2586 cmd->cmnd[0]); 2591 cmd->cmnd[0]);
2587 2592
2588 if (xlat_func) 2593 if (xlat_func)
2589 ata_scsi_translate(ap, dev, cmd, done, xlat_func); 2594 rc = ata_scsi_translate(dev, cmd, done, xlat_func);
2590 else 2595 else
2591 ata_scsi_simulate(ap, dev, cmd, done); 2596 ata_scsi_simulate(dev, cmd, done);
2592 } else 2597 } else
2593 ata_scsi_translate(ap, dev, cmd, done, atapi_xlat); 2598 rc = ata_scsi_translate(dev, cmd, done, atapi_xlat);
2599
2600 return rc;
2594} 2601}
2595 2602
2596/** 2603/**
@@ -2609,17 +2616,18 @@ static inline void __ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struc
2609 * Releases scsi-layer-held lock, and obtains host_set lock. 2616 * Releases scsi-layer-held lock, and obtains host_set lock.
2610 * 2617 *
2611 * RETURNS: 2618 * RETURNS:
2612 * Zero. 2619 * Return value from __ata_scsi_queuecmd() if @cmd can be queued,
2620 * 0 otherwise.
2613 */ 2621 */
2614
2615int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) 2622int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
2616{ 2623{
2617 struct ata_port *ap; 2624 struct ata_port *ap;
2618 struct ata_device *dev; 2625 struct ata_device *dev;
2619 struct scsi_device *scsidev = cmd->device; 2626 struct scsi_device *scsidev = cmd->device;
2620 struct Scsi_Host *shost = scsidev->host; 2627 struct Scsi_Host *shost = scsidev->host;
2628 int rc = 0;
2621 2629
2622 ap = (struct ata_port *) &shost->hostdata[0]; 2630 ap = ata_shost_to_port(shost);
2623 2631
2624 spin_unlock(shost->host_lock); 2632 spin_unlock(shost->host_lock);
2625 spin_lock(&ap->host_set->lock); 2633 spin_lock(&ap->host_set->lock);
@@ -2628,7 +2636,7 @@ int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
2628 2636
2629 dev = ata_scsi_find_dev(ap, scsidev); 2637 dev = ata_scsi_find_dev(ap, scsidev);
2630 if (likely(dev)) 2638 if (likely(dev))
2631 __ata_scsi_queuecmd(cmd, done, ap, dev); 2639 rc = __ata_scsi_queuecmd(cmd, done, dev);
2632 else { 2640 else {
2633 cmd->result = (DID_BAD_TARGET << 16); 2641 cmd->result = (DID_BAD_TARGET << 16);
2634 done(cmd); 2642 done(cmd);
@@ -2636,12 +2644,11 @@ int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
2636 2644
2637 spin_unlock(&ap->host_set->lock); 2645 spin_unlock(&ap->host_set->lock);
2638 spin_lock(shost->host_lock); 2646 spin_lock(shost->host_lock);
2639 return 0; 2647 return rc;
2640} 2648}
2641 2649
2642/** 2650/**
2643 * ata_scsi_simulate - simulate SCSI command on ATA device 2651 * ata_scsi_simulate - simulate SCSI command on ATA device
2644 * @ap: port the device is connected to
2645 * @dev: the target device 2652 * @dev: the target device
2646 * @cmd: SCSI command being sent to device. 2653 * @cmd: SCSI command being sent to device.
2647 * @done: SCSI command completion function. 2654 * @done: SCSI command completion function.
@@ -2653,14 +2660,12 @@ int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
2653 * spin_lock_irqsave(host_set lock) 2660 * spin_lock_irqsave(host_set lock)
2654 */ 2661 */
2655 2662
2656void ata_scsi_simulate(struct ata_port *ap, struct ata_device *dev, 2663void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd,
2657 struct scsi_cmnd *cmd,
2658 void (*done)(struct scsi_cmnd *)) 2664 void (*done)(struct scsi_cmnd *))
2659{ 2665{
2660 struct ata_scsi_args args; 2666 struct ata_scsi_args args;
2661 const u8 *scsicmd = cmd->cmnd; 2667 const u8 *scsicmd = cmd->cmnd;
2662 2668
2663 args.ap = ap;
2664 args.dev = dev; 2669 args.dev = dev;
2665 args.id = dev->id; 2670 args.id = dev->id;
2666 args.cmd = cmd; 2671 args.cmd = cmd;
@@ -2735,14 +2740,13 @@ void ata_scsi_scan_host(struct ata_port *ap)
2735 struct ata_device *dev; 2740 struct ata_device *dev;
2736 unsigned int i; 2741 unsigned int i;
2737 2742
2738 if (ap->flags & ATA_FLAG_PORT_DISABLED) 2743 if (ap->flags & ATA_FLAG_DISABLED)
2739 return; 2744 return;
2740 2745
2741 for (i = 0; i < ATA_MAX_DEVICES; i++) { 2746 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2742 dev = &ap->device[i]; 2747 dev = &ap->device[i];
2743 2748
2744 if (ata_dev_present(dev)) 2749 if (ata_dev_enabled(dev))
2745 scsi_scan_target(&ap->host->shost_gendev, 0, i, 0, 0); 2750 scsi_scan_target(&ap->host->shost_gendev, 0, i, 0, 0);
2746 } 2751 }
2747} 2752}
2748
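
Note: the repeated (struct ata_port *)&shost->hostdata[0] casts in this file give way to ata_shost_to_port(). Its definition is not in this hunk; presumably it is the obvious inline in include/linux/libata.h:

	static inline struct ata_port *ata_shost_to_port(struct Scsi_Host *host)
	{
		return (struct ata_port *) &host->hostdata[0];
	}
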
diff --git a/drivers/scsi/libata.h b/drivers/scsi/libata.h
index bac8cbae06fe..b76ad7d7062a 100644
--- a/drivers/scsi/libata.h
+++ b/drivers/scsi/libata.h
@@ -29,10 +29,9 @@
29#define __LIBATA_H__ 29#define __LIBATA_H__
30 30
31#define DRV_NAME "libata" 31#define DRV_NAME "libata"
32#define DRV_VERSION "1.20" /* must be exactly four chars */ 32#define DRV_VERSION "1.30" /* must be exactly four chars */
33 33
34struct ata_scsi_args { 34struct ata_scsi_args {
35 struct ata_port *ap;
36 struct ata_device *dev; 35 struct ata_device *dev;
37 u16 *id; 36 u16 *id;
38 struct scsi_cmnd *cmd; 37 struct scsi_cmnd *cmd;
@@ -41,13 +40,24 @@ struct ata_scsi_args {
41 40
42/* libata-core.c */ 41/* libata-core.c */
43extern int atapi_enabled; 42extern int atapi_enabled;
43extern int atapi_dmadir;
44extern int libata_fua; 44extern int libata_fua;
45extern struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap, 45extern struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev);
46 struct ata_device *dev);
47extern int ata_rwcmd_protocol(struct ata_queued_cmd *qc); 46extern int ata_rwcmd_protocol(struct ata_queued_cmd *qc);
47extern void ata_dev_disable(struct ata_device *dev);
48extern void ata_port_flush_task(struct ata_port *ap); 48extern void ata_port_flush_task(struct ata_port *ap);
49extern unsigned ata_exec_internal(struct ata_device *dev,
50 struct ata_taskfile *tf, const u8 *cdb,
51 int dma_dir, void *buf, unsigned int buflen);
52extern int sata_down_spd_limit(struct ata_port *ap);
53extern int sata_set_spd_needed(struct ata_port *ap);
54extern int ata_down_xfermask_limit(struct ata_device *dev, int force_pio0);
55extern int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev);
56extern int ata_do_reset(struct ata_port *ap, ata_reset_fn_t reset,
57 unsigned int *classes);
49extern void ata_qc_free(struct ata_queued_cmd *qc); 58extern void ata_qc_free(struct ata_queued_cmd *qc);
50extern void ata_qc_issue(struct ata_queued_cmd *qc); 59extern void ata_qc_issue(struct ata_queued_cmd *qc);
60extern void __ata_qc_complete(struct ata_queued_cmd *qc);
51extern int ata_check_atapi_dma(struct ata_queued_cmd *qc); 61extern int ata_check_atapi_dma(struct ata_queued_cmd *qc);
52extern void ata_dev_select(struct ata_port *ap, unsigned int device, 62extern void ata_dev_select(struct ata_port *ap, unsigned int device,
53 unsigned int wait, unsigned int can_sleep); 63 unsigned int wait, unsigned int can_sleep);
@@ -88,5 +98,11 @@ extern void ata_scsi_set_sense(struct scsi_cmnd *cmd,
88extern void ata_scsi_rbuf_fill(struct ata_scsi_args *args, 98extern void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
89 unsigned int (*actor) (struct ata_scsi_args *args, 99 unsigned int (*actor) (struct ata_scsi_args *args,
90 u8 *rbuf, unsigned int buflen)); 100 u8 *rbuf, unsigned int buflen));
101extern void ata_schedule_scsi_eh(struct Scsi_Host *shost);
102
103/* libata-eh.c */
104extern enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd);
105extern void ata_scsi_error(struct Scsi_Host *host);
106extern void ata_qc_schedule_eh(struct ata_queued_cmd *qc);
91 107
92#endif /* __LIBATA_H__ */ 108#endif /* __LIBATA_H__ */
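
Note: libata.h now exports the dev-centric internals, notably ata_exec_internal(dev, tf, cdb, dma_dir, buf, buflen). A hedged usage sketch for a non-data command, assuming ata_tf_init() is also dev-based in this series:

	struct ata_taskfile tf;
	unsigned int err_mask;

	ata_tf_init(dev, &tf);			/* assumed dev-based init */
	tf.command = ATA_CMD_FLUSH;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
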
diff --git a/drivers/scsi/pdc_adma.c b/drivers/scsi/pdc_adma.c
index 5cda16cfacb0..a341fa8d3291 100644
--- a/drivers/scsi/pdc_adma.c
+++ b/drivers/scsi/pdc_adma.c
@@ -46,7 +46,7 @@
46#include <linux/libata.h> 46#include <linux/libata.h>
47 47
48#define DRV_NAME "pdc_adma" 48#define DRV_NAME "pdc_adma"
49#define DRV_VERSION "0.03" 49#define DRV_VERSION "0.04"
50 50
51/* macro to calculate base address for ATA regs */ 51/* macro to calculate base address for ATA regs */
52#define ADMA_ATA_REGS(base,port_no) ((base) + ((port_no) * 0x40)) 52#define ADMA_ATA_REGS(base,port_no) ((base) + ((port_no) * 0x40))
@@ -455,13 +455,13 @@ static inline unsigned int adma_intr_pkt(struct ata_host_set *host_set)
455 continue; 455 continue;
456 handled = 1; 456 handled = 1;
457 adma_enter_reg_mode(ap); 457 adma_enter_reg_mode(ap);
458 if (ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR)) 458 if (ap->flags & ATA_FLAG_DISABLED)
459 continue; 459 continue;
460 pp = ap->private_data; 460 pp = ap->private_data;
461 if (!pp || pp->state != adma_state_pkt) 461 if (!pp || pp->state != adma_state_pkt)
462 continue; 462 continue;
463 qc = ata_qc_from_tag(ap, ap->active_tag); 463 qc = ata_qc_from_tag(ap, ap->active_tag);
464 if (qc && (!(qc->tf.ctl & ATA_NIEN))) { 464 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
465 if ((status & (aPERR | aPSD | aUIRQ))) 465 if ((status & (aPERR | aPSD | aUIRQ)))
466 qc->err_mask |= AC_ERR_OTHER; 466 qc->err_mask |= AC_ERR_OTHER;
467 else if (pp->pkt[0] != cDONE) 467 else if (pp->pkt[0] != cDONE)
@@ -480,13 +480,13 @@ static inline unsigned int adma_intr_mmio(struct ata_host_set *host_set)
480 for (port_no = 0; port_no < host_set->n_ports; ++port_no) { 480 for (port_no = 0; port_no < host_set->n_ports; ++port_no) {
481 struct ata_port *ap; 481 struct ata_port *ap;
482 ap = host_set->ports[port_no]; 482 ap = host_set->ports[port_no];
483 if (ap && (!(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR)))) { 483 if (ap && (!(ap->flags & ATA_FLAG_DISABLED))) {
484 struct ata_queued_cmd *qc; 484 struct ata_queued_cmd *qc;
485 struct adma_port_priv *pp = ap->private_data; 485 struct adma_port_priv *pp = ap->private_data;
486 if (!pp || pp->state != adma_state_mmio) 486 if (!pp || pp->state != adma_state_mmio)
487 continue; 487 continue;
488 qc = ata_qc_from_tag(ap, ap->active_tag); 488 qc = ata_qc_from_tag(ap, ap->active_tag);
489 if (qc && (!(qc->tf.ctl & ATA_NIEN))) { 489 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
490 490
491 /* check main status, clearing INTRQ */ 491 /* check main status, clearing INTRQ */
492 u8 status = ata_check_status(ap); 492 u8 status = ata_check_status(ap);
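
Note: pdc_adma (and sata_nv, sata_promise, sata_qstor below) stop sniffing ATA_NIEN in the cached control register and instead test the per-command ATA_TFLAG_POLLING, paired with the new ATA_FLAG_PIO_POLLING port flag. The interrupt handlers converge on one idiom:

	qc = ata_qc_from_tag(ap, ap->active_tag);
	if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING))
		handled += ata_host_intr(ap, qc);	/* irq-driven only */
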
diff --git a/drivers/scsi/sata_mv.c b/drivers/scsi/sata_mv.c
index 9b8bca1ac1f0..624983c2e24b 100644
--- a/drivers/scsi/sata_mv.c
+++ b/drivers/scsi/sata_mv.c
@@ -93,7 +93,7 @@ enum {
93 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */ 93 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
94 MV_COMMON_FLAGS = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 94 MV_COMMON_FLAGS = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
95 ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO | 95 ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
96 ATA_FLAG_NO_ATAPI), 96 ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING),
97 MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE, 97 MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,
98 98
99 CRQB_FLAG_READ = (1 << 0), 99 CRQB_FLAG_READ = (1 << 0),
@@ -272,33 +272,33 @@ enum chip_type {
272 272
273/* Command ReQuest Block: 32B */ 273/* Command ReQuest Block: 32B */
274struct mv_crqb { 274struct mv_crqb {
275 u32 sg_addr; 275 __le32 sg_addr;
276 u32 sg_addr_hi; 276 __le32 sg_addr_hi;
277 u16 ctrl_flags; 277 __le16 ctrl_flags;
278 u16 ata_cmd[11]; 278 __le16 ata_cmd[11];
279}; 279};
280 280
281struct mv_crqb_iie { 281struct mv_crqb_iie {
282 u32 addr; 282 __le32 addr;
283 u32 addr_hi; 283 __le32 addr_hi;
284 u32 flags; 284 __le32 flags;
285 u32 len; 285 __le32 len;
286 u32 ata_cmd[4]; 286 __le32 ata_cmd[4];
287}; 287};
288 288
289/* Command ResPonse Block: 8B */ 289/* Command ResPonse Block: 8B */
290struct mv_crpb { 290struct mv_crpb {
291 u16 id; 291 __le16 id;
292 u16 flags; 292 __le16 flags;
293 u32 tmstmp; 293 __le32 tmstmp;
294}; 294};
295 295
296/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */ 296/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
297struct mv_sg { 297struct mv_sg {
298 u32 addr; 298 __le32 addr;
299 u32 flags_size; 299 __le32 flags_size;
300 u32 addr_hi; 300 __le32 addr_hi;
301 u32 reserved; 301 __le32 reserved;
302}; 302};
303 303
304struct mv_port_priv { 304struct mv_port_priv {
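
Note: the __le16/__le32 types on the sata_mv descriptor structs are sparse endianness annotations; with make C=1 CF=-D__CHECK_ENDIAN__ any raw integer store into them is flagged, forcing explicit conversions on the CPU side. Sketch, with sg assumed to point at a struct mv_sg:

	sg->addr       = cpu_to_le32(addr & 0xffffffff);
	sg->addr_hi    = cpu_to_le32((addr >> 16) >> 16); /* 64-bit safe */
	sg->flags_size = cpu_to_le32(len & 0xffff);
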
@@ -406,6 +406,7 @@ static const struct ata_port_operations mv5_ops = {
406 406
407 .qc_prep = mv_qc_prep, 407 .qc_prep = mv_qc_prep,
408 .qc_issue = mv_qc_issue, 408 .qc_issue = mv_qc_issue,
409 .data_xfer = ata_mmio_data_xfer,
409 410
410 .eng_timeout = mv_eng_timeout, 411 .eng_timeout = mv_eng_timeout,
411 412
@@ -433,6 +434,7 @@ static const struct ata_port_operations mv6_ops = {
433 434
434 .qc_prep = mv_qc_prep, 435 .qc_prep = mv_qc_prep,
435 .qc_issue = mv_qc_issue, 436 .qc_issue = mv_qc_issue,
437 .data_xfer = ata_mmio_data_xfer,
436 438
437 .eng_timeout = mv_eng_timeout, 439 .eng_timeout = mv_eng_timeout,
438 440
@@ -683,7 +685,7 @@ static void mv_stop_dma(struct ata_port *ap)
683 } 685 }
684 686
685 if (EDMA_EN & reg) { 687 if (EDMA_EN & reg) {
686 printk(KERN_ERR "ata%u: Unable to stop eDMA\n", ap->id); 688 ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
687 /* FIXME: Consider doing a reset here to recover */ 689 /* FIXME: Consider doing a reset here to recover */
688 } 690 }
689} 691}
@@ -1028,7 +1030,7 @@ static inline unsigned mv_inc_q_index(unsigned index)
1028 return (index + 1) & MV_MAX_Q_DEPTH_MASK; 1030 return (index + 1) & MV_MAX_Q_DEPTH_MASK;
1029} 1031}
1030 1032
1031static inline void mv_crqb_pack_cmd(u16 *cmdw, u8 data, u8 addr, unsigned last) 1033static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
1032{ 1034{
1033 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS | 1035 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
1034 (last ? CRQB_CMD_LAST : 0); 1036 (last ? CRQB_CMD_LAST : 0);
@@ -1051,7 +1053,7 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
1051{ 1053{
1052 struct ata_port *ap = qc->ap; 1054 struct ata_port *ap = qc->ap;
1053 struct mv_port_priv *pp = ap->private_data; 1055 struct mv_port_priv *pp = ap->private_data;
1054 u16 *cw; 1056 __le16 *cw;
1055 struct ata_taskfile *tf; 1057 struct ata_taskfile *tf;
1056 u16 flags = 0; 1058 u16 flags = 0;
1057 unsigned in_index; 1059 unsigned in_index;
@@ -1307,8 +1309,8 @@ static void mv_err_intr(struct ata_port *ap, int reset_allowed)
1307 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); 1309 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1308 1310
1309 if (EDMA_ERR_SERR & edma_err_cause) { 1311 if (EDMA_ERR_SERR & edma_err_cause) {
1310 serr = scr_read(ap, SCR_ERROR); 1312 sata_scr_read(ap, SCR_ERROR, &serr);
1311 scr_write_flush(ap, SCR_ERROR, serr); 1313 sata_scr_write_flush(ap, SCR_ERROR, serr);
1312 } 1314 }
1313 if (EDMA_ERR_SELF_DIS & edma_err_cause) { 1315 if (EDMA_ERR_SELF_DIS & edma_err_cause) {
1314 struct mv_port_priv *pp = ap->private_data; 1316 struct mv_port_priv *pp = ap->private_data;
@@ -1377,7 +1379,7 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
1377 /* Note that DEV_IRQ might happen spuriously during EDMA, 1379 /* Note that DEV_IRQ might happen spuriously during EDMA,
1378 * and should be ignored in such cases. 1380 * and should be ignored in such cases.
1379 * The cause of this is still under investigation. 1381 * The cause of this is still under investigation.
1380 */ 1382 */
1381 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) { 1383 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1382 /* EDMA: check for response queue interrupt */ 1384 /* EDMA: check for response queue interrupt */
1383 if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause) { 1385 if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause) {
@@ -1398,7 +1400,7 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
1398 } 1400 }
1399 } 1401 }
1400 1402
1401 if (ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR)) 1403 if (ap && (ap->flags & ATA_FLAG_DISABLED))
1402 continue; 1404 continue;
1403 1405
1404 err_mask = ac_err_mask(ata_status); 1406 err_mask = ac_err_mask(ata_status);
@@ -1419,7 +1421,7 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
1419 VPRINTK("port %u IRQ found for qc, " 1421 VPRINTK("port %u IRQ found for qc, "
1420 "ata_status 0x%x\n", port,ata_status); 1422 "ata_status 0x%x\n", port,ata_status);
1421 /* mark qc status appropriately */ 1423 /* mark qc status appropriately */
1422 if (!(qc->tf.ctl & ATA_NIEN)) { 1424 if (!(qc->tf.flags & ATA_TFLAG_POLLING)) {
1423 qc->err_mask |= err_mask; 1425 qc->err_mask |= err_mask;
1424 ata_qc_complete(qc); 1426 ata_qc_complete(qc);
1425 } 1427 }
@@ -1949,15 +1951,16 @@ static void __mv_phy_reset(struct ata_port *ap, int can_sleep)
1949 1951
1950 /* Issue COMRESET via SControl */ 1952 /* Issue COMRESET via SControl */
1951comreset_retry: 1953comreset_retry:
1952 scr_write_flush(ap, SCR_CONTROL, 0x301); 1954 sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
1953 __msleep(1, can_sleep); 1955 __msleep(1, can_sleep);
1954 1956
1955 scr_write_flush(ap, SCR_CONTROL, 0x300); 1957 sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
1956 __msleep(20, can_sleep); 1958 __msleep(20, can_sleep);
1957 1959
1958 timeout = jiffies + msecs_to_jiffies(200); 1960 timeout = jiffies + msecs_to_jiffies(200);
1959 do { 1961 do {
1960 sstatus = scr_read(ap, SCR_STATUS) & 0x3; 1962 sata_scr_read(ap, SCR_STATUS, &sstatus);
1963 sstatus &= 0x3;
1961 if ((sstatus == 3) || (sstatus == 0)) 1964 if ((sstatus == 3) || (sstatus == 0))
1962 break; 1965 break;
1963 1966
@@ -1974,11 +1977,12 @@ comreset_retry:
1974 "SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS), 1977 "SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
1975 mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL)); 1978 mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));
1976 1979
1977 if (sata_dev_present(ap)) { 1980 if (ata_port_online(ap)) {
1978 ata_port_probe(ap); 1981 ata_port_probe(ap);
1979 } else { 1982 } else {
1980 printk(KERN_INFO "ata%u: no device found (phy stat %08x)\n", 1983 sata_scr_read(ap, SCR_STATUS, &sstatus);
1981 ap->id, scr_read(ap, SCR_STATUS)); 1984 ata_port_printk(ap, KERN_INFO,
1985 "no device found (phy stat %08x)\n", sstatus);
1982 ata_port_disable(ap); 1986 ata_port_disable(ap);
1983 return; 1987 return;
1984 } 1988 }
@@ -2005,7 +2009,7 @@ comreset_retry:
2005 tf.nsect = readb((void __iomem *) ap->ioaddr.nsect_addr); 2009 tf.nsect = readb((void __iomem *) ap->ioaddr.nsect_addr);
2006 2010
2007 dev->class = ata_dev_classify(&tf); 2011 dev->class = ata_dev_classify(&tf);
2008 if (!ata_dev_present(dev)) { 2012 if (!ata_dev_enabled(dev)) {
2009 VPRINTK("Port disabled post-sig: No device present.\n"); 2013 VPRINTK("Port disabled post-sig: No device present.\n");
2010 ata_port_disable(ap); 2014 ata_port_disable(ap);
2011 } 2015 }
@@ -2036,7 +2040,7 @@ static void mv_eng_timeout(struct ata_port *ap)
2036{ 2040{
2037 struct ata_queued_cmd *qc; 2041 struct ata_queued_cmd *qc;
2038 2042
2039 printk(KERN_ERR "ata%u: Entering mv_eng_timeout\n",ap->id); 2043 ata_port_printk(ap, KERN_ERR, "Entering mv_eng_timeout\n");
2040 DPRINTK("All regs @ start of eng_timeout\n"); 2044 DPRINTK("All regs @ start of eng_timeout\n");
2041 mv_dump_all_regs(ap->host_set->mmio_base, ap->port_no, 2045 mv_dump_all_regs(ap->host_set->mmio_base, ap->port_no,
2042 to_pci_dev(ap->host_set->dev)); 2046 to_pci_dev(ap->host_set->dev));
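
Note: the sata_mv conversions above show the new SCR accessor contract: sata_scr_read()/sata_scr_write_flush() return an int status and pass the register value through a pointer (0 taken as success here), so a port without SCR registers is distinguishable from a register that reads zero:

	u32 serr;

	if (sata_scr_read(ap, SCR_ERROR, &serr) == 0)
		sata_scr_write_flush(ap, SCR_ERROR, serr); /* clear errors */
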
diff --git a/drivers/scsi/sata_nv.c b/drivers/scsi/sata_nv.c
index 9f553081b5e8..d93513ef7412 100644
--- a/drivers/scsi/sata_nv.c
+++ b/drivers/scsi/sata_nv.c
@@ -44,7 +44,7 @@
44#include <linux/libata.h> 44#include <linux/libata.h>
45 45
46#define DRV_NAME "sata_nv" 46#define DRV_NAME "sata_nv"
47#define DRV_VERSION "0.8" 47#define DRV_VERSION "0.9"
48 48
49enum { 49enum {
50 NV_PORTS = 2, 50 NV_PORTS = 2,
@@ -140,6 +140,12 @@ static const struct pci_device_id nv_pci_tbl[] = {
140 PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC }, 140 PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
141 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2, 141 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2,
142 PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC }, 142 PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
143 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA,
144 PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
145 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2,
146 PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
147 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3,
148 PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
143 { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, 149 { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
144 PCI_ANY_ID, PCI_ANY_ID, 150 PCI_ANY_ID, PCI_ANY_ID,
145 PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC }, 151 PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC },
@@ -228,6 +234,7 @@ static const struct ata_port_operations nv_ops = {
228 .qc_prep = ata_qc_prep, 234 .qc_prep = ata_qc_prep,
229 .qc_issue = ata_qc_issue_prot, 235 .qc_issue = ata_qc_issue_prot,
230 .eng_timeout = ata_eng_timeout, 236 .eng_timeout = ata_eng_timeout,
237 .data_xfer = ata_pio_data_xfer,
231 .irq_handler = nv_interrupt, 238 .irq_handler = nv_interrupt,
232 .irq_clear = ata_bmdma_irq_clear, 239 .irq_clear = ata_bmdma_irq_clear,
233 .scr_read = nv_scr_read, 240 .scr_read = nv_scr_read,
@@ -279,11 +286,11 @@ static irqreturn_t nv_interrupt (int irq, void *dev_instance,
279 286
280 ap = host_set->ports[i]; 287 ap = host_set->ports[i];
281 if (ap && 288 if (ap &&
282 !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) { 289 !(ap->flags & ATA_FLAG_DISABLED)) {
283 struct ata_queued_cmd *qc; 290 struct ata_queued_cmd *qc;
284 291
285 qc = ata_qc_from_tag(ap, ap->active_tag); 292 qc = ata_qc_from_tag(ap, ap->active_tag);
286 if (qc && (!(qc->tf.ctl & ATA_NIEN))) 293 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
287 handled += ata_host_intr(ap, qc); 294 handled += ata_host_intr(ap, qc);
288 else 295 else
289 // No request pending? Clear interrupt status 296 // No request pending? Clear interrupt status
diff --git a/drivers/scsi/sata_promise.c b/drivers/scsi/sata_promise.c
index 7eb67a6bdc64..01111594d09c 100644
--- a/drivers/scsi/sata_promise.c
+++ b/drivers/scsi/sata_promise.c
@@ -76,7 +76,8 @@ enum {
76 PDC_RESET = (1 << 11), /* HDMA reset */ 76 PDC_RESET = (1 << 11), /* HDMA reset */
77 77
78 PDC_COMMON_FLAGS = ATA_FLAG_NO_LEGACY | ATA_FLAG_SRST | 78 PDC_COMMON_FLAGS = ATA_FLAG_NO_LEGACY | ATA_FLAG_SRST |
79 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI, 79 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
80 ATA_FLAG_PIO_POLLING,
80}; 81};
81 82
82 83
@@ -136,6 +137,7 @@ static const struct ata_port_operations pdc_sata_ops = {
136 .qc_prep = pdc_qc_prep, 137 .qc_prep = pdc_qc_prep,
137 .qc_issue = pdc_qc_issue_prot, 138 .qc_issue = pdc_qc_issue_prot,
138 .eng_timeout = pdc_eng_timeout, 139 .eng_timeout = pdc_eng_timeout,
140 .data_xfer = ata_mmio_data_xfer,
139 .irq_handler = pdc_interrupt, 141 .irq_handler = pdc_interrupt,
140 .irq_clear = pdc_irq_clear, 142 .irq_clear = pdc_irq_clear,
141 143
@@ -158,6 +160,7 @@ static const struct ata_port_operations pdc_pata_ops = {
158 160
159 .qc_prep = pdc_qc_prep, 161 .qc_prep = pdc_qc_prep,
160 .qc_issue = pdc_qc_issue_prot, 162 .qc_issue = pdc_qc_issue_prot,
163 .data_xfer = ata_mmio_data_xfer,
161 .eng_timeout = pdc_eng_timeout, 164 .eng_timeout = pdc_eng_timeout,
162 .irq_handler = pdc_interrupt, 165 .irq_handler = pdc_interrupt,
163 .irq_clear = pdc_irq_clear, 166 .irq_clear = pdc_irq_clear,
@@ -363,12 +366,23 @@ static void pdc_sata_phy_reset(struct ata_port *ap)
363 sata_phy_reset(ap); 366 sata_phy_reset(ap);
364} 367}
365 368
366static void pdc_pata_phy_reset(struct ata_port *ap) 369static void pdc_pata_cbl_detect(struct ata_port *ap)
367{ 370{
368 /* FIXME: add cable detect. Don't assume 40-pin cable */ 371 u8 tmp;
369 ap->cbl = ATA_CBL_PATA40; 372 void __iomem *mmio = (void *) ap->ioaddr.cmd_addr + PDC_CTLSTAT + 0x03;
370 ap->udma_mask &= ATA_UDMA_MASK_40C; 373
374 tmp = readb(mmio);
375
376 if (tmp & 0x01) {
377 ap->cbl = ATA_CBL_PATA40;
378 ap->udma_mask &= ATA_UDMA_MASK_40C;
379 } else
380 ap->cbl = ATA_CBL_PATA80;
381}
371 382
383static void pdc_pata_phy_reset(struct ata_port *ap)
384{
385 pdc_pata_cbl_detect(ap);
372 pdc_reset_port(ap); 386 pdc_reset_port(ap);
373 ata_port_probe(ap); 387 ata_port_probe(ap);
374 ata_bus_reset(ap); 388 ata_bus_reset(ap);
@@ -435,7 +449,7 @@ static void pdc_eng_timeout(struct ata_port *ap)
435 switch (qc->tf.protocol) { 449 switch (qc->tf.protocol) {
436 case ATA_PROT_DMA: 450 case ATA_PROT_DMA:
437 case ATA_PROT_NODATA: 451 case ATA_PROT_NODATA:
438 printk(KERN_ERR "ata%u: command timeout\n", ap->id); 452 ata_port_printk(ap, KERN_ERR, "command timeout\n");
439 drv_stat = ata_wait_idle(ap); 453 drv_stat = ata_wait_idle(ap);
440 qc->err_mask |= __ac_err_mask(drv_stat); 454 qc->err_mask |= __ac_err_mask(drv_stat);
441 break; 455 break;
@@ -443,8 +457,9 @@ static void pdc_eng_timeout(struct ata_port *ap)
443 default: 457 default:
444 drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000); 458 drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
445 459
446 printk(KERN_ERR "ata%u: unknown timeout, cmd 0x%x stat 0x%x\n", 460 ata_port_printk(ap, KERN_ERR,
447 ap->id, qc->tf.command, drv_stat); 461 "unknown timeout, cmd 0x%x stat 0x%x\n",
462 qc->tf.command, drv_stat);
448 463
449 qc->err_mask |= ac_err_mask(drv_stat); 464 qc->err_mask |= ac_err_mask(drv_stat);
450 break; 465 break;
@@ -533,11 +548,11 @@ static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *r
533 ap = host_set->ports[i]; 548 ap = host_set->ports[i];
534 tmp = mask & (1 << (i + 1)); 549 tmp = mask & (1 << (i + 1));
535 if (tmp && ap && 550 if (tmp && ap &&
536 !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) { 551 !(ap->flags & ATA_FLAG_DISABLED)) {
537 struct ata_queued_cmd *qc; 552 struct ata_queued_cmd *qc;
538 553
539 qc = ata_qc_from_tag(ap, ap->active_tag); 554 qc = ata_qc_from_tag(ap, ap->active_tag);
540 if (qc && (!(qc->tf.ctl & ATA_NIEN))) 555 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
541 handled += pdc_host_intr(ap, qc); 556 handled += pdc_host_intr(ap, qc);
542 } 557 }
543 } 558 }
@@ -676,10 +691,6 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
676 if (!printed_version++) 691 if (!printed_version++)
677 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); 692 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
678 693
679 /*
680 * If this driver happens to only be useful on Apple's K2, then
681 * we should check that here as it has a normal Serverworks ID
682 */
683 rc = pci_enable_device(pdev); 694 rc = pci_enable_device(pdev);
684 if (rc) 695 if (rc)
685 return rc; 696 return rc;
diff --git a/drivers/scsi/sata_qstor.c b/drivers/scsi/sata_qstor.c
index 886f3447dd48..68737cadd2d4 100644
--- a/drivers/scsi/sata_qstor.c
+++ b/drivers/scsi/sata_qstor.c
@@ -41,7 +41,7 @@
41#include <linux/libata.h> 41#include <linux/libata.h>
42 42
43#define DRV_NAME "sata_qstor" 43#define DRV_NAME "sata_qstor"
44#define DRV_VERSION "0.05" 44#define DRV_VERSION "0.06"
45 45
46enum { 46enum {
47 QS_PORTS = 4, 47 QS_PORTS = 4,
@@ -156,6 +156,7 @@ static const struct ata_port_operations qs_ata_ops = {
156 .phy_reset = qs_phy_reset, 156 .phy_reset = qs_phy_reset,
157 .qc_prep = qs_qc_prep, 157 .qc_prep = qs_qc_prep,
158 .qc_issue = qs_qc_issue, 158 .qc_issue = qs_qc_issue,
159 .data_xfer = ata_mmio_data_xfer,
159 .eng_timeout = qs_eng_timeout, 160 .eng_timeout = qs_eng_timeout,
160 .irq_handler = qs_intr, 161 .irq_handler = qs_intr,
161 .irq_clear = qs_irq_clear, 162 .irq_clear = qs_irq_clear,
@@ -175,7 +176,7 @@ static const struct ata_port_info qs_port_info[] = {
175 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 176 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
176 ATA_FLAG_SATA_RESET | 177 ATA_FLAG_SATA_RESET |
177 //FIXME ATA_FLAG_SRST | 178 //FIXME ATA_FLAG_SRST |
178 ATA_FLAG_MMIO, 179 ATA_FLAG_MMIO | ATA_FLAG_PIO_POLLING,
179 .pio_mask = 0x10, /* pio4 */ 180 .pio_mask = 0x10, /* pio4 */
180 .udma_mask = 0x7f, /* udma0-6 */ 181 .udma_mask = 0x7f, /* udma0-6 */
181 .port_ops = &qs_ata_ops, 182 .port_ops = &qs_ata_ops,
@@ -394,14 +395,13 @@ static inline unsigned int qs_intr_pkt(struct ata_host_set *host_set)
394 DPRINTK("SFF=%08x%08x: sCHAN=%u sHST=%d sDST=%02x\n", 395 DPRINTK("SFF=%08x%08x: sCHAN=%u sHST=%d sDST=%02x\n",
395 sff1, sff0, port_no, sHST, sDST); 396 sff1, sff0, port_no, sHST, sDST);
396 handled = 1; 397 handled = 1;
397 if (ap && !(ap->flags & 398 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
398 (ATA_FLAG_PORT_DISABLED|ATA_FLAG_NOINTR))) {
399 struct ata_queued_cmd *qc; 399 struct ata_queued_cmd *qc;
400 struct qs_port_priv *pp = ap->private_data; 400 struct qs_port_priv *pp = ap->private_data;
401 if (!pp || pp->state != qs_state_pkt) 401 if (!pp || pp->state != qs_state_pkt)
402 continue; 402 continue;
403 qc = ata_qc_from_tag(ap, ap->active_tag); 403 qc = ata_qc_from_tag(ap, ap->active_tag);
404 if (qc && (!(qc->tf.ctl & ATA_NIEN))) { 404 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
405 switch (sHST) { 405 switch (sHST) {
406 case 0: /* successful CPB */ 406 case 0: /* successful CPB */
407 case 3: /* device error */ 407 case 3: /* device error */
@@ -428,13 +428,13 @@ static inline unsigned int qs_intr_mmio(struct ata_host_set *host_set)
428 struct ata_port *ap; 428 struct ata_port *ap;
429 ap = host_set->ports[port_no]; 429 ap = host_set->ports[port_no];
430 if (ap && 430 if (ap &&
431 !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) { 431 !(ap->flags & ATA_FLAG_DISABLED)) {
432 struct ata_queued_cmd *qc; 432 struct ata_queued_cmd *qc;
433 struct qs_port_priv *pp = ap->private_data; 433 struct qs_port_priv *pp = ap->private_data;
434 if (!pp || pp->state != qs_state_mmio) 434 if (!pp || pp->state != qs_state_mmio)
435 continue; 435 continue;
436 qc = ata_qc_from_tag(ap, ap->active_tag); 436 qc = ata_qc_from_tag(ap, ap->active_tag);
437 if (qc && (!(qc->tf.ctl & ATA_NIEN))) { 437 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
438 438
439 /* check main status, clearing INTRQ */ 439 /* check main status, clearing INTRQ */
440 u8 status = ata_check_status(ap); 440 u8 status = ata_check_status(ap);
diff --git a/drivers/scsi/sata_sil.c b/drivers/scsi/sata_sil.c
index 106627299d55..3bd807738698 100644
--- a/drivers/scsi/sata_sil.c
+++ b/drivers/scsi/sata_sil.c
@@ -46,7 +46,7 @@
46#include <linux/libata.h> 46#include <linux/libata.h>
47 47
48#define DRV_NAME "sata_sil" 48#define DRV_NAME "sata_sil"
49#define DRV_VERSION "0.9" 49#define DRV_VERSION "1.0"
50 50
51enum { 51enum {
52 /* 52 /*
@@ -96,6 +96,8 @@ static void sil_dev_config(struct ata_port *ap, struct ata_device *dev);
96static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg); 96static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg);
97static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); 97static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
98static void sil_post_set_mode (struct ata_port *ap); 98static void sil_post_set_mode (struct ata_port *ap);
99static void sil_freeze(struct ata_port *ap);
100static void sil_thaw(struct ata_port *ap);
99 101
100 102
101static const struct pci_device_id sil_pci_tbl[] = { 103static const struct pci_device_id sil_pci_tbl[] = {
@@ -174,7 +176,11 @@ static const struct ata_port_operations sil_ops = {
174 .bmdma_status = ata_bmdma_status, 176 .bmdma_status = ata_bmdma_status,
175 .qc_prep = ata_qc_prep, 177 .qc_prep = ata_qc_prep,
176 .qc_issue = ata_qc_issue_prot, 178 .qc_issue = ata_qc_issue_prot,
177 .eng_timeout = ata_eng_timeout, 179 .data_xfer = ata_mmio_data_xfer,
180 .freeze = sil_freeze,
181 .thaw = sil_thaw,
182 .error_handler = ata_bmdma_error_handler,
183 .post_internal_cmd = ata_bmdma_post_internal_cmd,
178 .irq_handler = ata_interrupt, 184 .irq_handler = ata_interrupt,
179 .irq_clear = ata_bmdma_irq_clear, 185 .irq_clear = ata_bmdma_irq_clear,
180 .scr_read = sil_scr_read, 186 .scr_read = sil_scr_read,
@@ -263,7 +269,7 @@ static void sil_post_set_mode (struct ata_port *ap)
263 269
264 for (i = 0; i < 2; i++) { 270 for (i = 0; i < 2; i++) {
265 dev = &ap->device[i]; 271 dev = &ap->device[i];
266 if (!ata_dev_present(dev)) 272 if (!ata_dev_enabled(dev))
267 dev_mode[i] = 0; /* PIO0/1/2 */ 273 dev_mode[i] = 0; /* PIO0/1/2 */
268 else if (dev->flags & ATA_DFLAG_PIO) 274 else if (dev->flags & ATA_DFLAG_PIO)
269 dev_mode[i] = 1; /* PIO3/4 */ 275 dev_mode[i] = 1; /* PIO3/4 */
@@ -314,6 +320,33 @@ static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
314 writel(val, mmio); 320 writel(val, mmio);
315} 321}
316 322
323static void sil_freeze(struct ata_port *ap)
324{
325 void __iomem *mmio_base = ap->host_set->mmio_base;
326 u32 tmp;
327
328 /* plug IRQ */
329 tmp = readl(mmio_base + SIL_SYSCFG);
330 tmp |= SIL_MASK_IDE0_INT << ap->port_no;
331 writel(tmp, mmio_base + SIL_SYSCFG);
332 readl(mmio_base + SIL_SYSCFG); /* flush */
333}
334
335static void sil_thaw(struct ata_port *ap)
336{
337 void __iomem *mmio_base = ap->host_set->mmio_base;
338 u32 tmp;
339
340 /* clear IRQ */
341 ata_chk_status(ap);
342 ata_bmdma_irq_clear(ap);
343
344 /* turn on IRQ */
345 tmp = readl(mmio_base + SIL_SYSCFG);
346 tmp &= ~(SIL_MASK_IDE0_INT << ap->port_no);
347 writel(tmp, mmio_base + SIL_SYSCFG);
348}
349
317/** 350/**
318 * sil_dev_config - Apply device/host-specific errata fixups 351 * sil_dev_config - Apply device/host-specific errata fixups
319 * @ap: Port containing device to be examined 352 * @ap: Port containing device to be examined
@@ -360,16 +393,16 @@ static void sil_dev_config(struct ata_port *ap, struct ata_device *dev)
360 if (slow_down || 393 if (slow_down ||
361 ((ap->flags & SIL_FLAG_MOD15WRITE) && 394 ((ap->flags & SIL_FLAG_MOD15WRITE) &&
362 (quirks & SIL_QUIRK_MOD15WRITE))) { 395 (quirks & SIL_QUIRK_MOD15WRITE))) {
363 printk(KERN_INFO "ata%u(%u): applying Seagate errata fix (mod15write workaround)\n", 396 ata_dev_printk(dev, KERN_INFO, "applying Seagate errata fix "
364 ap->id, dev->devno); 397 "(mod15write workaround)\n");
365 dev->max_sectors = 15; 398 dev->max_sectors = 15;
366 return; 399 return;
367 } 400 }
368 401
369 /* limit to udma5 */ 402 /* limit to udma5 */
370 if (quirks & SIL_QUIRK_UDMA5MAX) { 403 if (quirks & SIL_QUIRK_UDMA5MAX) {
371 printk(KERN_INFO "ata%u(%u): applying Maxtor errata fix %s\n", 404 ata_dev_printk(dev, KERN_INFO,
372 ap->id, dev->devno, model_num); 405 "applying Maxtor errata fix %s\n", model_num);
373 dev->udma_mask &= ATA_UDMA5; 406 dev->udma_mask &= ATA_UDMA5;
374 return; 407 return;
375 } 408 }
@@ -384,16 +417,12 @@ static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
384 int rc; 417 int rc;
385 unsigned int i; 418 unsigned int i;
386 int pci_dev_busy = 0; 419 int pci_dev_busy = 0;
387 u32 tmp, irq_mask; 420 u32 tmp;
388 u8 cls; 421 u8 cls;
389 422
390 if (!printed_version++) 423 if (!printed_version++)
391 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); 424 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
392 425
393 /*
394 * If this driver happens to only be useful on Apple's K2, then
395 * we should check that here as it has a normal Serverworks ID
396 */
397 rc = pci_enable_device(pdev); 426 rc = pci_enable_device(pdev);
398 if (rc) 427 if (rc)
399 return rc; 428 return rc;
@@ -478,24 +507,11 @@ static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
478 } 507 }
479 508
480 if (ent->driver_data == sil_3114) { 509 if (ent->driver_data == sil_3114) {
481 irq_mask = SIL_MASK_4PORT;
482
483 /* flip the magic "make 4 ports work" bit */ 510 /* flip the magic "make 4 ports work" bit */
484 tmp = readl(mmio_base + sil_port[2].bmdma); 511 tmp = readl(mmio_base + sil_port[2].bmdma);
485 if ((tmp & SIL_INTR_STEERING) == 0) 512 if ((tmp & SIL_INTR_STEERING) == 0)
486 writel(tmp | SIL_INTR_STEERING, 513 writel(tmp | SIL_INTR_STEERING,
487 mmio_base + sil_port[2].bmdma); 514 mmio_base + sil_port[2].bmdma);
488
489 } else {
490 irq_mask = SIL_MASK_2PORT;
491 }
492
493 /* make sure IDE0/1/2/3 interrupts are not masked */
494 tmp = readl(mmio_base + SIL_SYSCFG);
495 if (tmp & irq_mask) {
496 tmp &= ~irq_mask;
497 writel(tmp, mmio_base + SIL_SYSCFG);
498 readl(mmio_base + SIL_SYSCFG); /* flush */
499 } 515 }
500 516
501 /* mask all SATA phy-related interrupts */ 517 /* mask all SATA phy-related interrupts */
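sil_freeze() and sil_thaw() above take over the IRQ (un)masking that sil_init_one() used to do once at probe time; both are a read-modify-write of one per-port mask bit in SIL_SYSCFG. A condensed sketch of that pattern, with an invented helper name:

static void sil_set_port_irq(void __iomem *mmio_base, unsigned int port_no,
			     int enable)
{
	u32 tmp = readl(mmio_base + SIL_SYSCFG);

	if (enable)
		tmp &= ~(SIL_MASK_IDE0_INT << port_no);	/* thaw */
	else
		tmp |= SIL_MASK_IDE0_INT << port_no;	/* freeze */
	writel(tmp, mmio_base + SIL_SYSCFG);
	readl(mmio_base + SIL_SYSCFG);	/* flush the posted write */
}

Thawing additionally clears any IRQ latched while the port was frozen (the ata_chk_status()/ata_bmdma_irq_clear() calls above) before unmasking.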
diff --git a/drivers/scsi/sata_sil24.c b/drivers/scsi/sata_sil24.c
index f7264fd611c2..4c76f05d9b65 100644
--- a/drivers/scsi/sata_sil24.c
+++ b/drivers/scsi/sata_sil24.c
@@ -31,7 +31,7 @@
31#include <asm/io.h> 31#include <asm/io.h>
32 32
33#define DRV_NAME "sata_sil24" 33#define DRV_NAME "sata_sil24"
34#define DRV_VERSION "0.23" 34#define DRV_VERSION "0.24"
35 35
36/* 36/*
37 * Port request block (PRB) 32 bytes 37 * Port request block (PRB) 32 bytes
@@ -86,6 +86,13 @@ enum {
86 /* HOST_SLOT_STAT bits */ 86 /* HOST_SLOT_STAT bits */
87 HOST_SSTAT_ATTN = (1 << 31), 87 HOST_SSTAT_ATTN = (1 << 31),
88 88
89 /* HOST_CTRL bits */
90 HOST_CTRL_M66EN = (1 << 16), /* M66EN PCI bus signal */
91 HOST_CTRL_TRDY = (1 << 17), /* latched PCI TRDY */
92 HOST_CTRL_STOP = (1 << 18), /* latched PCI STOP */
93 HOST_CTRL_DEVSEL = (1 << 19), /* latched PCI DEVSEL */
94 HOST_CTRL_REQ64 = (1 << 20), /* latched PCI REQ64 */
95
89 /* 96 /*
90 * Port registers 97 * Port registers
91 * (8192 bytes @ +0x0000, +0x2000, +0x4000 and +0x6000 @ BAR2) 98 * (8192 bytes @ +0x0000, +0x2000, +0x4000 and +0x6000 @ BAR2)
@@ -142,8 +149,15 @@ enum {
142 PORT_IRQ_PWR_CHG = (1 << 3), /* power management change */ 149 PORT_IRQ_PWR_CHG = (1 << 3), /* power management change */
143 PORT_IRQ_PHYRDY_CHG = (1 << 4), /* PHY ready change */ 150 PORT_IRQ_PHYRDY_CHG = (1 << 4), /* PHY ready change */
144 PORT_IRQ_COMWAKE = (1 << 5), /* COMWAKE received */ 151 PORT_IRQ_COMWAKE = (1 << 5), /* COMWAKE received */
145 PORT_IRQ_UNK_FIS = (1 << 6), /* Unknown FIS received */ 152 PORT_IRQ_UNK_FIS = (1 << 6), /* unknown FIS received */
146 PORT_IRQ_SDB_FIS = (1 << 11), /* SDB FIS received */ 153 PORT_IRQ_DEV_XCHG = (1 << 7), /* device exchanged */
154 PORT_IRQ_8B10B = (1 << 8), /* 8b/10b decode error threshold */
155 PORT_IRQ_CRC = (1 << 9), /* CRC error threshold */
156 PORT_IRQ_HANDSHAKE = (1 << 10), /* handshake error threshold */
157 PORT_IRQ_SDB_NOTIFY = (1 << 11), /* SDB notify received */
158
159 DEF_PORT_IRQ = PORT_IRQ_COMPLETE | PORT_IRQ_ERROR |
160 PORT_IRQ_DEV_XCHG | PORT_IRQ_UNK_FIS,
147 161
148 /* bits[27:16] are unmasked (raw) */ 162 /* bits[27:16] are unmasked (raw) */
149 PORT_IRQ_RAW_SHIFT = 16, 163 PORT_IRQ_RAW_SHIFT = 16,
@@ -174,7 +188,7 @@ enum {
174 PORT_CERR_CMD_PCIPERR = 27, /* ctrl[15:13] 110 - PCI parity err while fetching PRB */ 188 PORT_CERR_CMD_PCIPERR = 27, /* ctrl[15:13] 110 - PCI parity err while fetching PRB */
175 PORT_CERR_XFR_UNDEF = 32, /* PSD ecode 00 - undefined */ 189 PORT_CERR_XFR_UNDEF = 32, /* PSD ecode 00 - undefined */
176 PORT_CERR_XFR_TGTABRT = 33, /* PSD ecode 01 - target abort */ 190 PORT_CERR_XFR_TGTABRT = 33, /* PSD ecode 01 - target abort */
177 PORT_CERR_XFR_MSGABRT = 34, /* PSD ecode 10 - master abort */ 191 PORT_CERR_XFR_MSTABRT = 34, /* PSD ecode 10 - master abort */
178	PORT_CERR_XFR_PCIPERR	= 35, /* PSD ecode 11 - PCI parity err during transfer */ 192	PORT_CERR_XFR_PCIPERR	= 35, /* PSD ecode 11 - PCI parity err during transfer */
179 PORT_CERR_SENDSERVICE = 36, /* FIS received while sending service */ 193 PORT_CERR_SENDSERVICE = 36, /* FIS received while sending service */
180 194
@@ -202,11 +216,19 @@ enum {
202 SGE_DRD = (1 << 29), /* discard data read (/dev/null) 216 SGE_DRD = (1 << 29), /* discard data read (/dev/null)
203 data address ignored */ 217 data address ignored */
204 218
219 SIL24_MAX_CMDS = 31,
220
205 /* board id */ 221 /* board id */
206 BID_SIL3124 = 0, 222 BID_SIL3124 = 0,
207 BID_SIL3132 = 1, 223 BID_SIL3132 = 1,
208 BID_SIL3131 = 2, 224 BID_SIL3131 = 2,
209 225
226 /* host flags */
227 SIL24_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
228 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
229 ATA_FLAG_NCQ,
230 SIL24_FLAG_PCIX_IRQ_WOC = (1 << 24), /* IRQ loss errata on PCI-X */
231
210 IRQ_STAT_4PORTS = 0xf, 232 IRQ_STAT_4PORTS = 0xf,
211}; 233};
212 234
@@ -226,6 +248,58 @@ union sil24_cmd_block {
226 struct sil24_atapi_block atapi; 248 struct sil24_atapi_block atapi;
227}; 249};
228 250
251static struct sil24_cerr_info {
252 unsigned int err_mask, action;
253 const char *desc;
254} sil24_cerr_db[] = {
255 [0] = { AC_ERR_DEV, ATA_EH_REVALIDATE,
256 "device error" },
257 [PORT_CERR_DEV] = { AC_ERR_DEV, ATA_EH_REVALIDATE,
258 "device error via D2H FIS" },
259 [PORT_CERR_SDB] = { AC_ERR_DEV, ATA_EH_REVALIDATE,
260 "device error via SDB FIS" },
261 [PORT_CERR_DATA] = { AC_ERR_ATA_BUS, ATA_EH_SOFTRESET,
262 "error in data FIS" },
263 [PORT_CERR_SEND] = { AC_ERR_ATA_BUS, ATA_EH_SOFTRESET,
264 "failed to transmit command FIS" },
265 [PORT_CERR_INCONSISTENT] = { AC_ERR_HSM, ATA_EH_SOFTRESET,
266 "protocol mismatch" },
267 [PORT_CERR_DIRECTION] = { AC_ERR_HSM, ATA_EH_SOFTRESET,
268 "data directon mismatch" },
269 [PORT_CERR_UNDERRUN] = { AC_ERR_HSM, ATA_EH_SOFTRESET,
270 "ran out of SGEs while writing" },
271 [PORT_CERR_OVERRUN] = { AC_ERR_HSM, ATA_EH_SOFTRESET,
272 "ran out of SGEs while reading" },
273 [PORT_CERR_PKT_PROT] = { AC_ERR_HSM, ATA_EH_SOFTRESET,
274 "invalid data directon for ATAPI CDB" },
275 [PORT_CERR_SGT_BOUNDARY] = { AC_ERR_SYSTEM, ATA_EH_SOFTRESET,
276 "SGT no on qword boundary" },
277 [PORT_CERR_SGT_TGTABRT] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
278 "PCI target abort while fetching SGT" },
279 [PORT_CERR_SGT_MSTABRT] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
280 "PCI master abort while fetching SGT" },
281 [PORT_CERR_SGT_PCIPERR] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
282 "PCI parity error while fetching SGT" },
283 [PORT_CERR_CMD_BOUNDARY] = { AC_ERR_SYSTEM, ATA_EH_SOFTRESET,
284 "PRB not on qword boundary" },
285 [PORT_CERR_CMD_TGTABRT] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
286 "PCI target abort while fetching PRB" },
287 [PORT_CERR_CMD_MSTABRT] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
288 "PCI master abort while fetching PRB" },
289 [PORT_CERR_CMD_PCIPERR] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
290 "PCI parity error while fetching PRB" },
291 [PORT_CERR_XFR_UNDEF] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
292 "undefined error while transferring data" },
293 [PORT_CERR_XFR_TGTABRT] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
294 "PCI target abort while transferring data" },
295 [PORT_CERR_XFR_MSTABRT] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
296 "PCI master abort while transferring data" },
297 [PORT_CERR_XFR_PCIPERR] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
298 "PCI parity error while transferring data" },
299 [PORT_CERR_SENDSERVICE] = { AC_ERR_HSM, ATA_EH_SOFTRESET,
300 "FIS received while sending service FIS" },
301};
302
229/* 303/*
230 * ap->private_data 304 * ap->private_data
231 * 305 *
@@ -253,8 +327,11 @@ static int sil24_probe_reset(struct ata_port *ap, unsigned int *classes);
253static void sil24_qc_prep(struct ata_queued_cmd *qc); 327static void sil24_qc_prep(struct ata_queued_cmd *qc);
254static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc); 328static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc);
255static void sil24_irq_clear(struct ata_port *ap); 329static void sil24_irq_clear(struct ata_port *ap);
256static void sil24_eng_timeout(struct ata_port *ap);
257static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *regs); 330static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
331static void sil24_freeze(struct ata_port *ap);
332static void sil24_thaw(struct ata_port *ap);
333static void sil24_error_handler(struct ata_port *ap);
334static void sil24_post_internal_cmd(struct ata_queued_cmd *qc);
258static int sil24_port_start(struct ata_port *ap); 335static int sil24_port_start(struct ata_port *ap);
259static void sil24_port_stop(struct ata_port *ap); 336static void sil24_port_stop(struct ata_port *ap);
260static void sil24_host_stop(struct ata_host_set *host_set); 337static void sil24_host_stop(struct ata_host_set *host_set);
@@ -281,7 +358,8 @@ static struct scsi_host_template sil24_sht = {
281 .name = DRV_NAME, 358 .name = DRV_NAME,
282 .ioctl = ata_scsi_ioctl, 359 .ioctl = ata_scsi_ioctl,
283 .queuecommand = ata_scsi_queuecmd, 360 .queuecommand = ata_scsi_queuecmd,
284 .can_queue = ATA_DEF_QUEUE, 361 .change_queue_depth = ata_scsi_change_queue_depth,
362 .can_queue = SIL24_MAX_CMDS,
285 .this_id = ATA_SHT_THIS_ID, 363 .this_id = ATA_SHT_THIS_ID,
286 .sg_tablesize = LIBATA_MAX_PRD, 364 .sg_tablesize = LIBATA_MAX_PRD,
287 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 365 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
@@ -309,14 +387,17 @@ static const struct ata_port_operations sil24_ops = {
309 .qc_prep = sil24_qc_prep, 387 .qc_prep = sil24_qc_prep,
310 .qc_issue = sil24_qc_issue, 388 .qc_issue = sil24_qc_issue,
311 389
312 .eng_timeout = sil24_eng_timeout,
313
314 .irq_handler = sil24_interrupt, 390 .irq_handler = sil24_interrupt,
315 .irq_clear = sil24_irq_clear, 391 .irq_clear = sil24_irq_clear,
316 392
317 .scr_read = sil24_scr_read, 393 .scr_read = sil24_scr_read,
318 .scr_write = sil24_scr_write, 394 .scr_write = sil24_scr_write,
319 395
396 .freeze = sil24_freeze,
397 .thaw = sil24_thaw,
398 .error_handler = sil24_error_handler,
399 .post_internal_cmd = sil24_post_internal_cmd,
400
320 .port_start = sil24_port_start, 401 .port_start = sil24_port_start,
321 .port_stop = sil24_port_stop, 402 .port_stop = sil24_port_stop,
322 .host_stop = sil24_host_stop, 403 .host_stop = sil24_host_stop,
@@ -333,9 +414,8 @@ static struct ata_port_info sil24_port_info[] = {
333 /* sil_3124 */ 414 /* sil_3124 */
334 { 415 {
335 .sht = &sil24_sht, 416 .sht = &sil24_sht,
336 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 417 .host_flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(4) |
337 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA | 418 SIL24_FLAG_PCIX_IRQ_WOC,
338 SIL24_NPORTS2FLAG(4),
339 .pio_mask = 0x1f, /* pio0-4 */ 419 .pio_mask = 0x1f, /* pio0-4 */
340 .mwdma_mask = 0x07, /* mwdma0-2 */ 420 .mwdma_mask = 0x07, /* mwdma0-2 */
341 .udma_mask = 0x3f, /* udma0-5 */ 421 .udma_mask = 0x3f, /* udma0-5 */
@@ -344,9 +424,7 @@ static struct ata_port_info sil24_port_info[] = {
344 /* sil_3132 */ 424 /* sil_3132 */
345 { 425 {
346 .sht = &sil24_sht, 426 .sht = &sil24_sht,
347 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 427 .host_flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(2),
348 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
349 SIL24_NPORTS2FLAG(2),
350 .pio_mask = 0x1f, /* pio0-4 */ 428 .pio_mask = 0x1f, /* pio0-4 */
351 .mwdma_mask = 0x07, /* mwdma0-2 */ 429 .mwdma_mask = 0x07, /* mwdma0-2 */
352 .udma_mask = 0x3f, /* udma0-5 */ 430 .udma_mask = 0x3f, /* udma0-5 */
@@ -355,9 +433,7 @@ static struct ata_port_info sil24_port_info[] = {
355 /* sil_3131/sil_3531 */ 433 /* sil_3131/sil_3531 */
356 { 434 {
357 .sht = &sil24_sht, 435 .sht = &sil24_sht,
358 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 436 .host_flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(1),
359 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
360 SIL24_NPORTS2FLAG(1),
361 .pio_mask = 0x1f, /* pio0-4 */ 437 .pio_mask = 0x1f, /* pio0-4 */
362 .mwdma_mask = 0x07, /* mwdma0-2 */ 438 .mwdma_mask = 0x07, /* mwdma0-2 */
363 .udma_mask = 0x3f, /* udma0-5 */ 439 .udma_mask = 0x3f, /* udma0-5 */
@@ -365,6 +441,13 @@ static struct ata_port_info sil24_port_info[] = {
365 }, 441 },
366}; 442};
367 443
444static int sil24_tag(int tag)
445{
446 if (unlikely(ata_tag_internal(tag)))
447 return 0;
448 return tag;
449}
450
368static void sil24_dev_config(struct ata_port *ap, struct ata_device *dev) 451static void sil24_dev_config(struct ata_port *ap, struct ata_device *dev)
369{ 452{
370 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr; 453 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
@@ -426,56 +509,65 @@ static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
426 *tf = pp->tf; 509 *tf = pp->tf;
427} 510}
428 511
429static int sil24_softreset(struct ata_port *ap, int verbose, 512static int sil24_init_port(struct ata_port *ap)
430 unsigned int *class) 513{
514 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
515 u32 tmp;
516
517 writel(PORT_CS_INIT, port + PORT_CTRL_STAT);
518 ata_wait_register(port + PORT_CTRL_STAT,
519 PORT_CS_INIT, PORT_CS_INIT, 10, 100);
520 tmp = ata_wait_register(port + PORT_CTRL_STAT,
521 PORT_CS_RDY, 0, 10, 100);
522
523 if ((tmp & (PORT_CS_INIT | PORT_CS_RDY)) != PORT_CS_RDY)
524 return -EIO;
525 return 0;
526}
527
528static int sil24_softreset(struct ata_port *ap, unsigned int *class)
431{ 529{
432 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr; 530 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
433 struct sil24_port_priv *pp = ap->private_data; 531 struct sil24_port_priv *pp = ap->private_data;
434 struct sil24_prb *prb = &pp->cmd_block[0].ata.prb; 532 struct sil24_prb *prb = &pp->cmd_block[0].ata.prb;
435 dma_addr_t paddr = pp->cmd_block_dma; 533 dma_addr_t paddr = pp->cmd_block_dma;
436 unsigned long timeout = jiffies + ATA_TMOUT_BOOT * HZ; 534 u32 mask, irq_stat;
437 u32 irq_enable, irq_stat; 535 const char *reason;
438 536
439 DPRINTK("ENTER\n"); 537 DPRINTK("ENTER\n");
440 538
441 if (!sata_dev_present(ap)) { 539 if (ata_port_offline(ap)) {
442 DPRINTK("PHY reports no device\n"); 540 DPRINTK("PHY reports no device\n");
443 *class = ATA_DEV_NONE; 541 *class = ATA_DEV_NONE;
444 goto out; 542 goto out;
445 } 543 }
446 544
447 /* temporarily turn off IRQs during SRST */ 545 /* put the port into known state */
448 irq_enable = readl(port + PORT_IRQ_ENABLE_SET); 546 if (sil24_init_port(ap)) {
449	writel(irq_enable, port + PORT_IRQ_ENABLE_CLR); 547		reason = "port not ready";
450 548 goto err;
451 /* 549 }
452 * XXX: Not sure whether the following sleep is needed or not.
453 * The original driver had it. So....
454 */
455 msleep(10);
456 550
457 prb->ctrl = PRB_CTRL_SRST; 551 /* do SRST */
552 prb->ctrl = cpu_to_le16(PRB_CTRL_SRST);
458 prb->fis[1] = 0; /* no PM yet */ 553 prb->fis[1] = 0; /* no PM yet */
459 554
460 writel((u32)paddr, port + PORT_CMD_ACTIVATE); 555 writel((u32)paddr, port + PORT_CMD_ACTIVATE);
556 writel((u64)paddr >> 32, port + PORT_CMD_ACTIVATE + 4);
461 557
462 do { 558 mask = (PORT_IRQ_COMPLETE | PORT_IRQ_ERROR) << PORT_IRQ_RAW_SHIFT;
463 irq_stat = readl(port + PORT_IRQ_STAT); 559 irq_stat = ata_wait_register(port + PORT_IRQ_STAT, mask, 0x0,
464 writel(irq_stat, port + PORT_IRQ_STAT); /* clear irq */ 560 100, ATA_TMOUT_BOOT / HZ * 1000);
465 561
466 irq_stat >>= PORT_IRQ_RAW_SHIFT; 562 writel(irq_stat, port + PORT_IRQ_STAT); /* clear IRQs */
467 if (irq_stat & (PORT_IRQ_COMPLETE | PORT_IRQ_ERROR)) 563 irq_stat >>= PORT_IRQ_RAW_SHIFT;
468 break;
469
470 msleep(100);
471 } while (time_before(jiffies, timeout));
472
473 /* restore IRQs */
474 writel(irq_enable, port + PORT_IRQ_ENABLE_SET);
475 564
476 if (!(irq_stat & PORT_IRQ_COMPLETE)) { 565 if (!(irq_stat & PORT_IRQ_COMPLETE)) {
477 DPRINTK("EXIT, srst failed\n"); 566 if (irq_stat & PORT_IRQ_ERROR)
478 return -EIO; 567 reason = "SRST command error";
568 else
569 reason = "timeout";
570 goto err;
479 } 571 }
480 572
481 sil24_update_tf(ap); 573 sil24_update_tf(ap);
@@ -487,15 +579,55 @@ static int sil24_softreset(struct ata_port *ap, int verbose,
487 out: 579 out:
488 DPRINTK("EXIT, class=%u\n", *class); 580 DPRINTK("EXIT, class=%u\n", *class);
489 return 0; 581 return 0;
582
583 err:
584 ata_port_printk(ap, KERN_ERR, "softreset failed (%s)\n", reason);
585 return -EIO;
490} 586}
491 587
492static int sil24_hardreset(struct ata_port *ap, int verbose, 588static int sil24_hardreset(struct ata_port *ap, unsigned int *class)
493 unsigned int *class)
494{ 589{
495 unsigned int dummy_class; 590 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
591 const char *reason;
592 int tout_msec;
593 u32 tmp;
594
595 /* sil24 does the right thing(tm) without any protection */
596 sata_set_spd(ap);
597
598 tout_msec = 100;
599 if (ata_port_online(ap))
600 tout_msec = 5000;
496 601
497 /* sil24 doesn't report device signature after hard reset */ 602 writel(PORT_CS_DEV_RST, port + PORT_CTRL_STAT);
498 return sata_std_hardreset(ap, verbose, &dummy_class); 603 tmp = ata_wait_register(port + PORT_CTRL_STAT,
604 PORT_CS_DEV_RST, PORT_CS_DEV_RST, 10, tout_msec);
605
606	/* SStatus oscillates between zero and valid status for a short
607	 * duration after DEV_RST; give it time to settle.
608 */
609 msleep(100);
610
611 if (tmp & PORT_CS_DEV_RST) {
612 if (ata_port_offline(ap))
613 return 0;
614 reason = "link not ready";
615 goto err;
616 }
617
618 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
619 reason = "device not ready";
620 goto err;
621 }
622
623 /* sil24 doesn't report device class code after hardreset,
624 * leave *class alone.
625 */
626 return 0;
627
628 err:
629 ata_port_printk(ap, KERN_ERR, "hardreset failed (%s)\n", reason);
630 return -EIO;
499} 631}
500 632
501static int sil24_probe_reset(struct ata_port *ap, unsigned int *classes) 633static int sil24_probe_reset(struct ata_port *ap, unsigned int *classes)
@@ -528,17 +660,20 @@ static void sil24_qc_prep(struct ata_queued_cmd *qc)
528{ 660{
529 struct ata_port *ap = qc->ap; 661 struct ata_port *ap = qc->ap;
530 struct sil24_port_priv *pp = ap->private_data; 662 struct sil24_port_priv *pp = ap->private_data;
531 union sil24_cmd_block *cb = pp->cmd_block + qc->tag; 663 union sil24_cmd_block *cb;
532 struct sil24_prb *prb; 664 struct sil24_prb *prb;
533 struct sil24_sge *sge; 665 struct sil24_sge *sge;
666 u16 ctrl = 0;
667
668 cb = &pp->cmd_block[sil24_tag(qc->tag)];
534 669
535 switch (qc->tf.protocol) { 670 switch (qc->tf.protocol) {
536 case ATA_PROT_PIO: 671 case ATA_PROT_PIO:
537 case ATA_PROT_DMA: 672 case ATA_PROT_DMA:
673 case ATA_PROT_NCQ:
538 case ATA_PROT_NODATA: 674 case ATA_PROT_NODATA:
539 prb = &cb->ata.prb; 675 prb = &cb->ata.prb;
540 sge = cb->ata.sge; 676 sge = cb->ata.sge;
541 prb->ctrl = 0;
542 break; 677 break;
543 678
544 case ATA_PROT_ATAPI: 679 case ATA_PROT_ATAPI:
@@ -551,12 +686,10 @@ static void sil24_qc_prep(struct ata_queued_cmd *qc)
551 686
552 if (qc->tf.protocol != ATA_PROT_ATAPI_NODATA) { 687 if (qc->tf.protocol != ATA_PROT_ATAPI_NODATA) {
553 if (qc->tf.flags & ATA_TFLAG_WRITE) 688 if (qc->tf.flags & ATA_TFLAG_WRITE)
554 prb->ctrl = PRB_CTRL_PACKET_WRITE; 689 ctrl = PRB_CTRL_PACKET_WRITE;
555 else 690 else
556 prb->ctrl = PRB_CTRL_PACKET_READ; 691 ctrl = PRB_CTRL_PACKET_READ;
557 } else 692 }
558 prb->ctrl = 0;
559
560 break; 693 break;
561 694
562 default: 695 default:
@@ -565,6 +698,7 @@ static void sil24_qc_prep(struct ata_queued_cmd *qc)
565 BUG(); 698 BUG();
566 } 699 }
567 700
701 prb->ctrl = cpu_to_le16(ctrl);
568 ata_tf_to_fis(&qc->tf, prb->fis, 0); 702 ata_tf_to_fis(&qc->tf, prb->fis, 0);
569 703
570 if (qc->flags & ATA_QCFLAG_DMAMAP) 704 if (qc->flags & ATA_QCFLAG_DMAMAP)
@@ -574,11 +708,18 @@ static void sil24_qc_prep(struct ata_queued_cmd *qc)
574static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc) 708static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc)
575{ 709{
576 struct ata_port *ap = qc->ap; 710 struct ata_port *ap = qc->ap;
577 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
578 struct sil24_port_priv *pp = ap->private_data; 711 struct sil24_port_priv *pp = ap->private_data;
579 dma_addr_t paddr = pp->cmd_block_dma + qc->tag * sizeof(*pp->cmd_block); 712 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
713 unsigned int tag = sil24_tag(qc->tag);
714 dma_addr_t paddr;
715 void __iomem *activate;
716
717 paddr = pp->cmd_block_dma + tag * sizeof(*pp->cmd_block);
718 activate = port + PORT_CMD_ACTIVATE + tag * 8;
719
720 writel((u32)paddr, activate);
721 writel((u64)paddr >> 32, activate + 4);
580 722
581 writel((u32)paddr, port + PORT_CMD_ACTIVATE);
582 return 0; 723 return 0;
583} 724}
584 725
@@ -587,162 +728,141 @@ static void sil24_irq_clear(struct ata_port *ap)
587 /* unused */ 728 /* unused */
588} 729}
589 730
590static int __sil24_restart_controller(void __iomem *port) 731static void sil24_freeze(struct ata_port *ap)
591{ 732{
592 u32 tmp; 733 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
593 int cnt;
594
595 writel(PORT_CS_INIT, port + PORT_CTRL_STAT);
596
597 /* Max ~10ms */
598 for (cnt = 0; cnt < 10000; cnt++) {
599 tmp = readl(port + PORT_CTRL_STAT);
600 if (tmp & PORT_CS_RDY)
601 return 0;
602 udelay(1);
603 }
604 734
605 return -1; 735 /* Port-wide IRQ mask in HOST_CTRL doesn't really work, clear
736 * PORT_IRQ_ENABLE instead.
737 */
738 writel(0xffff, port + PORT_IRQ_ENABLE_CLR);
606} 739}
607 740
608static void sil24_restart_controller(struct ata_port *ap) 741static void sil24_thaw(struct ata_port *ap)
609{ 742{
610 if (__sil24_restart_controller((void __iomem *)ap->ioaddr.cmd_addr)) 743 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
611 printk(KERN_ERR DRV_NAME 744 u32 tmp;
612 " ata%u: failed to restart controller\n", ap->id); 745
746 /* clear IRQ */
747 tmp = readl(port + PORT_IRQ_STAT);
748 writel(tmp, port + PORT_IRQ_STAT);
749
750 /* turn IRQ back on */
751 writel(DEF_PORT_IRQ, port + PORT_IRQ_ENABLE_SET);
613} 752}
614 753
615static int __sil24_reset_controller(void __iomem *port) 754static void sil24_error_intr(struct ata_port *ap)
616{ 755{
617 int cnt; 756 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
618 u32 tmp; 757 struct ata_eh_info *ehi = &ap->eh_info;
758 int freeze = 0;
759 u32 irq_stat;
619 760
620 /* Reset controller state. Is this correct? */ 761 /* on error, we need to clear IRQ explicitly */
621 writel(PORT_CS_DEV_RST, port + PORT_CTRL_STAT); 762 irq_stat = readl(port + PORT_IRQ_STAT);
622 readl(port + PORT_CTRL_STAT); /* sync */ 763 writel(irq_stat, port + PORT_IRQ_STAT);
623 764
624 /* Max ~100ms */ 765 /* first, analyze and record host port events */
625 for (cnt = 0; cnt < 1000; cnt++) { 766 ata_ehi_clear_desc(ehi);
626 udelay(100);
627 tmp = readl(port + PORT_CTRL_STAT);
628 if (!(tmp & PORT_CS_DEV_RST))
629 break;
630 }
631 767
632 if (tmp & PORT_CS_DEV_RST) 768 ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);
633 return -1;
634 769
635 if (tmp & PORT_CS_RDY) 770 if (irq_stat & PORT_IRQ_DEV_XCHG) {
636 return 0; 771 ehi->err_mask |= AC_ERR_ATA_BUS;
772 /* sil24 doesn't recover very well from phy
773 * disconnection with a softreset. Force hardreset.
774 */
775 ehi->action |= ATA_EH_HARDRESET;
776 ata_ehi_push_desc(ehi, ", device_exchanged");
777 freeze = 1;
778 }
637 779
638 return __sil24_restart_controller(port); 780 if (irq_stat & PORT_IRQ_UNK_FIS) {
639} 781 ehi->err_mask |= AC_ERR_HSM;
782 ehi->action |= ATA_EH_SOFTRESET;
783		ata_ehi_push_desc(ehi, ", unknown FIS");
784 freeze = 1;
785 }
640 786
641static void sil24_reset_controller(struct ata_port *ap) 787 /* deal with command error */
642{ 788 if (irq_stat & PORT_IRQ_ERROR) {
643 printk(KERN_NOTICE DRV_NAME 789 struct sil24_cerr_info *ci = NULL;
644 " ata%u: resetting controller...\n", ap->id); 790 unsigned int err_mask = 0, action = 0;
645 if (__sil24_reset_controller((void __iomem *)ap->ioaddr.cmd_addr)) 791 struct ata_queued_cmd *qc;
646 printk(KERN_ERR DRV_NAME 792 u32 cerr;
647 " ata%u: failed to reset controller\n", ap->id); 793
648} 794 /* analyze CMD_ERR */
795 cerr = readl(port + PORT_CMD_ERR);
796 if (cerr < ARRAY_SIZE(sil24_cerr_db))
797 ci = &sil24_cerr_db[cerr];
798
799 if (ci && ci->desc) {
800 err_mask |= ci->err_mask;
801 action |= ci->action;
802 ata_ehi_push_desc(ehi, ", %s", ci->desc);
803 } else {
804 err_mask |= AC_ERR_OTHER;
805 action |= ATA_EH_SOFTRESET;
806 ata_ehi_push_desc(ehi, ", unknown command error %d",
807 cerr);
808 }
649 809
650static void sil24_eng_timeout(struct ata_port *ap) 810 /* record error info */
651{ 811 qc = ata_qc_from_tag(ap, ap->active_tag);
652 struct ata_queued_cmd *qc; 812 if (qc) {
813 sil24_update_tf(ap);
814 qc->err_mask |= err_mask;
815 } else
816 ehi->err_mask |= err_mask;
653 817
654 qc = ata_qc_from_tag(ap, ap->active_tag); 818 ehi->action |= action;
819 }
655 820
656 printk(KERN_ERR "ata%u: command timeout\n", ap->id); 821 /* freeze or abort */
657 qc->err_mask |= AC_ERR_TIMEOUT; 822 if (freeze)
658 ata_eh_qc_complete(qc); 823 ata_port_freeze(ap);
824 else
825 ata_port_abort(ap);
826}
659 827
660 sil24_reset_controller(ap); 828static void sil24_finish_qc(struct ata_queued_cmd *qc)
829{
830 if (qc->flags & ATA_QCFLAG_RESULT_TF)
831 sil24_update_tf(qc->ap);
661} 832}
662 833
663static void sil24_error_intr(struct ata_port *ap, u32 slot_stat) 834static inline void sil24_host_intr(struct ata_port *ap)
664{ 835{
665 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
666 struct sil24_port_priv *pp = ap->private_data;
667 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr; 836 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
668 u32 irq_stat, cmd_err, sstatus, serror; 837 u32 slot_stat, qc_active;
669 unsigned int err_mask; 838 int rc;
670 839
671 irq_stat = readl(port + PORT_IRQ_STAT); 840 slot_stat = readl(port + PORT_SLOT_STAT);
672 writel(irq_stat, port + PORT_IRQ_STAT); /* clear irq */
673 841
674 if (!(irq_stat & PORT_IRQ_ERROR)) { 842 if (unlikely(slot_stat & HOST_SSTAT_ATTN)) {
675 /* ignore non-completion, non-error irqs for now */ 843 sil24_error_intr(ap);
676 printk(KERN_WARNING DRV_NAME
677 "ata%u: non-error exception irq (irq_stat %x)\n",
678 ap->id, irq_stat);
679 return; 844 return;
680 } 845 }
681 846
682 cmd_err = readl(port + PORT_CMD_ERR); 847 if (ap->flags & SIL24_FLAG_PCIX_IRQ_WOC)
683 sstatus = readl(port + PORT_SSTATUS); 848 writel(PORT_IRQ_COMPLETE, port + PORT_IRQ_STAT);
684 serror = readl(port + PORT_SERROR);
685 if (serror)
686 writel(serror, port + PORT_SERROR);
687 849
688 /* 850 qc_active = slot_stat & ~HOST_SSTAT_ATTN;
689 * Don't log ATAPI device errors. They're supposed to happen 851 rc = ata_qc_complete_multiple(ap, qc_active, sil24_finish_qc);
690 * and any serious errors will be logged using sense data by 852 if (rc > 0)
691 * the SCSI layer. 853 return;
692 */ 854 if (rc < 0) {
693 if (ap->device[0].class != ATA_DEV_ATAPI || cmd_err > PORT_CERR_SDB) 855 struct ata_eh_info *ehi = &ap->eh_info;
694 printk("ata%u: error interrupt on port%d\n" 856 ehi->err_mask |= AC_ERR_HSM;
695 " stat=0x%x irq=0x%x cmd_err=%d sstatus=0x%x serror=0x%x\n", 857 ehi->action |= ATA_EH_SOFTRESET;
696 ap->id, ap->port_no, slot_stat, irq_stat, cmd_err, sstatus, serror); 858 ata_port_freeze(ap);
697 859 return;
698 if (cmd_err == PORT_CERR_DEV || cmd_err == PORT_CERR_SDB) {
699 /*
700 * Device is reporting error, tf registers are valid.
701 */
702 sil24_update_tf(ap);
703 err_mask = ac_err_mask(pp->tf.command);
704 sil24_restart_controller(ap);
705 } else {
706 /*
707 * Other errors. libata currently doesn't have any
708 * mechanism to report these errors. Just turn on
709 * ATA_ERR.
710 */
711 err_mask = AC_ERR_OTHER;
712 sil24_reset_controller(ap);
713 } 860 }
714 861
715 if (qc) { 862 if (ata_ratelimit())
716 qc->err_mask |= err_mask; 863 ata_port_printk(ap, KERN_INFO, "spurious interrupt "
717 ata_qc_complete(qc); 864 "(slot_stat 0x%x active_tag %d sactive 0x%x)\n",
718 } 865 slot_stat, ap->active_tag, ap->sactive);
719}
720
721static inline void sil24_host_intr(struct ata_port *ap)
722{
723 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
724 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
725 u32 slot_stat;
726
727 slot_stat = readl(port + PORT_SLOT_STAT);
728 if (!(slot_stat & HOST_SSTAT_ATTN)) {
729 struct sil24_port_priv *pp = ap->private_data;
730 /*
731 * !HOST_SSAT_ATTN guarantees successful completion,
732 * so reading back tf registers is unnecessary for
733 * most commands. TODO: read tf registers for
734 * commands which require these values on successful
735 * completion (EXECUTE DEVICE DIAGNOSTIC, CHECK POWER,
736 * DEVICE RESET and READ PORT MULTIPLIER (any more?).
737 */
738 sil24_update_tf(ap);
739
740 if (qc) {
741 qc->err_mask |= ac_err_mask(pp->tf.command);
742 ata_qc_complete(qc);
743 }
744 } else
745 sil24_error_intr(ap, slot_stat);
746} 866}
747 867
748static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *regs) 868static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
@@ -769,7 +889,7 @@ static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *
769 for (i = 0; i < host_set->n_ports; i++) 889 for (i = 0; i < host_set->n_ports; i++)
770 if (status & (1 << i)) { 890 if (status & (1 << i)) {
771 struct ata_port *ap = host_set->ports[i]; 891 struct ata_port *ap = host_set->ports[i];
772 if (ap && !(ap->flags & ATA_FLAG_PORT_DISABLED)) { 892 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
773 sil24_host_intr(host_set->ports[i]); 893 sil24_host_intr(host_set->ports[i]);
774 handled++; 894 handled++;
775 } else 895 } else
@@ -782,9 +902,34 @@ static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *
782 return IRQ_RETVAL(handled); 902 return IRQ_RETVAL(handled);
783} 903}
784 904
905static void sil24_error_handler(struct ata_port *ap)
906{
907 struct ata_eh_context *ehc = &ap->eh_context;
908
909 if (sil24_init_port(ap)) {
910 ata_eh_freeze_port(ap);
911 ehc->i.action |= ATA_EH_HARDRESET;
912 }
913
914 /* perform recovery */
915 ata_do_eh(ap, sil24_softreset, sil24_hardreset, ata_std_postreset);
916}
917
918static void sil24_post_internal_cmd(struct ata_queued_cmd *qc)
919{
920 struct ata_port *ap = qc->ap;
921
922 if (qc->flags & ATA_QCFLAG_FAILED)
923 qc->err_mask |= AC_ERR_OTHER;
924
925 /* make DMA engine forget about the failed command */
926 if (qc->err_mask)
927 sil24_init_port(ap);
928}
929
785static inline void sil24_cblk_free(struct sil24_port_priv *pp, struct device *dev) 930static inline void sil24_cblk_free(struct sil24_port_priv *pp, struct device *dev)
786{ 931{
787 const size_t cb_size = sizeof(*pp->cmd_block); 932 const size_t cb_size = sizeof(*pp->cmd_block) * SIL24_MAX_CMDS;
788 933
789 dma_free_coherent(dev, cb_size, pp->cmd_block, pp->cmd_block_dma); 934 dma_free_coherent(dev, cb_size, pp->cmd_block, pp->cmd_block_dma);
790} 935}
@@ -794,7 +939,7 @@ static int sil24_port_start(struct ata_port *ap)
794 struct device *dev = ap->host_set->dev; 939 struct device *dev = ap->host_set->dev;
795 struct sil24_port_priv *pp; 940 struct sil24_port_priv *pp;
796 union sil24_cmd_block *cb; 941 union sil24_cmd_block *cb;
797 size_t cb_size = sizeof(*cb); 942 size_t cb_size = sizeof(*cb) * SIL24_MAX_CMDS;
798 dma_addr_t cb_dma; 943 dma_addr_t cb_dma;
799 int rc = -ENOMEM; 944 int rc = -ENOMEM;
800 945
@@ -858,6 +1003,7 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
858 void __iomem *host_base = NULL; 1003 void __iomem *host_base = NULL;
859 void __iomem *port_base = NULL; 1004 void __iomem *port_base = NULL;
860 int i, rc; 1005 int i, rc;
1006 u32 tmp;
861 1007
862 if (!printed_version++) 1008 if (!printed_version++)
863 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); 1009 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
@@ -910,35 +1056,51 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
910 /* 1056 /*
911 * Configure the device 1057 * Configure the device
912 */ 1058 */
913 /* 1059 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
914 * FIXME: This device is certainly 64-bit capable. We just 1060 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
915 * don't know how to use it. After fixing 32bit activation in 1061 if (rc) {
916 * this function, enable 64bit masks here. 1062 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
917 */ 1063 if (rc) {
918 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK); 1064 dev_printk(KERN_ERR, &pdev->dev,
919 if (rc) { 1065 "64-bit DMA enable failed\n");
920 dev_printk(KERN_ERR, &pdev->dev, 1066 goto out_free;
921 "32-bit DMA enable failed\n"); 1067 }
922 goto out_free; 1068 }
923 } 1069 } else {
924 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); 1070 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
925 if (rc) { 1071 if (rc) {
926 dev_printk(KERN_ERR, &pdev->dev, 1072 dev_printk(KERN_ERR, &pdev->dev,
927 "32-bit consistent DMA enable failed\n"); 1073 "32-bit DMA enable failed\n");
928 goto out_free; 1074 goto out_free;
1075 }
1076 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
1077 if (rc) {
1078 dev_printk(KERN_ERR, &pdev->dev,
1079 "32-bit consistent DMA enable failed\n");
1080 goto out_free;
1081 }
929 } 1082 }
930 1083
931 /* GPIO off */ 1084 /* GPIO off */
932 writel(0, host_base + HOST_FLASH_CMD); 1085 writel(0, host_base + HOST_FLASH_CMD);
933 1086
934 /* Mask interrupts during initialization */ 1087 /* Apply workaround for completion IRQ loss on PCI-X errata */
1088 if (probe_ent->host_flags & SIL24_FLAG_PCIX_IRQ_WOC) {
1089 tmp = readl(host_base + HOST_CTRL);
1090 if (tmp & (HOST_CTRL_TRDY | HOST_CTRL_STOP | HOST_CTRL_DEVSEL))
1091 dev_printk(KERN_INFO, &pdev->dev,
1092 "Applying completion IRQ loss on PCI-X "
1093 "errata fix\n");
1094 else
1095 probe_ent->host_flags &= ~SIL24_FLAG_PCIX_IRQ_WOC;
1096 }
1097
1098 /* clear global reset & mask interrupts during initialization */
935 writel(0, host_base + HOST_CTRL); 1099 writel(0, host_base + HOST_CTRL);
936 1100
937 for (i = 0; i < probe_ent->n_ports; i++) { 1101 for (i = 0; i < probe_ent->n_ports; i++) {
938 void __iomem *port = port_base + i * PORT_REGS_SIZE; 1102 void __iomem *port = port_base + i * PORT_REGS_SIZE;
939 unsigned long portu = (unsigned long)port; 1103 unsigned long portu = (unsigned long)port;
940 u32 tmp;
941 int cnt;
942 1104
943 probe_ent->port[i].cmd_addr = portu + PORT_PRB; 1105 probe_ent->port[i].cmd_addr = portu + PORT_PRB;
944 probe_ent->port[i].scr_addr = portu + PORT_SCONTROL; 1106 probe_ent->port[i].scr_addr = portu + PORT_SCONTROL;
@@ -952,18 +1114,20 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
952 tmp = readl(port + PORT_CTRL_STAT); 1114 tmp = readl(port + PORT_CTRL_STAT);
953 if (tmp & PORT_CS_PORT_RST) { 1115 if (tmp & PORT_CS_PORT_RST) {
954 writel(PORT_CS_PORT_RST, port + PORT_CTRL_CLR); 1116 writel(PORT_CS_PORT_RST, port + PORT_CTRL_CLR);
955 readl(port + PORT_CTRL_STAT); /* sync */ 1117 tmp = ata_wait_register(port + PORT_CTRL_STAT,
956 for (cnt = 0; cnt < 10; cnt++) { 1118 PORT_CS_PORT_RST,
957 msleep(10); 1119 PORT_CS_PORT_RST, 10, 100);
958 tmp = readl(port + PORT_CTRL_STAT);
959 if (!(tmp & PORT_CS_PORT_RST))
960 break;
961 }
962 if (tmp & PORT_CS_PORT_RST) 1120 if (tmp & PORT_CS_PORT_RST)
963 dev_printk(KERN_ERR, &pdev->dev, 1121 dev_printk(KERN_ERR, &pdev->dev,
964 "failed to clear port RST\n"); 1122 "failed to clear port RST\n");
965 } 1123 }
966 1124
1125 /* Configure IRQ WoC */
1126 if (probe_ent->host_flags & SIL24_FLAG_PCIX_IRQ_WOC)
1127 writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_STAT);
1128 else
1129 writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_CLR);
1130
967 /* Zero error counters. */ 1131 /* Zero error counters. */
968 writel(0x8000, port + PORT_DECODE_ERR_THRESH); 1132 writel(0x8000, port + PORT_DECODE_ERR_THRESH);
969 writel(0x8000, port + PORT_CRC_ERR_THRESH); 1133 writel(0x8000, port + PORT_CRC_ERR_THRESH);
@@ -972,26 +1136,11 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
972 writel(0x0000, port + PORT_CRC_ERR_CNT); 1136 writel(0x0000, port + PORT_CRC_ERR_CNT);
973 writel(0x0000, port + PORT_HSHK_ERR_CNT); 1137 writel(0x0000, port + PORT_HSHK_ERR_CNT);
974 1138
975 /* FIXME: 32bit activation? */ 1139 /* Always use 64bit activation */
976 writel(0, port + PORT_ACTIVATE_UPPER_ADDR); 1140 writel(PORT_CS_32BIT_ACTV, port + PORT_CTRL_CLR);
977 writel(PORT_CS_32BIT_ACTV, port + PORT_CTRL_STAT);
978
979 /* Configure interrupts */
980 writel(0xffff, port + PORT_IRQ_ENABLE_CLR);
981 writel(PORT_IRQ_COMPLETE | PORT_IRQ_ERROR | PORT_IRQ_SDB_FIS,
982 port + PORT_IRQ_ENABLE_SET);
983
984 /* Clear interrupts */
985 writel(0x0fff0fff, port + PORT_IRQ_STAT);
986 writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_CLR);
987 1141
988 /* Clear port multiplier enable and resume bits */ 1142 /* Clear port multiplier enable and resume bits */
989 writel(PORT_CS_PM_EN | PORT_CS_RESUME, port + PORT_CTRL_CLR); 1143 writel(PORT_CS_PM_EN | PORT_CS_RESUME, port + PORT_CTRL_CLR);
990
991 /* Reset itself */
992 if (__sil24_reset_controller(port))
993 dev_printk(KERN_ERR, &pdev->dev,
994 "failed to reset controller\n");
995 } 1144 }
996 1145
997 /* Turn on interrupts */ 1146 /* Turn on interrupts */
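Several sil24 hunks above replace hand-rolled udelay()/msleep() poll loops with ata_wait_register(). A model of its contract, under the assumption that it polls while (value & mask) == val and returns the last value read, so the caller distinguishes success from timeout by re-testing the mask:

static u32 wait_register_model(void __iomem *reg, u32 mask, u32 val,
			       unsigned long interval_msec,
			       unsigned long timeout_msec)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(timeout_msec);
	u32 tmp = readl(reg);

	while ((tmp & mask) == val && time_before(jiffies, deadline)) {
		msleep(interval_msec);
		tmp = readl(reg);
	}
	return tmp;	/* caller re-tests mask to detect timeout */
}

This is why sil24_init_port() above can test (tmp & (PORT_CS_INIT | PORT_CS_RDY)) != PORT_CS_RDY directly on the returned value.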
diff --git a/drivers/scsi/sata_sis.c b/drivers/scsi/sata_sis.c
index 728530df2e07..82a07bff7e91 100644
--- a/drivers/scsi/sata_sis.c
+++ b/drivers/scsi/sata_sis.c
@@ -43,7 +43,7 @@
43#include <linux/libata.h> 43#include <linux/libata.h>
44 44
45#define DRV_NAME "sata_sis" 45#define DRV_NAME "sata_sis"
46#define DRV_VERSION "0.5" 46#define DRV_VERSION "0.6"
47 47
48enum { 48enum {
49 sis_180 = 0, 49 sis_180 = 0,
@@ -113,6 +113,7 @@ static const struct ata_port_operations sis_ops = {
113 .bmdma_status = ata_bmdma_status, 113 .bmdma_status = ata_bmdma_status,
114 .qc_prep = ata_qc_prep, 114 .qc_prep = ata_qc_prep,
115 .qc_issue = ata_qc_issue_prot, 115 .qc_issue = ata_qc_issue_prot,
116 .data_xfer = ata_pio_data_xfer,
116 .eng_timeout = ata_eng_timeout, 117 .eng_timeout = ata_eng_timeout,
117 .irq_handler = ata_interrupt, 118 .irq_handler = ata_interrupt,
118 .irq_clear = ata_bmdma_irq_clear, 119 .irq_clear = ata_bmdma_irq_clear,
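sata_sis is one of the port-I/O parts in this series, so it gets ata_pio_data_xfer for the new ->data_xfer hook, while the memory-mapped controllers (sata_qstor, sata_sil, sata_svw, sata_sx4) get ata_mmio_data_xfer. The split exists because PIO data moves through the taskfile data register either via inw()/outw() or via MMIO loads/stores. An illustrative word loop; this is an invented helper, not the libata implementation:

static void pio_xfer_words(unsigned long cmd_port, u16 *buf,
			   unsigned int words, int write)
{
	while (words--) {
		if (write)
			outw(*buf++, cmd_port);	/* taskfile data register */
		else
			*buf++ = inw(cmd_port);
	}
}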
diff --git a/drivers/scsi/sata_svw.c b/drivers/scsi/sata_svw.c
index 53b0d5c0a61f..7a4703bfa12a 100644
--- a/drivers/scsi/sata_svw.c
+++ b/drivers/scsi/sata_svw.c
@@ -54,7 +54,7 @@
54#endif /* CONFIG_PPC_OF */ 54#endif /* CONFIG_PPC_OF */
55 55
56#define DRV_NAME "sata_svw" 56#define DRV_NAME "sata_svw"
57#define DRV_VERSION "1.07" 57#define DRV_VERSION "1.8"
58 58
59enum { 59enum {
60 /* Taskfile registers offsets */ 60 /* Taskfile registers offsets */
@@ -257,7 +257,7 @@ static int k2_sata_proc_info(struct Scsi_Host *shost, char *page, char **start,
257 int len, index; 257 int len, index;
258 258
259 /* Find the ata_port */ 259 /* Find the ata_port */
260 ap = (struct ata_port *) &shost->hostdata[0]; 260 ap = ata_shost_to_port(shost);
261 if (ap == NULL) 261 if (ap == NULL)
262 return 0; 262 return 0;
263 263
@@ -320,6 +320,7 @@ static const struct ata_port_operations k2_sata_ops = {
320 .bmdma_status = ata_bmdma_status, 320 .bmdma_status = ata_bmdma_status,
321 .qc_prep = ata_qc_prep, 321 .qc_prep = ata_qc_prep,
322 .qc_issue = ata_qc_issue_prot, 322 .qc_issue = ata_qc_issue_prot,
323 .data_xfer = ata_mmio_data_xfer,
323 .eng_timeout = ata_eng_timeout, 324 .eng_timeout = ata_eng_timeout,
324 .irq_handler = ata_interrupt, 325 .irq_handler = ata_interrupt,
325 .irq_clear = ata_bmdma_irq_clear, 326 .irq_clear = ata_bmdma_irq_clear,
diff --git a/drivers/scsi/sata_sx4.c b/drivers/scsi/sata_sx4.c
index 4139ad4b1df0..c4db6bf14a25 100644
--- a/drivers/scsi/sata_sx4.c
+++ b/drivers/scsi/sata_sx4.c
@@ -46,7 +46,7 @@
46#include "sata_promise.h" 46#include "sata_promise.h"
47 47
48#define DRV_NAME "sata_sx4" 48#define DRV_NAME "sata_sx4"
49#define DRV_VERSION "0.8" 49#define DRV_VERSION "0.9"
50 50
51 51
52enum { 52enum {
@@ -204,6 +204,7 @@ static const struct ata_port_operations pdc_20621_ops = {
204 .phy_reset = pdc_20621_phy_reset, 204 .phy_reset = pdc_20621_phy_reset,
205 .qc_prep = pdc20621_qc_prep, 205 .qc_prep = pdc20621_qc_prep,
206 .qc_issue = pdc20621_qc_issue_prot, 206 .qc_issue = pdc20621_qc_issue_prot,
207 .data_xfer = ata_mmio_data_xfer,
207 .eng_timeout = pdc_eng_timeout, 208 .eng_timeout = pdc_eng_timeout,
208 .irq_handler = pdc20621_interrupt, 209 .irq_handler = pdc20621_interrupt,
209 .irq_clear = pdc20621_irq_clear, 210 .irq_clear = pdc20621_irq_clear,
@@ -218,7 +219,7 @@ static const struct ata_port_info pdc_port_info[] = {
218 .sht = &pdc_sata_sht, 219 .sht = &pdc_sata_sht,
219 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 220 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
220 ATA_FLAG_SRST | ATA_FLAG_MMIO | 221 ATA_FLAG_SRST | ATA_FLAG_MMIO |
221 ATA_FLAG_NO_ATAPI, 222 ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING,
222 .pio_mask = 0x1f, /* pio0-4 */ 223 .pio_mask = 0x1f, /* pio0-4 */
223 .mwdma_mask = 0x07, /* mwdma0-2 */ 224 .mwdma_mask = 0x07, /* mwdma0-2 */
224 .udma_mask = 0x7f, /* udma0-6 ; FIXME */ 225 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
@@ -833,11 +834,11 @@ static irqreturn_t pdc20621_interrupt (int irq, void *dev_instance, struct pt_re
833 tmp = mask & (1 << i); 834 tmp = mask & (1 << i);
834 VPRINTK("seq %u, port_no %u, ap %p, tmp %x\n", i, port_no, ap, tmp); 835 VPRINTK("seq %u, port_no %u, ap %p, tmp %x\n", i, port_no, ap, tmp);
835 if (tmp && ap && 836 if (tmp && ap &&
836 !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) { 837 !(ap->flags & ATA_FLAG_DISABLED)) {
837 struct ata_queued_cmd *qc; 838 struct ata_queued_cmd *qc;
838 839
839 qc = ata_qc_from_tag(ap, ap->active_tag); 840 qc = ata_qc_from_tag(ap, ap->active_tag);
840 if (qc && (!(qc->tf.ctl & ATA_NIEN))) 841 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
841 handled += pdc20621_host_intr(ap, qc, (i > 4), 842 handled += pdc20621_host_intr(ap, qc, (i > 4),
842 mmio_base); 843 mmio_base);
843 } 844 }
@@ -868,15 +869,16 @@ static void pdc_eng_timeout(struct ata_port *ap)
868 switch (qc->tf.protocol) { 869 switch (qc->tf.protocol) {
869 case ATA_PROT_DMA: 870 case ATA_PROT_DMA:
870 case ATA_PROT_NODATA: 871 case ATA_PROT_NODATA:
871 printk(KERN_ERR "ata%u: command timeout\n", ap->id); 872 ata_port_printk(ap, KERN_ERR, "command timeout\n");
872 qc->err_mask |= __ac_err_mask(ata_wait_idle(ap)); 873 qc->err_mask |= __ac_err_mask(ata_wait_idle(ap));
873 break; 874 break;
874 875
875 default: 876 default:
876 drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000); 877 drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
877 878
878 printk(KERN_ERR "ata%u: unknown timeout, cmd 0x%x stat 0x%x\n", 879 ata_port_printk(ap, KERN_ERR,
879 ap->id, qc->tf.command, drv_stat); 880 "unknown timeout, cmd 0x%x stat 0x%x\n",
881 qc->tf.command, drv_stat);
880 882
881 qc->err_mask |= ac_err_mask(drv_stat); 883 qc->err_mask |= ac_err_mask(drv_stat);
882 break; 884 break;
@@ -1375,10 +1377,6 @@ static int pdc_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *
1375 if (!printed_version++) 1377 if (!printed_version++)
1376 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); 1378 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
1377 1379
1378 /*
1379 * If this driver happens to only be useful on Apple's K2, then
1380 * we should check that here as it has a normal Serverworks ID
1381 */
1382 rc = pci_enable_device(pdev); 1380 rc = pci_enable_device(pdev);
1383 if (rc) 1381 if (rc)
1384 return rc; 1382 return rc;
diff --git a/drivers/scsi/sata_uli.c b/drivers/scsi/sata_uli.c
index 38b52bd3fa3f..7fae3e06e461 100644
--- a/drivers/scsi/sata_uli.c
+++ b/drivers/scsi/sata_uli.c
@@ -37,7 +37,7 @@
37#include <linux/libata.h> 37#include <linux/libata.h>
38 38
39#define DRV_NAME "sata_uli" 39#define DRV_NAME "sata_uli"
40#define DRV_VERSION "0.5" 40#define DRV_VERSION "0.6"
41 41
42enum { 42enum {
43 uli_5289 = 0, 43 uli_5289 = 0,
@@ -110,6 +110,7 @@ static const struct ata_port_operations uli_ops = {
110 .bmdma_status = ata_bmdma_status, 110 .bmdma_status = ata_bmdma_status,
111 .qc_prep = ata_qc_prep, 111 .qc_prep = ata_qc_prep,
112 .qc_issue = ata_qc_issue_prot, 112 .qc_issue = ata_qc_issue_prot,
113 .data_xfer = ata_pio_data_xfer,
113 114
114 .eng_timeout = ata_eng_timeout, 115 .eng_timeout = ata_eng_timeout,
115 116
diff --git a/drivers/scsi/sata_via.c b/drivers/scsi/sata_via.c
index 9e7ae4e0db32..1c9e2f36805a 100644
--- a/drivers/scsi/sata_via.c
+++ b/drivers/scsi/sata_via.c
@@ -47,7 +47,7 @@
47#include <asm/io.h> 47#include <asm/io.h>
48 48
49#define DRV_NAME "sata_via" 49#define DRV_NAME "sata_via"
50#define DRV_VERSION "1.1" 50#define DRV_VERSION "1.2"
51 51
52enum board_ids_enum { 52enum board_ids_enum {
53 vt6420, 53 vt6420,
@@ -124,6 +124,7 @@ static const struct ata_port_operations svia_sata_ops = {
124 124
125 .qc_prep = ata_qc_prep, 125 .qc_prep = ata_qc_prep,
126 .qc_issue = ata_qc_issue_prot, 126 .qc_issue = ata_qc_issue_prot,
127 .data_xfer = ata_pio_data_xfer,
127 128
128 .eng_timeout = ata_eng_timeout, 129 .eng_timeout = ata_eng_timeout,
129 130
diff --git a/drivers/scsi/sata_vsc.c b/drivers/scsi/sata_vsc.c
index 8a29ce340b47..438e7c6a0f8f 100644
--- a/drivers/scsi/sata_vsc.c
+++ b/drivers/scsi/sata_vsc.c
@@ -221,14 +221,21 @@ static irqreturn_t vsc_sata_interrupt (int irq, void *dev_instance,
221 221
222 ap = host_set->ports[i]; 222 ap = host_set->ports[i];
223 223
224 if (ap && !(ap->flags & 224 if (is_vsc_sata_int_err(i, int_status)) {
225 (ATA_FLAG_PORT_DISABLED|ATA_FLAG_NOINTR))) { 225 u32 err_status;
226 printk(KERN_DEBUG "%s: ignoring interrupt(s)\n", __FUNCTION__);
227 err_status = ap ? vsc_sata_scr_read(ap, SCR_ERROR) : 0;
228			if (ap) 235				vsc_sata_scr_write(ap, SCR_ERROR, err_status);
229 handled++;
230 }
231
232 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
226 struct ata_queued_cmd *qc; 233 struct ata_queued_cmd *qc;
227 234
228 qc = ata_qc_from_tag(ap, ap->active_tag); 235 qc = ata_qc_from_tag(ap, ap->active_tag);
229 if (qc && (!(qc->tf.ctl & ATA_NIEN))) { 236 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
230 handled += ata_host_intr(ap, qc); 237 handled += ata_host_intr(ap, qc);
231 } else if (is_vsc_sata_int_err(i, int_status)) { 238 else if (is_vsc_sata_int_err(i, int_status)) {
232 /* 239 /*
233 * On some chips (i.e. Intel 31244), an error 240 * On some chips (i.e. Intel 31244), an error
234 * interrupt will sneak in at initialization 241 * interrupt will sneak in at initialization
@@ -290,6 +297,7 @@ static const struct ata_port_operations vsc_sata_ops = {
290 .bmdma_status = ata_bmdma_status, 297 .bmdma_status = ata_bmdma_status,
291 .qc_prep = ata_qc_prep, 298 .qc_prep = ata_qc_prep,
292 .qc_issue = ata_qc_issue_prot, 299 .qc_issue = ata_qc_issue_prot,
300 .data_xfer = ata_pio_data_xfer,
293 .eng_timeout = ata_eng_timeout, 301 .eng_timeout = ata_eng_timeout,
294 .irq_handler = vsc_sata_interrupt, 302 .irq_handler = vsc_sata_interrupt,
295 .irq_clear = ata_bmdma_irq_clear, 303 .irq_clear = ata_bmdma_irq_clear,
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 73994e2ac2cb..dae4f08adde0 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -720,6 +720,24 @@ void scsi_init_cmd_from_req(struct scsi_cmnd *cmd, struct scsi_request *sreq)
720static DEFINE_PER_CPU(struct list_head, scsi_done_q); 720static DEFINE_PER_CPU(struct list_head, scsi_done_q);
721 721
722/** 722/**
723 * scsi_req_abort_cmd -- Request command recovery for the specified command
724 * cmd: pointer to the SCSI command of interest
725 *
726 * This function requests that SCSI Core start recovery for the
727 * command by deleting the timer and adding the command to the eh
728 * queue. It can be called by either LLDDs or SCSI Core. LLDDs who
729 * implement their own error recovery MAY ignore the timeout event if
730 * they generated scsi_req_abort_cmd.
731 */
732void scsi_req_abort_cmd(struct scsi_cmnd *cmd)
733{
734 if (!scsi_delete_timer(cmd))
735 return;
736 scsi_times_out(cmd);
737}
738EXPORT_SYMBOL(scsi_req_abort_cmd);
739
740/**
723 * scsi_done - Enqueue the finished SCSI command into the done queue. 741 * scsi_done - Enqueue the finished SCSI command into the done queue.
724 * @cmd: The SCSI Command for which a low-level device driver (LLDD) gives 742 * @cmd: The SCSI Command for which a low-level device driver (LLDD) gives
725 * ownership back to SCSI Core -- i.e. the LLDD has finished with it. 743 * ownership back to SCSI Core -- i.e. the LLDD has finished with it.
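A hedged example of the intended caller of scsi_req_abort_cmd(): an LLDD (or a transport library such as libata) that detects a wedged command hands it to SCSI EH instead of completing it with fabricated status. The driver name is invented:

static void mydrv_abort_stuck_cmd(struct scsi_cmnd *cmd)
{
	/*
	 * Deletes the command timer; if that succeeds, feeds the
	 * command to scsi_times_out(), which queues it for the
	 * error-handler thread.
	 */
	scsi_req_abort_cmd(cmd);
}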
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 1c75646f9689..346ab72ebf86 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -58,6 +58,28 @@ void scsi_eh_wakeup(struct Scsi_Host *shost)
58} 58}
59 59
60/** 60/**
61 * scsi_schedule_eh - schedule EH for SCSI host
62 * @shost: SCSI host to invoke error handling on.
63 *
64 * Schedule SCSI EH without scmd.
65 **/
66void scsi_schedule_eh(struct Scsi_Host *shost)
67{
68 unsigned long flags;
69
70 spin_lock_irqsave(shost->host_lock, flags);
71
72 if (scsi_host_set_state(shost, SHOST_RECOVERY) == 0 ||
73 scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY) == 0) {
74 shost->host_eh_scheduled++;
75 scsi_eh_wakeup(shost);
76 }
77
78 spin_unlock_irqrestore(shost->host_lock, flags);
79}
80EXPORT_SYMBOL_GPL(scsi_schedule_eh);
81
82/**
61 * scsi_eh_scmd_add - add scsi cmd to error handling. 83 * scsi_eh_scmd_add - add scsi cmd to error handling.
62 * @scmd: scmd to run eh on. 84 * @scmd: scmd to run eh on.
63 * @eh_flag: optional SCSI_EH flag. 85 * @eh_flag: optional SCSI_EH flag.
@@ -1517,7 +1539,7 @@ int scsi_error_handler(void *data)
1517 */ 1539 */
1518 set_current_state(TASK_INTERRUPTIBLE); 1540 set_current_state(TASK_INTERRUPTIBLE);
1519 while (!kthread_should_stop()) { 1541 while (!kthread_should_stop()) {
1520 if (shost->host_failed == 0 || 1542 if ((shost->host_failed == 0 && shost->host_eh_scheduled == 0) ||
1521 shost->host_failed != shost->host_busy) { 1543 shost->host_failed != shost->host_busy) {
1522 SCSI_LOG_ERROR_RECOVERY(1, 1544 SCSI_LOG_ERROR_RECOVERY(1,
1523 printk("Error handler scsi_eh_%d sleeping\n", 1545 printk("Error handler scsi_eh_%d sleeping\n",
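Restating the reworked wake-up test: the EH thread now also runs for scsi_schedule_eh() requests that carry no failed command, but still only once every in-flight command has quiesced. A boolean sketch of the run condition, i.e. the inverse of the sleep test above:

static int eh_should_run(struct Scsi_Host *shost)
{
	/* nothing queued for recovery by either mechanism */
	if (shost->host_failed == 0 && shost->host_eh_scheduled == 0)
		return 0;
	/* run only once every busy command has been routed to EH */
	return shost->host_failed == shost->host_busy;
}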
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 764a8b375ead..18e34775b238 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -566,7 +566,7 @@ void scsi_device_unbusy(struct scsi_device *sdev)
566 spin_lock_irqsave(shost->host_lock, flags); 566 spin_lock_irqsave(shost->host_lock, flags);
567 shost->host_busy--; 567 shost->host_busy--;
568 if (unlikely(scsi_host_in_recovery(shost) && 568 if (unlikely(scsi_host_in_recovery(shost) &&
569 shost->host_failed)) 569 (shost->host_failed || shost->host_eh_scheduled)))
570 scsi_eh_wakeup(shost); 570 scsi_eh_wakeup(shost);
571 spin_unlock(shost->host_lock); 571 spin_unlock(shost->host_lock);
572 spin_lock(sdev->request_queue->queue_lock); 572 spin_lock(sdev->request_queue->queue_lock);
diff --git a/drivers/scsi/scsi_transport_api.h b/drivers/scsi/scsi_transport_api.h
new file mode 100644
index 000000000000..934f0e62bb5c
--- /dev/null
+++ b/drivers/scsi/scsi_transport_api.h
@@ -0,0 +1,6 @@
1#ifndef _SCSI_TRANSPORT_API_H
2#define _SCSI_TRANSPORT_API_H
3
4void scsi_schedule_eh(struct Scsi_Host *shost);
5
6#endif /* _SCSI_TRANSPORT_API_H */
diff --git a/include/linux/ata.h b/include/linux/ata.h
index 312a2c0c64e6..c494e1c0531e 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -97,6 +97,9 @@ enum {
97 ATA_DRQ = (1 << 3), /* data request i/o */ 97 ATA_DRQ = (1 << 3), /* data request i/o */
98 ATA_ERR = (1 << 0), /* have an error */ 98 ATA_ERR = (1 << 0), /* have an error */
99 ATA_SRST = (1 << 2), /* software reset */ 99 ATA_SRST = (1 << 2), /* software reset */
100 ATA_ICRC = (1 << 7), /* interface CRC error */
101 ATA_UNC = (1 << 6), /* uncorrectable media error */
102 ATA_IDNF = (1 << 4), /* ID not found */
100 ATA_ABORTED = (1 << 2), /* command aborted */ 103 ATA_ABORTED = (1 << 2), /* command aborted */
101 104
102 /* ATA command block registers */ 105 /* ATA command block registers */
@@ -130,6 +133,8 @@ enum {
130 ATA_CMD_WRITE = 0xCA, 133 ATA_CMD_WRITE = 0xCA,
131 ATA_CMD_WRITE_EXT = 0x35, 134 ATA_CMD_WRITE_EXT = 0x35,
132 ATA_CMD_WRITE_FUA_EXT = 0x3D, 135 ATA_CMD_WRITE_FUA_EXT = 0x3D,
136 ATA_CMD_FPDMA_READ = 0x60,
137 ATA_CMD_FPDMA_WRITE = 0x61,
133 ATA_CMD_PIO_READ = 0x20, 138 ATA_CMD_PIO_READ = 0x20,
134 ATA_CMD_PIO_READ_EXT = 0x24, 139 ATA_CMD_PIO_READ_EXT = 0x24,
135 ATA_CMD_PIO_WRITE = 0x30, 140 ATA_CMD_PIO_WRITE = 0x30,
@@ -148,6 +153,10 @@ enum {
148 ATA_CMD_INIT_DEV_PARAMS = 0x91, 153 ATA_CMD_INIT_DEV_PARAMS = 0x91,
149 ATA_CMD_READ_NATIVE_MAX = 0xF8, 154 ATA_CMD_READ_NATIVE_MAX = 0xF8,
150 ATA_CMD_READ_NATIVE_MAX_EXT = 0x27, 155 ATA_CMD_READ_NATIVE_MAX_EXT = 0x27,
156 ATA_CMD_READ_LOG_EXT = 0x2f,
157
158 /* READ_LOG_EXT pages */
159 ATA_LOG_SATA_NCQ = 0x10,
151 160
152 /* SETFEATURES stuff */ 161 /* SETFEATURES stuff */
153 SETFEATURES_XFER = 0x03, 162 SETFEATURES_XFER = 0x03,
@@ -192,6 +201,16 @@ enum {
192 SCR_ACTIVE = 3, 201 SCR_ACTIVE = 3,
193 SCR_NOTIFICATION = 4, 202 SCR_NOTIFICATION = 4,
194 203
204 /* SError bits */
205 SERR_DATA_RECOVERED = (1 << 0), /* recovered data error */
206 SERR_COMM_RECOVERED = (1 << 1), /* recovered comm failure */
207 SERR_DATA = (1 << 8), /* unrecovered data error */
208 SERR_PERSISTENT = (1 << 9), /* persistent data/comm error */
209 SERR_PROTOCOL = (1 << 10), /* protocol violation */
210 SERR_INTERNAL = (1 << 11), /* host internal error */
211 SERR_PHYRDY_CHG = (1 << 16), /* PHY RDY changed */
212 SERR_DEV_XCHG = (1 << 26), /* device exchanged */
213
195 /* struct ata_taskfile flags */ 214 /* struct ata_taskfile flags */
196 ATA_TFLAG_LBA48 = (1 << 0), /* enable 48-bit LBA and "HOB" */ 215 ATA_TFLAG_LBA48 = (1 << 0), /* enable 48-bit LBA and "HOB" */
197 ATA_TFLAG_ISADDR = (1 << 1), /* enable r/w to nsect/lba regs */ 216 ATA_TFLAG_ISADDR = (1 << 1), /* enable r/w to nsect/lba regs */
@@ -199,6 +218,7 @@ enum {
199 ATA_TFLAG_WRITE = (1 << 3), /* data dir: host->dev==1 (write) */ 218 ATA_TFLAG_WRITE = (1 << 3), /* data dir: host->dev==1 (write) */
200 ATA_TFLAG_LBA = (1 << 4), /* enable LBA */ 219 ATA_TFLAG_LBA = (1 << 4), /* enable LBA */
201 ATA_TFLAG_FUA = (1 << 5), /* enable FUA */ 220 ATA_TFLAG_FUA = (1 << 5), /* enable FUA */
221 ATA_TFLAG_POLLING = (1 << 6), /* set nIEN to 1 and use polling */
202}; 222};
203 223
204enum ata_tf_protocols { 224enum ata_tf_protocols {
@@ -207,6 +227,7 @@ enum ata_tf_protocols {
207 ATA_PROT_NODATA, /* no data */ 227 ATA_PROT_NODATA, /* no data */
208 ATA_PROT_PIO, /* PIO single sector */ 228 ATA_PROT_PIO, /* PIO single sector */
209 ATA_PROT_DMA, /* DMA */ 229 ATA_PROT_DMA, /* DMA */
230 ATA_PROT_NCQ, /* NCQ */
210 ATA_PROT_ATAPI, /* packet command, PIO data xfer*/ 231 ATA_PROT_ATAPI, /* packet command, PIO data xfer*/
211 ATA_PROT_ATAPI_NODATA, /* packet command, no data */ 232 ATA_PROT_ATAPI_NODATA, /* packet command, no data */
212 ATA_PROT_ATAPI_DMA, /* packet command with special DMA sauce */ 233 ATA_PROT_ATAPI_DMA, /* packet command with special DMA sauce */
@@ -262,6 +283,8 @@ struct ata_taskfile {
262#define ata_id_has_pm(id) ((id)[82] & (1 << 3)) 283#define ata_id_has_pm(id) ((id)[82] & (1 << 3))
263#define ata_id_has_lba(id) ((id)[49] & (1 << 9)) 284#define ata_id_has_lba(id) ((id)[49] & (1 << 9))
264#define ata_id_has_dma(id) ((id)[49] & (1 << 8)) 285#define ata_id_has_dma(id) ((id)[49] & (1 << 8))
286#define ata_id_has_ncq(id) ((id)[76] & (1 << 8))
287#define ata_id_queue_depth(id) (((id)[75] & 0x1f) + 1)
265#define ata_id_removeable(id) ((id)[0] & (1 << 7)) 288#define ata_id_removeable(id) ((id)[0] & (1 << 7))
266#define ata_id_has_dword_io(id) ((id)[50] & (1 << 0)) 289#define ata_id_has_dword_io(id) ((id)[50] & (1 << 0))
267#define ata_id_u32(id,n) \ 290#define ata_id_u32(id,n) \
@@ -272,6 +295,8 @@ struct ata_taskfile {
272 ((u64) (id)[(n) + 1] << 16) | \ 295 ((u64) (id)[(n) + 1] << 16) | \
273 ((u64) (id)[(n) + 0]) ) 296 ((u64) (id)[(n) + 0]) )
274 297
298#define ata_id_cdb_intr(id) (((id)[0] & 0x60) == 0x20)
299
275static inline unsigned int ata_id_major_version(const u16 *id) 300static inline unsigned int ata_id_major_version(const u16 *id)
276{ 301{
277 unsigned int mver; 302 unsigned int mver;
@@ -311,6 +336,15 @@ static inline int is_atapi_taskfile(const struct ata_taskfile *tf)
311 (tf->protocol == ATA_PROT_ATAPI_DMA); 336 (tf->protocol == ATA_PROT_ATAPI_DMA);
312} 337}
313 338
339static inline int is_multi_taskfile(struct ata_taskfile *tf)
340{
341 return (tf->command == ATA_CMD_READ_MULTI) ||
342 (tf->command == ATA_CMD_WRITE_MULTI) ||
343 (tf->command == ATA_CMD_READ_MULTI_EXT) ||
344 (tf->command == ATA_CMD_WRITE_MULTI_EXT) ||
345 (tf->command == ATA_CMD_WRITE_MULTI_FUA_EXT);
346}
347
314static inline int ata_ok(u8 status) 348static inline int ata_ok(u8 status)
315{ 349{
316 return ((status & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | ATA_ERR)) 350 return ((status & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | ATA_ERR))
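
A small sketch of the new IDENTIFY helpers (illustration only): deriving a usable queue depth from raw IDENTIFY data. 'id' is assumed to hold ATA_ID_WORDS words of IDENTIFY DEVICE data already read from the device.

#include <linux/ata.h>

/* ata_id_queue_depth() decodes word 75, bits 4:0, plus one. */
static int my_ncq_queue_depth(const u16 *id)
{
	if (!ata_id_has_ncq(id))
		return 1;		/* non-NCQ device: one command at a time */
	return ata_id_queue_depth(id);
}
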
diff --git a/include/linux/libata.h b/include/linux/libata.h
index b80d2e7fa6d2..9c60b4a4e2fd 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -33,6 +33,7 @@
33#include <asm/io.h> 33#include <asm/io.h>
34#include <linux/ata.h> 34#include <linux/ata.h>
35#include <linux/workqueue.h> 35#include <linux/workqueue.h>
36#include <scsi/scsi_host.h>
36 37
37/* 38/*
38 * compile-time options: to be removed as soon as all the drivers are 39 * compile-time options: to be removed as soon as all the drivers are
@@ -44,7 +45,6 @@
44#undef ATA_NDEBUG /* define to disable quick runtime checks */ 45#undef ATA_NDEBUG /* define to disable quick runtime checks */
45#undef ATA_ENABLE_PATA /* define to enable PATA support in some 46#undef ATA_ENABLE_PATA /* define to enable PATA support in some
46 * low-level drivers */ 47 * low-level drivers */
47#undef ATAPI_ENABLE_DMADIR /* enables ATAPI DMADIR bridge support */
48 48
49 49
50/* note: prints function name for you */ 50/* note: prints function name for you */
@@ -108,8 +108,11 @@ enum {
108 LIBATA_MAX_PRD = ATA_MAX_PRD / 2, 108 LIBATA_MAX_PRD = ATA_MAX_PRD / 2,
109 ATA_MAX_PORTS = 8, 109 ATA_MAX_PORTS = 8,
110 ATA_DEF_QUEUE = 1, 110 ATA_DEF_QUEUE = 1,
111 ATA_MAX_QUEUE = 1, 111 /* tag ATA_MAX_QUEUE - 1 is reserved for internal commands */
112 ATA_MAX_QUEUE = 32,
113 ATA_TAG_INTERNAL = ATA_MAX_QUEUE - 1,
112 ATA_MAX_SECTORS = 200, /* FIXME */ 114 ATA_MAX_SECTORS = 200, /* FIXME */
115 ATA_MAX_SECTORS_LBA48 = 65535,
113 ATA_MAX_BUS = 2, 116 ATA_MAX_BUS = 2,
114 ATA_DEF_BUSY_WAIT = 10000, 117 ATA_DEF_BUSY_WAIT = 10000,
115 ATA_SHORT_PAUSE = (HZ >> 6) + 1, 118 ATA_SHORT_PAUSE = (HZ >> 6) + 1,
@@ -120,9 +123,13 @@ enum {
120 ATA_SHT_USE_CLUSTERING = 1, 123 ATA_SHT_USE_CLUSTERING = 1,
121 124
122 /* struct ata_device stuff */ 125 /* struct ata_device stuff */
123 ATA_DFLAG_LBA48 = (1 << 0), /* device supports LBA48 */ 126 ATA_DFLAG_LBA = (1 << 0), /* device supports LBA */
124 ATA_DFLAG_PIO = (1 << 1), /* device currently in PIO mode */ 127 ATA_DFLAG_LBA48 = (1 << 1), /* device supports LBA48 */
125 ATA_DFLAG_LBA = (1 << 2), /* device supports LBA */ 128 ATA_DFLAG_CDB_INTR = (1 << 2), /* device asserts INTRQ when ready for CDB */
129 ATA_DFLAG_NCQ = (1 << 3), /* device supports NCQ */
130 ATA_DFLAG_CFG_MASK = (1 << 8) - 1,
131
132 ATA_DFLAG_PIO = (1 << 8), /* device currently in PIO mode */
126 133
127 ATA_DEV_UNKNOWN = 0, /* unknown device */ 134 ATA_DEV_UNKNOWN = 0, /* unknown device */
128 ATA_DEV_ATA = 1, /* ATA device */ 135 ATA_DEV_ATA = 1, /* ATA device */
@@ -132,43 +139,50 @@ enum {
132 ATA_DEV_NONE = 5, /* no device */ 139 ATA_DEV_NONE = 5, /* no device */
133 140
134 /* struct ata_port flags */ 141 /* struct ata_port flags */
135 ATA_FLAG_SLAVE_POSS = (1 << 1), /* host supports slave dev */ 142 ATA_FLAG_SLAVE_POSS = (1 << 0), /* host supports slave dev */
136 /* (doesn't imply presence) */ 143 /* (doesn't imply presence) */
137 ATA_FLAG_PORT_DISABLED = (1 << 2), /* port is disabled, ignore it */ 144 ATA_FLAG_SATA = (1 << 1),
138 ATA_FLAG_SATA = (1 << 3), 145 ATA_FLAG_NO_LEGACY = (1 << 2), /* no legacy mode check */
139 ATA_FLAG_NO_LEGACY = (1 << 4), /* no legacy mode check */ 146 ATA_FLAG_MMIO = (1 << 3), /* use MMIO, not PIO */
140 ATA_FLAG_SRST = (1 << 5), /* (obsolete) use ATA SRST, not E.D.D. */ 147 ATA_FLAG_SRST = (1 << 4), /* (obsolete) use ATA SRST, not E.D.D. */
141 ATA_FLAG_MMIO = (1 << 6), /* use MMIO, not PIO */ 148 ATA_FLAG_SATA_RESET = (1 << 5), /* (obsolete) use COMRESET */
142 ATA_FLAG_SATA_RESET = (1 << 7), /* (obsolete) use COMRESET */ 149 ATA_FLAG_NO_ATAPI = (1 << 6), /* No ATAPI support */
143 ATA_FLAG_PIO_DMA = (1 << 8), /* PIO cmds via DMA */ 150 ATA_FLAG_PIO_DMA = (1 << 7), /* PIO cmds via DMA */
144 ATA_FLAG_NOINTR = (1 << 9), /* FIXME: Remove this once 151 ATA_FLAG_PIO_LBA48 = (1 << 8), /* Host DMA engine is LBA28 only */
145 * proper HSM is in place. */ 152 ATA_FLAG_PIO_POLLING = (1 << 10), /* use polling PIO if LLD
146 ATA_FLAG_DEBUGMSG = (1 << 10), 153 * doesn't handle PIO interrupts */
147 ATA_FLAG_NO_ATAPI = (1 << 11), /* No ATAPI support */ 154 ATA_FLAG_NCQ = (1 << 11), /* host supports NCQ */
148 155
149 ATA_FLAG_SUSPENDED = (1 << 12), /* port is suspended */ 156 ATA_FLAG_DEBUGMSG = (1 << 14),
150 157 ATA_FLAG_FLUSH_PORT_TASK = (1 << 15), /* flush port task */
151 ATA_FLAG_PIO_LBA48 = (1 << 13), /* Host DMA engine is LBA28 only */ 158
152 ATA_FLAG_IRQ_MASK = (1 << 14), /* Mask IRQ in PIO xfers */ 159 ATA_FLAG_EH_PENDING = (1 << 16), /* EH pending */
153 160 ATA_FLAG_FROZEN = (1 << 17), /* port is frozen */
154 ATA_FLAG_FLUSH_PORT_TASK = (1 << 15), /* Flush port task */ 161 ATA_FLAG_RECOVERED = (1 << 18), /* recovery action performed */
155 ATA_FLAG_IN_EH = (1 << 16), /* EH in progress */ 162
156 163 ATA_FLAG_DISABLED = (1 << 22), /* port is disabled, ignore it */
 157 ATA_QCFLAG_ACTIVE = (1 << 1), /* cmd not yet ack'd to scsi layer */ 164 ATA_FLAG_DISABLED = (1 << 22), /* port is disabled, ignore it */
158 ATA_QCFLAG_SG = (1 << 3), /* have s/g table? */ 165
159 ATA_QCFLAG_SINGLE = (1 << 4), /* no s/g, just a single buffer */ 166 /* bits 24:31 of ap->flags are reserved for LLDD specific flags */
167
168 /* struct ata_queued_cmd flags */
 169 ATA_QCFLAG_ACTIVE = (1 << 0), /* cmd not yet ack'd to scsi layer */
170 ATA_QCFLAG_SG = (1 << 1), /* have s/g table? */
171 ATA_QCFLAG_SINGLE = (1 << 2), /* no s/g, just a single buffer */
160 ATA_QCFLAG_DMAMAP = ATA_QCFLAG_SG | ATA_QCFLAG_SINGLE, 172 ATA_QCFLAG_DMAMAP = ATA_QCFLAG_SG | ATA_QCFLAG_SINGLE,
161 ATA_QCFLAG_EH_SCHEDULED = (1 << 5), /* EH scheduled */ 173 ATA_QCFLAG_IO = (1 << 3), /* standard IO command */
174 ATA_QCFLAG_RESULT_TF = (1 << 4), /* result TF requested */
175
176 ATA_QCFLAG_FAILED = (1 << 16), /* cmd failed and is owned by EH */
177 ATA_QCFLAG_SENSE_VALID = (1 << 17), /* sense data valid */
178 ATA_QCFLAG_EH_SCHEDULED = (1 << 18), /* EH scheduled (obsolete) */
162 179
163 /* host set flags */ 180 /* host set flags */
164 ATA_HOST_SIMPLEX = (1 << 0), /* Host is simplex, one DMA channel per host_set only */ 181 ATA_HOST_SIMPLEX = (1 << 0), /* Host is simplex, one DMA channel per host_set only */
165 182
166 /* various lengths of time */ 183 /* various lengths of time */
167 ATA_TMOUT_PIO = 30 * HZ,
168 ATA_TMOUT_BOOT = 30 * HZ, /* heuristic */ 184 ATA_TMOUT_BOOT = 30 * HZ, /* heuristic */
169 ATA_TMOUT_BOOT_QUICK = 7 * HZ, /* heuristic */ 185 ATA_TMOUT_BOOT_QUICK = 7 * HZ, /* heuristic */
170 ATA_TMOUT_CDB = 30 * HZ,
171 ATA_TMOUT_CDB_QUICK = 5 * HZ,
172 ATA_TMOUT_INTERNAL = 30 * HZ, 186 ATA_TMOUT_INTERNAL = 30 * HZ,
173 ATA_TMOUT_INTERNAL_QUICK = 5 * HZ, 187 ATA_TMOUT_INTERNAL_QUICK = 5 * HZ,
174 188
@@ -207,21 +221,44 @@ enum {
207 /* size of buffer to pad xfers ending on unaligned boundaries */ 221 /* size of buffer to pad xfers ending on unaligned boundaries */
208 ATA_DMA_PAD_SZ = 4, 222 ATA_DMA_PAD_SZ = 4,
209 ATA_DMA_PAD_BUF_SZ = ATA_DMA_PAD_SZ * ATA_MAX_QUEUE, 223 ATA_DMA_PAD_BUF_SZ = ATA_DMA_PAD_SZ * ATA_MAX_QUEUE,
210 224
211 /* Masks for port functions */ 225 /* masks for port functions */
212 ATA_PORT_PRIMARY = (1 << 0), 226 ATA_PORT_PRIMARY = (1 << 0),
213 ATA_PORT_SECONDARY = (1 << 1), 227 ATA_PORT_SECONDARY = (1 << 1),
228
229 /* ering size */
230 ATA_ERING_SIZE = 32,
231
232 /* desc_len for ata_eh_info and context */
233 ATA_EH_DESC_LEN = 80,
234
235 /* reset / recovery action types */
236 ATA_EH_REVALIDATE = (1 << 0),
237 ATA_EH_SOFTRESET = (1 << 1),
238 ATA_EH_HARDRESET = (1 << 2),
239
240 ATA_EH_RESET_MASK = ATA_EH_SOFTRESET | ATA_EH_HARDRESET,
241
242 /* ata_eh_info->flags */
243 ATA_EHI_DID_RESET = (1 << 0), /* already reset this port */
244
245 /* max repeat if error condition is still set after ->error_handler */
246 ATA_EH_MAX_REPEAT = 5,
247
248 /* how hard are we gonna try to probe/recover devices */
249 ATA_PROBE_MAX_TRIES = 3,
250 ATA_EH_RESET_TRIES = 3,
251 ATA_EH_DEV_TRIES = 3,
214}; 252};
215 253
216enum hsm_task_states { 254enum hsm_task_states {
217 HSM_ST_UNKNOWN, 255 HSM_ST_UNKNOWN, /* state unknown */
 218 HSM_ST_IDLE, 256 HSM_ST_IDLE, /* no command in progress */
 219 HSM_ST_POLL, 257 HSM_ST, /* (waiting for the device to) transfer data */
 220 HSM_ST_TMOUT, 258 HSM_ST_LAST, /* (waiting for the device to) complete command */
 221 HSM_ST, 259 HSM_ST_ERR, /* error */
 222 HSM_ST_LAST, 260 HSM_ST_FIRST, /* (waiting for the device to)
 223 HSM_ST_LAST_POLL, 261 write CDB or first data block */
224 HSM_ST_ERR,
225}; 262};
226 263
227enum ata_completion_errors { 264enum ata_completion_errors {
@@ -245,7 +282,7 @@ struct ata_queued_cmd;
245/* typedefs */ 282/* typedefs */
246typedef void (*ata_qc_cb_t) (struct ata_queued_cmd *qc); 283typedef void (*ata_qc_cb_t) (struct ata_queued_cmd *qc);
247typedef void (*ata_probeinit_fn_t)(struct ata_port *); 284typedef void (*ata_probeinit_fn_t)(struct ata_port *);
248typedef int (*ata_reset_fn_t)(struct ata_port *, int, unsigned int *); 285typedef int (*ata_reset_fn_t)(struct ata_port *, unsigned int *);
249typedef void (*ata_postreset_fn_t)(struct ata_port *ap, unsigned int *); 286typedef void (*ata_postreset_fn_t)(struct ata_port *ap, unsigned int *);
250 287
251struct ata_ioports { 288struct ata_ioports {
@@ -336,7 +373,7 @@ struct ata_queued_cmd {
336 struct scatterlist *__sg; 373 struct scatterlist *__sg;
337 374
338 unsigned int err_mask; 375 unsigned int err_mask;
339 376 struct ata_taskfile result_tf;
340 ata_qc_cb_t complete_fn; 377 ata_qc_cb_t complete_fn;
341 378
342 void *private_data; 379 void *private_data;
@@ -348,12 +385,24 @@ struct ata_host_stats {
348 unsigned long rw_reqbuf; 385 unsigned long rw_reqbuf;
349}; 386};
350 387
388struct ata_ering_entry {
389 int is_io;
390 unsigned int err_mask;
391 u64 timestamp;
392};
393
394struct ata_ering {
395 int cursor;
396 struct ata_ering_entry ring[ATA_ERING_SIZE];
397};
398
351struct ata_device { 399struct ata_device {
400 struct ata_port *ap;
352 u64 n_sectors; /* size of device, if ATA */ 401 u64 n_sectors; /* size of device, if ATA */
353 unsigned long flags; /* ATA_DFLAG_xxx */ 402 unsigned long flags; /* ATA_DFLAG_xxx */
354 unsigned int class; /* ATA_DEV_xxx */ 403 unsigned int class; /* ATA_DEV_xxx */
355 unsigned int devno; /* 0 or 1 */ 404 unsigned int devno; /* 0 or 1 */
356 u16 *id; /* IDENTIFY xxx DEVICE data */ 405 u16 id[ATA_ID_WORDS]; /* IDENTIFY xxx DEVICE data */
357 u8 pio_mode; 406 u8 pio_mode;
358 u8 dma_mode; 407 u8 dma_mode;
359 u8 xfer_mode; 408 u8 xfer_mode;
@@ -373,6 +422,24 @@ struct ata_device {
373 u16 cylinders; /* Number of cylinders */ 422 u16 cylinders; /* Number of cylinders */
374 u16 heads; /* Number of heads */ 423 u16 heads; /* Number of heads */
375 u16 sectors; /* Number of sectors per track */ 424 u16 sectors; /* Number of sectors per track */
425
426 /* error history */
427 struct ata_ering ering;
428};
429
430struct ata_eh_info {
431 struct ata_device *dev; /* offending device */
432 u32 serror; /* SError from LLDD */
433 unsigned int err_mask; /* port-wide err_mask */
434 unsigned int action; /* ATA_EH_* action mask */
435 unsigned int flags; /* ATA_EHI_* flags */
436 char desc[ATA_EH_DESC_LEN];
437 int desc_len;
438};
439
440struct ata_eh_context {
441 struct ata_eh_info i;
442 int tries[ATA_MAX_DEVICES];
376}; 443};
377 444
378struct ata_port { 445struct ata_port {
@@ -397,12 +464,21 @@ struct ata_port {
397 unsigned int mwdma_mask; 464 unsigned int mwdma_mask;
398 unsigned int udma_mask; 465 unsigned int udma_mask;
399 unsigned int cbl; /* cable type; ATA_CBL_xxx */ 466 unsigned int cbl; /* cable type; ATA_CBL_xxx */
467 unsigned int sata_spd_limit; /* SATA PHY speed limit */
468
469 /* record runtime error info, protected by host_set lock */
470 struct ata_eh_info eh_info;
471 /* EH context owned by EH */
472 struct ata_eh_context eh_context;
400 473
401 struct ata_device device[ATA_MAX_DEVICES]; 474 struct ata_device device[ATA_MAX_DEVICES];
402 475
403 struct ata_queued_cmd qcmd[ATA_MAX_QUEUE]; 476 struct ata_queued_cmd qcmd[ATA_MAX_QUEUE];
404 unsigned long qactive; 477 unsigned long qc_allocated;
478 unsigned int qc_active;
479
405 unsigned int active_tag; 480 unsigned int active_tag;
481 u32 sactive;
406 482
407 struct ata_host_stats stats; 483 struct ata_host_stats stats;
408 struct ata_host_set *host_set; 484 struct ata_host_set *host_set;
@@ -411,12 +487,13 @@ struct ata_port {
411 struct work_struct port_task; 487 struct work_struct port_task;
412 488
413 unsigned int hsm_task_state; 489 unsigned int hsm_task_state;
414 unsigned long pio_task_timeout;
415 490
416 u32 msg_enable; 491 u32 msg_enable;
417 struct list_head eh_done_q; 492 struct list_head eh_done_q;
418 493
419 void *private_data; 494 void *private_data;
495
496 u8 sector_buf[ATA_SECT_SIZE]; /* owned by EH */
420}; 497};
421 498
422struct ata_port_operations { 499struct ata_port_operations {
@@ -447,10 +524,20 @@ struct ata_port_operations {
447 void (*bmdma_setup) (struct ata_queued_cmd *qc); 524 void (*bmdma_setup) (struct ata_queued_cmd *qc);
448 void (*bmdma_start) (struct ata_queued_cmd *qc); 525 void (*bmdma_start) (struct ata_queued_cmd *qc);
449 526
527 void (*data_xfer) (struct ata_device *, unsigned char *, unsigned int, int);
528
450 void (*qc_prep) (struct ata_queued_cmd *qc); 529 void (*qc_prep) (struct ata_queued_cmd *qc);
451 unsigned int (*qc_issue) (struct ata_queued_cmd *qc); 530 unsigned int (*qc_issue) (struct ata_queued_cmd *qc);
452 531
453 void (*eng_timeout) (struct ata_port *ap); 532 /* Error handlers. ->error_handler overrides ->eng_timeout and
533 * indicates that new-style EH is in place.
534 */
535 void (*eng_timeout) (struct ata_port *ap); /* obsolete */
536
537 void (*freeze) (struct ata_port *ap);
538 void (*thaw) (struct ata_port *ap);
539 void (*error_handler) (struct ata_port *ap);
540 void (*post_internal_cmd) (struct ata_queued_cmd *qc);
454 541
455 irqreturn_t (*irq_handler)(int, void *, struct pt_regs *); 542 irqreturn_t (*irq_handler)(int, void *, struct pt_regs *);
456 void (*irq_clear) (struct ata_port *); 543 void (*irq_clear) (struct ata_port *);
@@ -496,18 +583,16 @@ extern void ata_port_probe(struct ata_port *);
496extern void __sata_phy_reset(struct ata_port *ap); 583extern void __sata_phy_reset(struct ata_port *ap);
497extern void sata_phy_reset(struct ata_port *ap); 584extern void sata_phy_reset(struct ata_port *ap);
498extern void ata_bus_reset(struct ata_port *ap); 585extern void ata_bus_reset(struct ata_port *ap);
586extern int sata_set_spd(struct ata_port *ap);
499extern int ata_drive_probe_reset(struct ata_port *ap, 587extern int ata_drive_probe_reset(struct ata_port *ap,
500 ata_probeinit_fn_t probeinit, 588 ata_probeinit_fn_t probeinit,
501 ata_reset_fn_t softreset, ata_reset_fn_t hardreset, 589 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
502 ata_postreset_fn_t postreset, unsigned int *classes); 590 ata_postreset_fn_t postreset, unsigned int *classes);
503extern void ata_std_probeinit(struct ata_port *ap); 591extern void ata_std_probeinit(struct ata_port *ap);
504extern int ata_std_softreset(struct ata_port *ap, int verbose, 592extern int ata_std_softreset(struct ata_port *ap, unsigned int *classes);
505 unsigned int *classes); 593extern int sata_std_hardreset(struct ata_port *ap, unsigned int *class);
506extern int sata_std_hardreset(struct ata_port *ap, int verbose,
507 unsigned int *class);
508extern void ata_std_postreset(struct ata_port *ap, unsigned int *classes); 594extern void ata_std_postreset(struct ata_port *ap, unsigned int *classes);
509extern int ata_dev_revalidate(struct ata_port *ap, struct ata_device *dev, 595extern int ata_dev_revalidate(struct ata_device *dev, int post_reset);
510 int post_reset);
511extern void ata_port_disable(struct ata_port *); 596extern void ata_port_disable(struct ata_port *);
512extern void ata_std_ports(struct ata_ioports *ioaddr); 597extern void ata_std_ports(struct ata_ioports *ioaddr);
513#ifdef CONFIG_PCI 598#ifdef CONFIG_PCI
@@ -523,20 +608,27 @@ extern void ata_host_set_remove(struct ata_host_set *host_set);
523extern int ata_scsi_detect(struct scsi_host_template *sht); 608extern int ata_scsi_detect(struct scsi_host_template *sht);
524extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg); 609extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
525extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)); 610extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
526extern void ata_eh_qc_complete(struct ata_queued_cmd *qc);
527extern void ata_eh_qc_retry(struct ata_queued_cmd *qc);
528extern int ata_scsi_release(struct Scsi_Host *host); 611extern int ata_scsi_release(struct Scsi_Host *host);
529extern unsigned int ata_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc); 612extern unsigned int ata_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc);
613extern int sata_scr_valid(struct ata_port *ap);
614extern int sata_scr_read(struct ata_port *ap, int reg, u32 *val);
615extern int sata_scr_write(struct ata_port *ap, int reg, u32 val);
616extern int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val);
617extern int ata_port_online(struct ata_port *ap);
618extern int ata_port_offline(struct ata_port *ap);
530extern int ata_scsi_device_resume(struct scsi_device *); 619extern int ata_scsi_device_resume(struct scsi_device *);
531extern int ata_scsi_device_suspend(struct scsi_device *, pm_message_t state); 620extern int ata_scsi_device_suspend(struct scsi_device *, pm_message_t state);
532extern int ata_device_resume(struct ata_port *, struct ata_device *); 621extern int ata_device_resume(struct ata_device *);
533extern int ata_device_suspend(struct ata_port *, struct ata_device *, pm_message_t state); 622extern int ata_device_suspend(struct ata_device *, pm_message_t state);
534extern int ata_ratelimit(void); 623extern int ata_ratelimit(void);
535extern unsigned int ata_busy_sleep(struct ata_port *ap, 624extern unsigned int ata_busy_sleep(struct ata_port *ap,
536 unsigned long timeout_pat, 625 unsigned long timeout_pat,
537 unsigned long timeout); 626 unsigned long timeout);
538extern void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), 627extern void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *),
539 void *data, unsigned long delay); 628 void *data, unsigned long delay);
629extern u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
630 unsigned long interval_msec,
631 unsigned long timeout_msec);
540 632
541/* 633/*
542 * Default driver ops implementations 634 * Default driver ops implementations
@@ -555,6 +647,10 @@ extern int ata_port_start (struct ata_port *ap);
555extern void ata_port_stop (struct ata_port *ap); 647extern void ata_port_stop (struct ata_port *ap);
556extern void ata_host_stop (struct ata_host_set *host_set); 648extern void ata_host_stop (struct ata_host_set *host_set);
557extern irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs); 649extern irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs);
650extern void ata_mmio_data_xfer(struct ata_device *adev, unsigned char *buf,
651 unsigned int buflen, int write_data);
652extern void ata_pio_data_xfer(struct ata_device *adev, unsigned char *buf,
653 unsigned int buflen, int write_data);
558extern void ata_qc_prep(struct ata_queued_cmd *qc); 654extern void ata_qc_prep(struct ata_queued_cmd *qc);
559extern void ata_noop_qc_prep(struct ata_queued_cmd *qc); 655extern void ata_noop_qc_prep(struct ata_queued_cmd *qc);
560extern unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc); 656extern unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc);
@@ -572,17 +668,26 @@ extern void ata_bmdma_start (struct ata_queued_cmd *qc);
572extern void ata_bmdma_stop(struct ata_queued_cmd *qc); 668extern void ata_bmdma_stop(struct ata_queued_cmd *qc);
573extern u8 ata_bmdma_status(struct ata_port *ap); 669extern u8 ata_bmdma_status(struct ata_port *ap);
574extern void ata_bmdma_irq_clear(struct ata_port *ap); 670extern void ata_bmdma_irq_clear(struct ata_port *ap);
575extern void __ata_qc_complete(struct ata_queued_cmd *qc); 671extern void ata_bmdma_freeze(struct ata_port *ap);
576extern void ata_eng_timeout(struct ata_port *ap); 672extern void ata_bmdma_thaw(struct ata_port *ap);
577extern void ata_scsi_simulate(struct ata_port *ap, struct ata_device *dev, 673extern void ata_bmdma_drive_eh(struct ata_port *ap,
578 struct scsi_cmnd *cmd, 674 ata_reset_fn_t softreset,
675 ata_reset_fn_t hardreset,
676 ata_postreset_fn_t postreset);
677extern void ata_bmdma_error_handler(struct ata_port *ap);
678extern void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc);
679extern void ata_qc_complete(struct ata_queued_cmd *qc);
680extern int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
681 void (*finish_qc)(struct ata_queued_cmd *));
682extern void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd,
579 void (*done)(struct scsi_cmnd *)); 683 void (*done)(struct scsi_cmnd *));
580extern int ata_std_bios_param(struct scsi_device *sdev, 684extern int ata_std_bios_param(struct scsi_device *sdev,
581 struct block_device *bdev, 685 struct block_device *bdev,
582 sector_t capacity, int geom[]); 686 sector_t capacity, int geom[]);
583extern int ata_scsi_slave_config(struct scsi_device *sdev); 687extern int ata_scsi_slave_config(struct scsi_device *sdev);
584extern struct ata_device *ata_dev_pair(struct ata_port *ap, 688extern int ata_scsi_change_queue_depth(struct scsi_device *sdev,
585 struct ata_device *adev); 689 int queue_depth);
690extern struct ata_device *ata_dev_pair(struct ata_device *adev);
586 691
587/* 692/*
588 * Timing helpers 693 * Timing helpers
@@ -628,7 +733,50 @@ extern int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bit
628extern unsigned long ata_pci_default_filter(const struct ata_port *, struct ata_device *, unsigned long); 733extern unsigned long ata_pci_default_filter(const struct ata_port *, struct ata_device *, unsigned long);
629#endif /* CONFIG_PCI */ 734#endif /* CONFIG_PCI */
630 735
736/*
737 * EH
738 */
739extern void ata_eng_timeout(struct ata_port *ap);
740
741extern void ata_port_schedule_eh(struct ata_port *ap);
742extern int ata_port_abort(struct ata_port *ap);
743extern int ata_port_freeze(struct ata_port *ap);
744
745extern void ata_eh_freeze_port(struct ata_port *ap);
746extern void ata_eh_thaw_port(struct ata_port *ap);
747
748extern void ata_eh_qc_complete(struct ata_queued_cmd *qc);
749extern void ata_eh_qc_retry(struct ata_queued_cmd *qc);
750
751extern void ata_do_eh(struct ata_port *ap, ata_reset_fn_t softreset,
752 ata_reset_fn_t hardreset, ata_postreset_fn_t postreset);
753
754/*
755 * printk helpers
756 */
757#define ata_port_printk(ap, lv, fmt, args...) \
758 printk(lv"ata%u: "fmt, (ap)->id , ##args)
759
760#define ata_dev_printk(dev, lv, fmt, args...) \
761 printk(lv"ata%u.%02u: "fmt, (dev)->ap->id, (dev)->devno , ##args)
762
763/*
764 * ata_eh_info helpers
765 */
766#define ata_ehi_push_desc(ehi, fmt, args...) do { \
767 (ehi)->desc_len += scnprintf((ehi)->desc + (ehi)->desc_len, \
768 ATA_EH_DESC_LEN - (ehi)->desc_len, \
769 fmt , ##args); \
770} while (0)
771
772#define ata_ehi_clear_desc(ehi) do { \
773 (ehi)->desc[0] = '\0'; \
774 (ehi)->desc_len = 0; \
775} while (0)
631 776
777/*
778 * qc helpers
779 */
632static inline int 780static inline int
633ata_sg_is_last(struct scatterlist *sg, struct ata_queued_cmd *qc) 781ata_sg_is_last(struct scatterlist *sg, struct ata_queued_cmd *qc)
634{ 782{
@@ -671,14 +819,39 @@ static inline unsigned int ata_tag_valid(unsigned int tag)
671 return (tag < ATA_MAX_QUEUE) ? 1 : 0; 819 return (tag < ATA_MAX_QUEUE) ? 1 : 0;
672} 820}
673 821
674static inline unsigned int ata_class_present(unsigned int class) 822static inline unsigned int ata_tag_internal(unsigned int tag)
823{
824 return tag == ATA_MAX_QUEUE - 1;
825}
826
827static inline unsigned int ata_class_enabled(unsigned int class)
675{ 828{
676 return class == ATA_DEV_ATA || class == ATA_DEV_ATAPI; 829 return class == ATA_DEV_ATA || class == ATA_DEV_ATAPI;
677} 830}
678 831
679static inline unsigned int ata_dev_present(const struct ata_device *dev) 832static inline unsigned int ata_class_disabled(unsigned int class)
833{
834 return class == ATA_DEV_ATA_UNSUP || class == ATA_DEV_ATAPI_UNSUP;
835}
836
837static inline unsigned int ata_class_absent(unsigned int class)
838{
839 return !ata_class_enabled(class) && !ata_class_disabled(class);
840}
841
842static inline unsigned int ata_dev_enabled(const struct ata_device *dev)
680{ 843{
681 return ata_class_present(dev->class); 844 return ata_class_enabled(dev->class);
845}
846
847static inline unsigned int ata_dev_disabled(const struct ata_device *dev)
848{
849 return ata_class_disabled(dev->class);
850}
851
852static inline unsigned int ata_dev_absent(const struct ata_device *dev)
853{
854 return ata_class_absent(dev->class);
682} 855}
683 856
684static inline u8 ata_chk_status(struct ata_port *ap) 857static inline u8 ata_chk_status(struct ata_port *ap)
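
A short sketch of the renamed predicates (illustration only): "enabled" replaces the old ata_class_present()/ata_dev_present() test and excludes both absent and disabled (unsupported) devices.

/* Count usable devices on a port with the new predicate. */
static int my_count_enabled_devs(struct ata_port *ap)
{
	int i, n = 0;

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		if (ata_dev_enabled(&ap->device[i]))
			n++;
	return n;
}
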
@@ -759,20 +932,35 @@ static inline void ata_qc_set_polling(struct ata_queued_cmd *qc)
759 qc->tf.ctl |= ATA_NIEN; 932 qc->tf.ctl |= ATA_NIEN;
760} 933}
761 934
762static inline struct ata_queued_cmd *ata_qc_from_tag (struct ata_port *ap, 935static inline struct ata_queued_cmd *__ata_qc_from_tag(struct ata_port *ap,
763 unsigned int tag) 936 unsigned int tag)
764{ 937{
765 if (likely(ata_tag_valid(tag))) 938 if (likely(ata_tag_valid(tag)))
766 return &ap->qcmd[tag]; 939 return &ap->qcmd[tag];
767 return NULL; 940 return NULL;
768} 941}
769 942
770static inline void ata_tf_init(struct ata_port *ap, struct ata_taskfile *tf, unsigned int device) 943static inline struct ata_queued_cmd *ata_qc_from_tag(struct ata_port *ap,
944 unsigned int tag)
945{
946 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
947
948 if (unlikely(!qc) || !ap->ops->error_handler)
949 return qc;
950
951 if ((qc->flags & (ATA_QCFLAG_ACTIVE |
952 ATA_QCFLAG_FAILED)) == ATA_QCFLAG_ACTIVE)
953 return qc;
954
955 return NULL;
956}
957
958static inline void ata_tf_init(struct ata_device *dev, struct ata_taskfile *tf)
771{ 959{
772 memset(tf, 0, sizeof(*tf)); 960 memset(tf, 0, sizeof(*tf));
773 961
774 tf->ctl = ap->ctl; 962 tf->ctl = dev->ap->ctl;
775 if (device == 0) 963 if (dev->devno == 0)
776 tf->device = ATA_DEVICE_OBS; 964 tf->device = ATA_DEVICE_OBS;
777 else 965 else
778 tf->device = ATA_DEVICE_OBS | ATA_DEV1; 966 tf->device = ATA_DEVICE_OBS | ATA_DEV1;
@@ -787,26 +975,11 @@ static inline void ata_qc_reinit(struct ata_queued_cmd *qc)
787 qc->nbytes = qc->curbytes = 0; 975 qc->nbytes = qc->curbytes = 0;
788 qc->err_mask = 0; 976 qc->err_mask = 0;
789 977
790 ata_tf_init(qc->ap, &qc->tf, qc->dev->devno); 978 ata_tf_init(qc->dev, &qc->tf);
791}
792
793/**
794 * ata_qc_complete - Complete an active ATA command
795 * @qc: Command to complete
796 * @err_mask: ATA Status register contents
797 *
798 * Indicate to the mid and upper layers that an ATA
799 * command has completed, with either an ok or not-ok status.
800 *
801 * LOCKING:
802 * spin_lock_irqsave(host_set lock)
803 */
804static inline void ata_qc_complete(struct ata_queued_cmd *qc)
805{
806 if (unlikely(qc->flags & ATA_QCFLAG_EH_SCHEDULED))
807 return;
808 979
809 __ata_qc_complete(qc); 980 /* init result_tf such that it indicates normal completion */
981 qc->result_tf.command = ATA_DRDY;
982 qc->result_tf.feature = 0;
810} 983}
811 984
812/** 985/**
@@ -885,28 +1058,6 @@ static inline u8 ata_irq_ack(struct ata_port *ap, unsigned int chk_drq)
885 return status; 1058 return status;
886} 1059}
887 1060
888static inline u32 scr_read(struct ata_port *ap, unsigned int reg)
889{
890 return ap->ops->scr_read(ap, reg);
891}
892
893static inline void scr_write(struct ata_port *ap, unsigned int reg, u32 val)
894{
895 ap->ops->scr_write(ap, reg, val);
896}
897
898static inline void scr_write_flush(struct ata_port *ap, unsigned int reg,
899 u32 val)
900{
901 ap->ops->scr_write(ap, reg, val);
902 (void) ap->ops->scr_read(ap, reg);
903}
904
905static inline unsigned int sata_dev_present(struct ata_port *ap)
906{
907 return ((scr_read(ap, SCR_STATUS) & 0xf) == 0x3) ? 1 : 0;
908}
909
910static inline int ata_try_flush_cache(const struct ata_device *dev) 1061static inline int ata_try_flush_cache(const struct ata_device *dev)
911{ 1062{
912 return ata_id_wcache_enabled(dev->id) || 1063 return ata_id_wcache_enabled(dev->id) ||
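
A sketch of the replacement for the removed helpers (assumptions: sata_scr_read() returns 0 on success and a negative errno when the port has no SCR access; ata_port_online(), declared earlier in this patch, wraps the same test):

static int my_phy_is_online(struct ata_port *ap)
{
	u32 sstatus;

	if (sata_scr_read(ap, SCR_STATUS, &sstatus))
		return 0;		/* no SCR access: treat as offline */
	return (sstatus & 0xf) == 0x3;	/* DET == 3: device present, phy up */
}
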
@@ -916,7 +1067,7 @@ static inline int ata_try_flush_cache(const struct ata_device *dev)
916 1067
917static inline unsigned int ac_err_mask(u8 status) 1068static inline unsigned int ac_err_mask(u8 status)
918{ 1069{
919 if (status & ATA_BUSY) 1070 if (status & (ATA_BUSY | ATA_DRQ))
920 return AC_ERR_HSM; 1071 return AC_ERR_HSM;
921 if (status & (ATA_ERR | ATA_DF)) 1072 if (status & (ATA_ERR | ATA_DF))
922 return AC_ERR_DEV; 1073 return AC_ERR_DEV;
@@ -944,4 +1095,9 @@ static inline void ata_pad_free(struct ata_port *ap, struct device *dev)
944 dma_free_coherent(dev, ATA_DMA_PAD_BUF_SZ, ap->pad, ap->pad_dma); 1095 dma_free_coherent(dev, ATA_DMA_PAD_BUF_SZ, ap->pad, ap->pad_dma);
945} 1096}
946 1097
1098static inline struct ata_port *ata_shost_to_port(struct Scsi_Host *host)
1099{
1100 return (struct ata_port *) &host->hostdata[0];
1101}
1102
947#endif /* __LINUX_LIBATA_H__ */ 1103#endif /* __LINUX_LIBATA_H__ */
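
Finally, a sketch of the new accessor (illustration only): the ata_port lives in the Scsi_Host's hostdata, and the printk wrappers added above prefix messages with the port id.

static void my_note_host(struct Scsi_Host *host)
{
	struct ata_port *ap = ata_shost_to_port(host);

	ata_port_printk(ap, KERN_INFO, "attached to Scsi_Host %u\n",
			host->host_no);
}
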
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index d6fe048376ab..233f60741c82 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -1182,6 +1182,10 @@
1182#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_1100 0x034E 1182#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_1100 0x034E
1183#define PCI_DEVICE_ID_NVIDIA_NVENET_14 0x0372 1183#define PCI_DEVICE_ID_NVIDIA_NVENET_14 0x0372
1184#define PCI_DEVICE_ID_NVIDIA_NVENET_15 0x0373 1184#define PCI_DEVICE_ID_NVIDIA_NVENET_15 0x0373
1185#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA 0x03E7
1186#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_IDE 0x03EC
1187#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2 0x03F6
1188#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3 0x03F7
1185 1189
1186#define PCI_VENDOR_ID_IMS 0x10e0 1190#define PCI_VENDOR_ID_IMS 0x10e0
1187#define PCI_DEVICE_ID_IMS_TT128 0x9128 1191#define PCI_DEVICE_ID_IMS_TT128 0x9128
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index 1ace1b9fe537..88c6c4da6c05 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -151,5 +151,6 @@ extern struct scsi_cmnd *scsi_get_command(struct scsi_device *, gfp_t);
151extern void scsi_put_command(struct scsi_cmnd *); 151extern void scsi_put_command(struct scsi_cmnd *);
152extern void scsi_io_completion(struct scsi_cmnd *, unsigned int, unsigned int); 152extern void scsi_io_completion(struct scsi_cmnd *, unsigned int, unsigned int);
153extern void scsi_finish_command(struct scsi_cmnd *cmd); 153extern void scsi_finish_command(struct scsi_cmnd *cmd);
154extern void scsi_req_abort_cmd(struct scsi_cmnd *cmd);
154 155
155#endif /* _SCSI_SCSI_CMND_H */ 156#endif /* _SCSI_SCSI_CMND_H */
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index de6ce541a046..a42efd6e4be8 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -472,6 +472,7 @@ struct Scsi_Host {
472 */ 472 */
473 unsigned int host_busy; /* commands actually active on low-level */ 473 unsigned int host_busy; /* commands actually active on low-level */
474 unsigned int host_failed; /* commands that failed. */ 474 unsigned int host_failed; /* commands that failed. */
475 unsigned int host_eh_scheduled; /* EH scheduled without command */
475 476
476 unsigned short host_no; /* Used for IOCTL_GET_IDLUN, /proc/scsi et al. */ 477 unsigned short host_no; /* Used for IOCTL_GET_IDLUN, /proc/scsi et al. */
477 int resetting; /* if set, it means that last_reset is a valid value */ 478 int resetting; /* if set, it means that last_reset is a valid value */