Diffstat (limited to 'drivers')
-rw-r--r--  drivers/ide/pci/amd74xx.c         |    7
-rw-r--r--  drivers/scsi/Makefile             |    2
-rw-r--r--  drivers/scsi/ahci.c               |  477
-rw-r--r--  drivers/scsi/ata_piix.c           |  112
-rw-r--r--  drivers/scsi/libata-bmdma.c       |  161
-rw-r--r--  drivers/scsi/libata-core.c        | 2973
-rw-r--r--  drivers/scsi/libata-eh.c          | 1855
-rw-r--r--  drivers/scsi/libata-scsi.c        |  713
-rw-r--r--  drivers/scsi/libata.h             |   31
-rw-r--r--  drivers/scsi/pdc_adma.c           |   12
-rw-r--r--  drivers/scsi/sata_mv.c            |   71
-rw-r--r--  drivers/scsi/sata_nv.c            |   14
-rw-r--r--  drivers/scsi/sata_promise.c       |   40
-rw-r--r--  drivers/scsi/sata_qstor.c         |   15
-rw-r--r--  drivers/scsi/sata_sil.c           |  221
-rw-r--r--  drivers/scsi/sata_sil24.c         |  646
-rw-r--r--  drivers/scsi/sata_sis.c           |    4
-rw-r--r--  drivers/scsi/sata_svw.c           |    6
-rw-r--r--  drivers/scsi/sata_sx4.c           |   21
-rw-r--r--  drivers/scsi/sata_uli.c           |    4
-rw-r--r--  drivers/scsi/sata_via.c           |    4
-rw-r--r--  drivers/scsi/sata_vsc.c           |   17
-rw-r--r--  drivers/scsi/scsi.c               |   18
-rw-r--r--  drivers/scsi/scsi_error.c         |   24
-rw-r--r--  drivers/scsi/scsi_lib.c           |    2
-rw-r--r--  drivers/scsi/scsi_transport_api.h |    6
26 files changed, 5578 insertions(+), 1878 deletions(-)
diff --git a/drivers/ide/pci/amd74xx.c b/drivers/ide/pci/amd74xx.c
index b22ee5462318..6e9dbf4d8077 100644
--- a/drivers/ide/pci/amd74xx.c
+++ b/drivers/ide/pci/amd74xx.c
@@ -74,6 +74,7 @@ static struct amd_ide_chip {
 	{ PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_IDE,	0x50, AMD_UDMA_133 },
 	{ PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_IDE,	0x50, AMD_UDMA_133 },
 	{ PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE,	0x50, AMD_UDMA_133 },
+	{ PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_IDE,	0x50, AMD_UDMA_133 },
 	{ PCI_DEVICE_ID_AMD_CS5536_IDE,			0x40, AMD_UDMA_100 },
 	{ 0 }
 };
@@ -488,7 +489,8 @@ static ide_pci_device_t amd74xx_chipsets[] __devinitdata = {
 	/* 14 */ DECLARE_NV_DEV("NFORCE-MCP04"),
 	/* 15 */ DECLARE_NV_DEV("NFORCE-MCP51"),
 	/* 16 */ DECLARE_NV_DEV("NFORCE-MCP55"),
-	/* 17 */ DECLARE_AMD_DEV("AMD5536"),
+	/* 17 */ DECLARE_NV_DEV("NFORCE-MCP61"),
+	/* 18 */ DECLARE_AMD_DEV("AMD5536"),
 };
 
 static int __devinit amd74xx_probe(struct pci_dev *dev, const struct pci_device_id *id)
@@ -525,7 +527,8 @@ static struct pci_device_id amd74xx_pci_tbl[] = {
 	{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 14 },
 	{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 15 },
 	{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 16 },
-	{ PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 17 },
+	{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 17 },
+	{ PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 18 },
 	{ 0, },
 };
 MODULE_DEVICE_TABLE(pci, amd74xx_pci_tbl);
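
The last field of each pci_device_id entry is the driver_data index into amd74xx_chipsets[], which is why inserting MCP61 at slot 17 pushes the CS5536 entry to 18 in both tables. A minimal sketch of how the probe path is assumed to consume that index (helper use illustrative, not the driver's exact code):

/* Illustrative only: id->driver_data was set to 17 (MCP61) or
 * 18 (CS5536) in amd74xx_pci_tbl[] above. */
static int sketch_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	ide_pci_device_t *d = &amd74xx_chipsets[id->driver_data];

	return ide_setup_pci_device(dev, d);
}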
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 81803a16f986..669ff6b99c4f 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -164,7 +164,7 @@ ncr53c8xx-flags-$(CONFIG_SCSI_ZALON) \
 CFLAGS_ncr53c8xx.o	:= $(ncr53c8xx-flags-y) $(ncr53c8xx-flags-m)
 zalon7xx-objs		:= zalon.o ncr53c8xx.o
 NCR_Q720_mod-objs	:= NCR_Q720.o ncr53c8xx.o
-libata-objs		:= libata-core.o libata-scsi.o libata-bmdma.o
+libata-objs		:= libata-core.o libata-scsi.o libata-bmdma.o libata-eh.o
 oktagon_esp_mod-objs	:= oktagon_esp.o oktagon_io.o
 
 # Files generated that shall be removed upon make clean
diff --git a/drivers/scsi/ahci.c b/drivers/scsi/ahci.c
index b4f8fb1d628b..e261b37c2e48 100644
--- a/drivers/scsi/ahci.c
+++ b/drivers/scsi/ahci.c
@@ -48,7 +48,7 @@
 #include <asm/io.h>
 
 #define DRV_NAME	"ahci"
-#define DRV_VERSION	"1.2"
+#define DRV_VERSION	"1.3"
 
 
 enum {
@@ -56,12 +56,15 @@ enum {
 	AHCI_MAX_SG		= 168, /* hardware max is 64K */
 	AHCI_DMA_BOUNDARY	= 0xffffffff,
 	AHCI_USE_CLUSTERING	= 0,
-	AHCI_CMD_SLOT_SZ	= 32 * 32,
+	AHCI_MAX_CMDS		= 32,
+	AHCI_CMD_SZ		= 32,
+	AHCI_CMD_SLOT_SZ	= AHCI_MAX_CMDS * AHCI_CMD_SZ,
 	AHCI_RX_FIS_SZ		= 256,
-	AHCI_CMD_TBL_HDR	= 0x80,
 	AHCI_CMD_TBL_CDB	= 0x40,
-	AHCI_CMD_TBL_SZ		= AHCI_CMD_TBL_HDR + (AHCI_MAX_SG * 16),
-	AHCI_PORT_PRIV_DMA_SZ	= AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_SZ +
+	AHCI_CMD_TBL_HDR_SZ	= 0x80,
+	AHCI_CMD_TBL_SZ		= AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16),
+	AHCI_CMD_TBL_AR_SZ	= AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS,
+	AHCI_PORT_PRIV_DMA_SZ	= AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ +
 				  AHCI_RX_FIS_SZ,
 	AHCI_IRQ_ON_SG		= (1 << 31),
 	AHCI_CMD_ATAPI		= (1 << 5),
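
With NCQ the driver needs one command table per tag rather than a single shared table; the new constants size the per-port DMA buffer for 32 command slots plus a 32-entry command-table array. A standalone sketch of the offset math (constants restated from the enum above; the slot/RX-FIS/table ordering is an assumption taken from ahci_port_start(), which this hunk does not show):

#include <stddef.h>

enum { MAX_CMDS = 32, CMD_SZ = 32, TBL_HDR_SZ = 0x80,
       MAX_SG = 168, RX_FIS_SZ = 256 };

static size_t cmd_tbl_offset(unsigned int tag)
{
	size_t slot_sz = MAX_CMDS * CMD_SZ;		/* AHCI_CMD_SLOT_SZ */
	size_t tbl_sz  = TBL_HDR_SZ + MAX_SG * 16;	/* AHCI_CMD_TBL_SZ  */

	/* command tables assumed to follow the slot array and RX FIS area */
	return slot_sz + RX_FIS_SZ + tag * tbl_sz;
}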
@@ -71,8 +74,10 @@ enum {
 	AHCI_CMD_CLR_BUSY	= (1 << 10),
 
 	RX_FIS_D2H_REG		= 0x40,	/* offset of D2H Register FIS data */
+	RX_FIS_UNK		= 0x60, /* offset of Unknown FIS data */
 
 	board_ahci		= 0,
+	board_ahci_vt8251	= 1,
 
 	/* global controller registers */
 	HOST_CAP		= 0x00, /* host capabilities */
@@ -87,8 +92,9 @@ enum {
 	HOST_AHCI_EN		= (1 << 31), /* AHCI enabled */
 
 	/* HOST_CAP bits */
-	HOST_CAP_64		= (1 << 31), /* PCI DAC (64-bit DMA) support */
 	HOST_CAP_CLO		= (1 << 24), /* Command List Override support */
+	HOST_CAP_NCQ		= (1 << 30), /* Native Command Queueing */
+	HOST_CAP_64		= (1 << 31), /* PCI DAC (64-bit DMA) support */
 
 	/* registers for each SATA port */
 	PORT_LST_ADDR		= 0x00, /* command list DMA addr */
@@ -127,15 +133,17 @@ enum {
 	PORT_IRQ_PIOS_FIS	= (1 << 1), /* PIO Setup FIS rx'd */
 	PORT_IRQ_D2H_REG_FIS	= (1 << 0), /* D2H Register FIS rx'd */
 
-	PORT_IRQ_FATAL		= PORT_IRQ_TF_ERR |
-				  PORT_IRQ_HBUS_ERR |
-				  PORT_IRQ_HBUS_DATA_ERR |
-				  PORT_IRQ_IF_ERR,
-	DEF_PORT_IRQ		= PORT_IRQ_FATAL | PORT_IRQ_PHYRDY |
-				  PORT_IRQ_CONNECT | PORT_IRQ_SG_DONE |
-				  PORT_IRQ_UNK_FIS | PORT_IRQ_SDB_FIS |
-				  PORT_IRQ_DMAS_FIS | PORT_IRQ_PIOS_FIS |
-				  PORT_IRQ_D2H_REG_FIS,
+	PORT_IRQ_FREEZE		= PORT_IRQ_HBUS_ERR |
+				  PORT_IRQ_IF_ERR |
+				  PORT_IRQ_CONNECT |
+				  PORT_IRQ_PHYRDY |
+				  PORT_IRQ_UNK_FIS,
+	PORT_IRQ_ERROR		= PORT_IRQ_FREEZE |
+				  PORT_IRQ_TF_ERR |
+				  PORT_IRQ_HBUS_DATA_ERR,
+	DEF_PORT_IRQ		= PORT_IRQ_ERROR | PORT_IRQ_SG_DONE |
+				  PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS |
+				  PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS,
 
 	/* PORT_CMD bits */
 	PORT_CMD_ATAPI		= (1 << 24), /* Device is ATAPI */
@@ -153,6 +161,9 @@ enum {
 
 	/* hpriv->flags bits */
 	AHCI_FLAG_MSI		= (1 << 0),
+
+	/* ap->flags bits */
+	AHCI_FLAG_RESET_NEEDS_CLO	= (1 << 24),
 };
 
 struct ahci_cmd_hdr {
@@ -181,7 +192,6 @@ struct ahci_port_priv {
 	dma_addr_t		cmd_slot_dma;
 	void			*cmd_tbl;
 	dma_addr_t		cmd_tbl_dma;
-	struct ahci_sg		*cmd_tbl_sg;
 	void			*rx_fis;
 	dma_addr_t		rx_fis_dma;
 };
@@ -191,15 +201,16 @@ static void ahci_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
 static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
 static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
 static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *regs);
-static int ahci_probe_reset(struct ata_port *ap, unsigned int *classes);
 static void ahci_irq_clear(struct ata_port *ap);
-static void ahci_eng_timeout(struct ata_port *ap);
 static int ahci_port_start(struct ata_port *ap);
 static void ahci_port_stop(struct ata_port *ap);
 static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
 static void ahci_qc_prep(struct ata_queued_cmd *qc);
 static u8 ahci_check_status(struct ata_port *ap);
-static inline int ahci_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc);
+static void ahci_freeze(struct ata_port *ap);
+static void ahci_thaw(struct ata_port *ap);
+static void ahci_error_handler(struct ata_port *ap);
+static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
 static void ahci_remove_one (struct pci_dev *pdev);
 
 static struct scsi_host_template ahci_sht = {
@@ -207,7 +218,8 @@ static struct scsi_host_template ahci_sht = {
 	.name			= DRV_NAME,
 	.ioctl			= ata_scsi_ioctl,
 	.queuecommand		= ata_scsi_queuecmd,
-	.can_queue		= ATA_DEF_QUEUE,
+	.change_queue_depth	= ata_scsi_change_queue_depth,
+	.can_queue		= AHCI_MAX_CMDS - 1,
 	.this_id		= ATA_SHT_THIS_ID,
 	.sg_tablesize		= AHCI_MAX_SG,
 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
@@ -216,6 +228,7 @@ static struct scsi_host_template ahci_sht = {
 	.proc_name		= DRV_NAME,
 	.dma_boundary		= AHCI_DMA_BOUNDARY,
 	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
 	.bios_param		= ata_std_bios_param,
 };
 
@@ -228,19 +241,21 @@ static const struct ata_port_operations ahci_ops = {
 
 	.tf_read		= ahci_tf_read,
 
-	.probe_reset		= ahci_probe_reset,
-
 	.qc_prep		= ahci_qc_prep,
 	.qc_issue		= ahci_qc_issue,
 
-	.eng_timeout		= ahci_eng_timeout,
-
 	.irq_handler		= ahci_interrupt,
 	.irq_clear		= ahci_irq_clear,
 
 	.scr_read		= ahci_scr_read,
 	.scr_write		= ahci_scr_write,
 
+	.freeze			= ahci_freeze,
+	.thaw			= ahci_thaw,
+
+	.error_handler		= ahci_error_handler,
+	.post_internal_cmd	= ahci_post_internal_cmd,
+
 	.port_start		= ahci_port_start,
 	.port_stop		= ahci_port_stop,
 };
@@ -250,7 +265,19 @@ static const struct ata_port_info ahci_port_info[] = {
 	{
 		.sht		= &ahci_sht,
 		.host_flags	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
-				  ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
+				  ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
+				  ATA_FLAG_SKIP_D2H_BSY,
+		.pio_mask	= 0x1f, /* pio0-4 */
+		.udma_mask	= 0x7f, /* udma0-6 ; FIXME */
+		.port_ops	= &ahci_ops,
+	},
+	/* board_ahci_vt8251 */
+	{
+		.sht		= &ahci_sht,
+		.host_flags	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+				  ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
+				  ATA_FLAG_SKIP_D2H_BSY |
+				  AHCI_FLAG_RESET_NEEDS_CLO,
 		.pio_mask	= 0x1f, /* pio0-4 */
 		.udma_mask	= 0x7f, /* udma0-6 ; FIXME */
 		.port_ops	= &ahci_ops,
@@ -296,6 +323,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
 	  board_ahci }, /* ATI SB600 non-raid */
 	{ PCI_VENDOR_ID_ATI, 0x4381, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
 	  board_ahci }, /* ATI SB600 raid */
+	{ PCI_VENDOR_ID_VIA, 0x3349, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+	  board_ahci_vt8251 }, /* VIA VT8251 */
 	{ }	/* terminate list */
 };
 
@@ -374,8 +403,6 @@ static int ahci_port_start(struct ata_port *ap)
 	pp->cmd_tbl = mem;
 	pp->cmd_tbl_dma = mem_dma;
 
-	pp->cmd_tbl_sg = mem + AHCI_CMD_TBL_HDR;
-
 	ap->private_data = pp;
 
 	if (hpriv->cap & HOST_CAP_64)
@@ -508,46 +535,71 @@ static unsigned int ahci_dev_classify(struct ata_port *ap)
 	return ata_dev_classify(&tf);
 }
 
-static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, u32 opts)
+static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
+			       u32 opts)
 {
-	pp->cmd_slot[0].opts = cpu_to_le32(opts);
-	pp->cmd_slot[0].status = 0;
-	pp->cmd_slot[0].tbl_addr = cpu_to_le32(pp->cmd_tbl_dma & 0xffffffff);
-	pp->cmd_slot[0].tbl_addr_hi = cpu_to_le32((pp->cmd_tbl_dma >> 16) >> 16);
+	dma_addr_t cmd_tbl_dma;
+
+	cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ;
+
+	pp->cmd_slot[tag].opts = cpu_to_le32(opts);
+	pp->cmd_slot[tag].status = 0;
+	pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff);
+	pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
 }
 
-static int ahci_poll_register(void __iomem *reg, u32 mask, u32 val,
-			      unsigned long interval_msec,
-			      unsigned long timeout_msec)
+static int ahci_clo(struct ata_port *ap)
 {
-	unsigned long timeout;
+	void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
+	struct ahci_host_priv *hpriv = ap->host_set->private_data;
 	u32 tmp;
 
-	timeout = jiffies + (timeout_msec * HZ) / 1000;
-	do {
-		tmp = readl(reg);
-		if ((tmp & mask) == val)
-			return 0;
-		msleep(interval_msec);
-	} while (time_before(jiffies, timeout));
+	if (!(hpriv->cap & HOST_CAP_CLO))
+		return -EOPNOTSUPP;
+
+	tmp = readl(port_mmio + PORT_CMD);
+	tmp |= PORT_CMD_CLO;
+	writel(tmp, port_mmio + PORT_CMD);
+
+	tmp = ata_wait_register(port_mmio + PORT_CMD,
+				PORT_CMD_CLO, PORT_CMD_CLO, 1, 500);
+	if (tmp & PORT_CMD_CLO)
+		return -EIO;
 
-	return -1;
+	return 0;
 }
 
-static int ahci_softreset(struct ata_port *ap, int verbose, unsigned int *class)
+static int ahci_prereset(struct ata_port *ap)
+{
+	if ((ap->flags & AHCI_FLAG_RESET_NEEDS_CLO) &&
+	    (ata_busy_wait(ap, ATA_BUSY, 1000) & ATA_BUSY)) {
+		/* ATA_BUSY hasn't cleared, so send a CLO */
+		ahci_clo(ap);
+	}
+
+	return ata_std_prereset(ap);
+}
+
+static int ahci_softreset(struct ata_port *ap, unsigned int *class)
 {
-	struct ahci_host_priv *hpriv = ap->host_set->private_data;
 	struct ahci_port_priv *pp = ap->private_data;
 	void __iomem *mmio = ap->host_set->mmio_base;
 	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
 	const u32 cmd_fis_len = 5; /* five dwords */
 	const char *reason = NULL;
 	struct ata_taskfile tf;
+	u32 tmp;
 	u8 *fis;
 	int rc;
 
 	DPRINTK("ENTER\n");
 
+	if (ata_port_offline(ap)) {
+		DPRINTK("PHY reports no device\n");
+		*class = ATA_DEV_NONE;
+		return 0;
+	}
+
 	/* prepare for SRST (AHCI-1.1 10.4.1) */
 	rc = ahci_stop_engine(ap);
 	if (rc) {
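
Both ahci_clo() and the SRST path below lean on ata_wait_register() polling until the masked value stops matching val, then returning the last value read. A rough standalone sketch of that assumed contract (not the libata implementation itself):

static u32 sketch_wait_register(void __iomem *reg, u32 mask, u32 val,
				unsigned long interval_msec,
				unsigned long timeout_msec)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(timeout_msec);
	u32 tmp = readl(reg);

	/* keep polling while the masked value still equals val */
	while ((tmp & mask) == val && time_before(jiffies, deadline)) {
		msleep(interval_msec);
		tmp = readl(reg);
	}
	return tmp;	/* caller re-tests the bit to detect timeout */
}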
@@ -558,23 +610,13 @@ static int ahci_softreset(struct ata_port *ap, int verbose, unsigned int *class)
 	/* check BUSY/DRQ, perform Command List Override if necessary */
 	ahci_tf_read(ap, &tf);
 	if (tf.command & (ATA_BUSY | ATA_DRQ)) {
-		u32 tmp;
+		rc = ahci_clo(ap);
 
-		if (!(hpriv->cap & HOST_CAP_CLO)) {
-			rc = -EIO;
-			reason = "port busy but no CLO";
+		if (rc == -EOPNOTSUPP) {
+			reason = "port busy but CLO unavailable";
 			goto fail_restart;
-		}
-
-		tmp = readl(port_mmio + PORT_CMD);
-		tmp |= PORT_CMD_CLO;
-		writel(tmp, port_mmio + PORT_CMD);
-		readl(port_mmio + PORT_CMD); /* flush */
-
-		if (ahci_poll_register(port_mmio + PORT_CMD, PORT_CMD_CLO, 0x0,
-				       1, 500)) {
-			rc = -EIO;
-			reason = "CLO failed";
+		} else if (rc) {
+			reason = "port busy but CLO failed";
 			goto fail_restart;
 		}
 	}
@@ -582,20 +624,21 @@ static int ahci_softreset(struct ata_port *ap, int verbose, unsigned int *class)
 	/* restart engine */
 	ahci_start_engine(ap);
 
-	ata_tf_init(ap, &tf, 0);
+	ata_tf_init(ap->device, &tf);
 	fis = pp->cmd_tbl;
 
 	/* issue the first D2H Register FIS */
-	ahci_fill_cmd_slot(pp, cmd_fis_len | AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY);
+	ahci_fill_cmd_slot(pp, 0,
+			   cmd_fis_len | AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY);
 
 	tf.ctl |= ATA_SRST;
 	ata_tf_to_fis(&tf, fis, 0);
 	fis[1] &= ~(1 << 7);	/* turn off Command FIS bit */
 
 	writel(1, port_mmio + PORT_CMD_ISSUE);
-	readl(port_mmio + PORT_CMD_ISSUE); /* flush */
 
-	if (ahci_poll_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x0, 1, 500)) {
+	tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1, 1, 500);
+	if (tmp & 0x1) {
 		rc = -EIO;
 		reason = "1st FIS failed";
 		goto fail;
@@ -605,7 +648,7 @@ static int ahci_softreset(struct ata_port *ap, int verbose, unsigned int *class)
 	msleep(1);
 
 	/* issue the second D2H Register FIS */
-	ahci_fill_cmd_slot(pp, cmd_fis_len);
+	ahci_fill_cmd_slot(pp, 0, cmd_fis_len);
 
 	tf.ctl &= ~ATA_SRST;
 	ata_tf_to_fis(&tf, fis, 0);
@@ -625,7 +668,7 @@ static int ahci_softreset(struct ata_port *ap, int verbose, unsigned int *class)
 	msleep(150);
 
 	*class = ATA_DEV_NONE;
-	if (sata_dev_present(ap)) {
+	if (ata_port_online(ap)) {
 		if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
 			rc = -EIO;
 			reason = "device not ready";
@@ -640,25 +683,31 @@ static int ahci_softreset(struct ata_port *ap, int verbose, unsigned int *class)
 fail_restart:
 	ahci_start_engine(ap);
 fail:
-	if (verbose)
-		printk(KERN_ERR "ata%u: softreset failed (%s)\n",
-		       ap->id, reason);
-	else
-		DPRINTK("EXIT, rc=%d reason=\"%s\"\n", rc, reason);
+	ata_port_printk(ap, KERN_ERR, "softreset failed (%s)\n", reason);
 	return rc;
 }
 
-static int ahci_hardreset(struct ata_port *ap, int verbose, unsigned int *class)
+static int ahci_hardreset(struct ata_port *ap, unsigned int *class)
 {
+	struct ahci_port_priv *pp = ap->private_data;
+	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
+	struct ata_taskfile tf;
 	int rc;
 
 	DPRINTK("ENTER\n");
 
 	ahci_stop_engine(ap);
-	rc = sata_std_hardreset(ap, verbose, class);
+
+	/* clear D2H reception area to properly wait for D2H FIS */
+	ata_tf_init(ap->device, &tf);
+	tf.command = 0xff;
+	ata_tf_to_fis(&tf, d2h_fis, 0);
+
+	rc = sata_std_hardreset(ap, class);
+
 	ahci_start_engine(ap);
 
-	if (rc == 0)
+	if (rc == 0 && ata_port_online(ap))
 		*class = ahci_dev_classify(ap);
 	if (*class == ATA_DEV_UNKNOWN)
 		*class = ATA_DEV_NONE;
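
Seeding the D2H reception area with command 0xff works because 0xff has ATA_BUSY set: until the device posts a real D2H Register FIS after COMRESET, any status taken from the RX FIS area still reads busy, so the post-reset wait keeps waiting. A tiny illustration (byte 2 of a D2H Register FIS carries the status register; the helper is hypothetical):

#define SKETCH_ATA_BUSY	0x80	/* restated for the sketch */

static int sketch_d2h_still_pending(const u8 *d2h_fis)
{
	/* seeded 0xff => status byte still shows BSY */
	return d2h_fis[2] & SKETCH_ATA_BUSY;
}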
@@ -686,13 +735,6 @@ static void ahci_postreset(struct ata_port *ap, unsigned int *class)
 	}
 }
 
-static int ahci_probe_reset(struct ata_port *ap, unsigned int *classes)
-{
-	return ata_drive_probe_reset(ap, ata_std_probeinit,
-				     ahci_softreset, ahci_hardreset,
-				     ahci_postreset, classes);
-}
-
 static u8 ahci_check_status(struct ata_port *ap)
 {
 	void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr;
@@ -708,9 +750,8 @@ static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
 	ata_tf_from_fis(d2h_fis, tf);
 }
 
-static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc)
+static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
 {
-	struct ahci_port_priv *pp = qc->ap->private_data;
 	struct scatterlist *sg;
 	struct ahci_sg *ahci_sg;
 	unsigned int n_sg = 0;
@@ -720,7 +761,7 @@ static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc)
 	/*
 	 * Next, the S/G list.
 	 */
-	ahci_sg = pp->cmd_tbl_sg;
+	ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
 	ata_for_each_sg(sg, qc) {
 		dma_addr_t addr = sg_dma_address(sg);
 		u32 sg_len = sg_dma_len(sg);
@@ -741,6 +782,7 @@ static void ahci_qc_prep(struct ata_queued_cmd *qc)
 	struct ata_port *ap = qc->ap;
 	struct ahci_port_priv *pp = ap->private_data;
 	int is_atapi = is_atapi_taskfile(&qc->tf);
+	void *cmd_tbl;
 	u32 opts;
 	const u32 cmd_fis_len = 5; /* five dwords */
 	unsigned int n_elem;
@@ -749,16 +791,17 @@ static void ahci_qc_prep(struct ata_queued_cmd *qc)
 	 * Fill in command table information.  First, the header,
 	 * a SATA Register - Host to Device command FIS.
 	 */
-	ata_tf_to_fis(&qc->tf, pp->cmd_tbl, 0);
+	cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;
+
+	ata_tf_to_fis(&qc->tf, cmd_tbl, 0);
 	if (is_atapi) {
-		memset(pp->cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
-		memcpy(pp->cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb,
-		       qc->dev->cdb_len);
+		memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
+		memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
 	}
 
 	n_elem = 0;
 	if (qc->flags & ATA_QCFLAG_DMAMAP)
-		n_elem = ahci_fill_sg(qc);
+		n_elem = ahci_fill_sg(qc, cmd_tbl);
 
 	/*
 	 * Fill in command slot information.
@@ -769,112 +812,122 @@ static void ahci_qc_prep(struct ata_queued_cmd *qc)
 	if (is_atapi)
 		opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
 
-	ahci_fill_cmd_slot(pp, opts);
+	ahci_fill_cmd_slot(pp, qc->tag, opts);
 }
 
-static void ahci_restart_port(struct ata_port *ap, u32 irq_stat)
+static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
 {
-	void __iomem *mmio = ap->host_set->mmio_base;
-	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
-	u32 tmp;
+	struct ahci_port_priv *pp = ap->private_data;
+	struct ata_eh_info *ehi = &ap->eh_info;
+	unsigned int err_mask = 0, action = 0;
+	struct ata_queued_cmd *qc;
+	u32 serror;
 
-	if ((ap->device[0].class != ATA_DEV_ATAPI) ||
-	    ((irq_stat & PORT_IRQ_TF_ERR) == 0))
-		printk(KERN_WARNING "ata%u: port reset, "
-		       "p_is %x is %x pis %x cmd %x tf %x ss %x se %x\n",
-			ap->id,
-			irq_stat,
-			readl(mmio + HOST_IRQ_STAT),
-			readl(port_mmio + PORT_IRQ_STAT),
-			readl(port_mmio + PORT_CMD),
-			readl(port_mmio + PORT_TFDATA),
-			readl(port_mmio + PORT_SCR_STAT),
-			readl(port_mmio + PORT_SCR_ERR));
-
-	/* stop DMA */
-	ahci_stop_engine(ap);
+	ata_ehi_clear_desc(ehi);
 
-	/* clear SATA phy error, if any */
-	tmp = readl(port_mmio + PORT_SCR_ERR);
-	writel(tmp, port_mmio + PORT_SCR_ERR);
+	/* AHCI needs SError cleared; otherwise, it might lock up */
+	serror = ahci_scr_read(ap, SCR_ERROR);
+	ahci_scr_write(ap, SCR_ERROR, serror);
 
-	/* if DRQ/BSY is set, device needs to be reset.
-	 * if so, issue COMRESET
-	 */
-	tmp = readl(port_mmio + PORT_TFDATA);
-	if (tmp & (ATA_BUSY | ATA_DRQ)) {
-		writel(0x301, port_mmio + PORT_SCR_CTL);
-		readl(port_mmio + PORT_SCR_CTL); /* flush */
-		udelay(10);
-		writel(0x300, port_mmio + PORT_SCR_CTL);
-		readl(port_mmio + PORT_SCR_CTL); /* flush */
+	/* analyze @irq_stat */
+	ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);
+
+	if (irq_stat & PORT_IRQ_TF_ERR)
+		err_mask |= AC_ERR_DEV;
+
+	if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) {
+		err_mask |= AC_ERR_HOST_BUS;
+		action |= ATA_EH_SOFTRESET;
 	}
 
-	/* re-start DMA */
-	ahci_start_engine(ap);
-}
+	if (irq_stat & PORT_IRQ_IF_ERR) {
+		err_mask |= AC_ERR_ATA_BUS;
+		action |= ATA_EH_SOFTRESET;
+		ata_ehi_push_desc(ehi, ", interface fatal error");
+	}
 
-static void ahci_eng_timeout(struct ata_port *ap)
-{
-	struct ata_host_set *host_set = ap->host_set;
-	void __iomem *mmio = host_set->mmio_base;
-	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
-	struct ata_queued_cmd *qc;
-	unsigned long flags;
+	if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) {
+		ata_ehi_hotplugged(ehi);
+		ata_ehi_push_desc(ehi, ", %s", irq_stat & PORT_IRQ_CONNECT ?
+				  "connection status changed" : "PHY RDY changed");
+	}
 
-	printk(KERN_WARNING "ata%u: handling error/timeout\n", ap->id);
+	if (irq_stat & PORT_IRQ_UNK_FIS) {
+		u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK);
 
-	spin_lock_irqsave(&host_set->lock, flags);
+		err_mask |= AC_ERR_HSM;
+		action |= ATA_EH_SOFTRESET;
+		ata_ehi_push_desc(ehi, ", unknown FIS %08x %08x %08x %08x",
+				  unk[0], unk[1], unk[2], unk[3]);
+	}
 
-	ahci_restart_port(ap, readl(port_mmio + PORT_IRQ_STAT));
-	qc = ata_qc_from_tag(ap, ap->active_tag);
-	qc->err_mask |= AC_ERR_TIMEOUT;
+	/* okay, let's hand over to EH */
+	ehi->serror |= serror;
+	ehi->action |= action;
 
-	spin_unlock_irqrestore(&host_set->lock, flags);
+	qc = ata_qc_from_tag(ap, ap->active_tag);
+	if (qc)
+		qc->err_mask |= err_mask;
+	else
+		ehi->err_mask |= err_mask;
 
-	ata_eh_qc_complete(qc);
+	if (irq_stat & PORT_IRQ_FREEZE)
+		ata_port_freeze(ap);
+	else
+		ata_port_abort(ap);
 }
 
-static inline int ahci_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
+static void ahci_host_intr(struct ata_port *ap)
 {
 	void __iomem *mmio = ap->host_set->mmio_base;
 	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
-	u32 status, serr, ci;
-
-	serr = readl(port_mmio + PORT_SCR_ERR);
-	writel(serr, port_mmio + PORT_SCR_ERR);
+	struct ata_eh_info *ehi = &ap->eh_info;
+	u32 status, qc_active;
+	int rc;
 
 	status = readl(port_mmio + PORT_IRQ_STAT);
 	writel(status, port_mmio + PORT_IRQ_STAT);
 
-	ci = readl(port_mmio + PORT_CMD_ISSUE);
-	if (likely((ci & 0x1) == 0)) {
-		if (qc) {
-			WARN_ON(qc->err_mask);
-			ata_qc_complete(qc);
-			qc = NULL;
-		}
+	if (unlikely(status & PORT_IRQ_ERROR)) {
+		ahci_error_intr(ap, status);
+		return;
 	}
 
-	if (status & PORT_IRQ_FATAL) {
-		unsigned int err_mask;
-		if (status & PORT_IRQ_TF_ERR)
-			err_mask = AC_ERR_DEV;
-		else if (status & PORT_IRQ_IF_ERR)
-			err_mask = AC_ERR_ATA_BUS;
-		else
-			err_mask = AC_ERR_HOST_BUS;
-
-		/* command processing has stopped due to error; restart */
-		ahci_restart_port(ap, status);
-
-		if (qc) {
-			qc->err_mask |= err_mask;
-			ata_qc_complete(qc);
-		}
+	if (ap->sactive)
+		qc_active = readl(port_mmio + PORT_SCR_ACT);
+	else
+		qc_active = readl(port_mmio + PORT_CMD_ISSUE);
+
+	rc = ata_qc_complete_multiple(ap, qc_active, NULL);
+	if (rc > 0)
+		return;
+	if (rc < 0) {
+		ehi->err_mask |= AC_ERR_HSM;
+		ehi->action |= ATA_EH_SOFTRESET;
+		ata_port_freeze(ap);
+		return;
+	}
+
+	/* hmmm... a spurious interupt */
+
+	/* some devices send D2H reg with I bit set during NCQ command phase */
+	if (ap->sactive && status & PORT_IRQ_D2H_REG_FIS)
+		return;
+
+	/* ignore interim PIO setup fis interrupts */
+	if (ata_tag_valid(ap->active_tag)) {
+		struct ata_queued_cmd *qc =
+			ata_qc_from_tag(ap, ap->active_tag);
+
+		if (qc && qc->tf.protocol == ATA_PROT_PIO &&
+		    (status & PORT_IRQ_PIOS_FIS))
+			return;
 	}
 
-	return 1;
+	if (ata_ratelimit())
+		ata_port_printk(ap, KERN_INFO, "spurious interrupt "
+				"(irq_stat 0x%x active_tag %d sactive 0x%x)\n",
+				status, ap->active_tag, ap->sactive);
 }
 
 static void ahci_irq_clear(struct ata_port *ap)
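
The interrupt handler now completes commands by diffing the controller's active-tag bitmap (SACT for NCQ, CI otherwise) against libata's notion of outstanding tags. A minimal sketch of what ata_qc_complete_multiple() is assumed to do at this call site (field and return-value conventions inferred from the code above; not the libata implementation):

static int sketch_complete_multiple(struct ata_port *ap, u32 qc_active)
{
	u32 done_mask = ap->qc_active ^ qc_active;
	int tag, nr_done = 0;

	/* a tag the hardware reports active that libata does not
	 * expect signals an HSM violation => rc < 0 path above */
	if (unlikely(qc_active & ~ap->qc_active))
		return -EINVAL;

	/* every tag no longer reported active by the port is done */
	for (tag = 0; done_mask; tag++, done_mask >>= 1) {
		struct ata_queued_cmd *qc;

		if (!(done_mask & 1))
			continue;
		qc = ata_qc_from_tag(ap, tag);
		if (qc) {
			ata_qc_complete(qc);
			nr_done++;
		}
	}
	return nr_done;
}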
@@ -882,7 +935,7 @@ static void ahci_irq_clear(struct ata_port *ap)
 	/* TODO */
 }
 
-static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
+static irqreturn_t ahci_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
 {
 	struct ata_host_set *host_set = dev_instance;
 	struct ahci_host_priv *hpriv;
@@ -911,14 +964,7 @@ static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *
 
 		ap = host_set->ports[i];
 		if (ap) {
-			struct ata_queued_cmd *qc;
-			qc = ata_qc_from_tag(ap, ap->active_tag);
-			if (!ahci_host_intr(ap, qc))
-				if (ata_ratelimit())
-					dev_printk(KERN_WARNING, host_set->dev,
-					  "unhandled interrupt on port %u\n",
-					  i);
-
+			ahci_host_intr(ap);
 			VPRINTK("port %u\n", i);
 		} else {
 			VPRINTK("port %u (no irq)\n", i);
@@ -935,7 +981,7 @@ static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *
 		handled = 1;
 	}
 
-	spin_unlock(&host_set->lock);
+	spin_unlock(&host_set->lock);
 
 	VPRINTK("EXIT\n");
 
@@ -947,12 +993,65 @@ static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
 	struct ata_port *ap = qc->ap;
 	void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
 
-	writel(1, port_mmio + PORT_CMD_ISSUE);
+	if (qc->tf.protocol == ATA_PROT_NCQ)
+		writel(1 << qc->tag, port_mmio + PORT_SCR_ACT);
+	writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE);
 	readl(port_mmio + PORT_CMD_ISSUE); /* flush */
 
 	return 0;
 }
 
+static void ahci_freeze(struct ata_port *ap)
+{
+	void __iomem *mmio = ap->host_set->mmio_base;
+	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
+
+	/* turn IRQ off */
+	writel(0, port_mmio + PORT_IRQ_MASK);
+}
+
+static void ahci_thaw(struct ata_port *ap)
+{
+	void __iomem *mmio = ap->host_set->mmio_base;
+	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
+	u32 tmp;
+
+	/* clear IRQ */
+	tmp = readl(port_mmio + PORT_IRQ_STAT);
+	writel(tmp, port_mmio + PORT_IRQ_STAT);
+	writel(1 << ap->id, mmio + HOST_IRQ_STAT);
+
+	/* turn IRQ back on */
+	writel(DEF_PORT_IRQ, port_mmio + PORT_IRQ_MASK);
+}
+
+static void ahci_error_handler(struct ata_port *ap)
+{
+	if (!(ap->flags & ATA_FLAG_FROZEN)) {
+		/* restart engine */
+		ahci_stop_engine(ap);
+		ahci_start_engine(ap);
+	}
+
+	/* perform recovery */
+	ata_do_eh(ap, ahci_prereset, ahci_softreset, ahci_hardreset,
+		  ahci_postreset);
+}
+
+static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+
+	if (qc->flags & ATA_QCFLAG_FAILED)
+		qc->err_mask |= AC_ERR_OTHER;
+
+	if (qc->err_mask) {
+		/* make DMA engine forget about the failed command */
+		ahci_stop_engine(ap);
+		ahci_start_engine(ap);
+	}
+}
+
 static void ahci_setup_port(struct ata_ioports *port, unsigned long base,
 			    unsigned int port_idx)
 {
958{ 1057{
@@ -1097,9 +1196,6 @@ static int ahci_host_init(struct ata_probe_ent *probe_ent)
1097 writel(tmp, port_mmio + PORT_IRQ_STAT); 1196 writel(tmp, port_mmio + PORT_IRQ_STAT);
1098 1197
1099 writel(1 << i, mmio + HOST_IRQ_STAT); 1198 writel(1 << i, mmio + HOST_IRQ_STAT);
1100
1101 /* set irq mask (enables interrupts) */
1102 writel(DEF_PORT_IRQ, port_mmio + PORT_IRQ_MASK);
1103 } 1199 }
1104 1200
1105 tmp = readl(mmio + HOST_CTL); 1201 tmp = readl(mmio + HOST_CTL);
@@ -1197,6 +1293,8 @@ static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	VPRINTK("ENTER\n");
 
+	WARN_ON(ATA_MAX_QUEUE > AHCI_MAX_CMDS);
+
 	if (!printed_version++)
 		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
 
@@ -1264,6 +1362,9 @@ static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (rc)
 		goto err_out_hpriv;
 
+	if (hpriv->cap & HOST_CAP_NCQ)
+		probe_ent->host_flags |= ATA_FLAG_NCQ;
+
 	ahci_print_info(probe_ent);
 
 	/* FIXME: check ata_device_add return value */
@@ -1295,21 +1396,17 @@ static void ahci_remove_one (struct pci_dev *pdev)
 	struct device *dev = pci_dev_to_dev(pdev);
 	struct ata_host_set *host_set = dev_get_drvdata(dev);
 	struct ahci_host_priv *hpriv = host_set->private_data;
-	struct ata_port *ap;
 	unsigned int i;
 	int have_msi;
 
-	for (i = 0; i < host_set->n_ports; i++) {
-		ap = host_set->ports[i];
-
-		scsi_remove_host(ap->host);
-	}
+	for (i = 0; i < host_set->n_ports; i++)
+		ata_port_detach(host_set->ports[i]);
 
 	have_msi = hpriv->flags & AHCI_FLAG_MSI;
 	free_irq(host_set->irq, host_set);
 
 	for (i = 0; i < host_set->n_ports; i++) {
-		ap = host_set->ports[i];
+		struct ata_port *ap = host_set->ports[i];
 
 		ata_scsi_release(ap->host);
 		scsi_host_put(ap->host);
diff --git a/drivers/scsi/ata_piix.c b/drivers/scsi/ata_piix.c
index 6dc88149f9f1..521b718763f6 100644
--- a/drivers/scsi/ata_piix.c
+++ b/drivers/scsi/ata_piix.c
@@ -93,7 +93,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME	"ata_piix"
-#define DRV_VERSION	"1.05"
+#define DRV_VERSION	"1.10"
 
 enum {
 	PIIX_IOCFG		= 0x54, /* IDE I/O configuration register */
@@ -146,11 +146,10 @@ struct piix_map_db {
 
 static int piix_init_one (struct pci_dev *pdev,
 			  const struct pci_device_id *ent);
-
-static int piix_pata_probe_reset(struct ata_port *ap, unsigned int *classes);
-static int piix_sata_probe_reset(struct ata_port *ap, unsigned int *classes);
 static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev);
 static void piix_set_dmamode (struct ata_port *ap, struct ata_device *adev);
+static void piix_pata_error_handler(struct ata_port *ap);
+static void piix_sata_error_handler(struct ata_port *ap);
 
 static unsigned int in_module_init = 1;
 
@@ -159,6 +158,7 @@ static const struct pci_device_id piix_pci_tbl[] = {
 	{ 0x8086, 0x7111, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix4_pata },
 	{ 0x8086, 0x24db, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_pata },
 	{ 0x8086, 0x25a2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_pata },
+	{ 0x8086, 0x27df, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_pata },
 #endif
 
 	/* NOTE: The following PCI ids must be kept in sync with the
@@ -218,6 +218,7 @@ static struct scsi_host_template piix_sht = {
 	.proc_name		= DRV_NAME,
 	.dma_boundary		= ATA_DMA_BOUNDARY,
 	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
 	.bios_param		= ata_std_bios_param,
 	.resume			= ata_scsi_device_resume,
 	.suspend		= ata_scsi_device_suspend,
@@ -227,6 +228,7 @@ static const struct ata_port_operations piix_pata_ops = {
 	.port_disable		= ata_port_disable,
 	.set_piomode		= piix_set_piomode,
 	.set_dmamode		= piix_set_dmamode,
+	.mode_filter		= ata_pci_default_filter,
 
 	.tf_load		= ata_tf_load,
 	.tf_read		= ata_tf_read,
@@ -234,16 +236,18 @@ static const struct ata_port_operations piix_pata_ops = {
 	.exec_command		= ata_exec_command,
 	.dev_select		= ata_std_dev_select,
 
-	.probe_reset		= piix_pata_probe_reset,
-
 	.bmdma_setup		= ata_bmdma_setup,
 	.bmdma_start		= ata_bmdma_start,
 	.bmdma_stop		= ata_bmdma_stop,
 	.bmdma_status		= ata_bmdma_status,
 	.qc_prep		= ata_qc_prep,
 	.qc_issue		= ata_qc_issue_prot,
+	.data_xfer		= ata_pio_data_xfer,
 
-	.eng_timeout		= ata_eng_timeout,
+	.freeze			= ata_bmdma_freeze,
+	.thaw			= ata_bmdma_thaw,
+	.error_handler		= piix_pata_error_handler,
+	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
 
 	.irq_handler		= ata_interrupt,
 	.irq_clear		= ata_bmdma_irq_clear,
@@ -262,16 +266,18 @@ static const struct ata_port_operations piix_sata_ops = {
 	.exec_command		= ata_exec_command,
 	.dev_select		= ata_std_dev_select,
 
-	.probe_reset		= piix_sata_probe_reset,
-
 	.bmdma_setup		= ata_bmdma_setup,
 	.bmdma_start		= ata_bmdma_start,
 	.bmdma_stop		= ata_bmdma_stop,
 	.bmdma_status		= ata_bmdma_status,
 	.qc_prep		= ata_qc_prep,
 	.qc_issue		= ata_qc_issue_prot,
+	.data_xfer		= ata_pio_data_xfer,
 
-	.eng_timeout		= ata_eng_timeout,
+	.freeze			= ata_bmdma_freeze,
+	.thaw			= ata_bmdma_thaw,
+	.error_handler		= piix_sata_error_handler,
+	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
 
 	.irq_handler		= ata_interrupt,
 	.irq_clear		= ata_bmdma_irq_clear,
@@ -455,59 +461,51 @@ cbl40:
 }
 
 /**
- *	piix_pata_probeinit - probeinit for PATA host controller
+ *	piix_pata_prereset - prereset for PATA host controller
  *	@ap: Target port
  *
- *	Probeinit including cable detection.
- *
- *	LOCKING:
- *	None (inherited from caller).
- */
-static void piix_pata_probeinit(struct ata_port *ap)
-{
-	piix_pata_cbl_detect(ap);
-	ata_std_probeinit(ap);
-}
-
-/**
- *	piix_pata_probe_reset - Perform reset on PATA port and classify
- *	@ap: Port to reset
- *	@classes: Resulting classes of attached devices
- *
- *	Reset PATA phy and classify attached devices.
+ *	Prereset including cable detection.
  *
  *	LOCKING:
  *	None (inherited from caller).
  */
-static int piix_pata_probe_reset(struct ata_port *ap, unsigned int *classes)
+static int piix_pata_prereset(struct ata_port *ap)
 {
 	struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
 
 	if (!pci_test_config_bits(pdev, &piix_enable_bits[ap->hard_port_no])) {
-		printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id);
+		ata_port_printk(ap, KERN_INFO, "port disabled. ignoring.\n");
+		ap->eh_context.i.action &= ~ATA_EH_RESET_MASK;
 		return 0;
 	}
 
-	return ata_drive_probe_reset(ap, piix_pata_probeinit,
-				     ata_std_softreset, NULL,
-				     ata_std_postreset, classes);
+	piix_pata_cbl_detect(ap);
+
+	return ata_std_prereset(ap);
+}
+
+static void piix_pata_error_handler(struct ata_port *ap)
+{
+	ata_bmdma_drive_eh(ap, piix_pata_prereset, ata_std_softreset, NULL,
+			   ata_std_postreset);
 }
 
 /**
- *	piix_sata_probe - Probe PCI device for present SATA devices
- *	@ap: Port associated with the PCI device we wish to probe
+ *	piix_sata_prereset - prereset for SATA host controller
+ *	@ap: Target port
  *
  *	Reads and configures SATA PCI device's PCI config register
  *	Port Configuration and Status (PCS) to determine port and
- *	device availability.
+ *	device availability.  Return -ENODEV to skip reset if no
+ *	device is present.
  *
  *	LOCKING:
  *	None (inherited from caller).
  *
  *	RETURNS:
- *	Mask of avaliable devices on the port.
+ *	0 if device is present, -ENODEV otherwise.
  */
-static unsigned int piix_sata_probe (struct ata_port *ap)
+static int piix_sata_prereset(struct ata_port *ap)
 {
 	struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
 	const unsigned int *map = ap->host_set->private_data;
@@ -549,29 +547,19 @@ static unsigned int piix_sata_probe (struct ata_port *ap)
 	DPRINTK("ata%u: LEAVE, pcs=0x%x present_mask=0x%x\n",
 		ap->id, pcs, present_mask);
 
-	return present_mask;
-}
-
-/**
- *	piix_sata_probe_reset - Perform reset on SATA port and classify
- *	@ap: Port to reset
- *	@classes: Resulting classes of attached devices
- *
- *	Reset SATA phy and classify attached devices.
- *
- *	LOCKING:
- *	None (inherited from caller).
- */
-static int piix_sata_probe_reset(struct ata_port *ap, unsigned int *classes)
-{
-	if (!piix_sata_probe(ap)) {
-		printk(KERN_INFO "ata%u: SATA port has no device.\n", ap->id);
+	if (!present_mask) {
+		ata_port_printk(ap, KERN_INFO, "SATA port has no device.\n");
+		ap->eh_context.i.action &= ~ATA_EH_RESET_MASK;
 		return 0;
 	}
 
-	return ata_drive_probe_reset(ap, ata_std_probeinit,
-				     ata_std_softreset, NULL,
-				     ata_std_postreset, classes);
+	return ata_std_prereset(ap);
+}
+
+static void piix_sata_error_handler(struct ata_port *ap)
+{
+	ata_bmdma_drive_eh(ap, piix_sata_prereset, ata_std_softreset, NULL,
+			   ata_std_postreset);
 }
 
 /**
@@ -760,15 +748,15 @@ static int __devinit piix_check_450nx_errata(struct pci_dev *ata_dev)
 		pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
 		pci_read_config_word(pdev, 0x41, &cfg);
 		/* Only on the original revision: IDE DMA can hang */
-		if(rev == 0x00)
+		if (rev == 0x00)
 			no_piix_dma = 1;
 		/* On all revisions below 5 PXB bus lock must be disabled for IDE */
-		else if(cfg & (1<<14) && rev < 5)
+		else if (cfg & (1<<14) && rev < 5)
 			no_piix_dma = 2;
 	}
-	if(no_piix_dma)
+	if (no_piix_dma)
 		dev_printk(KERN_WARNING, &ata_dev->dev, "450NX errata present, disabling IDE DMA.\n");
-	if(no_piix_dma == 2)
+	if (no_piix_dma == 2)
 		dev_printk(KERN_WARNING, &ata_dev->dev, "A BIOS update may resolve this.\n");
 	return no_piix_dma;
 }
diff --git a/drivers/scsi/libata-bmdma.c b/drivers/scsi/libata-bmdma.c
index 835dff0bafdc..13fab97c840e 100644
--- a/drivers/scsi/libata-bmdma.c
+++ b/drivers/scsi/libata-bmdma.c
@@ -652,6 +652,152 @@ void ata_bmdma_stop(struct ata_queued_cmd *qc)
 	ata_altstatus(ap);        /* dummy read */
 }
 
+/**
+ *	ata_bmdma_freeze - Freeze BMDMA controller port
+ *	@ap: port to freeze
+ *
+ *	Freeze BMDMA controller port.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+void ata_bmdma_freeze(struct ata_port *ap)
+{
+	struct ata_ioports *ioaddr = &ap->ioaddr;
+
+	ap->ctl |= ATA_NIEN;
+	ap->last_ctl = ap->ctl;
+
+	if (ap->flags & ATA_FLAG_MMIO)
+		writeb(ap->ctl, (void __iomem *)ioaddr->ctl_addr);
+	else
+		outb(ap->ctl, ioaddr->ctl_addr);
+}
+
+/**
+ *	ata_bmdma_thaw - Thaw BMDMA controller port
+ *	@ap: port to thaw
+ *
+ *	Thaw BMDMA controller port.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+void ata_bmdma_thaw(struct ata_port *ap)
+{
+	/* clear & re-enable interrupts */
+	ata_chk_status(ap);
+	ap->ops->irq_clear(ap);
+	if (ap->ioaddr.ctl_addr)	/* FIXME: hack. create a hook instead */
+		ata_irq_on(ap);
+}
+
+/**
+ *	ata_bmdma_drive_eh - Perform EH with given methods for BMDMA controller
+ *	@ap: port to handle error for
+ *	@prereset: prereset method (can be NULL)
+ *	@softreset: softreset method (can be NULL)
+ *	@hardreset: hardreset method (can be NULL)
+ *	@postreset: postreset method (can be NULL)
+ *
+ *	Handle error for ATA BMDMA controller.  It can handle both
+ *	PATA and SATA controllers.  Many controllers should be able to
+ *	use this EH as-is or with some added handling before and
+ *	after.
+ *
+ *	This function is intended to be used for constructing
+ *	->error_handler callback by low level drivers.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep)
+ */
+void ata_bmdma_drive_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
+			ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
+			ata_postreset_fn_t postreset)
+{
+	struct ata_host_set *host_set = ap->host_set;
+	struct ata_eh_context *ehc = &ap->eh_context;
+	struct ata_queued_cmd *qc;
+	unsigned long flags;
+	int thaw = 0;
+
+	qc = __ata_qc_from_tag(ap, ap->active_tag);
+	if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
+		qc = NULL;
+
+	/* reset PIO HSM and stop DMA engine */
+	spin_lock_irqsave(&host_set->lock, flags);
+
+	ap->hsm_task_state = HSM_ST_IDLE;
+
+	if (qc && (qc->tf.protocol == ATA_PROT_DMA ||
+		   qc->tf.protocol == ATA_PROT_ATAPI_DMA)) {
+		u8 host_stat;
+
+		host_stat = ata_bmdma_status(ap);
+
+		ata_ehi_push_desc(&ehc->i, "BMDMA stat 0x%x", host_stat);
+
+		/* BMDMA controllers indicate host bus error by
+		 * setting DMA_ERR bit and timing out.  As it wasn't
+		 * really a timeout event, adjust error mask and
+		 * cancel frozen state.
+		 */
+		if (qc->err_mask == AC_ERR_TIMEOUT && host_stat & ATA_DMA_ERR) {
+			qc->err_mask = AC_ERR_HOST_BUS;
+			thaw = 1;
+		}
+
+		ap->ops->bmdma_stop(qc);
+	}
+
+	ata_altstatus(ap);
+	ata_chk_status(ap);
+	ap->ops->irq_clear(ap);
+
+	spin_unlock_irqrestore(&host_set->lock, flags);
+
+	if (thaw)
+		ata_eh_thaw_port(ap);
+
+	/* PIO and DMA engines have been stopped, perform recovery */
+	ata_do_eh(ap, prereset, softreset, hardreset, postreset);
+}
+
+/**
+ *	ata_bmdma_error_handler - Stock error handler for BMDMA controller
+ *	@ap: port to handle error for
+ *
+ *	Stock error handler for BMDMA controller.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep)
+ */
+void ata_bmdma_error_handler(struct ata_port *ap)
+{
+	ata_reset_fn_t hardreset;
+
+	hardreset = NULL;
+	if (sata_scr_valid(ap))
+		hardreset = sata_std_hardreset;
+
+	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset, hardreset,
+			   ata_std_postreset);
+}
+
+/**
+ *	ata_bmdma_post_internal_cmd - Stock post_internal_cmd for
+ *	BMDMA controller
+ *	@qc: internal command to clean up
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep)
+ */
+void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc)
+{
+	ata_bmdma_stop(qc);
+}
+
 #ifdef CONFIG_PCI
 static struct ata_probe_ent *
 ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
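
A driver converted to the new EH drops ->eng_timeout and ->probe_reset and wires these stock helpers into its port operations, exactly as the ata_piix changes above do. Sketched here for a hypothetical BMDMA driver:

/* Hypothetical wiring, mirroring the ata_piix conversion above. */
static const struct ata_port_operations example_bmdma_ops = {
	/* ... taskfile, bmdma and qc hooks as before ... */
	.freeze			= ata_bmdma_freeze,
	.thaw			= ata_bmdma_thaw,
	.error_handler		= ata_bmdma_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
};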
@@ -930,10 +1076,21 @@ int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
 
 	/* FIXME: check ata_device_add return */
 	if (legacy_mode) {
-		if (legacy_mode & (1 << 0))
+		struct device *dev = &pdev->dev;
+		struct ata_host_set *host_set = NULL;
+
+		if (legacy_mode & (1 << 0)) {
 			ata_device_add(probe_ent);
-		if (legacy_mode & (1 << 1))
+			host_set = dev_get_drvdata(dev);
+		}
+
+		if (legacy_mode & (1 << 1)) {
 			ata_device_add(probe_ent2);
+			if (host_set) {
+				host_set->next = dev_get_drvdata(dev);
+				dev_set_drvdata(dev, host_set);
+			}
+		}
 	} else
 		ata_device_add(probe_ent);
 
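
Chaining the second host_set through ->next keeps a single drvdata pointer per PCI device while letting teardown find both; a hypothetical removal loop under that assumption (field use inferred from the hunk above):

static void sketch_remove_hosts(struct device *dev)
{
	struct ata_host_set *hs = dev_get_drvdata(dev);

	while (hs) {
		struct ata_host_set *next = hs->next;

		/* ... detach and free the ports of hs here ... */
		hs = next;
	}
}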
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index b046ffa22101..89c3fbe6e67c 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -61,22 +61,29 @@
 
 #include "libata.h"
 
-static unsigned int ata_dev_init_params(struct ata_port *ap,
-					struct ata_device *dev,
-					u16 heads,
-					u16 sectors);
-static void ata_set_mode(struct ata_port *ap);
-static unsigned int ata_dev_set_xfermode(struct ata_port *ap,
-					 struct ata_device *dev);
-static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev);
+/* debounce timing parameters in msecs { interval, duration, timeout } */
+const unsigned long sata_deb_timing_boot[]		= {   5,  100, 2000 };
+const unsigned long sata_deb_timing_eh[]		= {  25,  500, 2000 };
+const unsigned long sata_deb_timing_before_fsrst[]	= { 100, 2000, 5000 };
+
+static unsigned int ata_dev_init_params(struct ata_device *dev,
+					u16 heads, u16 sectors);
+static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
+static void ata_dev_xfermask(struct ata_device *dev);
 
 static unsigned int ata_unique_id = 1;
 static struct workqueue_struct *ata_wq;
 
+struct workqueue_struct *ata_aux_wq;
+
 int atapi_enabled = 1;
 module_param(atapi_enabled, int, 0444);
 MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
 
+int atapi_dmadir = 0;
+module_param(atapi_dmadir, int, 0444);
+MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
+
 int libata_fua = 0;
 module_param_named(fua, libata_fua, int, 0444);
 MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
@@ -397,11 +404,22 @@ static const char *ata_mode_string(unsigned int xfer_mask)
397 return "<n/a>"; 404 return "<n/a>";
398} 405}
399 406
400static void ata_dev_disable(struct ata_port *ap, struct ata_device *dev) 407static const char *sata_spd_string(unsigned int spd)
401{ 408{
402 if (ata_dev_present(dev)) { 409 static const char * const spd_str[] = {
403 printk(KERN_WARNING "ata%u: dev %u disabled\n", 410 "1.5 Gbps",
404 ap->id, dev->devno); 411 "3.0 Gbps",
412 };
413
414 if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
415 return "<unknown>";
416 return spd_str[spd - 1];
417}
418
419void ata_dev_disable(struct ata_device *dev)
420{
421 if (ata_dev_enabled(dev)) {
422 ata_dev_printk(dev, KERN_WARNING, "disabled\n");
405 dev->class++; 423 dev->class++;
406 } 424 }
407} 425}
@@ -943,15 +961,14 @@ void ata_qc_complete_internal(struct ata_queued_cmd *qc)
943{ 961{
944 struct completion *waiting = qc->private_data; 962 struct completion *waiting = qc->private_data;
945 963
946 qc->ap->ops->tf_read(qc->ap, &qc->tf);
947 complete(waiting); 964 complete(waiting);
948} 965}
949 966
950/** 967/**
951 * ata_exec_internal - execute libata internal command 968 * ata_exec_internal - execute libata internal command
952 * @ap: Port to which the command is sent
953 * @dev: Device to which the command is sent 969 * @dev: Device to which the command is sent
954 * @tf: Taskfile registers for the command and the result 970 * @tf: Taskfile registers for the command and the result
971 * @cdb: CDB for packet command
 955 * @dma_dir: Data transfer direction of the command 972
956 * @buf: Data buffer of the command 973 * @buf: Data buffer of the command
957 * @buflen: Length of data buffer 974 * @buflen: Length of data buffer
@@ -964,25 +981,66 @@ void ata_qc_complete_internal(struct ata_queued_cmd *qc)
964 * 981 *
965 * LOCKING: 982 * LOCKING:
966 * None. Should be called with kernel context, might sleep. 983 * None. Should be called with kernel context, might sleep.
984 *
985 * RETURNS:
986 * Zero on success, AC_ERR_* mask on failure
967 */ 987 */
968 988unsigned ata_exec_internal(struct ata_device *dev,
969static unsigned 989 struct ata_taskfile *tf, const u8 *cdb,
970ata_exec_internal(struct ata_port *ap, struct ata_device *dev, 990 int dma_dir, void *buf, unsigned int buflen)
971 struct ata_taskfile *tf,
972 int dma_dir, void *buf, unsigned int buflen)
973{ 991{
992 struct ata_port *ap = dev->ap;
974 u8 command = tf->command; 993 u8 command = tf->command;
975 struct ata_queued_cmd *qc; 994 struct ata_queued_cmd *qc;
995 unsigned int tag, preempted_tag;
996 u32 preempted_sactive, preempted_qc_active;
976 DECLARE_COMPLETION(wait); 997 DECLARE_COMPLETION(wait);
977 unsigned long flags; 998 unsigned long flags;
978 unsigned int err_mask; 999 unsigned int err_mask;
1000 int rc;
979 1001
980 spin_lock_irqsave(&ap->host_set->lock, flags); 1002 spin_lock_irqsave(&ap->host_set->lock, flags);
981 1003
982 qc = ata_qc_new_init(ap, dev); 1004 /* no internal command while frozen */
983 BUG_ON(qc == NULL); 1005 if (ap->flags & ATA_FLAG_FROZEN) {
1006 spin_unlock_irqrestore(&ap->host_set->lock, flags);
1007 return AC_ERR_SYSTEM;
1008 }
1009
1010 /* initialize internal qc */
1011
1012 /* XXX: Tag 0 is used for drivers with legacy EH as some
1013 * drivers choke if any other tag is given. This breaks
1014 * ata_tag_internal() test for those drivers. Don't use new
1015 * EH stuff without converting to it.
1016 */
1017 if (ap->ops->error_handler)
1018 tag = ATA_TAG_INTERNAL;
1019 else
1020 tag = 0;
1021
1022 if (test_and_set_bit(tag, &ap->qc_allocated))
1023 BUG();
1024 qc = __ata_qc_from_tag(ap, tag);
1025
1026 qc->tag = tag;
1027 qc->scsicmd = NULL;
1028 qc->ap = ap;
1029 qc->dev = dev;
1030 ata_qc_reinit(qc);
984 1031
1032 preempted_tag = ap->active_tag;
1033 preempted_sactive = ap->sactive;
1034 preempted_qc_active = ap->qc_active;
1035 ap->active_tag = ATA_TAG_POISON;
1036 ap->sactive = 0;
1037 ap->qc_active = 0;
1038
1039 /* prepare & issue qc */
985 qc->tf = *tf; 1040 qc->tf = *tf;
1041 if (cdb)
1042 memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1043 qc->flags |= ATA_QCFLAG_RESULT_TF;
986 qc->dma_dir = dma_dir; 1044 qc->dma_dir = dma_dir;
987 if (dma_dir != DMA_NONE) { 1045 if (dma_dir != DMA_NONE) {
988 ata_sg_init_one(qc, buf, buflen); 1046 ata_sg_init_one(qc, buf, buflen);
@@ -996,31 +1054,53 @@ ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
996 1054
997 spin_unlock_irqrestore(&ap->host_set->lock, flags); 1055 spin_unlock_irqrestore(&ap->host_set->lock, flags);
998 1056
999 if (!wait_for_completion_timeout(&wait, ATA_TMOUT_INTERNAL)) { 1057 rc = wait_for_completion_timeout(&wait, ATA_TMOUT_INTERNAL);
1000 ata_port_flush_task(ap);
1001 1058
1059 ata_port_flush_task(ap);
1060
1061 if (!rc) {
1002 spin_lock_irqsave(&ap->host_set->lock, flags); 1062 spin_lock_irqsave(&ap->host_set->lock, flags);
1003 1063
1004 /* We're racing with irq here. If we lose, the 1064 /* We're racing with irq here. If we lose, the
1005 * following test prevents us from completing the qc 1065 * following test prevents us from completing the qc
1006 * again. If completion irq occurs after here but 1066 * twice. If we win, the port is frozen and will be
1007 * before the caller cleans up, it will result in a 1067 * cleaned up by ->post_internal_cmd().
1008 * spurious interrupt. We can live with that.
1009 */ 1068 */
1010 if (qc->flags & ATA_QCFLAG_ACTIVE) { 1069 if (qc->flags & ATA_QCFLAG_ACTIVE) {
1011 qc->err_mask = AC_ERR_TIMEOUT; 1070 qc->err_mask |= AC_ERR_TIMEOUT;
1012 ata_qc_complete(qc); 1071
1013 printk(KERN_WARNING "ata%u: qc timeout (cmd 0x%x)\n", 1072 if (ap->ops->error_handler)
1014 ap->id, command); 1073 ata_port_freeze(ap);
1074 else
1075 ata_qc_complete(qc);
1076
1077 ata_dev_printk(dev, KERN_WARNING,
1078 "qc timeout (cmd 0x%x)\n", command);
1015 } 1079 }
1016 1080
1017 spin_unlock_irqrestore(&ap->host_set->lock, flags); 1081 spin_unlock_irqrestore(&ap->host_set->lock, flags);
1018 } 1082 }
1019 1083
1020 *tf = qc->tf; 1084 /* do post_internal_cmd */
1085 if (ap->ops->post_internal_cmd)
1086 ap->ops->post_internal_cmd(qc);
1087
1088 if (qc->flags & ATA_QCFLAG_FAILED && !qc->err_mask) {
1089 ata_dev_printk(dev, KERN_WARNING, "zero err_mask for failed "
1090 "internal command, assuming AC_ERR_OTHER\n");
1091 qc->err_mask |= AC_ERR_OTHER;
1092 }
1093
1094 /* finish up */
1095 spin_lock_irqsave(&ap->host_set->lock, flags);
1096
1097 *tf = qc->result_tf;
1021 err_mask = qc->err_mask; 1098 err_mask = qc->err_mask;
1022 1099
1023 ata_qc_free(qc); 1100 ata_qc_free(qc);
1101 ap->active_tag = preempted_tag;
1102 ap->sactive = preempted_sactive;
1103 ap->qc_active = preempted_qc_active;
1024 1104
1025 /* XXX - Some LLDDs (sata_mv) disable port on command failure. 1105 /* XXX - Some LLDDs (sata_mv) disable port on command failure.
1026 * Until those drivers are fixed, we detect the condition 1106 * Until those drivers are fixed, we detect the condition
@@ -1033,11 +1113,13 @@ ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
1033 * 1113 *
1034 * Kill the following code as soon as those drivers are fixed. 1114 * Kill the following code as soon as those drivers are fixed.
1035 */ 1115 */
1036 if (ap->flags & ATA_FLAG_PORT_DISABLED) { 1116 if (ap->flags & ATA_FLAG_DISABLED) {
1037 err_mask |= AC_ERR_SYSTEM; 1117 err_mask |= AC_ERR_SYSTEM;
1038 ata_port_probe(ap); 1118 ata_port_probe(ap);
1039 } 1119 }
1040 1120
1121 spin_unlock_irqrestore(&ap->host_set->lock, flags);
1122
1041 return err_mask; 1123 return err_mask;
1042} 1124}
1043 1125
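The timeout path in ata_exec_internal() only touches the qc if it is still marked active under the lock, then freezes the port when the driver has new-style EH and completes the qc in place otherwise. A single-threaded sketch of that decision, with made-up flag values standing in for the kernel's:

#include <stdio.h>

/* flag values are illustrative stand-ins, not the kernel's */
#define QCFLAG_ACTIVE   0x1u
#define AC_ERR_TIMEOUT  0x4u

struct qc { unsigned int flags, err_mask; };

/* after wait_for_completion_timeout() expires: decide what to do */
static const char *handle_timeout(struct qc *qc, int has_new_eh)
{
    if (!(qc->flags & QCFLAG_ACTIVE))
        return "irq won the race, qc already completed";
    qc->err_mask |= AC_ERR_TIMEOUT;
    return has_new_eh ? "port frozen, EH cleans up via ->post_internal_cmd"
                      : "qc completed here with AC_ERR_TIMEOUT";
}

int main(void)
{
    struct qc active = { QCFLAG_ACTIVE, 0 };
    struct qc done   = { 0, 0 };

    printf("%s\n", handle_timeout(&active, 1));
    printf("%s\n", handle_timeout(&done, 1));
    return 0;
}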
@@ -1076,11 +1158,10 @@ unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1076 1158
1077/** 1159/**
1078 * ata_dev_read_id - Read ID data from the specified device 1160 * ata_dev_read_id - Read ID data from the specified device
1079 * @ap: port on which target device resides
1080 * @dev: target device 1161 * @dev: target device
1081 * @p_class: pointer to class of the target device (may be changed) 1162 * @p_class: pointer to class of the target device (may be changed)
1082 * @post_reset: is this read ID post-reset? 1163 * @post_reset: is this read ID post-reset?
1083 * @p_id: read IDENTIFY page (newly allocated) 1164 * @id: buffer to read IDENTIFY data into
1084 * 1165 *
1085 * Read ID data from the specified device. ATA_CMD_ID_ATA is 1166 * Read ID data from the specified device. ATA_CMD_ID_ATA is
1086 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI 1167 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
@@ -1093,13 +1174,13 @@ unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1093 * RETURNS: 1174 * RETURNS:
1094 * 0 on success, -errno otherwise. 1175 * 0 on success, -errno otherwise.
1095 */ 1176 */
1096static int ata_dev_read_id(struct ata_port *ap, struct ata_device *dev, 1177int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1097 unsigned int *p_class, int post_reset, u16 **p_id) 1178 int post_reset, u16 *id)
1098{ 1179{
1180 struct ata_port *ap = dev->ap;
1099 unsigned int class = *p_class; 1181 unsigned int class = *p_class;
1100 struct ata_taskfile tf; 1182 struct ata_taskfile tf;
1101 unsigned int err_mask = 0; 1183 unsigned int err_mask = 0;
1102 u16 *id;
1103 const char *reason; 1184 const char *reason;
1104 int rc; 1185 int rc;
1105 1186
@@ -1107,15 +1188,8 @@ static int ata_dev_read_id(struct ata_port *ap, struct ata_device *dev,
1107 1188
1108 ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */ 1189 ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
1109 1190
1110 id = kmalloc(sizeof(id[0]) * ATA_ID_WORDS, GFP_KERNEL);
1111 if (id == NULL) {
1112 rc = -ENOMEM;
1113 reason = "out of memory";
1114 goto err_out;
1115 }
1116
1117 retry: 1191 retry:
1118 ata_tf_init(ap, &tf, dev->devno); 1192 ata_tf_init(dev, &tf);
1119 1193
1120 switch (class) { 1194 switch (class) {
1121 case ATA_DEV_ATA: 1195 case ATA_DEV_ATA:
@@ -1132,7 +1206,7 @@ static int ata_dev_read_id(struct ata_port *ap, struct ata_device *dev,
1132 1206
1133 tf.protocol = ATA_PROT_PIO; 1207 tf.protocol = ATA_PROT_PIO;
1134 1208
1135 err_mask = ata_exec_internal(ap, dev, &tf, DMA_FROM_DEVICE, 1209 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
1136 id, sizeof(id[0]) * ATA_ID_WORDS); 1210 id, sizeof(id[0]) * ATA_ID_WORDS);
1137 if (err_mask) { 1211 if (err_mask) {
1138 rc = -EIO; 1212 rc = -EIO;
@@ -1159,7 +1233,7 @@ static int ata_dev_read_id(struct ata_port *ap, struct ata_device *dev,
1159 * Some drives were very specific about that exact sequence. 1233 * Some drives were very specific about that exact sequence.
1160 */ 1234 */
1161 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) { 1235 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
1162 err_mask = ata_dev_init_params(ap, dev, id[3], id[6]); 1236 err_mask = ata_dev_init_params(dev, id[3], id[6]);
1163 if (err_mask) { 1237 if (err_mask) {
1164 rc = -EIO; 1238 rc = -EIO;
1165 reason = "INIT_DEV_PARAMS failed"; 1239 reason = "INIT_DEV_PARAMS failed";
@@ -1175,25 +1249,44 @@ static int ata_dev_read_id(struct ata_port *ap, struct ata_device *dev,
1175 } 1249 }
1176 1250
1177 *p_class = class; 1251 *p_class = class;
1178 *p_id = id; 1252
1179 return 0; 1253 return 0;
1180 1254
1181 err_out: 1255 err_out:
1182 printk(KERN_WARNING "ata%u: dev %u failed to IDENTIFY (%s)\n", 1256 ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
1183 ap->id, dev->devno, reason); 1257 "(%s, err_mask=0x%x)\n", reason, err_mask);
1184 kfree(id);
1185 return rc; 1258 return rc;
1186} 1259}
1187 1260
1188static inline u8 ata_dev_knobble(const struct ata_port *ap, 1261static inline u8 ata_dev_knobble(struct ata_device *dev)
1189 struct ata_device *dev)
1190{ 1262{
1191 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id))); 1263 return ((dev->ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
1264}
1265
1266static void ata_dev_config_ncq(struct ata_device *dev,
1267 char *desc, size_t desc_sz)
1268{
1269 struct ata_port *ap = dev->ap;
1270 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
1271
1272 if (!ata_id_has_ncq(dev->id)) {
1273 desc[0] = '\0';
1274 return;
1275 }
1276
1277 if (ap->flags & ATA_FLAG_NCQ) {
1278 hdepth = min(ap->host->can_queue, ATA_MAX_QUEUE - 1);
1279 dev->flags |= ATA_DFLAG_NCQ;
1280 }
1281
1282 if (hdepth >= ddepth)
1283 snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
1284 else
1285 snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
1192} 1286}
1193 1287
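ata_dev_config_ncq() advertises the smaller of the host's and the device's queue depth. A runnable sketch of the depth/description logic; the ATA_MAX_QUEUE value and the zero-depth-means-no-NCQ shortcut are simplifying assumptions:

#include <stdio.h>

#define ATA_MAX_QUEUE 32   /* assumed value, for illustration only */

/* hdepth: host queue limit (0 = host lacks NCQ); ddepth: device limit */
static void ncq_desc(char *buf, size_t sz, int hdepth, int ddepth)
{
    if (!ddepth) {          /* simplification: no NCQ support at all */
        buf[0] = '\0';
        return;
    }
    if (hdepth > ATA_MAX_QUEUE - 1)
        hdepth = ATA_MAX_QUEUE - 1;
    if (hdepth >= ddepth)
        snprintf(buf, sz, "NCQ (depth %d)", ddepth);
    else
        snprintf(buf, sz, "NCQ (depth %d/%d)", hdepth, ddepth);
}

int main(void)
{
    char buf[20];

    ncq_desc(buf, sizeof(buf), 31, 32);
    printf("%s\n", buf);    /* NCQ (depth 31/32) */
    ncq_desc(buf, sizeof(buf), 64, 16);
    printf("%s\n", buf);    /* NCQ (depth 16) */
    return 0;
}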
1194/** 1288/**
1195 * ata_dev_configure - Configure the specified ATA/ATAPI device 1289 * ata_dev_configure - Configure the specified ATA/ATAPI device
1196 * @ap: Port on which target device resides
1197 * @dev: Target device to configure 1290 * @dev: Target device to configure
1198 * @print_info: Enable device info printout 1291 * @print_info: Enable device info printout
1199 * 1292 *
@@ -1206,14 +1299,14 @@ static inline u8 ata_dev_knobble(const struct ata_port *ap,
1206 * RETURNS: 1299 * RETURNS:
1207 * 0 on success, -errno otherwise 1300 * 0 on success, -errno otherwise
1208 */ 1301 */
1209static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev, 1302int ata_dev_configure(struct ata_device *dev, int print_info)
1210 int print_info)
1211{ 1303{
1304 struct ata_port *ap = dev->ap;
1212 const u16 *id = dev->id; 1305 const u16 *id = dev->id;
1213 unsigned int xfer_mask; 1306 unsigned int xfer_mask;
1214 int i, rc; 1307 int i, rc;
1215 1308
1216 if (!ata_dev_present(dev)) { 1309 if (!ata_dev_enabled(dev)) {
1217 DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n", 1310 DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n",
1218 ap->id, dev->devno); 1311 ap->id, dev->devno);
1219 return 0; 1312 return 0;
@@ -1223,13 +1316,13 @@ static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
1223 1316
1224 /* print device capabilities */ 1317 /* print device capabilities */
1225 if (print_info) 1318 if (print_info)
1226 printk(KERN_DEBUG "ata%u: dev %u cfg 49:%04x 82:%04x 83:%04x " 1319 ata_dev_printk(dev, KERN_DEBUG, "cfg 49:%04x 82:%04x 83:%04x "
1227 "84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n", 1320 "84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n",
1228 ap->id, dev->devno, id[49], id[82], id[83], 1321 id[49], id[82], id[83], id[84],
1229 id[84], id[85], id[86], id[87], id[88]); 1322 id[85], id[86], id[87], id[88]);
1230 1323
1231 /* initialize to-be-configured parameters */ 1324 /* initialize to-be-configured parameters */
1232 dev->flags = 0; 1325 dev->flags &= ~ATA_DFLAG_CFG_MASK;
1233 dev->max_sectors = 0; 1326 dev->max_sectors = 0;
1234 dev->cdb_len = 0; 1327 dev->cdb_len = 0;
1235 dev->n_sectors = 0; 1328 dev->n_sectors = 0;
@@ -1252,6 +1345,7 @@ static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
1252 1345
1253 if (ata_id_has_lba(id)) { 1346 if (ata_id_has_lba(id)) {
1254 const char *lba_desc; 1347 const char *lba_desc;
1348 char ncq_desc[20];
1255 1349
1256 lba_desc = "LBA"; 1350 lba_desc = "LBA";
1257 dev->flags |= ATA_DFLAG_LBA; 1351 dev->flags |= ATA_DFLAG_LBA;
@@ -1260,15 +1354,17 @@ static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
1260 lba_desc = "LBA48"; 1354 lba_desc = "LBA48";
1261 } 1355 }
1262 1356
1357 /* config NCQ */
1358 ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
1359
1263 /* print device info to dmesg */ 1360 /* print device info to dmesg */
1264 if (print_info) 1361 if (print_info)
1265 printk(KERN_INFO "ata%u: dev %u ATA-%d, " 1362 ata_dev_printk(dev, KERN_INFO, "ATA-%d, "
1266 "max %s, %Lu sectors: %s\n", 1363 "max %s, %Lu sectors: %s %s\n",
1267 ap->id, dev->devno, 1364 ata_id_major_version(id),
1268 ata_id_major_version(id), 1365 ata_mode_string(xfer_mask),
1269 ata_mode_string(xfer_mask), 1366 (unsigned long long)dev->n_sectors,
1270 (unsigned long long)dev->n_sectors, 1367 lba_desc, ncq_desc);
1271 lba_desc);
1272 } else { 1368 } else {
1273 /* CHS */ 1369 /* CHS */
1274 1370
@@ -1286,13 +1382,18 @@ static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
1286 1382
1287 /* print device info to dmesg */ 1383 /* print device info to dmesg */
1288 if (print_info) 1384 if (print_info)
1289 printk(KERN_INFO "ata%u: dev %u ATA-%d, " 1385 ata_dev_printk(dev, KERN_INFO, "ATA-%d, "
1290 "max %s, %Lu sectors: CHS %u/%u/%u\n", 1386 "max %s, %Lu sectors: CHS %u/%u/%u\n",
1291 ap->id, dev->devno, 1387 ata_id_major_version(id),
1292 ata_id_major_version(id), 1388 ata_mode_string(xfer_mask),
1293 ata_mode_string(xfer_mask), 1389 (unsigned long long)dev->n_sectors,
1294 (unsigned long long)dev->n_sectors, 1390 dev->cylinders, dev->heads, dev->sectors);
1295 dev->cylinders, dev->heads, dev->sectors); 1391 }
1392
1393 if (dev->id[59] & 0x100) {
1394 dev->multi_count = dev->id[59] & 0xff;
1395 DPRINTK("ata%u: dev %u multi count %u\n",
1396 ap->id, dev->devno, dev->multi_count);
1296 } 1397 }
1297 1398
1298 dev->cdb_len = 16; 1399 dev->cdb_len = 16;
@@ -1300,18 +1401,27 @@ static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
1300 1401
1301 /* ATAPI-specific feature tests */ 1402 /* ATAPI-specific feature tests */
1302 else if (dev->class == ATA_DEV_ATAPI) { 1403 else if (dev->class == ATA_DEV_ATAPI) {
1404 char *cdb_intr_string = "";
1405
1303 rc = atapi_cdb_len(id); 1406 rc = atapi_cdb_len(id);
1304 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) { 1407 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
1305 printk(KERN_WARNING "ata%u: unsupported CDB len\n", ap->id); 1408 ata_dev_printk(dev, KERN_WARNING,
1409 "unsupported CDB len\n");
1306 rc = -EINVAL; 1410 rc = -EINVAL;
1307 goto err_out_nosup; 1411 goto err_out_nosup;
1308 } 1412 }
1309 dev->cdb_len = (unsigned int) rc; 1413 dev->cdb_len = (unsigned int) rc;
1310 1414
1415 if (ata_id_cdb_intr(dev->id)) {
1416 dev->flags |= ATA_DFLAG_CDB_INTR;
1417 cdb_intr_string = ", CDB intr";
1418 }
1419
1311 /* print device info to dmesg */ 1420 /* print device info to dmesg */
1312 if (print_info) 1421 if (print_info)
1313 printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n", 1422 ata_dev_printk(dev, KERN_INFO, "ATAPI, max %s%s\n",
1314 ap->id, dev->devno, ata_mode_string(xfer_mask)); 1423 ata_mode_string(xfer_mask),
1424 cdb_intr_string);
1315 } 1425 }
1316 1426
1317 ap->host->max_cmd_len = 0; 1427 ap->host->max_cmd_len = 0;
@@ -1321,10 +1431,10 @@ static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
1321 ap->device[i].cdb_len); 1431 ap->device[i].cdb_len);
1322 1432
1323 /* limit bridge transfers to udma5, 200 sectors */ 1433 /* limit bridge transfers to udma5, 200 sectors */
1324 if (ata_dev_knobble(ap, dev)) { 1434 if (ata_dev_knobble(dev)) {
1325 if (print_info) 1435 if (print_info)
1326 printk(KERN_INFO "ata%u(%u): applying bridge limits\n", 1436 ata_dev_printk(dev, KERN_INFO,
1327 ap->id, dev->devno); 1437 "applying bridge limits\n");
1328 dev->udma_mask &= ATA_UDMA5; 1438 dev->udma_mask &= ATA_UDMA5;
1329 dev->max_sectors = ATA_MAX_SECTORS; 1439 dev->max_sectors = ATA_MAX_SECTORS;
1330 } 1440 }
@@ -1352,79 +1462,104 @@ err_out_nosup:
1352 * PCI/etc. bus probe sem. 1462 * PCI/etc. bus probe sem.
1353 * 1463 *
1354 * RETURNS: 1464 * RETURNS:
1355 * Zero on success, non-zero on error. 1465 * Zero on success, negative errno otherwise.
1356 */ 1466 */
1357 1467
1358static int ata_bus_probe(struct ata_port *ap) 1468static int ata_bus_probe(struct ata_port *ap)
1359{ 1469{
1360 unsigned int classes[ATA_MAX_DEVICES]; 1470 unsigned int classes[ATA_MAX_DEVICES];
1361 unsigned int i, rc, found = 0; 1471 int tries[ATA_MAX_DEVICES];
1472 int i, rc, down_xfermask;
1473 struct ata_device *dev;
1362 1474
1363 ata_port_probe(ap); 1475 ata_port_probe(ap);
1364 1476
1365 /* reset and determine device classes */
1366 for (i = 0; i < ATA_MAX_DEVICES; i++) 1477 for (i = 0; i < ATA_MAX_DEVICES; i++)
1367 classes[i] = ATA_DEV_UNKNOWN; 1478 tries[i] = ATA_PROBE_MAX_TRIES;
1368 1479
1369 if (ap->ops->probe_reset) { 1480 retry:
1370 rc = ap->ops->probe_reset(ap, classes); 1481 down_xfermask = 0;
1371 if (rc) {
1372 printk("ata%u: reset failed (errno=%d)\n", ap->id, rc);
1373 return rc;
1374 }
1375 } else {
1376 ap->ops->phy_reset(ap);
1377 1482
1378 if (!(ap->flags & ATA_FLAG_PORT_DISABLED)) 1483 /* reset and determine device classes */
1379 for (i = 0; i < ATA_MAX_DEVICES; i++) 1484 ap->ops->phy_reset(ap);
1380 classes[i] = ap->device[i].class;
1381 1485
1382 ata_port_probe(ap); 1486 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1487 dev = &ap->device[i];
1488
1489 if (!(ap->flags & ATA_FLAG_DISABLED) &&
1490 dev->class != ATA_DEV_UNKNOWN)
1491 classes[dev->devno] = dev->class;
1492 else
1493 classes[dev->devno] = ATA_DEV_NONE;
1494
1495 dev->class = ATA_DEV_UNKNOWN;
1383 } 1496 }
1384 1497
1498 ata_port_probe(ap);
1499
 1500 /* after the reset, the device is in PIO 0 and the controller
 1501 state is undefined; record the mode */
1502
1385 for (i = 0; i < ATA_MAX_DEVICES; i++) 1503 for (i = 0; i < ATA_MAX_DEVICES; i++)
1386 if (classes[i] == ATA_DEV_UNKNOWN) 1504 ap->device[i].pio_mode = XFER_PIO_0;
1387 classes[i] = ATA_DEV_NONE;
1388 1505
1389 /* read IDENTIFY page and configure devices */ 1506 /* read IDENTIFY page and configure devices */
1390 for (i = 0; i < ATA_MAX_DEVICES; i++) { 1507 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1391 struct ata_device *dev = &ap->device[i]; 1508 dev = &ap->device[i];
1392 1509
1393 dev->class = classes[i]; 1510 if (tries[i])
1511 dev->class = classes[i];
1394 1512
1395 if (!ata_dev_present(dev)) 1513 if (!ata_dev_enabled(dev))
1396 continue; 1514 continue;
1397 1515
1398 WARN_ON(dev->id != NULL); 1516 rc = ata_dev_read_id(dev, &dev->class, 1, dev->id);
1399 if (ata_dev_read_id(ap, dev, &dev->class, 1, &dev->id)) { 1517 if (rc)
1400 dev->class = ATA_DEV_NONE; 1518 goto fail;
1401 continue;
1402 }
1403 1519
1404 if (ata_dev_configure(ap, dev, 1)) { 1520 rc = ata_dev_configure(dev, 1);
1405 ata_dev_disable(ap, dev); 1521 if (rc)
1406 continue; 1522 goto fail;
1407 } 1523 }
1408 1524
1409 found = 1; 1525 /* configure transfer mode */
1526 rc = ata_set_mode(ap, &dev);
1527 if (rc) {
1528 down_xfermask = 1;
1529 goto fail;
1410 } 1530 }
1411 1531
1412 if (!found) 1532 for (i = 0; i < ATA_MAX_DEVICES; i++)
1413 goto err_out_disable; 1533 if (ata_dev_enabled(&ap->device[i]))
1534 return 0;
1414 1535
1415 if (ap->ops->set_mode) 1536 /* no device present, disable port */
1416 ap->ops->set_mode(ap); 1537 ata_port_disable(ap);
1417 else 1538 ap->ops->port_disable(ap);
1418 ata_set_mode(ap); 1539 return -ENODEV;
1419 1540
1420 if (ap->flags & ATA_FLAG_PORT_DISABLED) 1541 fail:
1421 goto err_out_disable; 1542 switch (rc) {
1543 case -EINVAL:
1544 case -ENODEV:
1545 tries[dev->devno] = 0;
1546 break;
1547 case -EIO:
1548 sata_down_spd_limit(ap);
1549 /* fall through */
1550 default:
1551 tries[dev->devno]--;
1552 if (down_xfermask &&
1553 ata_down_xfermask_limit(dev, tries[dev->devno] == 1))
1554 tries[dev->devno] = 0;
1555 }
1422 1556
1423 return 0; 1557 if (!tries[dev->devno]) {
1558 ata_down_xfermask_limit(dev, 1);
1559 ata_dev_disable(dev);
1560 }
1424 1561
1425err_out_disable: 1562 goto retry;
1426 ap->ops->port_disable(ap);
1427 return -1;
1428} 1563}
1429 1564
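The retry budget in the fail path above reacts to the error code: -EINVAL and -ENODEV give up immediately, -EIO first steps the SATA link speed down, and everything else just burns a try, giving up early if the transfer mode can no longer be lowered. A compact model of that policy; the xfermask helper is a stub and the force-PIO0 detail is simplified:

#include <errno.h>
#include <stdio.h>

#define PROBE_MAX_TRIES 3   /* assumed stand-in for ATA_PROBE_MAX_TRIES */

/* stub: 0 = xfer mode could be lowered, nonzero = already at bottom */
static int down_xfermask_failed(void) { return 0; }

/* one failed probe attempt: return the remaining tries */
static int apply_fail_policy(int tries, int err, int down_xfermask)
{
    switch (err) {
    case EINVAL:
    case ENODEV:            /* hopeless: stop retrying at once */
        return 0;
    case EIO:               /* the kernel also steps the SATA link
                             * speed down here, then falls through */
    default:
        tries--;
        if (down_xfermask && down_xfermask_failed())
            return 0;       /* can't gear down any further: give up */
        return tries;
    }
}

int main(void)
{
    int tries = PROBE_MAX_TRIES;

    tries = apply_fail_policy(tries, EIO, 0);
    printf("after -EIO: %d tries left\n", tries);
    printf("after -ENODEV: %d tries left\n",
           apply_fail_policy(tries, ENODEV, 0));
    return 0;
}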
1430/** 1565/**
@@ -1440,7 +1575,7 @@ err_out_disable:
1440 1575
1441void ata_port_probe(struct ata_port *ap) 1576void ata_port_probe(struct ata_port *ap)
1442{ 1577{
1443 ap->flags &= ~ATA_FLAG_PORT_DISABLED; 1578 ap->flags &= ~ATA_FLAG_DISABLED;
1444} 1579}
1445 1580
1446/** 1581/**
@@ -1454,27 +1589,21 @@ void ata_port_probe(struct ata_port *ap)
1454 */ 1589 */
1455static void sata_print_link_status(struct ata_port *ap) 1590static void sata_print_link_status(struct ata_port *ap)
1456{ 1591{
1457 u32 sstatus, tmp; 1592 u32 sstatus, scontrol, tmp;
1458 const char *speed;
1459 1593
1460 if (!ap->ops->scr_read) 1594 if (sata_scr_read(ap, SCR_STATUS, &sstatus))
1461 return; 1595 return;
1596 sata_scr_read(ap, SCR_CONTROL, &scontrol);
1462 1597
1463 sstatus = scr_read(ap, SCR_STATUS); 1598 if (ata_port_online(ap)) {
1464
1465 if (sata_dev_present(ap)) {
1466 tmp = (sstatus >> 4) & 0xf; 1599 tmp = (sstatus >> 4) & 0xf;
1467 if (tmp & (1 << 0)) 1600 ata_port_printk(ap, KERN_INFO,
1468 speed = "1.5"; 1601 "SATA link up %s (SStatus %X SControl %X)\n",
1469 else if (tmp & (1 << 1)) 1602 sata_spd_string(tmp), sstatus, scontrol);
1470 speed = "3.0";
1471 else
1472 speed = "<unknown>";
1473 printk(KERN_INFO "ata%u: SATA link up %s Gbps (SStatus %X)\n",
1474 ap->id, speed, sstatus);
1475 } else { 1603 } else {
1476 printk(KERN_INFO "ata%u: SATA link down (SStatus %X)\n", 1604 ata_port_printk(ap, KERN_INFO,
1477 ap->id, sstatus); 1605 "SATA link down (SStatus %X SControl %X)\n",
1606 sstatus, scontrol);
1478 } 1607 }
1479} 1608}
1480 1609
@@ -1497,17 +1626,18 @@ void __sata_phy_reset(struct ata_port *ap)
1497 1626
1498 if (ap->flags & ATA_FLAG_SATA_RESET) { 1627 if (ap->flags & ATA_FLAG_SATA_RESET) {
1499 /* issue phy wake/reset */ 1628 /* issue phy wake/reset */
1500 scr_write_flush(ap, SCR_CONTROL, 0x301); 1629 sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
1501 /* Couldn't find anything in SATA I/II specs, but 1630 /* Couldn't find anything in SATA I/II specs, but
1502 * AHCI-1.1 10.4.2 says at least 1 ms. */ 1631 * AHCI-1.1 10.4.2 says at least 1 ms. */
1503 mdelay(1); 1632 mdelay(1);
1504 } 1633 }
1505 scr_write_flush(ap, SCR_CONTROL, 0x300); /* phy wake/clear reset */ 1634 /* phy wake/clear reset */
1635 sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
1506 1636
1507 /* wait for phy to become ready, if necessary */ 1637 /* wait for phy to become ready, if necessary */
1508 do { 1638 do {
1509 msleep(200); 1639 msleep(200);
1510 sstatus = scr_read(ap, SCR_STATUS); 1640 sata_scr_read(ap, SCR_STATUS, &sstatus);
1511 if ((sstatus & 0xf) != 1) 1641 if ((sstatus & 0xf) != 1)
1512 break; 1642 break;
1513 } while (time_before(jiffies, timeout)); 1643 } while (time_before(jiffies, timeout));
@@ -1516,12 +1646,12 @@ void __sata_phy_reset(struct ata_port *ap)
1516 sata_print_link_status(ap); 1646 sata_print_link_status(ap);
1517 1647
1518 /* TODO: phy layer with polling, timeouts, etc. */ 1648 /* TODO: phy layer with polling, timeouts, etc. */
1519 if (sata_dev_present(ap)) 1649 if (!ata_port_offline(ap))
1520 ata_port_probe(ap); 1650 ata_port_probe(ap);
1521 else 1651 else
1522 ata_port_disable(ap); 1652 ata_port_disable(ap);
1523 1653
1524 if (ap->flags & ATA_FLAG_PORT_DISABLED) 1654 if (ap->flags & ATA_FLAG_DISABLED)
1525 return; 1655 return;
1526 1656
1527 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) { 1657 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
@@ -1546,24 +1676,24 @@ void __sata_phy_reset(struct ata_port *ap)
1546void sata_phy_reset(struct ata_port *ap) 1676void sata_phy_reset(struct ata_port *ap)
1547{ 1677{
1548 __sata_phy_reset(ap); 1678 __sata_phy_reset(ap);
1549 if (ap->flags & ATA_FLAG_PORT_DISABLED) 1679 if (ap->flags & ATA_FLAG_DISABLED)
1550 return; 1680 return;
1551 ata_bus_reset(ap); 1681 ata_bus_reset(ap);
1552} 1682}
1553 1683
1554/** 1684/**
1555 * ata_dev_pair - return other device on cable 1685 * ata_dev_pair - return other device on cable
1556 * @ap: port
1557 * @adev: device 1686 * @adev: device
1558 * 1687 *
1559 * Obtain the other device on the same cable, or if none is 1688 * Obtain the other device on the same cable, or if none is
1560 * present NULL is returned 1689 * present NULL is returned
1561 */ 1690 */
1562 1691
1563struct ata_device *ata_dev_pair(struct ata_port *ap, struct ata_device *adev) 1692struct ata_device *ata_dev_pair(struct ata_device *adev)
1564{ 1693{
1694 struct ata_port *ap = adev->ap;
1565 struct ata_device *pair = &ap->device[1 - adev->devno]; 1695 struct ata_device *pair = &ap->device[1 - adev->devno];
1566 if (!ata_dev_present(pair)) 1696 if (!ata_dev_enabled(pair))
1567 return NULL; 1697 return NULL;
1568 return pair; 1698 return pair;
1569} 1699}
@@ -1585,7 +1715,122 @@ void ata_port_disable(struct ata_port *ap)
1585{ 1715{
1586 ap->device[0].class = ATA_DEV_NONE; 1716 ap->device[0].class = ATA_DEV_NONE;
1587 ap->device[1].class = ATA_DEV_NONE; 1717 ap->device[1].class = ATA_DEV_NONE;
1588 ap->flags |= ATA_FLAG_PORT_DISABLED; 1718 ap->flags |= ATA_FLAG_DISABLED;
1719}
1720
1721/**
1722 * sata_down_spd_limit - adjust SATA spd limit downward
1723 * @ap: Port to adjust SATA spd limit for
1724 *
1725 * Adjust SATA spd limit of @ap downward. Note that this
1726 * function only adjusts the limit. The change must be applied
1727 * using sata_set_spd().
1728 *
1729 * LOCKING:
1730 * Inherited from caller.
1731 *
1732 * RETURNS:
1733 * 0 on success, negative errno on failure
1734 */
1735int sata_down_spd_limit(struct ata_port *ap)
1736{
1737 u32 sstatus, spd, mask;
1738 int rc, highbit;
1739
1740 rc = sata_scr_read(ap, SCR_STATUS, &sstatus);
1741 if (rc)
1742 return rc;
1743
1744 mask = ap->sata_spd_limit;
1745 if (mask <= 1)
1746 return -EINVAL;
1747 highbit = fls(mask) - 1;
1748 mask &= ~(1 << highbit);
1749
1750 spd = (sstatus >> 4) & 0xf;
1751 if (spd <= 1)
1752 return -EINVAL;
1753 spd--;
1754 mask &= (1 << spd) - 1;
1755 if (!mask)
1756 return -EINVAL;
1757
1758 ap->sata_spd_limit = mask;
1759
1760 ata_port_printk(ap, KERN_WARNING, "limiting SATA link speed to %s\n",
1761 sata_spd_string(fls(mask)));
1762
1763 return 0;
1764}
1765
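sata_down_spd_limit() treats the limit as a bitmask of allowed generations: it clears the currently highest allowed bit and additionally caps the mask below the speed negotiated in SStatus. A runnable model of the bit math, using a local fls() built on the GCC/Clang __builtin_clz:

#include <stdio.h>

/* find-last-set, 1-based; relies on the GCC/Clang builtin */
static int fls(unsigned int x) { return x ? 32 - __builtin_clz(x) : 0; }

/* limit: bitmask of allowed generations (bit 0 = Gen1, bit 1 = Gen2);
 * spd: speed currently reported in SStatus (1 = Gen1, 2 = Gen2) */
static int down_spd_limit(unsigned int *limit, unsigned int spd)
{
    unsigned int mask = *limit;

    if (mask <= 1 || spd <= 1)
        return -1;                     /* nothing slower to fall back to */
    mask &= ~(1u << (fls(mask) - 1));  /* drop the highest allowed gen */
    mask &= (1u << (spd - 1)) - 1;     /* and stay below the current speed */
    if (!mask)
        return -1;
    *limit = mask;
    return 0;
}

int main(void)
{
    unsigned int limit = 0x3;          /* Gen1 | Gen2 allowed */

    if (!down_spd_limit(&limit, 2))
        printf("new limit mask: 0x%x (Gen1 only)\n", limit);
    return 0;
}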
1766static int __sata_set_spd_needed(struct ata_port *ap, u32 *scontrol)
1767{
1768 u32 spd, limit;
1769
1770 if (ap->sata_spd_limit == UINT_MAX)
1771 limit = 0;
1772 else
1773 limit = fls(ap->sata_spd_limit);
1774
1775 spd = (*scontrol >> 4) & 0xf;
1776 *scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);
1777
1778 return spd != limit;
1779}
1780
1781/**
1782 * sata_set_spd_needed - is SATA spd configuration needed
1783 * @ap: Port in question
1784 *
1785 * Test whether the spd limit in SControl matches
1786 * @ap->sata_spd_limit. This function is used to determine
1787 * whether hardreset is necessary to apply SATA spd
1788 * configuration.
1789 *
1790 * LOCKING:
1791 * Inherited from caller.
1792 *
1793 * RETURNS:
1794 * 1 if SATA spd configuration is needed, 0 otherwise.
1795 */
1796int sata_set_spd_needed(struct ata_port *ap)
1797{
1798 u32 scontrol;
1799
1800 if (sata_scr_read(ap, SCR_CONTROL, &scontrol))
1801 return 0;
1802
1803 return __sata_set_spd_needed(ap, &scontrol);
1804}
1805
1806/**
1807 * sata_set_spd - set SATA spd according to spd limit
1808 * @ap: Port to set SATA spd for
1809 *
1810 * Set SATA spd of @ap according to sata_spd_limit.
1811 *
1812 * LOCKING:
1813 * Inherited from caller.
1814 *
1815 * RETURNS:
1816 * 0 if spd doesn't need to be changed, 1 if spd has been
1817 * changed. Negative errno if SCR registers are inaccessible.
1818 */
1819int sata_set_spd(struct ata_port *ap)
1820{
1821 u32 scontrol;
1822 int rc;
1823
1824 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
1825 return rc;
1826
1827 if (!__sata_set_spd_needed(ap, &scontrol))
1828 return 0;
1829
1830 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
1831 return rc;
1832
1833 return 1;
1589} 1834}
1590 1835
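SControl's SPD field (bits 7:4) carries the speed cap, and __sata_set_spd_needed() rewrites just that nibble from the fls of the limit mask, reporting whether a hardreset is needed to apply it. A small model:

#include <stdio.h>

static int fls(unsigned int x) { return x ? 32 - __builtin_clz(x) : 0; }

/* rewrite SPD (bits 7:4) of scontrol from the limit mask;
 * returns 1 when the value changed, i.e. a hardreset must apply it */
static int set_spd_needed(unsigned int *scontrol, unsigned int spd_limit)
{
    unsigned int limit = (spd_limit == ~0u) ? 0 : fls(spd_limit);
    unsigned int old = (*scontrol >> 4) & 0xf;

    *scontrol = (*scontrol & ~0xf0u) | ((limit & 0xf) << 4);
    return old != limit;
}

int main(void)
{
    unsigned int scontrol = 0x300;        /* SPD = 0: no speed cap */

    if (set_spd_needed(&scontrol, 0x1))   /* cap the link at Gen1 */
        printf("SControl now 0x%x, hardreset needed\n", scontrol);
    return 0;
}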
1591/* 1836/*
@@ -1736,151 +1981,196 @@ int ata_timing_compute(struct ata_device *adev, unsigned short speed,
1736 return 0; 1981 return 0;
1737} 1982}
1738 1983
1739static int ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev) 1984/**
1985 * ata_down_xfermask_limit - adjust dev xfer masks downward
1986 * @dev: Device to adjust xfer masks
1987 * @force_pio0: Force PIO0
1988 *
1989 * Adjust xfer masks of @dev downward. Note that this function
1990 * does not apply the change. Invoking ata_set_mode() afterwards
1991 * will apply the limit.
1992 *
1993 * LOCKING:
1994 * Inherited from caller.
1995 *
1996 * RETURNS:
1997 * 0 on success, negative errno on failure
1998 */
1999int ata_down_xfermask_limit(struct ata_device *dev, int force_pio0)
2000{
2001 unsigned long xfer_mask;
2002 int highbit;
2003
2004 xfer_mask = ata_pack_xfermask(dev->pio_mask, dev->mwdma_mask,
2005 dev->udma_mask);
2006
2007 if (!xfer_mask)
2008 goto fail;
2009 /* don't gear down to MWDMA from UDMA, go directly to PIO */
2010 if (xfer_mask & ATA_MASK_UDMA)
2011 xfer_mask &= ~ATA_MASK_MWDMA;
2012
2013 highbit = fls(xfer_mask) - 1;
2014 xfer_mask &= ~(1 << highbit);
2015 if (force_pio0)
2016 xfer_mask &= 1 << ATA_SHIFT_PIO;
2017 if (!xfer_mask)
2018 goto fail;
2019
2020 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
2021 &dev->udma_mask);
2022
2023 ata_dev_printk(dev, KERN_WARNING, "limiting speed to %s\n",
2024 ata_mode_string(xfer_mask));
2025
2026 return 0;
2027
2028 fail:
2029 return -EINVAL;
2030}
2031
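ata_down_xfermask_limit() works on one packed word holding the PIO, MWDMA, and UDMA masks at fixed shifts: it refuses to gear down from UDMA into MWDMA, strips the highest remaining bit, and can force plain PIO0. A self-contained model; the shift and width constants are assumptions mirroring the ATA_SHIFT_*/ATA_MASK_* layout, not the kernel's exact values:

#include <stdio.h>

/* assumed packed layout: PIO in bits 0-6, MWDMA 8-12, UDMA 16-23 */
#define SHIFT_PIO    0
#define SHIFT_MWDMA  8
#define SHIFT_UDMA   16
#define MASK_MWDMA   (0x1fu << SHIFT_MWDMA)
#define MASK_UDMA    (0xffu << SHIFT_UDMA)

static int fls(unsigned int x) { return x ? 32 - __builtin_clz(x) : 0; }

static int down_xfermask(unsigned int *xfer_mask, int force_pio0)
{
    unsigned int m = *xfer_mask;

    if (!m)
        return -1;
    if (m & MASK_UDMA)            /* never gear down from UDMA to MWDMA */
        m &= ~MASK_MWDMA;
    m &= ~(1u << (fls(m) - 1));   /* strip the current top mode */
    if (force_pio0)
        m &= 1u << SHIFT_PIO;     /* keep PIO0 only */
    if (!m)
        return -1;
    *xfer_mask = m;
    return 0;
}

int main(void)
{
    unsigned int m = (0x3fu << SHIFT_UDMA) | (0x7u << SHIFT_MWDMA) | 0x1fu;

    while (!down_xfermask(&m, 0))
        printf("limited to mask 0x%06x\n", m);
    return 0;
}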
2032static int ata_dev_set_mode(struct ata_device *dev)
1740{ 2033{
1741 unsigned int err_mask; 2034 unsigned int err_mask;
1742 int rc; 2035 int rc;
1743 2036
2037 dev->flags &= ~ATA_DFLAG_PIO;
1744 if (dev->xfer_shift == ATA_SHIFT_PIO) 2038 if (dev->xfer_shift == ATA_SHIFT_PIO)
1745 dev->flags |= ATA_DFLAG_PIO; 2039 dev->flags |= ATA_DFLAG_PIO;
1746 2040
1747 err_mask = ata_dev_set_xfermode(ap, dev); 2041 err_mask = ata_dev_set_xfermode(dev);
1748 if (err_mask) { 2042 if (err_mask) {
1749 printk(KERN_ERR 2043 ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
1750 "ata%u: failed to set xfermode (err_mask=0x%x)\n", 2044 "(err_mask=0x%x)\n", err_mask);
1751 ap->id, err_mask);
1752 return -EIO; 2045 return -EIO;
1753 } 2046 }
1754 2047
1755 rc = ata_dev_revalidate(ap, dev, 0); 2048 rc = ata_dev_revalidate(dev, 0);
1756 if (rc) { 2049 if (rc)
1757 printk(KERN_ERR
1758 "ata%u: failed to revalidate after set xfermode\n",
1759 ap->id);
1760 return rc; 2050 return rc;
1761 }
1762 2051
1763 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n", 2052 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
1764 dev->xfer_shift, (int)dev->xfer_mode); 2053 dev->xfer_shift, (int)dev->xfer_mode);
1765 2054
1766 printk(KERN_INFO "ata%u: dev %u configured for %s\n", 2055 ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
1767 ap->id, dev->devno, 2056 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
1768 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
1769 return 0;
1770}
1771
1772static int ata_host_set_pio(struct ata_port *ap)
1773{
1774 int i;
1775
1776 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1777 struct ata_device *dev = &ap->device[i];
1778
1779 if (!ata_dev_present(dev))
1780 continue;
1781
1782 if (!dev->pio_mode) {
1783 printk(KERN_WARNING "ata%u: no PIO support for device %d.\n", ap->id, i);
1784 return -1;
1785 }
1786
1787 dev->xfer_mode = dev->pio_mode;
1788 dev->xfer_shift = ATA_SHIFT_PIO;
1789 if (ap->ops->set_piomode)
1790 ap->ops->set_piomode(ap, dev);
1791 }
1792
1793 return 0; 2057 return 0;
1794} 2058}
1795 2059
1796static void ata_host_set_dma(struct ata_port *ap)
1797{
1798 int i;
1799
1800 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1801 struct ata_device *dev = &ap->device[i];
1802
1803 if (!ata_dev_present(dev) || !dev->dma_mode)
1804 continue;
1805
1806 dev->xfer_mode = dev->dma_mode;
1807 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
1808 if (ap->ops->set_dmamode)
1809 ap->ops->set_dmamode(ap, dev);
1810 }
1811}
1812
1813/** 2060/**
1814 * ata_set_mode - Program timings and issue SET FEATURES - XFER 2061 * ata_set_mode - Program timings and issue SET FEATURES - XFER
1815 * @ap: port on which timings will be programmed 2062 * @ap: port on which timings will be programmed
 2063 * @r_failed_dev: out parameter for failed device
1816 * 2064 *
1817 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). 2065 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
2066 * ata_set_mode() fails, pointer to the failing device is
2067 * returned in @r_failed_dev.
1818 * 2068 *
1819 * LOCKING: 2069 * LOCKING:
1820 * PCI/etc. bus probe sem. 2070 * PCI/etc. bus probe sem.
2071 *
2072 * RETURNS:
2073 * 0 on success, negative errno otherwise
1821 */ 2074 */
1822static void ata_set_mode(struct ata_port *ap) 2075int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
1823{ 2076{
1824 int i, rc, used_dma = 0; 2077 struct ata_device *dev;
2078 int i, rc = 0, used_dma = 0, found = 0;
2079
2080 /* has private set_mode? */
2081 if (ap->ops->set_mode) {
2082 /* FIXME: make ->set_mode handle no device case and
2083 * return error code and failing device on failure.
2084 */
2085 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2086 if (ata_dev_enabled(&ap->device[i])) {
2087 ap->ops->set_mode(ap);
2088 break;
2089 }
2090 }
2091 return 0;
2092 }
1825 2093
1826 /* step 1: calculate xfer_mask */ 2094 /* step 1: calculate xfer_mask */
1827 for (i = 0; i < ATA_MAX_DEVICES; i++) { 2095 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1828 struct ata_device *dev = &ap->device[i];
1829 unsigned int pio_mask, dma_mask; 2096 unsigned int pio_mask, dma_mask;
1830 2097
1831 if (!ata_dev_present(dev)) 2098 dev = &ap->device[i];
1832 continue;
1833 2099
1834 ata_dev_xfermask(ap, dev); 2100 if (!ata_dev_enabled(dev))
2101 continue;
1835 2102
1836 /* TODO: let LLDD filter dev->*_mask here */ 2103 ata_dev_xfermask(dev);
1837 2104
1838 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0); 2105 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
1839 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask); 2106 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
1840 dev->pio_mode = ata_xfer_mask2mode(pio_mask); 2107 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
1841 dev->dma_mode = ata_xfer_mask2mode(dma_mask); 2108 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
1842 2109
2110 found = 1;
1843 if (dev->dma_mode) 2111 if (dev->dma_mode)
1844 used_dma = 1; 2112 used_dma = 1;
1845 } 2113 }
2114 if (!found)
2115 goto out;
1846 2116
1847 /* step 2: always set host PIO timings */ 2117 /* step 2: always set host PIO timings */
1848 rc = ata_host_set_pio(ap); 2118 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1849 if (rc) 2119 dev = &ap->device[i];
1850 goto err_out; 2120 if (!ata_dev_enabled(dev))
2121 continue;
2122
2123 if (!dev->pio_mode) {
2124 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
2125 rc = -EINVAL;
2126 goto out;
2127 }
2128
2129 dev->xfer_mode = dev->pio_mode;
2130 dev->xfer_shift = ATA_SHIFT_PIO;
2131 if (ap->ops->set_piomode)
2132 ap->ops->set_piomode(ap, dev);
2133 }
1851 2134
1852 /* step 3: set host DMA timings */ 2135 /* step 3: set host DMA timings */
1853 ata_host_set_dma(ap); 2136 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2137 dev = &ap->device[i];
2138
2139 if (!ata_dev_enabled(dev) || !dev->dma_mode)
2140 continue;
2141
2142 dev->xfer_mode = dev->dma_mode;
2143 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
2144 if (ap->ops->set_dmamode)
2145 ap->ops->set_dmamode(ap, dev);
2146 }
1854 2147
1855 /* step 4: update devices' xfer mode */ 2148 /* step 4: update devices' xfer mode */
1856 for (i = 0; i < ATA_MAX_DEVICES; i++) { 2149 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1857 struct ata_device *dev = &ap->device[i]; 2150 dev = &ap->device[i];
1858 2151
1859 if (!ata_dev_present(dev)) 2152 if (!ata_dev_enabled(dev))
1860 continue; 2153 continue;
1861 2154
1862 if (ata_dev_set_mode(ap, dev)) 2155 rc = ata_dev_set_mode(dev);
1863 goto err_out; 2156 if (rc)
2157 goto out;
1864 } 2158 }
1865 2159
1866 /* 2160 /* Record simplex status. If we selected DMA then the other
1867 * Record simplex status. If we selected DMA then the other 2161 * host channels are not permitted to do so.
1868 * host channels are not permitted to do so.
1869 */ 2162 */
1870
1871 if (used_dma && (ap->host_set->flags & ATA_HOST_SIMPLEX)) 2163 if (used_dma && (ap->host_set->flags & ATA_HOST_SIMPLEX))
1872 ap->host_set->simplex_claimed = 1; 2164 ap->host_set->simplex_claimed = 1;
1873 2165
 1874 /* 2166 /* step 5: chip specific finalisation */
1875 * Chip specific finalisation
1876 */
1877 if (ap->ops->post_set_mode) 2167 if (ap->ops->post_set_mode)
1878 ap->ops->post_set_mode(ap); 2168 ap->ops->post_set_mode(ap);
1879 2169
1880 return; 2170 out:
1881 2171 if (rc)
1882err_out: 2172 *r_failed_dev = dev;
1883 ata_port_disable(ap); 2173 return rc;
1884} 2174}
1885 2175
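Step 1 above boils down to picking the best mode out of each packed mask. A sketch of that selection, assuming the usual encoding where a transfer mode is a base id plus the mask's highest set bit (the XFER_PIO_0/XFER_UDMA_0 values here are assumptions):

#include <stdio.h>

static int fls(unsigned int x) { return x ? 32 - __builtin_clz(x) : 0; }

/* assumed encoding: a transfer mode is a base id plus the mask's
 * highest set bit (these base values are illustrative) */
#define XFER_PIO_0   0x08
#define XFER_UDMA_0  0x40

static unsigned int mask2mode(unsigned int mask, unsigned int base)
{
    return mask ? base + fls(mask) - 1 : 0;
}

int main(void)
{
    unsigned int pio_mask = 0x1f, udma_mask = 0x3f;

    /* step 1: pick the best PIO and DMA mode for a device */
    printf("pio_mode 0x%02x, dma_mode 0x%02x\n",
           mask2mode(pio_mask, XFER_PIO_0),    /* PIO4 */
           mask2mode(udma_mask, XFER_UDMA_0)); /* UDMA5 */
    /* steps 2-4 then program host PIO timings, host DMA timings,
     * and issue SET FEATURES - XFER to each enabled device */
    return 0;
}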
1886/** 2176/**
@@ -1930,8 +2220,8 @@ unsigned int ata_busy_sleep (struct ata_port *ap,
1930 } 2220 }
1931 2221
1932 if (status & ATA_BUSY) 2222 if (status & ATA_BUSY)
1933 printk(KERN_WARNING "ata%u is slow to respond, " 2223 ata_port_printk(ap, KERN_WARNING,
1934 "please be patient\n", ap->id); 2224 "port is slow to respond, please be patient\n");
1935 2225
1936 timeout = timer_start + tmout; 2226 timeout = timer_start + tmout;
1937 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) { 2227 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
@@ -1940,8 +2230,8 @@ unsigned int ata_busy_sleep (struct ata_port *ap,
1940 } 2230 }
1941 2231
1942 if (status & ATA_BUSY) { 2232 if (status & ATA_BUSY) {
1943 printk(KERN_ERR "ata%u failed to respond (%lu secs)\n", 2233 ata_port_printk(ap, KERN_ERR, "port failed to respond "
1944 ap->id, tmout / HZ); 2234 "(%lu secs)\n", tmout / HZ);
1945 return 1; 2235 return 1;
1946 } 2236 }
1947 2237
@@ -2033,8 +2323,10 @@ static unsigned int ata_bus_softreset(struct ata_port *ap,
2033 * the bus shows 0xFF because the odd clown forgets the D7 2323 * the bus shows 0xFF because the odd clown forgets the D7
2034 * pulldown resistor. 2324 * pulldown resistor.
2035 */ 2325 */
2036 if (ata_check_status(ap) == 0xFF) 2326 if (ata_check_status(ap) == 0xFF) {
2327 ata_port_printk(ap, KERN_ERR, "SRST failed (status 0xFF)\n");
2037 return AC_ERR_OTHER; 2328 return AC_ERR_OTHER;
2329 }
2038 2330
2039 ata_bus_post_reset(ap, devmask); 2331 ata_bus_post_reset(ap, devmask);
2040 2332
@@ -2058,7 +2350,7 @@ static unsigned int ata_bus_softreset(struct ata_port *ap,
2058 * Obtains host_set lock. 2350 * Obtains host_set lock.
2059 * 2351 *
2060 * SIDE EFFECTS: 2352 * SIDE EFFECTS:
2061 * Sets ATA_FLAG_PORT_DISABLED if bus reset fails. 2353 * Sets ATA_FLAG_DISABLED if bus reset fails.
2062 */ 2354 */
2063 2355
2064void ata_bus_reset(struct ata_port *ap) 2356void ata_bus_reset(struct ata_port *ap)
@@ -2126,60 +2418,195 @@ void ata_bus_reset(struct ata_port *ap)
2126 return; 2418 return;
2127 2419
2128err_out: 2420err_out:
2129 printk(KERN_ERR "ata%u: disabling port\n", ap->id); 2421 ata_port_printk(ap, KERN_ERR, "disabling port\n");
2130 ap->ops->port_disable(ap); 2422 ap->ops->port_disable(ap);
2131 2423
2132 DPRINTK("EXIT\n"); 2424 DPRINTK("EXIT\n");
2133} 2425}
2134 2426
2135static int sata_phy_resume(struct ata_port *ap) 2427/**
2428 * sata_phy_debounce - debounce SATA phy status
2429 * @ap: ATA port to debounce SATA phy status for
 2430 * @params: timing parameters { interval, duration, timeout } in msec
2431 *
2432 * Make sure SStatus of @ap reaches stable state, determined by
2433 * holding the same value where DET is not 1 for @duration polled
2434 * every @interval, before @timeout. Timeout constraints the
2435 * beginning of the stable state. Because, after hot unplugging,
2436 * DET gets stuck at 1 on some controllers, this functions waits
2437 * until timeout then returns 0 if DET is stable at 1.
2438 *
2439 * LOCKING:
2440 * Kernel thread context (may sleep)
2441 *
2442 * RETURNS:
2443 * 0 on success, -errno on failure.
2444 */
2445int sata_phy_debounce(struct ata_port *ap, const unsigned long *params)
2136{ 2446{
2137 unsigned long timeout = jiffies + (HZ * 5); 2447 unsigned long interval_msec = params[0];
2138 u32 sstatus; 2448 unsigned long duration = params[1] * HZ / 1000;
2449 unsigned long timeout = jiffies + params[2] * HZ / 1000;
2450 unsigned long last_jiffies;
2451 u32 last, cur;
2452 int rc;
2139 2453
2140 scr_write_flush(ap, SCR_CONTROL, 0x300); 2454 if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
2455 return rc;
2456 cur &= 0xf;
2141 2457
2142 /* Wait for phy to become ready, if necessary. */ 2458 last = cur;
2143 do { 2459 last_jiffies = jiffies;
2144 msleep(200);
2145 sstatus = scr_read(ap, SCR_STATUS);
2146 if ((sstatus & 0xf) != 1)
2147 return 0;
2148 } while (time_before(jiffies, timeout));
2149 2460
2150 return -1; 2461 while (1) {
2462 msleep(interval_msec);
2463 if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
2464 return rc;
2465 cur &= 0xf;
2466
2467 /* DET stable? */
2468 if (cur == last) {
2469 if (cur == 1 && time_before(jiffies, timeout))
2470 continue;
2471 if (time_after(jiffies, last_jiffies + duration))
2472 return 0;
2473 continue;
2474 }
2475
2476 /* unstable, start over */
2477 last = cur;
2478 last_jiffies = jiffies;
2479
2480 /* check timeout */
2481 if (time_after(jiffies, timeout))
2482 return -EBUSY;
2483 }
2151} 2484}
2152 2485
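The debounce loop declares the link stable once DET has held the same non-1 value for the whole duration window, tolerates DET stuck at 1 until the timeout, and restarts the window whenever the value changes. A deterministic userspace model with a virtual tick clock and canned DET samples:

#include <stdio.h>

/* debounce over canned DET samples, one sample per interval tick */
static int debounce(const int *det, int n, int duration, int timeout)
{
    int last = det[0], last_t = 0;

    for (int t = 1; t < n; t++) {
        int cur = det[t];

        if (cur == last) {
            if (cur == 1 && t < timeout)
                continue;              /* DET stuck at 1: keep waiting */
            if (t - last_t >= duration)
                return 0;              /* held steady long enough */
            continue;
        }
        last = cur;                    /* value changed: restart window */
        last_t = t;
        if (t > timeout)
            return -1;                 /* -EBUSY in the kernel */
    }
    return -1;
}

int main(void)
{
    int samples[] = { 1, 1, 3, 3, 3, 3, 3, 3 };

    printf("debounce: %d\n",
           debounce(samples, 8, /*duration*/ 3, /*timeout*/ 6));
    return 0;
}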
2153/** 2486/**
2154 * ata_std_probeinit - initialize probing 2487 * sata_phy_resume - resume SATA phy
2155 * @ap: port to be probed 2488 * @ap: ATA port to resume SATA phy for
 2489 * @params: timing parameters { interval, duration, timeout } in msec
2156 * 2490 *
2157 * @ap is about to be probed. Initialize it. This function is 2491 * Resume SATA phy of @ap and debounce it.
2158 * to be used as standard callback for ata_drive_probe_reset().
2159 * 2492 *
2160 * NOTE!!! Do not use this function as probeinit if a low level 2493 * LOCKING:
2161 * driver implements only hardreset. Just pass NULL as probeinit 2494 * Kernel thread context (may sleep)
2162 * in that case. Using this function is probably okay but doing 2495 *
2163 * so makes reset sequence different from the original 2496 * RETURNS:
2164 * ->phy_reset implementation and Jeff nervous. :-P 2497 * 0 on success, -errno on failure.
2165 */ 2498 */
2166void ata_std_probeinit(struct ata_port *ap) 2499int sata_phy_resume(struct ata_port *ap, const unsigned long *params)
2167{ 2500{
2168 if ((ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read) { 2501 u32 scontrol;
2169 sata_phy_resume(ap); 2502 int rc;
2170 if (sata_dev_present(ap)) 2503
2171 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT); 2504 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2505 return rc;
2506
2507 scontrol = (scontrol & 0x0f0) | 0x300;
2508
2509 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2510 return rc;
2511
2512 /* Some PHYs react badly if SStatus is pounded immediately
2513 * after resuming. Delay 200ms before debouncing.
2514 */
2515 msleep(200);
2516
2517 return sata_phy_debounce(ap, params);
2518}
2519
2520static void ata_wait_spinup(struct ata_port *ap)
2521{
2522 struct ata_eh_context *ehc = &ap->eh_context;
2523 unsigned long end, secs;
2524 int rc;
2525
2526 /* first, debounce phy if SATA */
2527 if (ap->cbl == ATA_CBL_SATA) {
2528 rc = sata_phy_debounce(ap, sata_deb_timing_eh);
2529
2530 /* if debounced successfully and offline, no need to wait */
2531 if ((rc == 0 || rc == -EOPNOTSUPP) && ata_port_offline(ap))
2532 return;
2172 } 2533 }
2534
2535 /* okay, let's give the drive time to spin up */
2536 end = ehc->i.hotplug_timestamp + ATA_SPINUP_WAIT * HZ / 1000;
2537 secs = ((end - jiffies) + HZ - 1) / HZ;
2538
2539 if (time_after(jiffies, end))
2540 return;
2541
2542 if (secs > 5)
2543 ata_port_printk(ap, KERN_INFO, "waiting for device to spin up "
2544 "(%lu secs)\n", secs);
2545
2546 schedule_timeout_uninterruptible(end - jiffies);
2547}
2548
2549/**
2550 * ata_std_prereset - prepare for reset
2551 * @ap: ATA port to be reset
2552 *
2553 * @ap is about to be reset. Initialize it.
2554 *
2555 * LOCKING:
2556 * Kernel thread context (may sleep)
2557 *
2558 * RETURNS:
2559 * 0 on success, -errno otherwise.
2560 */
2561int ata_std_prereset(struct ata_port *ap)
2562{
2563 struct ata_eh_context *ehc = &ap->eh_context;
2564 const unsigned long *timing;
2565 int rc;
2566
2567 /* hotplug? */
2568 if (ehc->i.flags & ATA_EHI_HOTPLUGGED) {
2569 if (ap->flags & ATA_FLAG_HRST_TO_RESUME)
2570 ehc->i.action |= ATA_EH_HARDRESET;
2571 if (ap->flags & ATA_FLAG_SKIP_D2H_BSY)
2572 ata_wait_spinup(ap);
2573 }
2574
2575 /* if we're about to do hardreset, nothing more to do */
2576 if (ehc->i.action & ATA_EH_HARDRESET)
2577 return 0;
2578
2579 /* if SATA, resume phy */
2580 if (ap->cbl == ATA_CBL_SATA) {
2581 if (ap->flags & ATA_FLAG_LOADING)
2582 timing = sata_deb_timing_boot;
2583 else
2584 timing = sata_deb_timing_eh;
2585
2586 rc = sata_phy_resume(ap, timing);
2587 if (rc && rc != -EOPNOTSUPP) {
2588 /* phy resume failed */
2589 ata_port_printk(ap, KERN_WARNING, "failed to resume "
2590 "link for reset (errno=%d)\n", rc);
2591 return rc;
2592 }
2593 }
2594
2595 /* Wait for !BSY if the controller can wait for the first D2H
2596 * Reg FIS and we don't know that no device is attached.
2597 */
2598 if (!(ap->flags & ATA_FLAG_SKIP_D2H_BSY) && !ata_port_offline(ap))
2599 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2600
2601 return 0;
2173} 2602}
2174 2603
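ata_std_prereset() is a chain of flag-driven decisions: a hotplugged port may be forced to hardreset and made to wait for spin-up, a pending hardreset skips the phy work entirely, and otherwise the phy is resumed with boot or EH debounce timings before the optional !BSY wait. A compact control-flow model with stand-in flag values (the port-offline check is omitted for brevity):

#include <stdio.h>

/* stand-in flag values, not the kernel's */
#define F_HOTPLUGGED     0x01u
#define F_HRST_TO_RESUME 0x02u
#define F_SKIP_D2H_BSY   0x04u
#define F_LOADING        0x08u
#define A_HARDRESET      0x10u

static unsigned int prereset(unsigned int flags, unsigned int action)
{
    if (flags & F_HOTPLUGGED) {
        if (flags & F_HRST_TO_RESUME)
            action |= A_HARDRESET;
        if (flags & F_SKIP_D2H_BSY)
            puts("wait for device spinup");
    }
    if (action & A_HARDRESET)
        return action;     /* hardreset resumes the phy itself */
    puts((flags & F_LOADING) ? "resume phy with boot debounce timing"
                             : "resume phy with EH debounce timing");
    if (!(flags & F_SKIP_D2H_BSY))
        puts("wait for !BSY");
    return action;
}

int main(void)
{
    prereset(F_HOTPLUGGED | F_HRST_TO_RESUME, 0);
    return 0;
}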
2175/** 2604/**
2176 * ata_std_softreset - reset host port via ATA SRST 2605 * ata_std_softreset - reset host port via ATA SRST
2177 * @ap: port to reset 2606 * @ap: port to reset
2178 * @verbose: fail verbosely
2179 * @classes: resulting classes of attached devices 2607 * @classes: resulting classes of attached devices
2180 * 2608 *
2181 * Reset host port using ATA SRST. This function is to be used 2609 * Reset host port using ATA SRST.
2182 * as standard callback for ata_drive_*_reset() functions.
2183 * 2610 *
2184 * LOCKING: 2611 * LOCKING:
2185 * Kernel thread context (may sleep) 2612 * Kernel thread context (may sleep)
@@ -2187,7 +2614,7 @@ void ata_std_probeinit(struct ata_port *ap)
2187 * RETURNS: 2614 * RETURNS:
2188 * 0 on success, -errno otherwise. 2615 * 0 on success, -errno otherwise.
2189 */ 2616 */
2190int ata_std_softreset(struct ata_port *ap, int verbose, unsigned int *classes) 2617int ata_std_softreset(struct ata_port *ap, unsigned int *classes)
2191{ 2618{
2192 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS; 2619 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2193 unsigned int devmask = 0, err_mask; 2620 unsigned int devmask = 0, err_mask;
@@ -2195,7 +2622,7 @@ int ata_std_softreset(struct ata_port *ap, int verbose, unsigned int *classes)
2195 2622
2196 DPRINTK("ENTER\n"); 2623 DPRINTK("ENTER\n");
2197 2624
2198 if (ap->ops->scr_read && !sata_dev_present(ap)) { 2625 if (ata_port_offline(ap)) {
2199 classes[0] = ATA_DEV_NONE; 2626 classes[0] = ATA_DEV_NONE;
2200 goto out; 2627 goto out;
2201 } 2628 }
@@ -2213,11 +2640,7 @@ int ata_std_softreset(struct ata_port *ap, int verbose, unsigned int *classes)
2213 DPRINTK("about to softreset, devmask=%x\n", devmask); 2640 DPRINTK("about to softreset, devmask=%x\n", devmask);
2214 err_mask = ata_bus_softreset(ap, devmask); 2641 err_mask = ata_bus_softreset(ap, devmask);
2215 if (err_mask) { 2642 if (err_mask) {
2216 if (verbose) 2643 ata_port_printk(ap, KERN_ERR, "SRST failed (err_mask=0x%x)\n",
2217 printk(KERN_ERR "ata%u: SRST failed (err_mask=0x%x)\n",
2218 ap->id, err_mask);
2219 else
2220 DPRINTK("EXIT, softreset failed (err_mask=0x%x)\n",
2221 err_mask); 2644 err_mask);
2222 return -EIO; 2645 return -EIO;
2223 } 2646 }
@@ -2235,12 +2658,9 @@ int ata_std_softreset(struct ata_port *ap, int verbose, unsigned int *classes)
2235/** 2658/**
2236 * sata_std_hardreset - reset host port via SATA phy reset 2659 * sata_std_hardreset - reset host port via SATA phy reset
2237 * @ap: port to reset 2660 * @ap: port to reset
2238 * @verbose: fail verbosely
2239 * @class: resulting class of attached device 2661 * @class: resulting class of attached device
2240 * 2662 *
2241 * SATA phy-reset host port using DET bits of SControl register. 2663 * SATA phy-reset host port using DET bits of SControl register.
2242 * This function is to be used as standard callback for
2243 * ata_drive_*_reset().
2244 * 2664 *
2245 * LOCKING: 2665 * LOCKING:
2246 * Kernel thread context (may sleep) 2666 * Kernel thread context (may sleep)
@@ -2248,35 +2668,57 @@ int ata_std_softreset(struct ata_port *ap, int verbose, unsigned int *classes)
2248 * RETURNS: 2668 * RETURNS:
2249 * 0 on success, -errno otherwise. 2669 * 0 on success, -errno otherwise.
2250 */ 2670 */
2251int sata_std_hardreset(struct ata_port *ap, int verbose, unsigned int *class) 2671int sata_std_hardreset(struct ata_port *ap, unsigned int *class)
2252{ 2672{
2673 u32 scontrol;
2674 int rc;
2675
2253 DPRINTK("ENTER\n"); 2676 DPRINTK("ENTER\n");
2254 2677
2255 /* Issue phy wake/reset */ 2678 if (sata_set_spd_needed(ap)) {
2256 scr_write_flush(ap, SCR_CONTROL, 0x301); 2679 /* SATA spec says nothing about how to reconfigure
2680 * spd. To be on the safe side, turn off phy during
2681 * reconfiguration. This works for at least ICH7 AHCI
2682 * and Sil3124.
2683 */
2684 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2685 return rc;
2257 2686
2258 /* 2687 scontrol = (scontrol & 0x0f0) | 0x302;
2259 * Couldn't find anything in SATA I/II specs, but AHCI-1.1 2688
2689 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2690 return rc;
2691
2692 sata_set_spd(ap);
2693 }
2694
2695 /* issue phy wake/reset */
2696 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2697 return rc;
2698
2699 scontrol = (scontrol & 0x0f0) | 0x301;
2700
2701 if ((rc = sata_scr_write_flush(ap, SCR_CONTROL, scontrol)))
2702 return rc;
2703
2704 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
2260 * 10.4.2 says at least 1 ms. 2705 * 10.4.2 says at least 1 ms.
2261 */ 2706 */
2262 msleep(1); 2707 msleep(1);
2263 2708
2264 /* Bring phy back */ 2709 /* bring phy back */
2265 sata_phy_resume(ap); 2710 sata_phy_resume(ap, sata_deb_timing_eh);
2266 2711
2267 /* TODO: phy layer with polling, timeouts, etc. */ 2712 /* TODO: phy layer with polling, timeouts, etc. */
2268 if (!sata_dev_present(ap)) { 2713 if (ata_port_offline(ap)) {
2269 *class = ATA_DEV_NONE; 2714 *class = ATA_DEV_NONE;
2270 DPRINTK("EXIT, link offline\n"); 2715 DPRINTK("EXIT, link offline\n");
2271 return 0; 2716 return 0;
2272 } 2717 }
2273 2718
2274 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) { 2719 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
2275 if (verbose) 2720 ata_port_printk(ap, KERN_ERR,
2276 printk(KERN_ERR "ata%u: COMRESET failed " 2721 "COMRESET failed (device not ready)\n");
2277 "(device not ready)\n", ap->id);
2278 else
2279 DPRINTK("EXIT, device not ready\n");
2280 return -EIO; 2722 return -EIO;
2281 } 2723 }
2282 2724
@@ -2297,27 +2739,28 @@ int sata_std_hardreset(struct ata_port *ap, int verbose, unsigned int *class)
2297 * the device might have been reset more than once using 2739 * the device might have been reset more than once using
2298 * different reset methods before postreset is invoked. 2740 * different reset methods before postreset is invoked.
2299 * 2741 *
2300 * This function is to be used as standard callback for
2301 * ata_drive_*_reset().
2302 *
2303 * LOCKING: 2742 * LOCKING:
2304 * Kernel thread context (may sleep) 2743 * Kernel thread context (may sleep)
2305 */ 2744 */
2306void ata_std_postreset(struct ata_port *ap, unsigned int *classes) 2745void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
2307{ 2746{
2308 DPRINTK("ENTER\n"); 2747 u32 serror;
2309 2748
2310 /* set cable type if it isn't already set */ 2749 DPRINTK("ENTER\n");
2311 if (ap->cbl == ATA_CBL_NONE && ap->flags & ATA_FLAG_SATA)
2312 ap->cbl = ATA_CBL_SATA;
2313 2750
2314 /* print link status */ 2751 /* print link status */
2315 if (ap->cbl == ATA_CBL_SATA) 2752 sata_print_link_status(ap);
2316 sata_print_link_status(ap); 2753
2754 /* clear SError */
2755 if (sata_scr_read(ap, SCR_ERROR, &serror) == 0)
2756 sata_scr_write(ap, SCR_ERROR, serror);
2317 2757
2318 /* re-enable interrupts */ 2758 /* re-enable interrupts */
2319 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */ 2759 if (!ap->ops->error_handler) {
2320 ata_irq_on(ap); 2760 /* FIXME: hack. create a hook instead */
2761 if (ap->ioaddr.ctl_addr)
2762 ata_irq_on(ap);
2763 }
2321 2764
2322 /* is double-select really necessary? */ 2765 /* is double-select really necessary? */
2323 if (classes[0] != ATA_DEV_NONE) 2766 if (classes[0] != ATA_DEV_NONE)
@@ -2343,126 +2786,7 @@ void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
2343} 2786}
2344 2787
2345/** 2788/**
2346 * ata_std_probe_reset - standard probe reset method
 2347 * @ap: port to perform probe-reset
2348 * @classes: resulting classes of attached devices
2349 *
2350 * The stock off-the-shelf ->probe_reset method.
2351 *
2352 * LOCKING:
2353 * Kernel thread context (may sleep)
2354 *
2355 * RETURNS:
2356 * 0 on success, -errno otherwise.
2357 */
2358int ata_std_probe_reset(struct ata_port *ap, unsigned int *classes)
2359{
2360 ata_reset_fn_t hardreset;
2361
2362 hardreset = NULL;
2363 if (ap->flags & ATA_FLAG_SATA && ap->ops->scr_read)
2364 hardreset = sata_std_hardreset;
2365
2366 return ata_drive_probe_reset(ap, ata_std_probeinit,
2367 ata_std_softreset, hardreset,
2368 ata_std_postreset, classes);
2369}
2370
2371static int do_probe_reset(struct ata_port *ap, ata_reset_fn_t reset,
2372 ata_postreset_fn_t postreset,
2373 unsigned int *classes)
2374{
2375 int i, rc;
2376
2377 for (i = 0; i < ATA_MAX_DEVICES; i++)
2378 classes[i] = ATA_DEV_UNKNOWN;
2379
2380 rc = reset(ap, 0, classes);
2381 if (rc)
2382 return rc;
2383
2384 /* If any class isn't ATA_DEV_UNKNOWN, consider classification
2385 * is complete and convert all ATA_DEV_UNKNOWN to
2386 * ATA_DEV_NONE.
2387 */
2388 for (i = 0; i < ATA_MAX_DEVICES; i++)
2389 if (classes[i] != ATA_DEV_UNKNOWN)
2390 break;
2391
2392 if (i < ATA_MAX_DEVICES)
2393 for (i = 0; i < ATA_MAX_DEVICES; i++)
2394 if (classes[i] == ATA_DEV_UNKNOWN)
2395 classes[i] = ATA_DEV_NONE;
2396
2397 if (postreset)
2398 postreset(ap, classes);
2399
2400 return classes[0] != ATA_DEV_UNKNOWN ? 0 : -ENODEV;
2401}
2402
2403/**
2404 * ata_drive_probe_reset - Perform probe reset with given methods
2405 * @ap: port to reset
2406 * @probeinit: probeinit method (can be NULL)
2407 * @softreset: softreset method (can be NULL)
2408 * @hardreset: hardreset method (can be NULL)
2409 * @postreset: postreset method (can be NULL)
2410 * @classes: resulting classes of attached devices
2411 *
2412 * Reset the specified port and classify attached devices using
2413 * given methods. This function prefers softreset but tries all
2414 * possible reset sequences to reset and classify devices. This
2415 * function is intended to be used for constructing ->probe_reset
2416 * callback by low level drivers.
2417 *
2418 * Reset methods should follow the following rules.
2419 *
2420 * - Return 0 on success, -errno on failure.
2421 * - If classification is supported, fill classes[] with
2422 * recognized class codes.
2423 * - If classification is not supported, leave classes[] alone.
2424 * - If verbose is non-zero, print error message on failure;
2425 * otherwise, shut up.
2426 *
2427 * LOCKING:
2428 * Kernel thread context (may sleep)
2429 *
2430 * RETURNS:
2431 * 0 on success, -EINVAL if no reset method is available, -ENODEV
2432 * if classification fails, and any error code from reset
2433 * methods.
2434 */
2435int ata_drive_probe_reset(struct ata_port *ap, ata_probeinit_fn_t probeinit,
2436 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
2437 ata_postreset_fn_t postreset, unsigned int *classes)
2438{
2439 int rc = -EINVAL;
2440
2441 if (probeinit)
2442 probeinit(ap);
2443
2444 if (softreset) {
2445 rc = do_probe_reset(ap, softreset, postreset, classes);
2446 if (rc == 0)
2447 return 0;
2448 }
2449
2450 if (!hardreset)
2451 return rc;
2452
2453 rc = do_probe_reset(ap, hardreset, postreset, classes);
2454 if (rc == 0 || rc != -ENODEV)
2455 return rc;
2456
2457 if (softreset)
2458 rc = do_probe_reset(ap, softreset, postreset, classes);
2459
2460 return rc;
2461}
2462
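For reference, the control flow of the retired probe-reset helper boils down to a softreset-first ordering with a hardreset fallback and one softreset retry. A minimal sketch, with the libata types stripped out (soft() and hard() are illustrative stand-ins returning 0 or a -errno); note that the original test `rc == 0 || rc != -ENODEV` reduces to `rc != -ENODEV`:

/* Illustrative sketch of the removed probe-reset ordering; not kernel code. */
static int probe_reset_order(int (*soft)(void), int (*hard)(void))
{
	int rc = -EINVAL;

	if (soft && (rc = soft()) == 0)
		return 0;		/* softreset classified the devices */
	if (!hard)
		return rc;
	rc = hard();
	if (rc != -ENODEV)		/* success, or a non-retryable error */
		return rc;
	if (soft)			/* hardreset couldn't classify; retry */
		rc = soft();
	return rc;
}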
2463/**
2464 * ata_dev_same_device - Determine whether new ID matches configured device 2789 * ata_dev_same_device - Determine whether new ID matches configured device
2465 * @ap: port on which the device to compare against resides
2466 * @dev: device to compare against 2790 * @dev: device to compare against
2467 * @new_class: class of the new device 2791 * @new_class: class of the new device
2468 * @new_id: IDENTIFY page of the new device 2792 * @new_id: IDENTIFY page of the new device
@@ -2477,17 +2801,16 @@ int ata_drive_probe_reset(struct ata_port *ap, ata_probeinit_fn_t probeinit,
2477 * RETURNS: 2801 * RETURNS:
2478 * 1 if @dev matches @new_class and @new_id, 0 otherwise. 2802 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
2479 */ 2803 */
2480static int ata_dev_same_device(struct ata_port *ap, struct ata_device *dev, 2804static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
2481 unsigned int new_class, const u16 *new_id) 2805 const u16 *new_id)
2482{ 2806{
2483 const u16 *old_id = dev->id; 2807 const u16 *old_id = dev->id;
2484 unsigned char model[2][41], serial[2][21]; 2808 unsigned char model[2][41], serial[2][21];
2485 u64 new_n_sectors; 2809 u64 new_n_sectors;
2486 2810
2487 if (dev->class != new_class) { 2811 if (dev->class != new_class) {
2488 printk(KERN_INFO 2812 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
2489 "ata%u: dev %u class mismatch %d != %d\n", 2813 dev->class, new_class);
2490 ap->id, dev->devno, dev->class, new_class);
2491 return 0; 2814 return 0;
2492 } 2815 }
2493 2816
@@ -2498,24 +2821,22 @@ static int ata_dev_same_device(struct ata_port *ap, struct ata_device *dev,
2498 new_n_sectors = ata_id_n_sectors(new_id); 2821 new_n_sectors = ata_id_n_sectors(new_id);
2499 2822
2500 if (strcmp(model[0], model[1])) { 2823 if (strcmp(model[0], model[1])) {
2501 printk(KERN_INFO 2824 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
2502 "ata%u: dev %u model number mismatch '%s' != '%s'\n", 2825 "'%s' != '%s'\n", model[0], model[1]);
2503 ap->id, dev->devno, model[0], model[1]);
2504 return 0; 2826 return 0;
2505 } 2827 }
2506 2828
2507 if (strcmp(serial[0], serial[1])) { 2829 if (strcmp(serial[0], serial[1])) {
2508 printk(KERN_INFO 2830 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
2509 "ata%u: dev %u serial number mismatch '%s' != '%s'\n", 2831 "'%s' != '%s'\n", serial[0], serial[1]);
2510 ap->id, dev->devno, serial[0], serial[1]);
2511 return 0; 2832 return 0;
2512 } 2833 }
2513 2834
2514 if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) { 2835 if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) {
2515 printk(KERN_INFO 2836 ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
2516 "ata%u: dev %u n_sectors mismatch %llu != %llu\n", 2837 "%llu != %llu\n",
2517 ap->id, dev->devno, (unsigned long long)dev->n_sectors, 2838 (unsigned long long)dev->n_sectors,
2518 (unsigned long long)new_n_sectors); 2839 (unsigned long long)new_n_sectors);
2519 return 0; 2840 return 0;
2520 } 2841 }
2521 2842
@@ -2524,7 +2845,6 @@ static int ata_dev_same_device(struct ata_port *ap, struct ata_device *dev,
2524 2845
2525/** 2846/**
2526 * ata_dev_revalidate - Revalidate ATA device 2847 * ata_dev_revalidate - Revalidate ATA device
2527 * @ap: port on which the device to revalidate resides
2528 * @dev: device to revalidate 2848 * @dev: device to revalidate
2529 * @post_reset: is this revalidation after reset? 2849 * @post_reset: is this revalidation after reset?
2530 * 2850 *
@@ -2537,40 +2857,37 @@ static int ata_dev_same_device(struct ata_port *ap, struct ata_device *dev,
2537 * RETURNS: 2857 * RETURNS:
2538 * 0 on success, negative errno otherwise 2858 * 0 on success, negative errno otherwise
2539 */ 2859 */
2540int ata_dev_revalidate(struct ata_port *ap, struct ata_device *dev, 2860int ata_dev_revalidate(struct ata_device *dev, int post_reset)
2541 int post_reset)
2542{ 2861{
2543 unsigned int class; 2862 unsigned int class = dev->class;
2544 u16 *id; 2863 u16 *id = (void *)dev->ap->sector_buf;
2545 int rc; 2864 int rc;
2546 2865
2547 if (!ata_dev_present(dev)) 2866 if (!ata_dev_enabled(dev)) {
2548 return -ENODEV; 2867 rc = -ENODEV;
2549 2868 goto fail;
2550 class = dev->class; 2869 }
2551 id = NULL;
2552 2870
2553 /* allocate & read ID data */ 2871 /* read ID data */
2554 rc = ata_dev_read_id(ap, dev, &class, post_reset, &id); 2872 rc = ata_dev_read_id(dev, &class, post_reset, id);
2555 if (rc) 2873 if (rc)
2556 goto fail; 2874 goto fail;
2557 2875
2558 /* is the device still there? */ 2876 /* is the device still there? */
2559 if (!ata_dev_same_device(ap, dev, class, id)) { 2877 if (!ata_dev_same_device(dev, class, id)) {
2560 rc = -ENODEV; 2878 rc = -ENODEV;
2561 goto fail; 2879 goto fail;
2562 } 2880 }
2563 2881
2564 kfree(dev->id); 2882 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
2565 dev->id = id;
2566 2883
2567 /* configure device according to the new ID */ 2884 /* configure device according to the new ID */
2568 return ata_dev_configure(ap, dev, 0); 2885 rc = ata_dev_configure(dev, 0);
2886 if (rc == 0)
2887 return 0;
2569 2888
2570 fail: 2889 fail:
2571 printk(KERN_ERR "ata%u: dev %u revalidation failed (errno=%d)\n", 2890 ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
2572 ap->id, dev->devno, rc);
2573 kfree(id);
2574 return rc; 2891 return rc;
2575} 2892}
2576 2893
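A hedged usage sketch of the revalidation entry point above: a caller (typically error handling) revalidates after a reset and gives up on the device on failure. ata_dev_disable() is assumed to exist in this tree; the surrounding EH bookkeeping is elided.

static void revalidate_after_reset(struct ata_device *dev)
{
	if (ata_dev_revalidate(dev, 1 /* post_reset */))
		ata_dev_disable(dev);	/* assumption: EH gives up on the device */
}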
@@ -2646,7 +2963,6 @@ static int ata_dma_blacklisted(const struct ata_device *dev)
2646 2963
2647/** 2964/**
2648 * ata_dev_xfermask - Compute supported xfermask of the given device 2965 * ata_dev_xfermask - Compute supported xfermask of the given device
2649 * @ap: Port on which the device to compute xfermask for resides
2650 * @dev: Device to compute xfermask for 2966 * @dev: Device to compute xfermask for
2651 * 2967 *
2652 * Compute supported xfermask of @dev and store it in 2968 * Compute supported xfermask of @dev and store it in
@@ -2661,49 +2977,61 @@ static int ata_dma_blacklisted(const struct ata_device *dev)
2661 * LOCKING: 2977 * LOCKING:
2662 * None. 2978 * None.
2663 */ 2979 */
2664static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev) 2980static void ata_dev_xfermask(struct ata_device *dev)
2665{ 2981{
2982 struct ata_port *ap = dev->ap;
2666 struct ata_host_set *hs = ap->host_set; 2983 struct ata_host_set *hs = ap->host_set;
2667 unsigned long xfer_mask; 2984 unsigned long xfer_mask;
2668 int i; 2985 int i;
2669 2986
2670 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask, 2987 xfer_mask = ata_pack_xfermask(ap->pio_mask,
2671 ap->udma_mask); 2988 ap->mwdma_mask, ap->udma_mask);
2989
2990 /* Apply cable rule here. Don't apply it early because when
2991 * we handle hot plug the cable type can itself change.
2992 */
2993 if (ap->cbl == ATA_CBL_PATA40)
2994 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
2672 2995
2673 /* FIXME: Use port-wide xfermask for now */ 2996 /* FIXME: Use port-wide xfermask for now */
2674 for (i = 0; i < ATA_MAX_DEVICES; i++) { 2997 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2675 struct ata_device *d = &ap->device[i]; 2998 struct ata_device *d = &ap->device[i];
2676 if (!ata_dev_present(d)) 2999
3000 if (ata_dev_absent(d))
3001 continue;
3002
3003 if (ata_dev_disabled(d)) {
3004 /* to avoid violating device selection timing */
3005 xfer_mask &= ata_pack_xfermask(d->pio_mask,
3006 UINT_MAX, UINT_MAX);
2677 continue; 3007 continue;
2678 xfer_mask &= ata_pack_xfermask(d->pio_mask, d->mwdma_mask, 3008 }
2679 d->udma_mask); 3009
3010 xfer_mask &= ata_pack_xfermask(d->pio_mask,
3011 d->mwdma_mask, d->udma_mask);
2680 xfer_mask &= ata_id_xfermask(d->id); 3012 xfer_mask &= ata_id_xfermask(d->id);
2681 if (ata_dma_blacklisted(d)) 3013 if (ata_dma_blacklisted(d))
2682 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); 3014 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
2683 /* Apply cable rule here. Don't apply it early because when
2684 we handle hot plug the cable type can itself change */
2685 if (ap->cbl == ATA_CBL_PATA40)
2686 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
2687 } 3015 }
2688 3016
2689 if (ata_dma_blacklisted(dev)) 3017 if (ata_dma_blacklisted(dev))
2690 printk(KERN_WARNING "ata%u: dev %u is on DMA blacklist, " 3018 ata_dev_printk(dev, KERN_WARNING,
2691 "disabling DMA\n", ap->id, dev->devno); 3019 "device is on DMA blacklist, disabling DMA\n");
2692 3020
2693 if (hs->flags & ATA_HOST_SIMPLEX) { 3021 if (hs->flags & ATA_HOST_SIMPLEX) {
2694 if (hs->simplex_claimed) 3022 if (hs->simplex_claimed)
2695 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); 3023 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
2696 } 3024 }
3025
2697 if (ap->ops->mode_filter) 3026 if (ap->ops->mode_filter)
2698 xfer_mask = ap->ops->mode_filter(ap, dev, xfer_mask); 3027 xfer_mask = ap->ops->mode_filter(ap, dev, xfer_mask);
2699 3028
2700 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask, 3029 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
2701 &dev->udma_mask); 3030 &dev->mwdma_mask, &dev->udma_mask);
2702} 3031}
2703 3032
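A worked sketch of the cable rule applied above, assuming the shift constants from this tree's <linux/ata.h>: the UDMA bitfield sits at ATA_SHIFT_UDMA inside the packed mask, and 0xF8 covers UDMA bits 3..7 (UDMA/44 and faster), so clearing them caps a 40-wire PATA cable at UDMA/33 (UDMA mode 2).

static unsigned long cap_udma_for_40wire(unsigned long xfer_mask)
{
	/* 0xF8 = UDMA modes 3..7; only UDMA/33 and below survive */
	return xfer_mask & ~(0xF8 << ATA_SHIFT_UDMA);
}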
2704/** 3033/**
2705 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command 3034 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
2706 * @ap: Port associated with device @dev
2707 * @dev: Device to which command will be sent 3035 * @dev: Device to which command will be sent
2708 * 3036 *
2709 * Issue SET FEATURES - XFER MODE command to device @dev 3037 * Issue SET FEATURES - XFER MODE command to device @dev
@@ -2716,8 +3044,7 @@ static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev)
2716 * 0 on success, AC_ERR_* mask otherwise. 3044 * 0 on success, AC_ERR_* mask otherwise.
2717 */ 3045 */
2718 3046
2719static unsigned int ata_dev_set_xfermode(struct ata_port *ap, 3047static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
2720 struct ata_device *dev)
2721{ 3048{
2722 struct ata_taskfile tf; 3049 struct ata_taskfile tf;
2723 unsigned int err_mask; 3050 unsigned int err_mask;
@@ -2725,14 +3052,14 @@ static unsigned int ata_dev_set_xfermode(struct ata_port *ap,
2725 /* set up set-features taskfile */ 3052 /* set up set-features taskfile */
2726 DPRINTK("set features - xfer mode\n"); 3053 DPRINTK("set features - xfer mode\n");
2727 3054
2728 ata_tf_init(ap, &tf, dev->devno); 3055 ata_tf_init(dev, &tf);
2729 tf.command = ATA_CMD_SET_FEATURES; 3056 tf.command = ATA_CMD_SET_FEATURES;
2730 tf.feature = SETFEATURES_XFER; 3057 tf.feature = SETFEATURES_XFER;
2731 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 3058 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2732 tf.protocol = ATA_PROT_NODATA; 3059 tf.protocol = ATA_PROT_NODATA;
2733 tf.nsect = dev->xfer_mode; 3060 tf.nsect = dev->xfer_mode;
2734 3061
2735 err_mask = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0); 3062 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
2736 3063
2737 DPRINTK("EXIT, err_mask=%x\n", err_mask); 3064 DPRINTK("EXIT, err_mask=%x\n", err_mask);
2738 return err_mask; 3065 return err_mask;
@@ -2740,7 +3067,6 @@ static unsigned int ata_dev_set_xfermode(struct ata_port *ap,
2740 3067
2741/** 3068/**
2742 * ata_dev_init_params - Issue INIT DEV PARAMS command 3069 * ata_dev_init_params - Issue INIT DEV PARAMS command
2743 * @ap: Port associated with device @dev
2744 * @dev: Device to which command will be sent 3070 * @dev: Device to which command will be sent
2745 * @heads: Number of heads (taskfile parameter) 3071 * @heads: Number of heads (taskfile parameter)
2746 * @sectors: Number of sectors (taskfile parameter) 3072 * @sectors: Number of sectors (taskfile parameter)
@@ -2751,11 +3077,8 @@ static unsigned int ata_dev_set_xfermode(struct ata_port *ap,
2751 * RETURNS: 3077 * RETURNS:
2752 * 0 on success, AC_ERR_* mask otherwise. 3078 * 0 on success, AC_ERR_* mask otherwise.
2753 */ 3079 */
2754 3080static unsigned int ata_dev_init_params(struct ata_device *dev,
2755static unsigned int ata_dev_init_params(struct ata_port *ap, 3081 u16 heads, u16 sectors)
2756 struct ata_device *dev,
2757 u16 heads,
2758 u16 sectors)
2759{ 3082{
2760 struct ata_taskfile tf; 3083 struct ata_taskfile tf;
2761 unsigned int err_mask; 3084 unsigned int err_mask;
@@ -2767,14 +3090,14 @@ static unsigned int ata_dev_init_params(struct ata_port *ap,
2767 /* set up init dev params taskfile */ 3090 /* set up init dev params taskfile */
2768 DPRINTK("init dev params \n"); 3091 DPRINTK("init dev params \n");
2769 3092
2770 ata_tf_init(ap, &tf, dev->devno); 3093 ata_tf_init(dev, &tf);
2771 tf.command = ATA_CMD_INIT_DEV_PARAMS; 3094 tf.command = ATA_CMD_INIT_DEV_PARAMS;
2772 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 3095 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2773 tf.protocol = ATA_PROT_NODATA; 3096 tf.protocol = ATA_PROT_NODATA;
2774 tf.nsect = sectors; 3097 tf.nsect = sectors;
2775 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */ 3098 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
2776 3099
2777 err_mask = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0); 3100 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
2778 3101
2779 DPRINTK("EXIT, err_mask=%x\n", err_mask); 3102 DPRINTK("EXIT, err_mask=%x\n", err_mask);
2780 return err_mask; 3103 return err_mask;
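A worked example of the taskfile encoding above: for a classic 16-head, 63-sectors-per-track translation, nsect = 63 and the low nibble of the device register holds the max head value, 16 - 1 = 0x0f. The helper name is illustrative only.

static u8 init_dev_params_head_bits(u16 heads)
{
	return (heads - 1) & 0x0f;	/* max head = number of heads - 1 */
}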
@@ -2912,6 +3235,15 @@ int ata_check_atapi_dma(struct ata_queued_cmd *qc)
2912 if (ap->ops->check_atapi_dma) 3235 if (ap->ops->check_atapi_dma)
2913 rc = ap->ops->check_atapi_dma(qc); 3236 rc = ap->ops->check_atapi_dma(qc);
2914 3237
3238 /* We don't support polling DMA.
3239 * Use PIO if the LLDD handles only interrupts in
3240 * the HSM_ST_LAST state and the ATAPI device
3241 * generates CDB interrupts.
3242 */
3243 if ((ap->flags & ATA_FLAG_PIO_POLLING) &&
3244 (qc->dev->flags & ATA_DFLAG_CDB_INTR))
3245 rc = 1;
3246
2915 return rc; 3247 return rc;
2916} 3248}
2917/** 3249/**
@@ -2957,6 +3289,7 @@ void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
2957 qc->n_elem = 1; 3289 qc->n_elem = 1;
2958 qc->orig_n_elem = 1; 3290 qc->orig_n_elem = 1;
2959 qc->buf_virt = buf; 3291 qc->buf_virt = buf;
3292 qc->nbytes = buflen;
2960 3293
2961 sg = qc->__sg; 3294 sg = qc->__sg;
2962 sg_init_one(sg, buf, buflen); 3295 sg_init_one(sg, buf, buflen);
@@ -3140,134 +3473,6 @@ skip_map:
3140} 3473}
3141 3474
3142/** 3475/**
3143 * ata_poll_qc_complete - turn irq back on and finish qc
3144 * @qc: Command to complete
3145 * @err_mask: ATA status register content
3146 *
3147 * LOCKING:
3148 * None. (grabs host lock)
3149 */
3150
3151void ata_poll_qc_complete(struct ata_queued_cmd *qc)
3152{
3153 struct ata_port *ap = qc->ap;
3154 unsigned long flags;
3155
3156 spin_lock_irqsave(&ap->host_set->lock, flags);
3157 ap->flags &= ~ATA_FLAG_NOINTR;
3158 ata_irq_on(ap);
3159 ata_qc_complete(qc);
3160 spin_unlock_irqrestore(&ap->host_set->lock, flags);
3161}
3162
3163/**
3164 * ata_pio_poll - poll using PIO, depending on current state
3165 * @ap: the target ata_port
3166 *
3167 * LOCKING:
3168 * None. (executing in kernel thread context)
3169 *
3170 * RETURNS:
3171 * timeout value to use
3172 */
3173
3174static unsigned long ata_pio_poll(struct ata_port *ap)
3175{
3176 struct ata_queued_cmd *qc;
3177 u8 status;
3178 unsigned int poll_state = HSM_ST_UNKNOWN;
3179 unsigned int reg_state = HSM_ST_UNKNOWN;
3180
3181 qc = ata_qc_from_tag(ap, ap->active_tag);
3182 WARN_ON(qc == NULL);
3183
3184 switch (ap->hsm_task_state) {
3185 case HSM_ST:
3186 case HSM_ST_POLL:
3187 poll_state = HSM_ST_POLL;
3188 reg_state = HSM_ST;
3189 break;
3190 case HSM_ST_LAST:
3191 case HSM_ST_LAST_POLL:
3192 poll_state = HSM_ST_LAST_POLL;
3193 reg_state = HSM_ST_LAST;
3194 break;
3195 default:
3196 BUG();
3197 break;
3198 }
3199
3200 status = ata_chk_status(ap);
3201 if (status & ATA_BUSY) {
3202 if (time_after(jiffies, ap->pio_task_timeout)) {
3203 qc->err_mask |= AC_ERR_TIMEOUT;
3204 ap->hsm_task_state = HSM_ST_TMOUT;
3205 return 0;
3206 }
3207 ap->hsm_task_state = poll_state;
3208 return ATA_SHORT_PAUSE;
3209 }
3210
3211 ap->hsm_task_state = reg_state;
3212 return 0;
3213}
3214
3215/**
3216 * ata_pio_complete - check if drive is busy or idle
3217 * @ap: the target ata_port
3218 *
3219 * LOCKING:
3220 * None. (executing in kernel thread context)
3221 *
3222 * RETURNS:
3223 * Non-zero if qc completed, zero otherwise.
3224 */
3225
3226static int ata_pio_complete (struct ata_port *ap)
3227{
3228 struct ata_queued_cmd *qc;
3229 u8 drv_stat;
3230
3231 /*
3232 * This is purely heuristic. This is a fast path. Sometimes when
3233 * we enter, BSY will be cleared in a chk-status or two. If not,
3234 * the drive is probably seeking or something. Snooze for a couple
3235 * msecs, then chk-status again. If still busy, fall back to
3236 * HSM_ST_POLL state.
3237 */
3238 drv_stat = ata_busy_wait(ap, ATA_BUSY, 10);
3239 if (drv_stat & ATA_BUSY) {
3240 msleep(2);
3241 drv_stat = ata_busy_wait(ap, ATA_BUSY, 10);
3242 if (drv_stat & ATA_BUSY) {
3243 ap->hsm_task_state = HSM_ST_LAST_POLL;
3244 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
3245 return 0;
3246 }
3247 }
3248
3249 qc = ata_qc_from_tag(ap, ap->active_tag);
3250 WARN_ON(qc == NULL);
3251
3252 drv_stat = ata_wait_idle(ap);
3253 if (!ata_ok(drv_stat)) {
3254 qc->err_mask |= __ac_err_mask(drv_stat);
3255 ap->hsm_task_state = HSM_ST_ERR;
3256 return 0;
3257 }
3258
3259 ap->hsm_task_state = HSM_ST_IDLE;
3260
3261 WARN_ON(qc->err_mask);
3262 ata_poll_qc_complete(qc);
3263
3264 /* another command may start at this point */
3265
3266 return 1;
3267}
3268
3269
3270/**
3271 * swap_buf_le16 - swap halves of 16-bit words in place 3476 * swap_buf_le16 - swap halves of 16-bit words in place
3272 * @buf: Buffer to swap 3477 * @buf: Buffer to swap
3273 * @buf_words: Number of 16-bit words in buffer. 3478 * @buf_words: Number of 16-bit words in buffer.
@@ -3291,7 +3496,7 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
3291 3496
3292/** 3497/**
3293 * ata_mmio_data_xfer - Transfer data by MMIO 3498 * ata_mmio_data_xfer - Transfer data by MMIO
3294 * @ap: port to read/write 3499 * @adev: device for this I/O
3295 * @buf: data buffer 3500 * @buf: data buffer
3296 * @buflen: buffer length 3501 * @buflen: buffer length
3297 * @write_data: read/write 3502 * @write_data: read/write
@@ -3302,9 +3507,10 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
3302 * Inherited from caller. 3507 * Inherited from caller.
3303 */ 3508 */
3304 3509
3305static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf, 3510void ata_mmio_data_xfer(struct ata_device *adev, unsigned char *buf,
3306 unsigned int buflen, int write_data) 3511 unsigned int buflen, int write_data)
3307{ 3512{
3513 struct ata_port *ap = adev->ap;
3308 unsigned int i; 3514 unsigned int i;
3309 unsigned int words = buflen >> 1; 3515 unsigned int words = buflen >> 1;
3310 u16 *buf16 = (u16 *) buf; 3516 u16 *buf16 = (u16 *) buf;
@@ -3336,7 +3542,7 @@ static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
3336 3542
3337/** 3543/**
3338 * ata_pio_data_xfer - Transfer data by PIO 3544 * ata_pio_data_xfer - Transfer data by PIO
3339 * @ap: port to read/write 3545 * @adev: device to target
3340 * @buf: data buffer 3546 * @buf: data buffer
3341 * @buflen: buffer length 3547 * @buflen: buffer length
3342 * @write_data: read/write 3548 * @write_data: read/write
@@ -3347,9 +3553,10 @@ static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
3347 * Inherited from caller. 3553 * Inherited from caller.
3348 */ 3554 */
3349 3555
3350static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf, 3556void ata_pio_data_xfer(struct ata_device *adev, unsigned char *buf,
3351 unsigned int buflen, int write_data) 3557 unsigned int buflen, int write_data)
3352{ 3558{
3559 struct ata_port *ap = adev->ap;
3353 unsigned int words = buflen >> 1; 3560 unsigned int words = buflen >> 1;
3354 3561
3355 /* Transfer multiple of 2 bytes */ 3562 /* Transfer multiple of 2 bytes */
@@ -3374,38 +3581,29 @@ static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf,
3374} 3581}
3375 3582
3376/** 3583/**
3377 * ata_data_xfer - Transfer data from/to the data register. 3584 * ata_pio_data_xfer_noirq - Transfer data by PIO
3378 * @ap: port to read/write 3585 * @adev: device to target
3379 * @buf: data buffer 3586 * @buf: data buffer
3380 * @buflen: buffer length 3587 * @buflen: buffer length
3381 * @do_write: read/write 3588 * @write_data: read/write
3382 * 3589 *
3383 * Transfer data from/to the device data register. 3590 * Transfer data from/to the device data register by PIO. Do the
3591 * transfer with interrupts disabled.
3384 * 3592 *
3385 * LOCKING: 3593 * LOCKING:
3386 * Inherited from caller. 3594 * Inherited from caller.
3387 */ 3595 */
3388 3596
3389static void ata_data_xfer(struct ata_port *ap, unsigned char *buf, 3597void ata_pio_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
3390 unsigned int buflen, int do_write) 3598 unsigned int buflen, int write_data)
3391{ 3599{
3392 /* Make the crap hardware pay the costs not the good stuff */ 3600 unsigned long flags;
3393 if (unlikely(ap->flags & ATA_FLAG_IRQ_MASK)) { 3601 local_irq_save(flags);
3394 unsigned long flags; 3602 ata_pio_data_xfer(adev, buf, buflen, write_data);
3395 local_irq_save(flags); 3603 local_irq_restore(flags);
3396 if (ap->flags & ATA_FLAG_MMIO)
3397 ata_mmio_data_xfer(ap, buf, buflen, do_write);
3398 else
3399 ata_pio_data_xfer(ap, buf, buflen, do_write);
3400 local_irq_restore(flags);
3401 } else {
3402 if (ap->flags & ATA_FLAG_MMIO)
3403 ata_mmio_data_xfer(ap, buf, buflen, do_write);
3404 else
3405 ata_pio_data_xfer(ap, buf, buflen, do_write);
3406 }
3407} 3604}
3408 3605
3606
3409/** 3607/**
3410 * ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data. 3608 * ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
3411 * @qc: Command on going 3609 * @qc: Command on going
@@ -3435,7 +3633,24 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
3435 page = nth_page(page, (offset >> PAGE_SHIFT)); 3633 page = nth_page(page, (offset >> PAGE_SHIFT));
3436 offset %= PAGE_SIZE; 3634 offset %= PAGE_SIZE;
3437 3635
3438 buf = kmap(page) + offset; 3636 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3637
3638 if (PageHighMem(page)) {
3639 unsigned long flags;
3640
3641 /* FIXME: use a bounce buffer */
3642 local_irq_save(flags);
3643 buf = kmap_atomic(page, KM_IRQ0);
3644
3645 /* do the actual data transfer */
3646 ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);
3647
3648 kunmap_atomic(buf, KM_IRQ0);
3649 local_irq_restore(flags);
3650 } else {
3651 buf = page_address(page);
3652 ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);
3653 }
3439 3654
3440 qc->cursect++; 3655 qc->cursect++;
3441 qc->cursg_ofs++; 3656 qc->cursg_ofs++;
@@ -3444,14 +3659,68 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
3444 qc->cursg++; 3659 qc->cursg++;
3445 qc->cursg_ofs = 0; 3660 qc->cursg_ofs = 0;
3446 } 3661 }
3662}
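A pattern sketch restating the highmem branch above: KM_IRQ0 is the atomic-kmap slot reserved for interrupt context, so local interrupts must stay off while it is in use or a nested IRQ-context user could clobber the mapping. The helper name is hypothetical; the body mirrors the diff.

static void xfer_from_high_page(struct ata_device *adev, struct page *page,
				unsigned int offset, unsigned int len, int rw)
{
	unsigned long flags;
	unsigned char *buf;

	local_irq_save(flags);			/* keep KM_IRQ0 exclusive */
	buf = kmap_atomic(page, KM_IRQ0);
	adev->ap->ops->data_xfer(adev, buf + offset, len, rw);
	kunmap_atomic(buf, KM_IRQ0);
	local_irq_restore(flags);
}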
3447 3663
3448 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read"); 3664/**
3665 * ata_pio_sectors - Transfer one or many 512-byte sectors.
3666 * @qc: Command on going
3667 *
3668 * Transfer one or many ATA_SECT_SIZE of data from/to the
3669 * ATA device for the DRQ request.
3670 *
3671 * LOCKING:
3672 * Inherited from caller.
3673 */
3674
3675static void ata_pio_sectors(struct ata_queued_cmd *qc)
3676{
3677 if (is_multi_taskfile(&qc->tf)) {
3678 /* READ/WRITE MULTIPLE */
3679 unsigned int nsect;
3680
3681 WARN_ON(qc->dev->multi_count == 0);
3682
3683 nsect = min(qc->nsect - qc->cursect, qc->dev->multi_count);
3684 while (nsect--)
3685 ata_pio_sector(qc);
3686 } else
3687 ata_pio_sector(qc);
3688}
3689
3690/**
3691 * atapi_send_cdb - Write CDB bytes to hardware
3692 * @ap: Port to which ATAPI device is attached.
3693 * @qc: Taskfile currently active
3694 *
3695 * When device has indicated its readiness to accept
3696 * a CDB, this function is called. Send the CDB.
3697 *
3698 * LOCKING:
3699 * caller.
3700 */
3449 3701
3450 /* do the actual data transfer */ 3702static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
3451 do_write = (qc->tf.flags & ATA_TFLAG_WRITE); 3703{
3452 ata_data_xfer(ap, buf, ATA_SECT_SIZE, do_write); 3704 /* send SCSI cdb */
3705 DPRINTK("send cdb\n");
3706 WARN_ON(qc->dev->cdb_len < 12);
3453 3707
3454 kunmap(page); 3708 ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
3709 ata_altstatus(ap); /* flush */
3710
3711 switch (qc->tf.protocol) {
3712 case ATA_PROT_ATAPI:
3713 ap->hsm_task_state = HSM_ST;
3714 break;
3715 case ATA_PROT_ATAPI_NODATA:
3716 ap->hsm_task_state = HSM_ST_LAST;
3717 break;
3718 case ATA_PROT_ATAPI_DMA:
3719 ap->hsm_task_state = HSM_ST_LAST;
3720 /* initiate bmdma */
3721 ap->ops->bmdma_start(qc);
3722 break;
3723 }
3455} 3724}
3456 3725
3457/** 3726/**
@@ -3492,11 +3761,11 @@ next_sg:
3492 unsigned int i; 3761 unsigned int i;
3493 3762
3494 if (words) /* warning if bytes > 1 */ 3763 if (words) /* warning if bytes > 1 */
3495 printk(KERN_WARNING "ata%u: %u bytes trailing data\n", 3764 ata_dev_printk(qc->dev, KERN_WARNING,
3496 ap->id, bytes); 3765 "%u bytes trailing data\n", bytes);
3497 3766
3498 for (i = 0; i < words; i++) 3767 for (i = 0; i < words; i++)
3499 ata_data_xfer(ap, (unsigned char*)pad_buf, 2, do_write); 3768 ap->ops->data_xfer(qc->dev, (unsigned char*)pad_buf, 2, do_write);
3500 3769
3501 ap->hsm_task_state = HSM_ST_LAST; 3770 ap->hsm_task_state = HSM_ST_LAST;
3502 return; 3771 return;
@@ -3517,7 +3786,24 @@ next_sg:
3517 /* don't cross page boundaries */ 3786 /* don't cross page boundaries */
3518 count = min(count, (unsigned int)PAGE_SIZE - offset); 3787 count = min(count, (unsigned int)PAGE_SIZE - offset);
3519 3788
3520 buf = kmap(page) + offset; 3789 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3790
3791 if (PageHighMem(page)) {
3792 unsigned long flags;
3793
3794 /* FIXME: use bounce buffer */
3795 local_irq_save(flags);
3796 buf = kmap_atomic(page, KM_IRQ0);
3797
3798 /* do the actual data transfer */
3799 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
3800
3801 kunmap_atomic(buf, KM_IRQ0);
3802 local_irq_restore(flags);
3803 } else {
3804 buf = page_address(page);
3805 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
3806 }
3521 3807
3522 bytes -= count; 3808 bytes -= count;
3523 qc->curbytes += count; 3809 qc->curbytes += count;
@@ -3528,13 +3814,6 @@ next_sg:
3528 qc->cursg_ofs = 0; 3814 qc->cursg_ofs = 0;
3529 } 3815 }
3530 3816
3531 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3532
3533 /* do the actual data transfer */
3534 ata_data_xfer(ap, buf, count, do_write);
3535
3536 kunmap(page);
3537
3538 if (bytes) 3817 if (bytes)
3539 goto next_sg; 3818 goto next_sg;
3540} 3819}
@@ -3556,10 +3835,16 @@ static void atapi_pio_bytes(struct ata_queued_cmd *qc)
3556 unsigned int ireason, bc_lo, bc_hi, bytes; 3835 unsigned int ireason, bc_lo, bc_hi, bytes;
3557 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0; 3836 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
3558 3837
3559 ap->ops->tf_read(ap, &qc->tf); 3838 /* Abuse qc->result_tf for temp storage of intermediate TF
3560 ireason = qc->tf.nsect; 3839 * here to save some kernel stack usage.
3561 bc_lo = qc->tf.lbam; 3840 * For normal completion, qc->result_tf is not relevant. For
3562 bc_hi = qc->tf.lbah; 3841 * error, qc->result_tf is later overwritten by ata_qc_complete().
3842 * So, the correctness of qc->result_tf is not affected.
3843 */
3844 ap->ops->tf_read(ap, &qc->result_tf);
3845 ireason = qc->result_tf.nsect;
3846 bc_lo = qc->result_tf.lbam;
3847 bc_hi = qc->result_tf.lbah;
3563 bytes = (bc_hi << 8) | bc_lo; 3848 bytes = (bc_hi << 8) | bc_lo;
3564 3849
3565 /* shall be cleared to zero, indicating xfer of data */ 3850 /* shall be cleared to zero, indicating xfer of data */
@@ -3571,307 +3856,365 @@ static void atapi_pio_bytes(struct ata_queued_cmd *qc)
3571 if (do_write != i_write) 3856 if (do_write != i_write)
3572 goto err_out; 3857 goto err_out;
3573 3858
3859 VPRINTK("ata%u: xfering %d bytes\n", ap->id, bytes);
3860
3574 __atapi_pio_bytes(qc, bytes); 3861 __atapi_pio_bytes(qc, bytes);
3575 3862
3576 return; 3863 return;
3577 3864
3578err_out: 3865err_out:
3579 printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n", 3866 ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
3580 ap->id, dev->devno);
3581 qc->err_mask |= AC_ERR_HSM; 3867 qc->err_mask |= AC_ERR_HSM;
3582 ap->hsm_task_state = HSM_ST_ERR; 3868 ap->hsm_task_state = HSM_ST_ERR;
3583} 3869}
3584 3870
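A decoding sketch for the interrupt-reason checks above. ireason is read back through the nsect register: bit 0 (CoD) must be clear for a data phase, and bit 1 (I/O) set means device-to-host; the byte count comes from lbah/lbam, e.g. lbah = 0x02, lbam = 0x00 announces a 512-byte DRQ block. ok_for_data() is an illustrative helper name, with host_writes taken as 0 or 1.

static int ok_for_data(unsigned int ireason, int host_writes)
{
	int i_write;

	if (ireason & (1 << 0))			/* CoD set: command, not data */
		return 0;
	i_write = (ireason & (1 << 1)) == 0;	/* I/O clear: host writes */
	return i_write == host_writes;		/* directions must agree */
}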
3585/** 3871/**
3586 * ata_pio_block - start PIO on a block 3872 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
3587 * @ap: the target ata_port 3873 * @ap: the target ata_port
3874 * @qc: qc on going
3588 * 3875 *
3589 * LOCKING: 3876 * RETURNS:
3590 * None. (executing in kernel thread context) 3877 * 1 if ok in workqueue, 0 otherwise.
3591 */ 3878 */
3592 3879
3593static void ata_pio_block(struct ata_port *ap) 3880static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
3594{ 3881{
3595 struct ata_queued_cmd *qc; 3882 if (qc->tf.flags & ATA_TFLAG_POLLING)
3596 u8 status; 3883 return 1;
3597 3884
3598 /* 3885 if (ap->hsm_task_state == HSM_ST_FIRST) {
3599 * This is purely heuristic. This is a fast path. 3886 if (qc->tf.protocol == ATA_PROT_PIO &&
3600 * Sometimes when we enter, BSY will be cleared in 3887 (qc->tf.flags & ATA_TFLAG_WRITE))
3601 * a chk-status or two. If not, the drive is probably seeking 3888 return 1;
3602 * or something. Snooze for a couple msecs, then 3889
3603 * chk-status again. If still busy, fall back to 3890 if (is_atapi_taskfile(&qc->tf) &&
3604 * HSM_ST_POLL state. 3891 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
3605 */ 3892 return 1;
3606 status = ata_busy_wait(ap, ATA_BUSY, 5);
3607 if (status & ATA_BUSY) {
3608 msleep(2);
3609 status = ata_busy_wait(ap, ATA_BUSY, 10);
3610 if (status & ATA_BUSY) {
3611 ap->hsm_task_state = HSM_ST_POLL;
3612 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
3613 return;
3614 }
3615 } 3893 }
3616 3894
3617 qc = ata_qc_from_tag(ap, ap->active_tag); 3895 return 0;
3618 WARN_ON(qc == NULL); 3896}
3619 3897
3620 /* check error */ 3898/**
3621 if (status & (ATA_ERR | ATA_DF)) { 3899 * ata_hsm_qc_complete - finish a qc running on standard HSM
3622 qc->err_mask |= AC_ERR_DEV; 3900 * @qc: Command to complete
3623 ap->hsm_task_state = HSM_ST_ERR; 3901 * @in_wq: 1 if called from workqueue, 0 otherwise
3624 return; 3902 *
3625 } 3903 * Finish @qc which is running on standard HSM.
3904 *
3905 * LOCKING:
3906 * If @in_wq is zero, spin_lock_irqsave(host_set lock).
3907 * Otherwise, none on entry and grabs host lock.
3908 */
3909static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
3910{
3911 struct ata_port *ap = qc->ap;
3912 unsigned long flags;
3626 3913
3627 /* transfer data if any */ 3914 if (ap->ops->error_handler) {
3628 if (is_atapi_taskfile(&qc->tf)) { 3915 if (in_wq) {
3629 /* DRQ=0 means no more data to transfer */ 3916 spin_lock_irqsave(&ap->host_set->lock, flags);
3630 if ((status & ATA_DRQ) == 0) {
3631 ap->hsm_task_state = HSM_ST_LAST;
3632 return;
3633 }
3634 3917
3635 atapi_pio_bytes(qc); 3918 /* EH might have kicked in while host_set lock
3636 } else { 3919 * is released.
3637 /* handle BSY=0, DRQ=0 as error */ 3920 */
3638 if ((status & ATA_DRQ) == 0) { 3921 qc = ata_qc_from_tag(ap, qc->tag);
3639 qc->err_mask |= AC_ERR_HSM; 3922 if (qc) {
3640 ap->hsm_task_state = HSM_ST_ERR; 3923 if (likely(!(qc->err_mask & AC_ERR_HSM))) {
3641 return; 3924 ata_irq_on(ap);
3642 } 3925 ata_qc_complete(qc);
3926 } else
3927 ata_port_freeze(ap);
3928 }
3643 3929
3644 ata_pio_sector(qc); 3930 spin_unlock_irqrestore(&ap->host_set->lock, flags);
3931 } else {
3932 if (likely(!(qc->err_mask & AC_ERR_HSM)))
3933 ata_qc_complete(qc);
3934 else
3935 ata_port_freeze(ap);
3936 }
3937 } else {
3938 if (in_wq) {
3939 spin_lock_irqsave(&ap->host_set->lock, flags);
3940 ata_irq_on(ap);
3941 ata_qc_complete(qc);
3942 spin_unlock_irqrestore(&ap->host_set->lock, flags);
3943 } else
3944 ata_qc_complete(qc);
3645 } 3945 }
3646 3946
3647 ata_altstatus(ap); /* flush */ 3947 ata_altstatus(ap); /* flush */
3648} 3948}
3649 3949
3650static void ata_pio_error(struct ata_port *ap) 3950/**
3951 * ata_hsm_move - move the HSM to the next state.
3952 * @ap: the target ata_port
3953 * @qc: qc on going
3954 * @status: current device status
3955 * @in_wq: 1 if called from workqueue, 0 otherwise
3956 *
3957 * RETURNS:
3958 * 1 when poll next status needed, 0 otherwise.
3959 */
3960int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
3961 u8 status, int in_wq)
3651{ 3962{
3652 struct ata_queued_cmd *qc; 3963 unsigned long flags = 0;
3964 int poll_next;
3653 3965
3654 qc = ata_qc_from_tag(ap, ap->active_tag); 3966 WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
3655 WARN_ON(qc == NULL);
3656 3967
3657 if (qc->tf.command != ATA_CMD_PACKET) 3968 /* Make sure ata_qc_issue_prot() does not throw things
3658 printk(KERN_WARNING "ata%u: PIO error\n", ap->id); 3969 * like DMA polling into the workqueue. Notice that
3659 3970 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
3660 /* make sure qc->err_mask is available to
3661 * know what's wrong and recover
3662 */ 3971 */
3663 WARN_ON(qc->err_mask == 0); 3972 WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
3664
3665 ap->hsm_task_state = HSM_ST_IDLE;
3666
3667 ata_poll_qc_complete(qc);
3668}
3669
3670static void ata_pio_task(void *_data)
3671{
3672 struct ata_port *ap = _data;
3673 unsigned long timeout;
3674 int qc_completed;
3675 3973
3676fsm_start: 3974fsm_start:
3677 timeout = 0; 3975 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
3678 qc_completed = 0; 3976 ap->id, qc->tf.protocol, ap->hsm_task_state, status);
3679 3977
3680 switch (ap->hsm_task_state) { 3978 switch (ap->hsm_task_state) {
3681 case HSM_ST_IDLE: 3979 case HSM_ST_FIRST:
3682 return; 3980 /* Send first data block or PACKET CDB */
3683
3684 case HSM_ST:
3685 ata_pio_block(ap);
3686 break;
3687
3688 case HSM_ST_LAST:
3689 qc_completed = ata_pio_complete(ap);
3690 break;
3691
3692 case HSM_ST_POLL:
3693 case HSM_ST_LAST_POLL:
3694 timeout = ata_pio_poll(ap);
3695 break;
3696
3697 case HSM_ST_TMOUT:
3698 case HSM_ST_ERR:
3699 ata_pio_error(ap);
3700 return;
3701 }
3702
3703 if (timeout)
3704 ata_port_queue_task(ap, ata_pio_task, ap, timeout);
3705 else if (!qc_completed)
3706 goto fsm_start;
3707}
3708
3709/**
3710 * atapi_packet_task - Write CDB bytes to hardware
3711 * @_data: Port to which ATAPI device is attached.
3712 *
3713 * When device has indicated its readiness to accept
3714 * a CDB, this function is called. Send the CDB.
3715 * If DMA is to be performed, exit immediately.
3716 * Otherwise, we are in polling mode, so poll
3717 * status until the operation succeeds or fails.
3718 *
3719 * LOCKING:
3720 * Kernel thread context (may sleep)
3721 */
3722
3723static void atapi_packet_task(void *_data)
3724{
3725 struct ata_port *ap = _data;
3726 struct ata_queued_cmd *qc;
3727 u8 status;
3728 3981
3729 qc = ata_qc_from_tag(ap, ap->active_tag); 3982 /* If polling, we will stay in the work queue after
3730 WARN_ON(qc == NULL); 3983 * sending the data. Otherwise, interrupt handler
3731 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE)); 3984 * takes over after sending the data.
3985 */
3986 poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
3987
3988 /* check device status */
3989 if (unlikely((status & ATA_DRQ) == 0)) {
3990 /* handle BSY=0, DRQ=0 as error */
3991 if (likely(status & (ATA_ERR | ATA_DF)))
3992 /* device stops HSM for abort/error */
3993 qc->err_mask |= AC_ERR_DEV;
3994 else
3995 /* HSM violation. Let EH handle this */
3996 qc->err_mask |= AC_ERR_HSM;
3732 3997
3733 /* sleep-wait for BSY to clear */ 3998 ap->hsm_task_state = HSM_ST_ERR;
3734 DPRINTK("busy wait\n"); 3999 goto fsm_start;
3735 if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB)) { 4000 }
3736 qc->err_mask |= AC_ERR_TIMEOUT;
3737 goto err_out;
3738 }
3739 4001
3740 /* make sure DRQ is set */ 4002 /* Device should not ask for data transfer (DRQ=1)
3741 status = ata_chk_status(ap); 4003 * when it finds something wrong.
3742 if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ) { 4004 * We ignore DRQ here and stop the HSM by
3743 qc->err_mask |= AC_ERR_HSM; 4005 * changing hsm_task_state to HSM_ST_ERR and
3744 goto err_out; 4006 * let the EH abort the command or reset the device.
3745 } 4007 */
4008 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4009 printk(KERN_WARNING "ata%u: DRQ=1 with device error, dev_stat 0x%X\n",
4010 ap->id, status);
4011 qc->err_mask |= AC_ERR_HSM;
4012 ap->hsm_task_state = HSM_ST_ERR;
4013 goto fsm_start;
4014 }
3746 4015
3747 /* send SCSI cdb */ 4016 /* Send the CDB (atapi) or the first data block (ata pio out).
3748 DPRINTK("send cdb\n"); 4017 * During the state transition, interrupt handler shouldn't
3749 WARN_ON(qc->dev->cdb_len < 12); 4018 * be invoked before the data transfer is complete and
4019 * hsm_task_state is changed. Hence, the following locking.
4020 */
4021 if (in_wq)
4022 spin_lock_irqsave(&ap->host_set->lock, flags);
3750 4023
3751 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA || 4024 if (qc->tf.protocol == ATA_PROT_PIO) {
3752 qc->tf.protocol == ATA_PROT_ATAPI_NODATA) { 4025 /* PIO data out protocol.
3753 unsigned long flags; 4026 * send first data block.
4027 */
3754 4028
3755 /* Once we're done issuing command and kicking bmdma, 4029 /* ata_pio_sectors() might change the state
3756 * irq handler takes over. To not lose irq, we need 4030 * to HSM_ST_LAST. so, the state is changed here
3757 * to clear NOINTR flag before sending cdb, but 4031 * before ata_pio_sectors().
3758 * interrupt handler shouldn't be invoked before we're 4032 */
3759 * finished. Hence, the following locking. 4033 ap->hsm_task_state = HSM_ST;
4034 ata_pio_sectors(qc);
4035 ata_altstatus(ap); /* flush */
4036 } else
4037 /* send CDB */
4038 atapi_send_cdb(ap, qc);
4039
4040 if (in_wq)
4041 spin_unlock_irqrestore(&ap->host_set->lock, flags);
4042
4043 /* if polling, ata_pio_task() handles the rest.
4044 * otherwise, interrupt handler takes over from here.
3760 */ 4045 */
3761 spin_lock_irqsave(&ap->host_set->lock, flags); 4046 break;
3762 ap->flags &= ~ATA_FLAG_NOINTR;
3763 ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);
3764 ata_altstatus(ap); /* flush */
3765 4047
3766 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA) 4048 case HSM_ST:
3767 ap->ops->bmdma_start(qc); /* initiate bmdma */ 4049 /* complete command or read/write the data register */
3768 spin_unlock_irqrestore(&ap->host_set->lock, flags); 4050 if (qc->tf.protocol == ATA_PROT_ATAPI) {
3769 } else { 4051 /* ATAPI PIO protocol */
3770 ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1); 4052 if ((status & ATA_DRQ) == 0) {
3771 ata_altstatus(ap); /* flush */ 4053 /* No more data to transfer or device error.
4054 * Device error will be tagged in HSM_ST_LAST.
4055 */
4056 ap->hsm_task_state = HSM_ST_LAST;
4057 goto fsm_start;
4058 }
3772 4059
3773 /* PIO commands are handled by polling */ 4060 /* Device should not ask for data transfer (DRQ=1)
3774 ap->hsm_task_state = HSM_ST; 4061 * when it finds something wrong.
3775 ata_port_queue_task(ap, ata_pio_task, ap, 0); 4062 * We ignore DRQ here and stop the HSM by
3776 } 4063 * changing hsm_task_state to HSM_ST_ERR and
4064 * let the EH abort the command or reset the device.
4065 */
4066 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4067 printk(KERN_WARNING "ata%u: DRQ=1 with device error, dev_stat 0x%X\n",
4068 ap->id, status);
4069 qc->err_mask |= AC_ERR_HSM;
4070 ap->hsm_task_state = HSM_ST_ERR;
4071 goto fsm_start;
4072 }
3777 4073
3778 return; 4074 atapi_pio_bytes(qc);
3779 4075
3780err_out: 4076 if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
3781 ata_poll_qc_complete(qc); 4077 /* bad ireason reported by device */
3782} 4078 goto fsm_start;
3783 4079
3784/** 4080 } else {
3785 * ata_qc_timeout - Handle timeout of queued command 4081 /* ATA PIO protocol */
3786 * @qc: Command that timed out 4082 if (unlikely((status & ATA_DRQ) == 0)) {
3787 * 4083 /* handle BSY=0, DRQ=0 as error */
3788 * Some part of the kernel (currently, only the SCSI layer) 4084 if (likely(status & (ATA_ERR | ATA_DF)))
3789 * has noticed that the active command on port @ap has not 4085 /* device stops HSM for abort/error */
3790 * completed after a specified length of time. Handle this 4086 qc->err_mask |= AC_ERR_DEV;
3791 * condition by disabling DMA (if necessary) and completing 4087 else
3792 * transactions, with error if necessary. 4088 /* HSM violation. Let EH handle this */
3793 * 4089 qc->err_mask |= AC_ERR_HSM;
3794 * This also handles the case of the "lost interrupt", where 4090
3795 * for some reason (possibly hardware bug, possibly driver bug) 4091 ap->hsm_task_state = HSM_ST_ERR;
3796 * an interrupt was not delivered to the driver, even though the 4092 goto fsm_start;
3797 * transaction completed successfully. 4093 }
3798 *
3799 * LOCKING:
3800 * Inherited from SCSI layer (none, can sleep)
3801 */
3802 4094
3803static void ata_qc_timeout(struct ata_queued_cmd *qc) 4095 /* For PIO reads, some devices may ask for
3804 { 4096 * data transfer (DRQ=1) along with ERR=1.
3805 struct ata_port *ap = qc->ap; 4097 * We respect DRQ here and transfer one
3806 struct ata_host_set *host_set = ap->host_set; 4098 * block of junk data before changing the
3807 u8 host_stat = 0, drv_stat; 4099 * hsm_task_state to HSM_ST_ERR.
3808 unsigned long flags; 4100 *
4101 * For PIO writes, ERR=1 DRQ=1 doesn't make
4102 * sense since the data block has been
4103 * transferred to the device.
4104 */
4105 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4106 /* data might be corrupted */
4107 qc->err_mask |= AC_ERR_DEV;
4108
4109 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
4110 ata_pio_sectors(qc);
4111 ata_altstatus(ap);
4112 status = ata_wait_idle(ap);
4113 }
4114
4115 if (status & (ATA_BUSY | ATA_DRQ))
4116 qc->err_mask |= AC_ERR_HSM;
4117
4118 /* ata_pio_sectors() might change the
4119 * state to HSM_ST_LAST. so, the state
4120 * is changed after ata_pio_sectors().
4121 */
4122 ap->hsm_task_state = HSM_ST_ERR;
4123 goto fsm_start;
4124 }
3809 4125
3810 DPRINTK("ENTER\n"); 4126 ata_pio_sectors(qc);
3811 4127
3812 ap->hsm_task_state = HSM_ST_IDLE; 4128 if (ap->hsm_task_state == HSM_ST_LAST &&
4129 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
4130 /* all data read */
4131 ata_altstatus(ap);
4132 status = ata_wait_idle(ap);
4133 goto fsm_start;
4134 }
4135 }
3813 4136
3814 spin_lock_irqsave(&host_set->lock, flags); 4137 ata_altstatus(ap); /* flush */
4138 poll_next = 1;
4139 break;
3815 4140
3816 switch (qc->tf.protocol) { 4141 case HSM_ST_LAST:
4142 if (unlikely(!ata_ok(status))) {
4143 qc->err_mask |= __ac_err_mask(status);
4144 ap->hsm_task_state = HSM_ST_ERR;
4145 goto fsm_start;
4146 }
3817 4147
3818 case ATA_PROT_DMA: 4148 /* no more data to transfer */
3819 case ATA_PROT_ATAPI_DMA: 4149 DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
3820 host_stat = ap->ops->bmdma_status(ap); 4150 ap->id, qc->dev->devno, status);
3821 4151
3822 /* before we do anything else, clear DMA-Start bit */ 4152 WARN_ON(qc->err_mask);
3823 ap->ops->bmdma_stop(qc);
3824 4153
3825 /* fall through */ 4154 ap->hsm_task_state = HSM_ST_IDLE;
3826 4155
3827 default: 4156 /* complete taskfile transaction */
3828 ata_altstatus(ap); 4157 ata_hsm_qc_complete(qc, in_wq);
3829 drv_stat = ata_chk_status(ap);
3830 4158
3831 /* ack bmdma irq events */ 4159 poll_next = 0;
3832 ap->ops->irq_clear(ap); 4160 break;
4161
4162 case HSM_ST_ERR:
4163 /* make sure qc->err_mask is available to
4164 * know what's wrong and recover
4165 */
4166 WARN_ON(qc->err_mask == 0);
3833 4167
3834 printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x host_stat 0x%x\n", 4168 ap->hsm_task_state = HSM_ST_IDLE;
3835 ap->id, qc->tf.command, drv_stat, host_stat);
3836 4169
3837 /* complete taskfile transaction */ 4170 /* complete taskfile transaction */
3838 qc->err_mask |= ac_err_mask(drv_stat); 4171 ata_hsm_qc_complete(qc, in_wq);
4172
4173 poll_next = 0;
3839 break; 4174 break;
4175 default:
4176 poll_next = 0;
4177 BUG();
3840 } 4178 }
3841 4179
3842 spin_unlock_irqrestore(&host_set->lock, flags); 4180 return poll_next;
3843
3844 ata_eh_qc_complete(qc);
3845
3846 DPRINTK("EXIT\n");
3847} 4181}
3848 4182
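A hedged usage sketch of the new entry point above: an interrupt handler drives the HSM by reading the Status register once, which also acknowledges INTRQ, and handing the result to ata_hsm_move() with in_wq == 0 while holding the host_set lock. my_port_intr() is a hypothetical LLDD-side name.

static void my_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	u8 status = ata_chk_status(ap);		/* reading Status clears INTRQ */

	ata_hsm_move(ap, qc, status, 0);	/* 0: IRQ path, not workqueue */
}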
3849/** 4183static void ata_pio_task(void *_data)
3850 * ata_eng_timeout - Handle timeout of queued command
3851 * @ap: Port on which timed-out command is active
3852 *
3853 * Some part of the kernel (currently, only the SCSI layer)
3854 * has noticed that the active command on port @ap has not
3855 * completed after a specified length of time. Handle this
3856 * condition by disabling DMA (if necessary) and completing
3857 * transactions, with error if necessary.
3858 *
3859 * This also handles the case of the "lost interrupt", where
3860 * for some reason (possibly hardware bug, possibly driver bug)
3861 * an interrupt was not delivered to the driver, even though the
3862 * transaction completed successfully.
3863 *
3864 * LOCKING:
3865 * Inherited from SCSI layer (none, can sleep)
3866 */
3867
3868void ata_eng_timeout(struct ata_port *ap)
3869{ 4184{
3870 DPRINTK("ENTER\n"); 4185 struct ata_queued_cmd *qc = _data;
4186 struct ata_port *ap = qc->ap;
4187 u8 status;
4188 int poll_next;
3871 4189
3872 ata_qc_timeout(ata_qc_from_tag(ap, ap->active_tag)); 4190fsm_start:
4191 WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
3873 4192
3874 DPRINTK("EXIT\n"); 4193 /*
4194 * This is purely heuristic. This is a fast path.
4195 * Sometimes when we enter, BSY will be cleared in
4196 * a chk-status or two. If not, the drive is probably seeking
4197 * or something. Snooze for a couple msecs, then
4198 * chk-status again. If still busy, queue delayed work.
4199 */
4200 status = ata_busy_wait(ap, ATA_BUSY, 5);
4201 if (status & ATA_BUSY) {
4202 msleep(2);
4203 status = ata_busy_wait(ap, ATA_BUSY, 10);
4204 if (status & ATA_BUSY) {
4205 ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
4206 return;
4207 }
4208 }
4209
4210 /* move the HSM */
4211 poll_next = ata_hsm_move(ap, qc, status, 1);
4212
4213 /* another command or interrupt handler
4214 * may be running at this point.
4215 */
4216 if (poll_next)
4217 goto fsm_start;
3875} 4218}
3876 4219
3877/** 4220/**
@@ -3888,9 +4231,14 @@ static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
3888 struct ata_queued_cmd *qc = NULL; 4231 struct ata_queued_cmd *qc = NULL;
3889 unsigned int i; 4232 unsigned int i;
3890 4233
3891 for (i = 0; i < ATA_MAX_QUEUE; i++) 4234 /* no command while frozen */
3892 if (!test_and_set_bit(i, &ap->qactive)) { 4235 if (unlikely(ap->flags & ATA_FLAG_FROZEN))
3893 qc = ata_qc_from_tag(ap, i); 4236 return NULL;
4237
4238 /* the last tag is reserved for internal command. */
4239 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
4240 if (!test_and_set_bit(i, &ap->qc_allocated)) {
4241 qc = __ata_qc_from_tag(ap, i);
3894 break; 4242 break;
3895 } 4243 }
3896 4244
@@ -3902,16 +4250,15 @@ static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
3902 4250
3903/** 4251/**
3904 * ata_qc_new_init - Request an available ATA command, and initialize it 4252 * ata_qc_new_init - Request an available ATA command, and initialize it
3905 * @ap: Port associated with device @dev
3906 * @dev: Device from whom we request an available command structure 4253 * @dev: Device from whom we request an available command structure
3907 * 4254 *
3908 * LOCKING: 4255 * LOCKING:
3909 * None. 4256 * None.
3910 */ 4257 */
3911 4258
3912struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap, 4259struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
3913 struct ata_device *dev)
3914{ 4260{
4261 struct ata_port *ap = dev->ap;
3915 struct ata_queued_cmd *qc; 4262 struct ata_queued_cmd *qc;
3916 4263
3917 qc = ata_qc_new(ap); 4264 qc = ata_qc_new(ap);
@@ -3946,36 +4293,153 @@ void ata_qc_free(struct ata_queued_cmd *qc)
3946 qc->flags = 0; 4293 qc->flags = 0;
3947 tag = qc->tag; 4294 tag = qc->tag;
3948 if (likely(ata_tag_valid(tag))) { 4295 if (likely(ata_tag_valid(tag))) {
3949 if (tag == ap->active_tag)
3950 ap->active_tag = ATA_TAG_POISON;
3951 qc->tag = ATA_TAG_POISON; 4296 qc->tag = ATA_TAG_POISON;
3952 clear_bit(tag, &ap->qactive); 4297 clear_bit(tag, &ap->qc_allocated);
3953 } 4298 }
3954} 4299}
3955 4300
3956void __ata_qc_complete(struct ata_queued_cmd *qc) 4301void __ata_qc_complete(struct ata_queued_cmd *qc)
3957{ 4302{
4303 struct ata_port *ap = qc->ap;
4304
3958 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ 4305 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
3959 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE)); 4306 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
3960 4307
3961 if (likely(qc->flags & ATA_QCFLAG_DMAMAP)) 4308 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
3962 ata_sg_clean(qc); 4309 ata_sg_clean(qc);
3963 4310
4311 /* command should be marked inactive atomically with qc completion */
4312 if (qc->tf.protocol == ATA_PROT_NCQ)
4313 ap->sactive &= ~(1 << qc->tag);
4314 else
4315 ap->active_tag = ATA_TAG_POISON;
4316
3964 /* atapi: mark qc as inactive to prevent the interrupt handler 4317 /* atapi: mark qc as inactive to prevent the interrupt handler
3965 * from completing the command twice later, before the error handler 4318 * from completing the command twice later, before the error handler
3966 * is called. (when rc != 0 and atapi request sense is needed) 4319 * is called. (when rc != 0 and atapi request sense is needed)
3967 */ 4320 */
3968 qc->flags &= ~ATA_QCFLAG_ACTIVE; 4321 qc->flags &= ~ATA_QCFLAG_ACTIVE;
4322 ap->qc_active &= ~(1 << qc->tag);
3969 4323
3970 /* call completion callback */ 4324 /* call completion callback */
3971 qc->complete_fn(qc); 4325 qc->complete_fn(qc);
3972} 4326}
3973 4327
4328/**
4329 * ata_qc_complete - Complete an active ATA command
4330 * @qc: Command to complete
4332 *
4333 * Indicate to the mid and upper layers that an ATA
4334 * command has completed, with either an ok or not-ok status.
4335 *
4336 * LOCKING:
4337 * spin_lock_irqsave(host_set lock)
4338 */
4339void ata_qc_complete(struct ata_queued_cmd *qc)
4340{
4341 struct ata_port *ap = qc->ap;
4342
4343 /* XXX: New EH and old EH use different mechanisms to
4344 * synchronize EH with regular execution path.
4345 *
4346 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
4347 * Normal execution path is responsible for not accessing a
4348 * failed qc. libata core enforces the rule by returning NULL
4349 * from ata_qc_from_tag() for failed qcs.
4350 *
4351 * Old EH depends on ata_qc_complete() nullifying completion
4352 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
4353 * not synchronize with interrupt handler. Only PIO task is
4354 * taken care of.
4355 */
4356 if (ap->ops->error_handler) {
4357 WARN_ON(ap->flags & ATA_FLAG_FROZEN);
4358
4359 if (unlikely(qc->err_mask))
4360 qc->flags |= ATA_QCFLAG_FAILED;
4361
4362 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
4363 if (!ata_tag_internal(qc->tag)) {
4364 /* always fill result TF for failed qc */
4365 ap->ops->tf_read(ap, &qc->result_tf);
4366 ata_qc_schedule_eh(qc);
4367 return;
4368 }
4369 }
4370
4371 /* read result TF if requested */
4372 if (qc->flags & ATA_QCFLAG_RESULT_TF)
4373 ap->ops->tf_read(ap, &qc->result_tf);
4374
4375 __ata_qc_complete(qc);
4376 } else {
4377 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
4378 return;
4379
4380 /* read result TF if failed or requested */
4381 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
4382 ap->ops->tf_read(ap, &qc->result_tf);
4383
4384 __ata_qc_complete(qc);
4385 }
4386}
4387
4388/**
4389 * ata_qc_complete_multiple - Complete multiple qcs successfully
4390 * @ap: port in question
4391 * @qc_active: new qc_active mask
4392 * @finish_qc: LLDD callback invoked before completing a qc
4393 *
4394 * Complete in-flight commands. This function is meant to be
4395 * called from low-level driver's interrupt routine to complete
4396 * requests normally. ap->qc_active and @qc_active are compared
4397 * and commands are completed accordingly.
4398 *
4399 * LOCKING:
4400 * spin_lock_irqsave(host_set lock)
4401 *
4402 * RETURNS:
4403 * Number of completed commands on success, -errno otherwise.
4404 */
4405int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
4406 void (*finish_qc)(struct ata_queued_cmd *))
4407{
4408 int nr_done = 0;
4409 u32 done_mask;
4410 int i;
4411
4412 done_mask = ap->qc_active ^ qc_active;
4413
4414 if (unlikely(done_mask & qc_active)) {
4415 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
4416 "(%08x->%08x)\n", ap->qc_active, qc_active);
4417 return -EINVAL;
4418 }
4419
4420 for (i = 0; i < ATA_MAX_QUEUE; i++) {
4421 struct ata_queued_cmd *qc;
4422
4423 if (!(done_mask & (1 << i)))
4424 continue;
4425
4426 if ((qc = ata_qc_from_tag(ap, i))) {
4427 if (finish_qc)
4428 finish_qc(qc);
4429 ata_qc_complete(qc);
4430 nr_done++;
4431 }
4432 }
4433
4434 return nr_done;
4435}
4436
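A worked example of the done_mask arithmetic above: with ap->qc_active = 0b1011 (tags 0, 1 and 3 in flight) and a controller-reported qc_active = 0b0010 (only tag 1 still active), done_mask = 0b1001, so tags 0 and 3 are completed. A nonzero done_mask & qc_active would mean a tag became active that the port never issued, the illegal transition rejected with -EINVAL.

static u32 compute_done_mask(u32 port_active, u32 hw_active)
{
	return port_active ^ hw_active;	/* bits that changed = completions */
}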
3974static inline int ata_should_dma_map(struct ata_queued_cmd *qc) 4437static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
3975{ 4438{
3976 struct ata_port *ap = qc->ap; 4439 struct ata_port *ap = qc->ap;
3977 4440
3978 switch (qc->tf.protocol) { 4441 switch (qc->tf.protocol) {
4442 case ATA_PROT_NCQ:
3979 case ATA_PROT_DMA: 4443 case ATA_PROT_DMA:
3980 case ATA_PROT_ATAPI_DMA: 4444 case ATA_PROT_ATAPI_DMA:
3981 return 1; 4445 return 1;
@@ -4010,8 +4474,22 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
4010{ 4474{
4011 struct ata_port *ap = qc->ap; 4475 struct ata_port *ap = qc->ap;
4012 4476
4013 qc->ap->active_tag = qc->tag; 4477 /* Make sure only one non-NCQ command is outstanding. The
4478 * check is skipped for old EH because it reuses active qc to
4479 * request ATAPI sense.
4480 */
4481 WARN_ON(ap->ops->error_handler && ata_tag_valid(ap->active_tag));
4482
4483 if (qc->tf.protocol == ATA_PROT_NCQ) {
4484 WARN_ON(ap->sactive & (1 << qc->tag));
4485 ap->sactive |= 1 << qc->tag;
4486 } else {
4487 WARN_ON(ap->sactive);
4488 ap->active_tag = qc->tag;
4489 }
4490
4014 qc->flags |= ATA_QCFLAG_ACTIVE; 4491 qc->flags |= ATA_QCFLAG_ACTIVE;
4492 ap->qc_active |= 1 << qc->tag;
4015 4493
4016 if (ata_should_dma_map(qc)) { 4494 if (ata_should_dma_map(qc)) {
4017 if (qc->flags & ATA_QCFLAG_SG) { 4495 if (qc->flags & ATA_QCFLAG_SG) {
@@ -4061,43 +4539,105 @@ unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
4061{ 4539{
4062 struct ata_port *ap = qc->ap; 4540 struct ata_port *ap = qc->ap;
4063 4541
4542 /* Use polling pio if the LLD doesn't handle
4543 * interrupt-driven PIO and the ATAPI CDB interrupt.
4544 */
4545 if (ap->flags & ATA_FLAG_PIO_POLLING) {
4546 switch (qc->tf.protocol) {
4547 case ATA_PROT_PIO:
4548 case ATA_PROT_ATAPI:
4549 case ATA_PROT_ATAPI_NODATA:
4550 qc->tf.flags |= ATA_TFLAG_POLLING;
4551 break;
4552 case ATA_PROT_ATAPI_DMA:
4553 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
4554 /* see ata_check_atapi_dma() */
4555 BUG();
4556 break;
4557 default:
4558 break;
4559 }
4560 }
4561
4562 /* select the device */
4064 ata_dev_select(ap, qc->dev->devno, 1, 0); 4563 ata_dev_select(ap, qc->dev->devno, 1, 0);
4065 4564
4565 /* start the command */
4066 switch (qc->tf.protocol) { 4566 switch (qc->tf.protocol) {
4067 case ATA_PROT_NODATA: 4567 case ATA_PROT_NODATA:
4568 if (qc->tf.flags & ATA_TFLAG_POLLING)
4569 ata_qc_set_polling(qc);
4570
4068 ata_tf_to_host(ap, &qc->tf); 4571 ata_tf_to_host(ap, &qc->tf);
4572 ap->hsm_task_state = HSM_ST_LAST;
4573
4574 if (qc->tf.flags & ATA_TFLAG_POLLING)
4575 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4576
4069 break; 4577 break;
4070 4578
4071 case ATA_PROT_DMA: 4579 case ATA_PROT_DMA:
4580 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
4581
4072 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */ 4582 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4073 ap->ops->bmdma_setup(qc); /* set up bmdma */ 4583 ap->ops->bmdma_setup(qc); /* set up bmdma */
4074 ap->ops->bmdma_start(qc); /* initiate bmdma */ 4584 ap->ops->bmdma_start(qc); /* initiate bmdma */
4585 ap->hsm_task_state = HSM_ST_LAST;
4075 break; 4586 break;
4076 4587
4077 case ATA_PROT_PIO: /* load tf registers, initiate polling pio */ 4588 case ATA_PROT_PIO:
4078 ata_qc_set_polling(qc); 4589 if (qc->tf.flags & ATA_TFLAG_POLLING)
4079 ata_tf_to_host(ap, &qc->tf); 4590 ata_qc_set_polling(qc);
4080 ap->hsm_task_state = HSM_ST;
4081 ata_port_queue_task(ap, ata_pio_task, ap, 0);
4082 break;
4083 4591
4084 case ATA_PROT_ATAPI:
4085 ata_qc_set_polling(qc);
4086 ata_tf_to_host(ap, &qc->tf); 4592 ata_tf_to_host(ap, &qc->tf);
4087 ata_port_queue_task(ap, atapi_packet_task, ap, 0); 4593
4594 if (qc->tf.flags & ATA_TFLAG_WRITE) {
4595 /* PIO data out protocol */
4596 ap->hsm_task_state = HSM_ST_FIRST;
4597 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4598
4599 /* always send first data block using
4600 * the ata_pio_task() codepath.
4601 */
4602 } else {
4603 /* PIO data in protocol */
4604 ap->hsm_task_state = HSM_ST;
4605
4606 if (qc->tf.flags & ATA_TFLAG_POLLING)
4607 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4608
4609 /* if polling, ata_pio_task() handles the rest.
4610 * otherwise, interrupt handler takes over from here.
4611 */
4612 }
4613
4088 break; 4614 break;
4089 4615
4616 case ATA_PROT_ATAPI:
4090 case ATA_PROT_ATAPI_NODATA: 4617 case ATA_PROT_ATAPI_NODATA:
4091 ap->flags |= ATA_FLAG_NOINTR; 4618 if (qc->tf.flags & ATA_TFLAG_POLLING)
4619 ata_qc_set_polling(qc);
4620
4092 ata_tf_to_host(ap, &qc->tf); 4621 ata_tf_to_host(ap, &qc->tf);
4093 ata_port_queue_task(ap, atapi_packet_task, ap, 0); 4622
4623 ap->hsm_task_state = HSM_ST_FIRST;
4624
4625 /* send cdb by polling if no cdb interrupt */
4626 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
4627 (qc->tf.flags & ATA_TFLAG_POLLING))
4628 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4094 break; 4629 break;
4095 4630
4096 case ATA_PROT_ATAPI_DMA: 4631 case ATA_PROT_ATAPI_DMA:
4097 ap->flags |= ATA_FLAG_NOINTR; 4632 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
4633
4098 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */ 4634 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4099 ap->ops->bmdma_setup(qc); /* set up bmdma */ 4635 ap->ops->bmdma_setup(qc); /* set up bmdma */
4100 ata_port_queue_task(ap, atapi_packet_task, ap, 0); 4636 ap->hsm_task_state = HSM_ST_FIRST;
4637
4638 /* send cdb by polling if no cdb interrupt */
4639 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4640 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4101 break; 4641 break;
4102 4642
4103 default: 4643 default:
@@ -4127,52 +4667,66 @@ unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
4127inline unsigned int ata_host_intr (struct ata_port *ap, 4667inline unsigned int ata_host_intr (struct ata_port *ap,
4128 struct ata_queued_cmd *qc) 4668 struct ata_queued_cmd *qc)
4129{ 4669{
4130 u8 status, host_stat; 4670 u8 status, host_stat = 0;
4131 4671
4132 switch (qc->tf.protocol) { 4672 VPRINTK("ata%u: protocol %d task_state %d\n",
4673 ap->id, qc->tf.protocol, ap->hsm_task_state);
4133 4674
4134 case ATA_PROT_DMA: 4675 /* Check whether we are expecting interrupt in this state */
4135 case ATA_PROT_ATAPI_DMA: 4676 switch (ap->hsm_task_state) {
4136 case ATA_PROT_ATAPI: 4677 case HSM_ST_FIRST:
4137 /* check status of DMA engine */ 4678 /* Some pre-ATAPI-4 devices assert INTRQ
4138 host_stat = ap->ops->bmdma_status(ap); 4679 * in this state when ready to receive the CDB.
4139 VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat); 4680 */
4140
4141 /* if it's not our irq... */
4142 if (!(host_stat & ATA_DMA_INTR))
4143 goto idle_irq;
4144
4145 /* before we do anything else, clear DMA-Start bit */
4146 ap->ops->bmdma_stop(qc);
4147
4148 /* fall through */
4149
4150 case ATA_PROT_ATAPI_NODATA:
4151 case ATA_PROT_NODATA:
4152 /* check altstatus */
4153 status = ata_altstatus(ap);
4154 if (status & ATA_BUSY)
4155 goto idle_irq;
4156 4681
4157 /* check main status, clearing INTRQ */ 4682 /* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
4158 status = ata_chk_status(ap); 4683 * The flag is set only for ATAPI devices.
4159 if (unlikely(status & ATA_BUSY)) 4684 * No need to check is_atapi_taskfile(&qc->tf) again.
4685 */
4686 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4160 goto idle_irq; 4687 goto idle_irq;
4161 DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
4162 ap->id, qc->tf.protocol, status);
4163
4164 /* ack bmdma irq events */
4165 ap->ops->irq_clear(ap);
4166
4167 /* complete taskfile transaction */
4168 qc->err_mask |= ac_err_mask(status);
4169 ata_qc_complete(qc);
4170 break; 4688 break;
4171 4689 case HSM_ST_LAST:
4690 if (qc->tf.protocol == ATA_PROT_DMA ||
4691 qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
4692 /* check status of DMA engine */
4693 host_stat = ap->ops->bmdma_status(ap);
4694 VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
4695
4696 /* if it's not our irq... */
4697 if (!(host_stat & ATA_DMA_INTR))
4698 goto idle_irq;
4699
4700 /* before we do anything else, clear DMA-Start bit */
4701 ap->ops->bmdma_stop(qc);
4702
4703 if (unlikely(host_stat & ATA_DMA_ERR)) {
4704 /* error when transferring data to/from memory */
4705 qc->err_mask |= AC_ERR_HOST_BUS;
4706 ap->hsm_task_state = HSM_ST_ERR;
4707 }
4708 }
4709 break;
4710 case HSM_ST:
4711 break;
4172 default: 4712 default:
4173 goto idle_irq; 4713 goto idle_irq;
4174 } 4714 }
4175 4715
4716 /* check altstatus */
4717 status = ata_altstatus(ap);
4718 if (status & ATA_BUSY)
4719 goto idle_irq;
4720
4721 /* check main status, clearing INTRQ */
4722 status = ata_chk_status(ap);
4723 if (unlikely(status & ATA_BUSY))
4724 goto idle_irq;
4725
4726 /* ack bmdma irq events */
4727 ap->ops->irq_clear(ap);
4728
4729 ata_hsm_move(ap, qc, status, 0);
4176 return 1; /* irq handled */ 4730 return 1; /* irq handled */
4177 4731
4178idle_irq: 4732idle_irq:
@@ -4181,7 +4735,7 @@ idle_irq:
4181#ifdef ATA_IRQ_TRAP 4735#ifdef ATA_IRQ_TRAP
4182 if ((ap->stats.idle_irq % 1000) == 0) { 4736 if ((ap->stats.idle_irq % 1000) == 0) {
4183 ata_irq_ack(ap, 0); /* debug trap */ 4737 ata_irq_ack(ap, 0); /* debug trap */
4184 printk(KERN_WARNING "ata%d: irq trap\n", ap->id); 4738 ata_port_printk(ap, KERN_WARNING, "irq trap\n");
4185 return 1; 4739 return 1;
4186 } 4740 }
4187#endif 4741#endif
@@ -4219,11 +4773,11 @@ irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
4219 4773
4220 ap = host_set->ports[i]; 4774 ap = host_set->ports[i];
4221 if (ap && 4775 if (ap &&
4222 !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) { 4776 !(ap->flags & ATA_FLAG_DISABLED)) {
4223 struct ata_queued_cmd *qc; 4777 struct ata_queued_cmd *qc;
4224 4778
4225 qc = ata_qc_from_tag(ap, ap->active_tag); 4779 qc = ata_qc_from_tag(ap, ap->active_tag);
4226 if (qc && (!(qc->tf.ctl & ATA_NIEN)) && 4780 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
4227 (qc->flags & ATA_QCFLAG_ACTIVE)) 4781 (qc->flags & ATA_QCFLAG_ACTIVE))
4228 handled |= ata_host_intr(ap, qc); 4782 handled |= ata_host_intr(ap, qc);
4229 } 4783 }
@@ -4234,32 +4788,168 @@ irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
4234 return IRQ_RETVAL(handled); 4788 return IRQ_RETVAL(handled);
4235} 4789}
4236 4790
4791/**
4792 * sata_scr_valid - test whether SCRs are accessible
4793 * @ap: ATA port to test SCR accessibility for
4794 *
4795 * Test whether SCRs are accessible for @ap.
4796 *
4797 * LOCKING:
4798 * None.
4799 *
4800 * RETURNS:
4801 * 1 if SCRs are accessible, 0 otherwise.
4802 */
4803int sata_scr_valid(struct ata_port *ap)
4804{
4805 return ap->cbl == ATA_CBL_SATA && ap->ops->scr_read;
4806}
4807
4808/**
4809 * sata_scr_read - read SCR register of the specified port
4810 * @ap: ATA port to read SCR for
4811 * @reg: SCR to read
4812 * @val: Place to store read value
4813 *
4814 * Read SCR register @reg of @ap into *@val. This function is
4815 * guaranteed to succeed if the cable type of the port is SATA
4816 * and the port implements ->scr_read.
4817 *
4818 * LOCKING:
4819 * None.
4820 *
4821 * RETURNS:
4822 * 0 on success, negative errno on failure.
4823 */
4824int sata_scr_read(struct ata_port *ap, int reg, u32 *val)
4825{
4826 if (sata_scr_valid(ap)) {
4827 *val = ap->ops->scr_read(ap, reg);
4828 return 0;
4829 }
4830 return -EOPNOTSUPP;
4831}
4832
4833/**
4834 * sata_scr_write - write SCR register of the specified port
4835 * @ap: ATA port to write SCR for
4836 * @reg: SCR to write
4837 * @val: value to write
4838 *
4839 * Write @val to SCR register @reg of @ap. This function is
4840 * guaranteed to succeed if the cable type of the port is SATA
4841 * and the port implements ->scr_write.
4842 *
4843 * LOCKING:
4844 * None.
4845 *
4846 * RETURNS:
4847 * 0 on success, negative errno on failure.
4848 */
4849int sata_scr_write(struct ata_port *ap, int reg, u32 val)
4850{
4851 if (sata_scr_valid(ap)) {
4852 ap->ops->scr_write(ap, reg, val);
4853 return 0;
4854 }
4855 return -EOPNOTSUPP;
4856}
4857
4858/**
4859 * sata_scr_write_flush - write SCR register of the specified port and flush
4860 * @ap: ATA port to write SCR for
4861 * @reg: SCR to write
4862 * @val: value to write
4863 *
4864 * This function is identical to sata_scr_write() except that this
4865 * function performs a flush after writing to the register.
4866 *
4867 * LOCKING:
4868 * None.
4869 *
4870 * RETURNS:
4871 * 0 on success, negative errno on failure.
4872 */
4873int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val)
4874{
4875 if (sata_scr_valid(ap)) {
4876 ap->ops->scr_write(ap, reg, val);
4877 ap->ops->scr_read(ap, reg);
4878 return 0;
4879 }
4880 return -EOPNOTSUPP;
4881}
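As a usage sketch for the SCR helpers (my_limit_to_gen1() is a hypothetical example; the SControl bit layout is per the SATA spec): read SCR_CONTROL, clamp the SPD field to Gen1, and write it back with a flush so the change reaches the PHY before the caller proceeds.

	static int my_limit_to_gen1(struct ata_port *ap)	/* hypothetical */
	{
		u32 scontrol;
		int rc;

		rc = sata_scr_read(ap, SCR_CONTROL, &scontrol);
		if (rc)			/* -EOPNOTSUPP: not SATA or no ->scr_read */
			return rc;

		scontrol = (scontrol & ~0xf0) | 0x10;	/* SPD field = Gen1 only */
		return sata_scr_write_flush(ap, SCR_CONTROL, scontrol);
	}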
4882
4883/**
4884 * ata_port_online - test whether the given port is online
4885 * @ap: ATA port to test
4886 *
4887 * Test whether @ap is online. Note that this function returns 0
4888 * if the online status of @ap cannot be obtained, so
4889 * ata_port_online(ap) != !ata_port_offline(ap).
4890 *
4891 * LOCKING:
4892 * None.
4893 *
4894 * RETURNS:
4895 * 1 if the port online status is available and online.
4896 */
4897int ata_port_online(struct ata_port *ap)
4898{
4899 u32 sstatus;
4900
4901 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) == 0x3)
4902 return 1;
4903 return 0;
4904}
4905
4906/**
4907 * ata_port_offline - test whether the given port is offline
4908 * @ap: ATA port to test
4909 *
4910 * Test whether @ap is offline. Note that this function returns
4911 * 0 if the offline status of @ap cannot be obtained, so
4912 * ata_port_online(ap) != !ata_port_offline(ap).
4913 *
4914 * LOCKING:
4915 * None.
4916 *
4917 * RETURNS:
4918 * 1 if the port offline status is available and offline.
4919 */
4920int ata_port_offline(struct ata_port *ap)
4921{
4922 u32 sstatus;
4923
4924 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) != 0x3)
4925 return 1;
4926 return 0;
4927}
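Because both helpers return 0 when SStatus is unreadable, !ata_port_online() is a weaker statement than ata_port_offline(). A hedged sketch of the resulting three-way distinction (my_report_link() is illustrative only):

	static void my_report_link(struct ata_port *ap)	/* illustrative only */
	{
		if (ata_port_online(ap))
			ata_port_printk(ap, KERN_INFO, "link up (DET == 0x3)\n");
		else if (ata_port_offline(ap))
			ata_port_printk(ap, KERN_INFO, "link down\n");
		else
			ata_port_printk(ap, KERN_INFO, "link state unknown (no SCRs)\n");
	}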
4237 4928
4238/* 4929/*
4239 * Execute a 'simple' command, that only consists of the opcode 'cmd' itself, 4930 * Execute a 'simple' command, that only consists of the opcode 'cmd' itself,
4240 * without filling any other registers 4931 * without filling any other registers
4241 */ 4932 */
4242static int ata_do_simple_cmd(struct ata_port *ap, struct ata_device *dev, 4933static int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
4243 u8 cmd)
4244{ 4934{
4245 struct ata_taskfile tf; 4935 struct ata_taskfile tf;
4246 int err; 4936 int err;
4247 4937
4248 ata_tf_init(ap, &tf, dev->devno); 4938 ata_tf_init(dev, &tf);
4249 4939
4250 tf.command = cmd; 4940 tf.command = cmd;
4251 tf.flags |= ATA_TFLAG_DEVICE; 4941 tf.flags |= ATA_TFLAG_DEVICE;
4252 tf.protocol = ATA_PROT_NODATA; 4942 tf.protocol = ATA_PROT_NODATA;
4253 4943
4254 err = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0); 4944 err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
4255 if (err) 4945 if (err)
4256 printk(KERN_ERR "%s: ata command failed: %d\n", 4946 ata_dev_printk(dev, KERN_ERR, "%s: ata command failed: %d\n",
4257 __FUNCTION__, err); 4947 __FUNCTION__, err);
4258 4948
4259 return err; 4949 return err;
4260} 4950}
4261 4951
4262static int ata_flush_cache(struct ata_port *ap, struct ata_device *dev) 4952static int ata_flush_cache(struct ata_device *dev)
4263{ 4953{
4264 u8 cmd; 4954 u8 cmd;
4265 4955
@@ -4271,22 +4961,21 @@ static int ata_flush_cache(struct ata_port *ap, struct ata_device *dev)
4271 else 4961 else
4272 cmd = ATA_CMD_FLUSH; 4962 cmd = ATA_CMD_FLUSH;
4273 4963
4274 return ata_do_simple_cmd(ap, dev, cmd); 4964 return ata_do_simple_cmd(dev, cmd);
4275} 4965}
4276 4966
4277static int ata_standby_drive(struct ata_port *ap, struct ata_device *dev) 4967static int ata_standby_drive(struct ata_device *dev)
4278{ 4968{
4279 return ata_do_simple_cmd(ap, dev, ATA_CMD_STANDBYNOW1); 4969 return ata_do_simple_cmd(dev, ATA_CMD_STANDBYNOW1);
4280} 4970}
4281 4971
4282static int ata_start_drive(struct ata_port *ap, struct ata_device *dev) 4972static int ata_start_drive(struct ata_device *dev)
4283{ 4973{
4284 return ata_do_simple_cmd(ap, dev, ATA_CMD_IDLEIMMEDIATE); 4974 return ata_do_simple_cmd(dev, ATA_CMD_IDLEIMMEDIATE);
4285} 4975}
4286 4976
4287/** 4977/**
4288 * ata_device_resume - wake up a previously suspended device 4978 * ata_device_resume - wake up a previously suspended device
4289 * @ap: port the device is connected to
4290 * @dev: the device to resume 4979 * @dev: the device to resume
4291 * 4980 *
4292 * Kick the drive back into action, by sending it an idle immediate 4981 * Kick the drive back into action, by sending it an idle immediate
@@ -4294,39 +4983,46 @@ static int ata_start_drive(struct ata_port *ap, struct ata_device *dev)
4294 * and host. 4983 * and host.
4295 * 4984 *
4296 */ 4985 */
4297int ata_device_resume(struct ata_port *ap, struct ata_device *dev) 4986int ata_device_resume(struct ata_device *dev)
4298{ 4987{
4988 struct ata_port *ap = dev->ap;
4989
4299 if (ap->flags & ATA_FLAG_SUSPENDED) { 4990 if (ap->flags & ATA_FLAG_SUSPENDED) {
4991 struct ata_device *failed_dev;
4992
4300 ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 200000); 4993 ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 200000);
4994
4301 ap->flags &= ~ATA_FLAG_SUSPENDED; 4995 ap->flags &= ~ATA_FLAG_SUSPENDED;
4302 ata_set_mode(ap); 4996 while (ata_set_mode(ap, &failed_dev))
4997 ata_dev_disable(failed_dev);
4303 } 4998 }
4304 if (!ata_dev_present(dev)) 4999 if (!ata_dev_enabled(dev))
4305 return 0; 5000 return 0;
4306 if (dev->class == ATA_DEV_ATA) 5001 if (dev->class == ATA_DEV_ATA)
4307 ata_start_drive(ap, dev); 5002 ata_start_drive(dev);
4308 5003
4309 return 0; 5004 return 0;
4310} 5005}
4311 5006
4312/** 5007/**
4313 * ata_device_suspend - prepare a device for suspend 5008 * ata_device_suspend - prepare a device for suspend
4314 * @ap: port the device is connected to
4315 * @dev: the device to suspend 5009 * @dev: the device to suspend
4316 * @state: target power management state 5010 * @state: target power management state
4317 * 5011 *
4318 * Flush the cache on the drive, if appropriate, then issue a 5012 * Flush the cache on the drive, if appropriate, then issue a
4319 * standbynow command. 5013 * standbynow command.
4320 */ 5014 */
4321int ata_device_suspend(struct ata_port *ap, struct ata_device *dev, pm_message_t state) 5015int ata_device_suspend(struct ata_device *dev, pm_message_t state)
4322{ 5016{
4323 if (!ata_dev_present(dev)) 5017 struct ata_port *ap = dev->ap;
5018
5019 if (!ata_dev_enabled(dev))
4324 return 0; 5020 return 0;
4325 if (dev->class == ATA_DEV_ATA) 5021 if (dev->class == ATA_DEV_ATA)
4326 ata_flush_cache(ap, dev); 5022 ata_flush_cache(dev);
4327 5023
4328 if (state.event != PM_EVENT_FREEZE) 5024 if (state.event != PM_EVENT_FREEZE)
4329 ata_standby_drive(ap, dev); 5025 ata_standby_drive(dev);
4330 ap->flags |= ATA_FLAG_SUSPENDED; 5026 ap->flags |= ATA_FLAG_SUSPENDED;
4331 return 0; 5027 return 0;
4332} 5028}
@@ -4414,6 +5110,38 @@ static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister)
4414} 5110}
4415 5111
4416/** 5112/**
5113 * ata_dev_init - Initialize an ata_device structure
5114 * @dev: Device structure to initialize
5115 *
5116 * Initialize @dev in preparation for probing.
5117 *
5118 * LOCKING:
5119 * Inherited from caller.
5120 */
5121void ata_dev_init(struct ata_device *dev)
5122{
5123 struct ata_port *ap = dev->ap;
5124 unsigned long flags;
5125
5126 /* SATA spd limit is bound to the first device */
5127 ap->sata_spd_limit = ap->hw_sata_spd_limit;
5128
5129 /* High bits of dev->flags are used to record warm plug
5130 * requests which occur asynchronously. Synchronize using
5131 * host_set lock.
5132 */
5133 spin_lock_irqsave(&ap->host_set->lock, flags);
5134 dev->flags &= ~ATA_DFLAG_INIT_MASK;
5135 spin_unlock_irqrestore(&ap->host_set->lock, flags);
5136
5137 memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
5138 sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
5139 dev->pio_mask = UINT_MAX;
5140 dev->mwdma_mask = UINT_MAX;
5141 dev->udma_mask = UINT_MAX;
5142}
5143
5144/**
4417 * ata_host_init - Initialize an ata_port structure 5145 * ata_host_init - Initialize an ata_port structure
4418 * @ap: Structure to initialize 5146 * @ap: Structure to initialize
4419 * @host: associated SCSI mid-layer structure 5147 * @host: associated SCSI mid-layer structure
@@ -4427,7 +5155,6 @@ static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister)
4427 * LOCKING: 5155 * LOCKING:
4428 * Inherited from caller. 5156 * Inherited from caller.
4429 */ 5157 */
4430
4431static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host, 5158static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
4432 struct ata_host_set *host_set, 5159 struct ata_host_set *host_set,
4433 const struct ata_probe_ent *ent, unsigned int port_no) 5160 const struct ata_probe_ent *ent, unsigned int port_no)
@@ -4440,7 +5167,7 @@ static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
4440 host->unique_id = ata_unique_id++; 5167 host->unique_id = ata_unique_id++;
4441 host->max_cmd_len = 12; 5168 host->max_cmd_len = 12;
4442 5169
4443 ap->flags = ATA_FLAG_PORT_DISABLED; 5170 ap->flags = ATA_FLAG_DISABLED;
4444 ap->id = host->unique_id; 5171 ap->id = host->unique_id;
4445 ap->host = host; 5172 ap->host = host;
4446 ap->ctl = ATA_DEVCTL_OBS; 5173 ap->ctl = ATA_DEVCTL_OBS;
@@ -4454,19 +5181,35 @@ static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
4454 ap->udma_mask = ent->udma_mask; 5181 ap->udma_mask = ent->udma_mask;
4455 ap->flags |= ent->host_flags; 5182 ap->flags |= ent->host_flags;
4456 ap->ops = ent->port_ops; 5183 ap->ops = ent->port_ops;
4457 ap->cbl = ATA_CBL_NONE; 5184 ap->hw_sata_spd_limit = UINT_MAX;
4458 ap->active_tag = ATA_TAG_POISON; 5185 ap->active_tag = ATA_TAG_POISON;
4459 ap->last_ctl = 0xFF; 5186 ap->last_ctl = 0xFF;
4460 5187
5188#if defined(ATA_VERBOSE_DEBUG)
5189 /* turn on all debugging levels */
5190 ap->msg_enable = 0x00FF;
5191#elif defined(ATA_DEBUG)
5192 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
5193#else
5194 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR;
5195#endif
5196
4461 INIT_WORK(&ap->port_task, NULL, NULL); 5197 INIT_WORK(&ap->port_task, NULL, NULL);
5198 INIT_WORK(&ap->hotplug_task, ata_scsi_hotplug, ap);
5199 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan, ap);
4462 INIT_LIST_HEAD(&ap->eh_done_q); 5200 INIT_LIST_HEAD(&ap->eh_done_q);
5201 init_waitqueue_head(&ap->eh_wait_q);
5202
5203 /* set cable type */
5204 ap->cbl = ATA_CBL_NONE;
5205 if (ap->flags & ATA_FLAG_SATA)
5206 ap->cbl = ATA_CBL_SATA;
4463 5207
4464 for (i = 0; i < ATA_MAX_DEVICES; i++) { 5208 for (i = 0; i < ATA_MAX_DEVICES; i++) {
4465 struct ata_device *dev = &ap->device[i]; 5209 struct ata_device *dev = &ap->device[i];
5210 dev->ap = ap;
4466 dev->devno = i; 5211 dev->devno = i;
4467 dev->pio_mask = UINT_MAX; 5212 ata_dev_init(dev);
4468 dev->mwdma_mask = UINT_MAX;
4469 dev->udma_mask = UINT_MAX;
4470 } 5213 }
4471 5214
4472#ifdef ATA_IRQ_TRAP 5215#ifdef ATA_IRQ_TRAP
@@ -4502,7 +5245,7 @@ static struct ata_port * ata_host_add(const struct ata_probe_ent *ent,
4502 5245
4503 DPRINTK("ENTER\n"); 5246 DPRINTK("ENTER\n");
4504 5247
4505 if (!ent->port_ops->probe_reset && 5248 if (!ent->port_ops->error_handler &&
4506 !(ent->host_flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST))) { 5249 !(ent->host_flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST))) {
4507 printk(KERN_ERR "ata%u: no reset mechanism available\n", 5250 printk(KERN_ERR "ata%u: no reset mechanism available\n",
4508 port_no); 5251 port_no);
@@ -4515,7 +5258,7 @@ static struct ata_port * ata_host_add(const struct ata_probe_ent *ent,
4515 5258
4516 host->transportt = &ata_scsi_transport_template; 5259 host->transportt = &ata_scsi_transport_template;
4517 5260
4518 ap = (struct ata_port *) &host->hostdata[0]; 5261 ap = ata_shost_to_port(host);
4519 5262
4520 ata_host_init(ap, host, host_set, ent, port_no); 5263 ata_host_init(ap, host, host_set, ent, port_no);
4521 5264
@@ -4548,12 +5291,12 @@ err_out:
4548 * RETURNS: 5291 * RETURNS:
4549 * Number of ports registered. Zero on error (no ports registered). 5292 * Number of ports registered. Zero on error (no ports registered).
4550 */ 5293 */
4551
4552int ata_device_add(const struct ata_probe_ent *ent) 5294int ata_device_add(const struct ata_probe_ent *ent)
4553{ 5295{
4554 unsigned int count = 0, i; 5296 unsigned int count = 0, i;
4555 struct device *dev = ent->dev; 5297 struct device *dev = ent->dev;
4556 struct ata_host_set *host_set; 5298 struct ata_host_set *host_set;
5299 int rc;
4557 5300
4558 DPRINTK("ENTER\n"); 5301 DPRINTK("ENTER\n");
4559 /* alloc a container for our list of ATA ports (buses) */ 5302 /* alloc a container for our list of ATA ports (buses) */
@@ -4586,18 +5329,18 @@ int ata_device_add(const struct ata_probe_ent *ent)
4586 (ap->pio_mask << ATA_SHIFT_PIO); 5329 (ap->pio_mask << ATA_SHIFT_PIO);
4587 5330
4588 /* print per-port info to dmesg */ 5331 /* print per-port info to dmesg */
4589 printk(KERN_INFO "ata%u: %cATA max %s cmd 0x%lX ctl 0x%lX " 5332 ata_port_printk(ap, KERN_INFO, "%cATA max %s cmd 0x%lX "
4590 "bmdma 0x%lX irq %lu\n", 5333 "ctl 0x%lX bmdma 0x%lX irq %lu\n",
4591 ap->id, 5334 ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
4592 ap->flags & ATA_FLAG_SATA ? 'S' : 'P', 5335 ata_mode_string(xfer_mode_mask),
4593 ata_mode_string(xfer_mode_mask), 5336 ap->ioaddr.cmd_addr,
4594 ap->ioaddr.cmd_addr, 5337 ap->ioaddr.ctl_addr,
4595 ap->ioaddr.ctl_addr, 5338 ap->ioaddr.bmdma_addr,
4596 ap->ioaddr.bmdma_addr, 5339 ent->irq);
4597 ent->irq);
4598 5340
4599 ata_chk_status(ap); 5341 ata_chk_status(ap);
4600 host_set->ops->irq_clear(ap); 5342 host_set->ops->irq_clear(ap);
5343 ata_eh_freeze_port(ap); /* freeze port before requesting IRQ */
4601 count++; 5344 count++;
4602 } 5345 }
4603 5346
@@ -4605,41 +5348,72 @@ int ata_device_add(const struct ata_probe_ent *ent)
4605 goto err_free_ret; 5348 goto err_free_ret;
4606 5349
4607 /* obtain irq, that is shared between channels */ 5350 /* obtain irq, that is shared between channels */
4608 if (request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags, 5351 rc = request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
4609 DRV_NAME, host_set)) 5352 DRV_NAME, host_set);
5353 if (rc) {
5354 dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n",
5355 ent->irq, rc);
4610 goto err_out; 5356 goto err_out;
5357 }
4611 5358
4612 /* perform each probe synchronously */ 5359 /* perform each probe synchronously */
4613 DPRINTK("probe begin\n"); 5360 DPRINTK("probe begin\n");
4614 for (i = 0; i < count; i++) { 5361 for (i = 0; i < count; i++) {
4615 struct ata_port *ap; 5362 struct ata_port *ap;
5363 u32 scontrol;
4616 int rc; 5364 int rc;
4617 5365
4618 ap = host_set->ports[i]; 5366 ap = host_set->ports[i];
4619 5367
4620 DPRINTK("ata%u: bus probe begin\n", ap->id); 5368 /* init sata_spd_limit to the current value */
4621 rc = ata_bus_probe(ap); 5369 if (sata_scr_read(ap, SCR_CONTROL, &scontrol) == 0) {
4622 DPRINTK("ata%u: bus probe end\n", ap->id); 5370 int spd = (scontrol >> 4) & 0xf;
4623 5371 ap->hw_sata_spd_limit &= (1 << spd) - 1;
4624 if (rc) {
4625 /* FIXME: do something useful here?
4626 * Current libata behavior will
4627 * tear down everything when
4628 * the module is removed
4629 * or the h/w is unplugged.
4630 */
4631 } 5372 }
5373 ap->sata_spd_limit = ap->hw_sata_spd_limit;
4632 5374
4633 rc = scsi_add_host(ap->host, dev); 5375 rc = scsi_add_host(ap->host, dev);
4634 if (rc) { 5376 if (rc) {
4635 printk(KERN_ERR "ata%u: scsi_add_host failed\n", 5377 ata_port_printk(ap, KERN_ERR, "scsi_add_host failed\n");
4636 ap->id);
4637 /* FIXME: do something useful here */ 5378 /* FIXME: do something useful here */
4638 /* FIXME: handle unconditional calls to 5379 /* FIXME: handle unconditional calls to
4639 * scsi_scan_host and ata_host_remove, below, 5380 * scsi_scan_host and ata_host_remove, below,
4640 * at the very least 5381 * at the very least
4641 */ 5382 */
4642 } 5383 }
5384
5385 if (ap->ops->error_handler) {
5386 unsigned long flags;
5387
5388 ata_port_probe(ap);
5389
5390 /* kick EH for boot probing */
5391 spin_lock_irqsave(&ap->host_set->lock, flags);
5392
5393 ap->eh_info.probe_mask = (1 << ATA_MAX_DEVICES) - 1;
5394 ap->eh_info.action |= ATA_EH_SOFTRESET;
5395
5396 ap->flags |= ATA_FLAG_LOADING;
5397 ata_port_schedule_eh(ap);
5398
5399 spin_unlock_irqrestore(&ap->host_set->lock, flags);
5400
5401 /* wait for EH to finish */
5402 ata_port_wait_eh(ap);
5403 } else {
5404 DPRINTK("ata%u: bus probe begin\n", ap->id);
5405 rc = ata_bus_probe(ap);
5406 DPRINTK("ata%u: bus probe end\n", ap->id);
5407
5408 if (rc) {
5409 /* FIXME: do something useful here?
5410 * Current libata behavior will
5411 * tear down everything when
5412 * the module is removed
5413 * or the h/w is unplugged.
5414 */
5415 }
5416 }
4643 } 5417 }
4644 5418
4645 /* probes are done, now scan each port's disk(s) */ 5419 /* probes are done, now scan each port's disk(s) */
@@ -4667,6 +5441,63 @@ err_free_ret:
4667} 5441}
4668 5442
4669/** 5443/**
5444 * ata_port_detach - Detach ATA port in preparation for device removal
5445 * @ap: ATA port to be detached
5446 *
5447 * Detach all ATA devices and the associated SCSI devices of @ap;
5448 * then, remove the associated SCSI host. @ap is guaranteed to
5449 * be quiescent on return from this function.
5450 *
5451 * LOCKING:
5452 * Kernel thread context (may sleep).
5453 */
5454void ata_port_detach(struct ata_port *ap)
5455{
5456 unsigned long flags;
5457 int i;
5458
5459 if (!ap->ops->error_handler)
5460 return;
5461
5462 /* tell EH we're leaving & flush EH */
5463 spin_lock_irqsave(&ap->host_set->lock, flags);
5464 ap->flags |= ATA_FLAG_UNLOADING;
5465 spin_unlock_irqrestore(&ap->host_set->lock, flags);
5466
5467 ata_port_wait_eh(ap);
5468
5469 /* EH is now guaranteed to see UNLOADING, so no new device
5470 * will be attached. Disable all existing devices.
5471 */
5472 spin_lock_irqsave(&ap->host_set->lock, flags);
5473
5474 for (i = 0; i < ATA_MAX_DEVICES; i++)
5475 ata_dev_disable(&ap->device[i]);
5476
5477 spin_unlock_irqrestore(&ap->host_set->lock, flags);
5478
5479 /* Final freeze & EH. All in-flight commands are aborted. EH
5480 * will be skipped and retries will be terminated with a bad
5481 * target.
5482 */
5483 spin_lock_irqsave(&ap->host_set->lock, flags);
5484 ata_port_freeze(ap); /* won't be thawed */
5485 spin_unlock_irqrestore(&ap->host_set->lock, flags);
5486
5487 ata_port_wait_eh(ap);
5488
5489 /* Flush hotplug task. The sequence is similar to
5490 * ata_port_flush_task().
5491 */
5492 flush_workqueue(ata_aux_wq);
5493 cancel_delayed_work(&ap->hotplug_task);
5494 flush_workqueue(ata_aux_wq);
5495
5496 /* remove the associated SCSI host */
5497 scsi_remove_host(ap->host);
5498}
5499
5500/**
4670 * ata_host_set_remove - PCI layer callback for device removal 5501 * ata_host_set_remove - PCI layer callback for device removal
4671 * @host_set: ATA host set that was removed 5502 * @host_set: ATA host set that was removed
4672 * 5503 *
@@ -4679,18 +5510,15 @@ err_free_ret:
4679 5510
4680void ata_host_set_remove(struct ata_host_set *host_set) 5511void ata_host_set_remove(struct ata_host_set *host_set)
4681{ 5512{
4682 struct ata_port *ap;
4683 unsigned int i; 5513 unsigned int i;
4684 5514
4685 for (i = 0; i < host_set->n_ports; i++) { 5515 for (i = 0; i < host_set->n_ports; i++)
4686 ap = host_set->ports[i]; 5516 ata_port_detach(host_set->ports[i]);
4687 scsi_remove_host(ap->host);
4688 }
4689 5517
4690 free_irq(host_set->irq, host_set); 5518 free_irq(host_set->irq, host_set);
4691 5519
4692 for (i = 0; i < host_set->n_ports; i++) { 5520 for (i = 0; i < host_set->n_ports; i++) {
4693 ap = host_set->ports[i]; 5521 struct ata_port *ap = host_set->ports[i];
4694 5522
4695 ata_scsi_release(ap->host); 5523 ata_scsi_release(ap->host);
4696 5524
@@ -4728,15 +5556,12 @@ void ata_host_set_remove(struct ata_host_set *host_set)
4728 5556
4729int ata_scsi_release(struct Scsi_Host *host) 5557int ata_scsi_release(struct Scsi_Host *host)
4730{ 5558{
4731 struct ata_port *ap = (struct ata_port *) &host->hostdata[0]; 5559 struct ata_port *ap = ata_shost_to_port(host);
4732 int i;
4733 5560
4734 DPRINTK("ENTER\n"); 5561 DPRINTK("ENTER\n");
4735 5562
4736 ap->ops->port_disable(ap); 5563 ap->ops->port_disable(ap);
4737 ata_host_remove(ap, 0); 5564 ata_host_remove(ap, 0);
4738 for (i = 0; i < ATA_MAX_DEVICES; i++)
4739 kfree(ap->device[i].id);
4740 5565
4741 DPRINTK("EXIT\n"); 5566 DPRINTK("EXIT\n");
4742 return 1; 5567 return 1;
@@ -4796,8 +5621,12 @@ void ata_pci_remove_one (struct pci_dev *pdev)
4796{ 5621{
4797 struct device *dev = pci_dev_to_dev(pdev); 5622 struct device *dev = pci_dev_to_dev(pdev);
4798 struct ata_host_set *host_set = dev_get_drvdata(dev); 5623 struct ata_host_set *host_set = dev_get_drvdata(dev);
5624 struct ata_host_set *host_set2 = host_set->next;
4799 5625
4800 ata_host_set_remove(host_set); 5626 ata_host_set_remove(host_set);
5627 if (host_set2)
5628 ata_host_set_remove(host_set2);
5629
4801 pci_release_regions(pdev); 5630 pci_release_regions(pdev);
4802 pci_disable_device(pdev); 5631 pci_disable_device(pdev);
4803 dev_set_drvdata(dev, NULL); 5632 dev_set_drvdata(dev, NULL);
@@ -4862,6 +5691,12 @@ static int __init ata_init(void)
4862 if (!ata_wq) 5691 if (!ata_wq)
4863 return -ENOMEM; 5692 return -ENOMEM;
4864 5693
5694 ata_aux_wq = create_singlethread_workqueue("ata_aux");
5695 if (!ata_aux_wq) {
5696 destroy_workqueue(ata_wq);
5697 return -ENOMEM;
5698 }
5699
4865 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n"); 5700 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
4866 return 0; 5701 return 0;
4867} 5702}
@@ -4869,6 +5704,7 @@ static int __init ata_init(void)
4869static void __exit ata_exit(void) 5704static void __exit ata_exit(void)
4870{ 5705{
4871 destroy_workqueue(ata_wq); 5706 destroy_workqueue(ata_wq);
5707 destroy_workqueue(ata_aux_wq);
4872} 5708}
4873 5709
4874module_init(ata_init); 5710module_init(ata_init);
@@ -4895,6 +5731,52 @@ int ata_ratelimit(void)
4895 return rc; 5731 return rc;
4896} 5732}
4897 5733
5734/**
5735 * ata_wait_register - wait until register value changes
5736 * @reg: IO-mapped register
5737 * @mask: Mask to apply to read register value
5738 * @val: Wait condition
5739 * @interval_msec: polling interval in milliseconds
5740 * @timeout_msec: timeout in milliseconds
5741 *
5742 * Waiting for some bits of a register to change is a common
5743 * operation for ATA controllers. This function reads the 32-bit LE
5744 * IO-mapped register @reg and tests for the following condition.
5745 *
5746 * (*@reg & mask) != val
5747 *
5748 * If the condition is met, it returns; otherwise, the process is
5749 * repeated after @interval_msec until timeout.
5750 *
5751 * LOCKING:
5752 * Kernel thread context (may sleep)
5753 *
5754 * RETURNS:
5755 * The final register value.
5756 */
5757u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
5758 unsigned long interval_msec,
5759 unsigned long timeout_msec)
5760{
5761 unsigned long timeout;
5762 u32 tmp;
5763
5764 tmp = ioread32(reg);
5765
5766 /* Calculate timeout _after_ the first read to make sure
5767 * preceding writes reach the controller before starting to
5768 * eat away the timeout.
5769 */
5770 timeout = jiffies + (timeout_msec * HZ) / 1000;
5771
5772 while ((tmp & mask) == val && time_before(jiffies, timeout)) {
5773 msleep(interval_msec);
5774 tmp = ioread32(reg);
5775 }
5776
5777 return tmp;
5778}
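A usage sketch: polling until a hypothetical per-port busy bit clears, checking every 10 ms for up to one second. MY_CTL and MY_CTL_BUSY are assumptions; since ata_wait_register() loops while (read & mask) == val, passing the busy bit as both mask and val waits for it to drop.

	static int my_wait_ready(void __iomem *port_mmio)	/* hypothetical */
	{
		u32 tmp = ata_wait_register(port_mmio + MY_CTL,
					    MY_CTL_BUSY, MY_CTL_BUSY, 10, 1000);
		if (tmp & MY_CTL_BUSY)
			return -EBUSY;	/* bit still set: timed out */
		return 0;
	}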
5779
4898/* 5780/*
4899 * libata is essentially a library of internal helper functions for 5781 * libata is essentially a library of internal helper functions for
4900 * low-level ATA host controller drivers. As such, the API/ABI is 5782 * low-level ATA host controller drivers. As such, the API/ABI is
@@ -4902,15 +5784,20 @@ int ata_ratelimit(void)
4902 * Do not depend on ABI/API stability. 5784 * Do not depend on ABI/API stability.
4903 */ 5785 */
4904 5786
5787EXPORT_SYMBOL_GPL(sata_deb_timing_boot);
5788EXPORT_SYMBOL_GPL(sata_deb_timing_eh);
5789EXPORT_SYMBOL_GPL(sata_deb_timing_before_fsrst);
4905EXPORT_SYMBOL_GPL(ata_std_bios_param); 5790EXPORT_SYMBOL_GPL(ata_std_bios_param);
4906EXPORT_SYMBOL_GPL(ata_std_ports); 5791EXPORT_SYMBOL_GPL(ata_std_ports);
4907EXPORT_SYMBOL_GPL(ata_device_add); 5792EXPORT_SYMBOL_GPL(ata_device_add);
5793EXPORT_SYMBOL_GPL(ata_port_detach);
4908EXPORT_SYMBOL_GPL(ata_host_set_remove); 5794EXPORT_SYMBOL_GPL(ata_host_set_remove);
4909EXPORT_SYMBOL_GPL(ata_sg_init); 5795EXPORT_SYMBOL_GPL(ata_sg_init);
4910EXPORT_SYMBOL_GPL(ata_sg_init_one); 5796EXPORT_SYMBOL_GPL(ata_sg_init_one);
4911EXPORT_SYMBOL_GPL(__ata_qc_complete); 5797EXPORT_SYMBOL_GPL(ata_hsm_move);
5798EXPORT_SYMBOL_GPL(ata_qc_complete);
5799EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
4912EXPORT_SYMBOL_GPL(ata_qc_issue_prot); 5800EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
4913EXPORT_SYMBOL_GPL(ata_eng_timeout);
4914EXPORT_SYMBOL_GPL(ata_tf_load); 5801EXPORT_SYMBOL_GPL(ata_tf_load);
4915EXPORT_SYMBOL_GPL(ata_tf_read); 5802EXPORT_SYMBOL_GPL(ata_tf_read);
4916EXPORT_SYMBOL_GPL(ata_noop_dev_select); 5803EXPORT_SYMBOL_GPL(ata_noop_dev_select);
@@ -4924,6 +5811,9 @@ EXPORT_SYMBOL_GPL(ata_port_start);
4924EXPORT_SYMBOL_GPL(ata_port_stop); 5811EXPORT_SYMBOL_GPL(ata_port_stop);
4925EXPORT_SYMBOL_GPL(ata_host_stop); 5812EXPORT_SYMBOL_GPL(ata_host_stop);
4926EXPORT_SYMBOL_GPL(ata_interrupt); 5813EXPORT_SYMBOL_GPL(ata_interrupt);
5814EXPORT_SYMBOL_GPL(ata_mmio_data_xfer);
5815EXPORT_SYMBOL_GPL(ata_pio_data_xfer);
5816EXPORT_SYMBOL_GPL(ata_pio_data_xfer_noirq);
4927EXPORT_SYMBOL_GPL(ata_qc_prep); 5817EXPORT_SYMBOL_GPL(ata_qc_prep);
4928EXPORT_SYMBOL_GPL(ata_noop_qc_prep); 5818EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
4929EXPORT_SYMBOL_GPL(ata_bmdma_setup); 5819EXPORT_SYMBOL_GPL(ata_bmdma_setup);
@@ -4931,33 +5821,46 @@ EXPORT_SYMBOL_GPL(ata_bmdma_start);
4931EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear); 5821EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
4932EXPORT_SYMBOL_GPL(ata_bmdma_status); 5822EXPORT_SYMBOL_GPL(ata_bmdma_status);
4933EXPORT_SYMBOL_GPL(ata_bmdma_stop); 5823EXPORT_SYMBOL_GPL(ata_bmdma_stop);
5824EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
5825EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
5826EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
5827EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
5828EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
4934EXPORT_SYMBOL_GPL(ata_port_probe); 5829EXPORT_SYMBOL_GPL(ata_port_probe);
5830EXPORT_SYMBOL_GPL(sata_set_spd);
5831EXPORT_SYMBOL_GPL(sata_phy_debounce);
5832EXPORT_SYMBOL_GPL(sata_phy_resume);
4935EXPORT_SYMBOL_GPL(sata_phy_reset); 5833EXPORT_SYMBOL_GPL(sata_phy_reset);
4936EXPORT_SYMBOL_GPL(__sata_phy_reset); 5834EXPORT_SYMBOL_GPL(__sata_phy_reset);
4937EXPORT_SYMBOL_GPL(ata_bus_reset); 5835EXPORT_SYMBOL_GPL(ata_bus_reset);
4938EXPORT_SYMBOL_GPL(ata_std_probeinit); 5836EXPORT_SYMBOL_GPL(ata_std_prereset);
4939EXPORT_SYMBOL_GPL(ata_std_softreset); 5837EXPORT_SYMBOL_GPL(ata_std_softreset);
4940EXPORT_SYMBOL_GPL(sata_std_hardreset); 5838EXPORT_SYMBOL_GPL(sata_std_hardreset);
4941EXPORT_SYMBOL_GPL(ata_std_postreset); 5839EXPORT_SYMBOL_GPL(ata_std_postreset);
4942EXPORT_SYMBOL_GPL(ata_std_probe_reset);
4943EXPORT_SYMBOL_GPL(ata_drive_probe_reset);
4944EXPORT_SYMBOL_GPL(ata_dev_revalidate); 5840EXPORT_SYMBOL_GPL(ata_dev_revalidate);
4945EXPORT_SYMBOL_GPL(ata_dev_classify); 5841EXPORT_SYMBOL_GPL(ata_dev_classify);
4946EXPORT_SYMBOL_GPL(ata_dev_pair); 5842EXPORT_SYMBOL_GPL(ata_dev_pair);
4947EXPORT_SYMBOL_GPL(ata_port_disable); 5843EXPORT_SYMBOL_GPL(ata_port_disable);
4948EXPORT_SYMBOL_GPL(ata_ratelimit); 5844EXPORT_SYMBOL_GPL(ata_ratelimit);
5845EXPORT_SYMBOL_GPL(ata_wait_register);
4949EXPORT_SYMBOL_GPL(ata_busy_sleep); 5846EXPORT_SYMBOL_GPL(ata_busy_sleep);
4950EXPORT_SYMBOL_GPL(ata_port_queue_task); 5847EXPORT_SYMBOL_GPL(ata_port_queue_task);
4951EXPORT_SYMBOL_GPL(ata_scsi_ioctl); 5848EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
4952EXPORT_SYMBOL_GPL(ata_scsi_queuecmd); 5849EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
4953EXPORT_SYMBOL_GPL(ata_scsi_slave_config); 5850EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
5851EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
5852EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
4954EXPORT_SYMBOL_GPL(ata_scsi_release); 5853EXPORT_SYMBOL_GPL(ata_scsi_release);
4955EXPORT_SYMBOL_GPL(ata_host_intr); 5854EXPORT_SYMBOL_GPL(ata_host_intr);
5855EXPORT_SYMBOL_GPL(sata_scr_valid);
5856EXPORT_SYMBOL_GPL(sata_scr_read);
5857EXPORT_SYMBOL_GPL(sata_scr_write);
5858EXPORT_SYMBOL_GPL(sata_scr_write_flush);
5859EXPORT_SYMBOL_GPL(ata_port_online);
5860EXPORT_SYMBOL_GPL(ata_port_offline);
4956EXPORT_SYMBOL_GPL(ata_id_string); 5861EXPORT_SYMBOL_GPL(ata_id_string);
4957EXPORT_SYMBOL_GPL(ata_id_c_string); 5862EXPORT_SYMBOL_GPL(ata_id_c_string);
4958EXPORT_SYMBOL_GPL(ata_scsi_simulate); 5863EXPORT_SYMBOL_GPL(ata_scsi_simulate);
4959EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
4960EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
4961 5864
4962EXPORT_SYMBOL_GPL(ata_pio_need_iordy); 5865EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
4963EXPORT_SYMBOL_GPL(ata_timing_compute); 5866EXPORT_SYMBOL_GPL(ata_timing_compute);
@@ -4979,3 +5882,13 @@ EXPORT_SYMBOL_GPL(ata_device_suspend);
4979EXPORT_SYMBOL_GPL(ata_device_resume); 5882EXPORT_SYMBOL_GPL(ata_device_resume);
4980EXPORT_SYMBOL_GPL(ata_scsi_device_suspend); 5883EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
4981EXPORT_SYMBOL_GPL(ata_scsi_device_resume); 5884EXPORT_SYMBOL_GPL(ata_scsi_device_resume);
5885
5886EXPORT_SYMBOL_GPL(ata_eng_timeout);
5887EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
5888EXPORT_SYMBOL_GPL(ata_port_abort);
5889EXPORT_SYMBOL_GPL(ata_port_freeze);
5890EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
5891EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
5892EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
5893EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
5894EXPORT_SYMBOL_GPL(ata_do_eh);
diff --git a/drivers/scsi/libata-eh.c b/drivers/scsi/libata-eh.c
new file mode 100644
index 000000000000..531a4e11c078
--- /dev/null
+++ b/drivers/scsi/libata-eh.c
@@ -0,0 +1,1855 @@
1/*
2 * libata-eh.c - libata error handling
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2006 Tejun Heo <htejun@gmail.com>
9 *
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License as
13 * published by the Free Software Foundation; either version 2, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; see the file COPYING. If not, write to
23 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
24 * USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
33 */
34
35#include <linux/config.h>
36#include <linux/kernel.h>
37#include <scsi/scsi.h>
38#include <scsi/scsi_host.h>
39#include <scsi/scsi_eh.h>
40#include <scsi/scsi_device.h>
41#include <scsi/scsi_cmnd.h>
42#include "scsi_transport_api.h"
43
44#include <linux/libata.h>
45
46#include "libata.h"
47
48static void __ata_port_freeze(struct ata_port *ap);
49static void ata_eh_finish(struct ata_port *ap);
50
51static void ata_ering_record(struct ata_ering *ering, int is_io,
52 unsigned int err_mask)
53{
54 struct ata_ering_entry *ent;
55
56 WARN_ON(!err_mask);
57
58 ering->cursor++;
59 ering->cursor %= ATA_ERING_SIZE;
60
61 ent = &ering->ring[ering->cursor];
62 ent->is_io = is_io;
63 ent->err_mask = err_mask;
64 ent->timestamp = get_jiffies_64();
65}
66
67static struct ata_ering_entry * ata_ering_top(struct ata_ering *ering)
68{
69 struct ata_ering_entry *ent = &ering->ring[ering->cursor];
70 if (!ent->err_mask)
71 return NULL;
72 return ent;
73}
74
75static int ata_ering_map(struct ata_ering *ering,
76 int (*map_fn)(struct ata_ering_entry *, void *),
77 void *arg)
78{
79 int idx, rc = 0;
80 struct ata_ering_entry *ent;
81
82 idx = ering->cursor;
83 do {
84 ent = &ering->ring[idx];
85 if (!ent->err_mask)
86 break;
87 rc = map_fn(ent, arg);
88 if (rc)
89 break;
90 idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE;
91 } while (idx != ering->cursor);
92
93 return rc;
94}
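ata_ering_map() walks the ring from the newest entry backwards and stops early once the callback returns non-zero. A hedged sketch of a callback that counts recent I/O errors (the 10-second horizon and my_count_io_errors() are illustrative):

	static int my_count_io_errors(struct ata_ering_entry *ent, void *arg)
	{
		int *count = arg;

		/* entries older than ~10s end the scan (illustrative cutoff) */
		if (time_after64(get_jiffies_64(), ent->timestamp + 10 * HZ))
			return 1;
		if (ent->is_io)
			(*count)++;
		return 0;	/* keep walking towards older entries */
	}

	/* usage: int n = 0; ata_ering_map(ering, my_count_io_errors, &n); */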
95
96/**
97 * ata_scsi_timed_out - SCSI layer time out callback
98 * @cmd: timed out SCSI command
99 *
100 * Handles SCSI layer timeout. We race with normal completion of
101 * the qc for @cmd. If the qc is already gone, we lose and let
102 * the scsi command finish (EH_HANDLED). Otherwise, the qc has
103 * timed out and EH should be invoked. Prevent ata_qc_complete()
104 * from finishing it by setting EH_SCHEDULED and return
105 * EH_NOT_HANDLED.
106 *
107 * TODO: kill this function once old EH is gone.
108 *
109 * LOCKING:
110 * Called from timer context
111 *
112 * RETURNS:
113 * EH_HANDLED or EH_NOT_HANDLED
114 */
115enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
116{
117 struct Scsi_Host *host = cmd->device->host;
118 struct ata_port *ap = ata_shost_to_port(host);
119 unsigned long flags;
120 struct ata_queued_cmd *qc;
121 enum scsi_eh_timer_return ret;
122
123 DPRINTK("ENTER\n");
124
125 if (ap->ops->error_handler) {
126 ret = EH_NOT_HANDLED;
127 goto out;
128 }
129
130 ret = EH_HANDLED;
131 spin_lock_irqsave(&ap->host_set->lock, flags);
132 qc = ata_qc_from_tag(ap, ap->active_tag);
133 if (qc) {
134 WARN_ON(qc->scsicmd != cmd);
135 qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
136 qc->err_mask |= AC_ERR_TIMEOUT;
137 ret = EH_NOT_HANDLED;
138 }
139 spin_unlock_irqrestore(&ap->host_set->lock, flags);
140
141 out:
142 DPRINTK("EXIT, ret=%d\n", ret);
143 return ret;
144}
145
146/**
147 * ata_scsi_error - SCSI layer error handler callback
148 * @host: SCSI host on which error occurred
149 *
150 * Handles SCSI-layer-thrown error events.
151 *
152 * LOCKING:
153 * Inherited from SCSI layer (none, can sleep)
154 *
155 * RETURNS:
156 * Zero.
157 */
158void ata_scsi_error(struct Scsi_Host *host)
159{
160 struct ata_port *ap = ata_shost_to_port(host);
161 spinlock_t *hs_lock = &ap->host_set->lock;
162 int i, repeat_cnt = ATA_EH_MAX_REPEAT;
163 unsigned long flags;
164
165 DPRINTK("ENTER\n");
166
167 /* synchronize with port task */
168 ata_port_flush_task(ap);
169
170 /* synchronize with host_set lock and sort out timeouts */
171
172 /* For new EH, all qcs are finished in one of three ways -
173 * normal completion, error completion, and SCSI timeout.
174 * Both completions can race against SCSI timeout. When normal
175 * completion wins, the qc never reaches EH. When error
176 * completion wins, the qc has ATA_QCFLAG_FAILED set.
177 *
178 * When SCSI timeout wins, things are a bit more complex.
179 * Normal or error completion can occur after the timeout but
180 * before this point. In such cases, both types of
181 * completions are honored. A scmd is determined to have
182 * timed out iff its associated qc is active and not failed.
183 */
184 if (ap->ops->error_handler) {
185 struct scsi_cmnd *scmd, *tmp;
186 int nr_timedout = 0;
187
188 spin_lock_irqsave(hs_lock, flags);
189
190 list_for_each_entry_safe(scmd, tmp, &host->eh_cmd_q, eh_entry) {
191 struct ata_queued_cmd *qc;
192
193 for (i = 0; i < ATA_MAX_QUEUE; i++) {
194 qc = __ata_qc_from_tag(ap, i);
195 if (qc->flags & ATA_QCFLAG_ACTIVE &&
196 qc->scsicmd == scmd)
197 break;
198 }
199
200 if (i < ATA_MAX_QUEUE) {
201 /* the scmd has an associated qc */
202 if (!(qc->flags & ATA_QCFLAG_FAILED)) {
203 /* which hasn't failed yet, timeout */
204 qc->err_mask |= AC_ERR_TIMEOUT;
205 qc->flags |= ATA_QCFLAG_FAILED;
206 nr_timedout++;
207 }
208 } else {
209 /* Normal completion occurred after
210 * SCSI timeout but before this point.
211 * Successfully complete it.
212 */
213 scmd->retries = scmd->allowed;
214 scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
215 }
216 }
217
218 /* If we have timed out qcs, they belong to EH from
219 * this point but the state of the controller is
220 * unknown. Freeze the port to make sure the IRQ
221 * handler doesn't diddle with those qcs. This must
222 * be done atomically w.r.t. setting QCFLAG_FAILED.
223 */
224 if (nr_timedout)
225 __ata_port_freeze(ap);
226
227 spin_unlock_irqrestore(hs_lock, flags);
228 } else
229 spin_unlock_wait(hs_lock);
230
231 repeat:
232 /* invoke error handler */
233 if (ap->ops->error_handler) {
234 /* fetch & clear EH info */
235 spin_lock_irqsave(hs_lock, flags);
236
237 memset(&ap->eh_context, 0, sizeof(ap->eh_context));
238 ap->eh_context.i = ap->eh_info;
239 memset(&ap->eh_info, 0, sizeof(ap->eh_info));
240
241 ap->flags |= ATA_FLAG_EH_IN_PROGRESS;
242 ap->flags &= ~ATA_FLAG_EH_PENDING;
243
244 spin_unlock_irqrestore(hs_lock, flags);
245
246 /* invoke EH. if unloading, just finish failed qcs */
247 if (!(ap->flags & ATA_FLAG_UNLOADING))
248 ap->ops->error_handler(ap);
249 else
250 ata_eh_finish(ap);
251
252 /* Exception might have happened after ->error_handler
253 * recovered the port but before this point. Repeat
254 * EH in such case.
255 */
256 spin_lock_irqsave(hs_lock, flags);
257
258 if (ap->flags & ATA_FLAG_EH_PENDING) {
259 if (--repeat_cnt) {
260 ata_port_printk(ap, KERN_INFO,
261 "EH pending after completion, "
262 "repeating EH (cnt=%d)\n", repeat_cnt);
263 spin_unlock_irqrestore(hs_lock, flags);
264 goto repeat;
265 }
266 ata_port_printk(ap, KERN_ERR, "EH pending after %d "
267 "tries, giving up\n", ATA_EH_MAX_REPEAT);
268 }
269
270 /* this run is complete, make sure EH info is clear */
271 memset(&ap->eh_info, 0, sizeof(ap->eh_info));
272
273 /* Clear host_eh_scheduled while holding hs_lock such
274 * that if exception occurs after this point but
275 * before EH completion, SCSI midlayer will
276 * re-initiate EH.
277 */
278 host->host_eh_scheduled = 0;
279
280 spin_unlock_irqrestore(hs_lock, flags);
281 } else {
282 WARN_ON(ata_qc_from_tag(ap, ap->active_tag) == NULL);
283 ap->ops->eng_timeout(ap);
284 }
285
286 /* finish or retry handled scmd's and clean up */
287 WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q));
288
289 scsi_eh_flush_done_q(&ap->eh_done_q);
290
291 /* clean up */
292 spin_lock_irqsave(hs_lock, flags);
293
294 if (ap->flags & ATA_FLAG_LOADING) {
295 ap->flags &= ~ATA_FLAG_LOADING;
296 } else {
297 if (ap->flags & ATA_FLAG_SCSI_HOTPLUG)
298 queue_work(ata_aux_wq, &ap->hotplug_task);
299 if (ap->flags & ATA_FLAG_RECOVERED)
300 ata_port_printk(ap, KERN_INFO, "EH complete\n");
301 }
302
303 ap->flags &= ~(ATA_FLAG_SCSI_HOTPLUG | ATA_FLAG_RECOVERED);
304
305 /* tell wait_eh that we're done */
306 ap->flags &= ~ATA_FLAG_EH_IN_PROGRESS;
307 wake_up_all(&ap->eh_wait_q);
308
309 spin_unlock_irqrestore(hs_lock, flags);
310
311 DPRINTK("EXIT\n");
312}
313
314/**
315 * ata_port_wait_eh - Wait for the currently pending EH to complete
316 * @ap: Port to wait EH for
317 *
318 * Wait until the currently pending EH is complete.
319 *
320 * LOCKING:
321 * Kernel thread context (may sleep).
322 */
323void ata_port_wait_eh(struct ata_port *ap)
324{
325 unsigned long flags;
326 DEFINE_WAIT(wait);
327
328 retry:
329 spin_lock_irqsave(&ap->host_set->lock, flags);
330
331 while (ap->flags & (ATA_FLAG_EH_PENDING | ATA_FLAG_EH_IN_PROGRESS)) {
332 prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
333 spin_unlock_irqrestore(&ap->host_set->lock, flags);
334 schedule();
335 spin_lock_irqsave(&ap->host_set->lock, flags);
336 }
337 finish_wait(&ap->eh_wait_q, &wait);
338
339 spin_unlock_irqrestore(&ap->host_set->lock, flags);
340
341 /* make sure SCSI EH is complete */
342 if (scsi_host_in_recovery(ap->host)) {
343 msleep(10);
344 goto retry;
345 }
346}
347
348/**
349 * ata_qc_timeout - Handle timeout of queued command
350 * @qc: Command that timed out
351 *
352 * Some part of the kernel (currently, only the SCSI layer)
353 * has noticed that the active command on port @ap has not
354 * completed after a specified length of time. Handle this
355 * condition by disabling DMA (if necessary) and completing
356 * transactions, with error if necessary.
357 *
358 * This also handles the case of the "lost interrupt", where
359 * for some reason (possibly hardware bug, possibly driver bug)
360 * an interrupt was not delivered to the driver, even though the
361 * transaction completed successfully.
362 *
363 * TODO: kill this function once old EH is gone.
364 *
365 * LOCKING:
366 * Inherited from SCSI layer (none, can sleep)
367 */
368static void ata_qc_timeout(struct ata_queued_cmd *qc)
369{
370 struct ata_port *ap = qc->ap;
371 struct ata_host_set *host_set = ap->host_set;
372 u8 host_stat = 0, drv_stat;
373 unsigned long flags;
374
375 DPRINTK("ENTER\n");
376
377 ap->hsm_task_state = HSM_ST_IDLE;
378
379 spin_lock_irqsave(&host_set->lock, flags);
380
381 switch (qc->tf.protocol) {
382
383 case ATA_PROT_DMA:
384 case ATA_PROT_ATAPI_DMA:
385 host_stat = ap->ops->bmdma_status(ap);
386
387 /* before we do anything else, clear DMA-Start bit */
388 ap->ops->bmdma_stop(qc);
389
390 /* fall through */
391
392 default:
393 ata_altstatus(ap);
394 drv_stat = ata_chk_status(ap);
395
396 /* ack bmdma irq events */
397 ap->ops->irq_clear(ap);
398
399 ata_dev_printk(qc->dev, KERN_ERR, "command 0x%x timeout, "
400 "stat 0x%x host_stat 0x%x\n",
401 qc->tf.command, drv_stat, host_stat);
402
403 /* complete taskfile transaction */
404 qc->err_mask |= AC_ERR_TIMEOUT;
405 break;
406 }
407
408 spin_unlock_irqrestore(&host_set->lock, flags);
409
410 ata_eh_qc_complete(qc);
411
412 DPRINTK("EXIT\n");
413}
414
415/**
416 * ata_eng_timeout - Handle timeout of queued command
417 * @ap: Port on which timed-out command is active
418 *
419 * Some part of the kernel (currently, only the SCSI layer)
420 * has noticed that the active command on port @ap has not
421 * completed after a specified length of time. Handle this
422 * condition by disabling DMA (if necessary) and completing
423 * transactions, with error if necessary.
424 *
425 * This also handles the case of the "lost interrupt", where
426 * for some reason (possibly hardware bug, possibly driver bug)
427 * an interrupt was not delivered to the driver, even though the
428 * transaction completed successfully.
429 *
430 * TODO: kill this function once old EH is gone.
431 *
432 * LOCKING:
433 * Inherited from SCSI layer (none, can sleep)
434 */
435void ata_eng_timeout(struct ata_port *ap)
436{
437 DPRINTK("ENTER\n");
438
439 ata_qc_timeout(ata_qc_from_tag(ap, ap->active_tag));
440
441 DPRINTK("EXIT\n");
442}
443
444/**
445 * ata_qc_schedule_eh - schedule qc for error handling
446 * @qc: command to schedule error handling for
447 *
448 * Schedule error handling for @qc. EH will kick in as soon as
449 * other commands are drained.
450 *
451 * LOCKING:
452 * spin_lock_irqsave(host_set lock)
453 */
454void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
455{
456 struct ata_port *ap = qc->ap;
457
458 WARN_ON(!ap->ops->error_handler);
459
460 qc->flags |= ATA_QCFLAG_FAILED;
461 qc->ap->flags |= ATA_FLAG_EH_PENDING;
462
463 /* The following will fail if timeout has already expired.
464 * ata_scsi_error() takes care of such scmds on EH entry.
465 * Note that ATA_QCFLAG_FAILED is unconditionally set after
466 * this function completes.
467 */
468 scsi_req_abort_cmd(qc->scsicmd);
469}
470
471/**
472 * ata_port_schedule_eh - schedule error handling without a qc
473 * @ap: ATA port to schedule EH for
474 *
475 * Schedule error handling for @ap. EH will kick in as soon as
476 * all commands are drained.
477 *
478 * LOCKING:
479 * spin_lock_irqsave(host_set lock)
480 */
481void ata_port_schedule_eh(struct ata_port *ap)
482{
483 WARN_ON(!ap->ops->error_handler);
484
485 ap->flags |= ATA_FLAG_EH_PENDING;
486 scsi_schedule_eh(ap->host);
487
488 DPRINTK("port EH scheduled\n");
489}
490
491/**
492 * ata_port_abort - abort all qc's on the port
493 * @ap: ATA port to abort qc's for
494 *
495 * Abort all active qc's of @ap and schedule EH.
496 *
497 * LOCKING:
498 * spin_lock_irqsave(host_set lock)
499 *
500 * RETURNS:
501 * Number of aborted qc's.
502 */
503int ata_port_abort(struct ata_port *ap)
504{
505 int tag, nr_aborted = 0;
506
507 WARN_ON(!ap->ops->error_handler);
508
509 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
510 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
511
512 if (qc) {
513 qc->flags |= ATA_QCFLAG_FAILED;
514 ata_qc_complete(qc);
515 nr_aborted++;
516 }
517 }
518
519 if (!nr_aborted)
520 ata_port_schedule_eh(ap);
521
522 return nr_aborted;
523}
524
525/**
526 * __ata_port_freeze - freeze port
527 * @ap: ATA port to freeze
528 *
529 * This function is called when an HSM violation or some other
530 * condition disrupts normal operation of the port. A frozen port
531 * is not allowed to perform any operation until the port is
532 * thawed, which usually follows a successful reset.
533 *
534 * ap->ops->freeze() callback can be used for freezing the port
535 * hardware-wise (e.g. mask interrupt and stop DMA engine). If a
536 * port cannot be frozen hardware-wise, the interrupt handler
537 * must ack and clear interrupts unconditionally while the port
538 * is frozen.
539 *
540 * LOCKING:
541 * spin_lock_irqsave(host_set lock)
542 */
543static void __ata_port_freeze(struct ata_port *ap)
544{
545 WARN_ON(!ap->ops->error_handler);
546
547 if (ap->ops->freeze)
548 ap->ops->freeze(ap);
549
550 ap->flags |= ATA_FLAG_FROZEN;
551
552 DPRINTK("ata%u port frozen\n", ap->id);
553}
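For a controller without a dedicated interrupt-mask register, a ->freeze() callback can simply stop the device from asserting INTRQ, much like the ata_bmdma_freeze() helper exported from libata-core. A minimal sketch along those lines (my_freeze() itself is illustrative, assuming a legacy taskfile ioaddr layout):

	static void my_freeze(struct ata_port *ap)	/* illustrative sketch */
	{
		struct ata_ioports *ioaddr = &ap->ioaddr;

		ap->ctl |= ATA_NIEN;		/* mask INTRQ at the device */
		ap->last_ctl = ap->ctl;
		outb(ap->ctl, ioaddr->ctl_addr);
	}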
554
555/**
556 * ata_port_freeze - abort & freeze port
557 * @ap: ATA port to freeze
558 *
559 * Abort and freeze @ap.
560 *
561 * LOCKING:
562 * spin_lock_irqsave(host_set lock)
563 *
564 * RETURNS:
565 * Number of aborted commands.
566 */
567int ata_port_freeze(struct ata_port *ap)
568{
569 int nr_aborted;
570
571 WARN_ON(!ap->ops->error_handler);
572
573 nr_aborted = ata_port_abort(ap);
574 __ata_port_freeze(ap);
575
576 return nr_aborted;
577}
578
579/**
580 * ata_eh_freeze_port - EH helper to freeze port
581 * @ap: ATA port to freeze
582 *
583 * Freeze @ap.
584 *
585 * LOCKING:
586 * None.
587 */
588void ata_eh_freeze_port(struct ata_port *ap)
589{
590 unsigned long flags;
591
592 if (!ap->ops->error_handler)
593 return;
594
595 spin_lock_irqsave(&ap->host_set->lock, flags);
596 __ata_port_freeze(ap);
597 spin_unlock_irqrestore(&ap->host_set->lock, flags);
598}
599
600/**
601 * ata_eh_thaw_port - EH helper to thaw port
602 * @ap: ATA port to thaw
603 *
604 * Thaw frozen port @ap.
605 *
606 * LOCKING:
607 * None.
608 */
609void ata_eh_thaw_port(struct ata_port *ap)
610{
611 unsigned long flags;
612
613 if (!ap->ops->error_handler)
614 return;
615
616 spin_lock_irqsave(&ap->host_set->lock, flags);
617
618 ap->flags &= ~ATA_FLAG_FROZEN;
619
620 if (ap->ops->thaw)
621 ap->ops->thaw(ap);
622
623 spin_unlock_irqrestore(&ap->host_set->lock, flags);
624
625 DPRINTK("ata%u port thawed\n", ap->id);
626}
627
628static void ata_eh_scsidone(struct scsi_cmnd *scmd)
629{
630 /* nada */
631}
632
633static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
634{
635 struct ata_port *ap = qc->ap;
636 struct scsi_cmnd *scmd = qc->scsicmd;
637 unsigned long flags;
638
639 spin_lock_irqsave(&ap->host_set->lock, flags);
640 qc->scsidone = ata_eh_scsidone;
641 __ata_qc_complete(qc);
642 WARN_ON(ata_tag_valid(qc->tag));
643 spin_unlock_irqrestore(&ap->host_set->lock, flags);
644
645 scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
646}
647
648/**
649 * ata_eh_qc_complete - Complete an active ATA command from EH
650 * @qc: Command to complete
651 *
652 * Indicate to the mid and upper layers that an ATA command has
653 * completed. To be used from EH.
654 */
655void ata_eh_qc_complete(struct ata_queued_cmd *qc)
656{
657 struct scsi_cmnd *scmd = qc->scsicmd;
658 scmd->retries = scmd->allowed;
659 __ata_eh_qc_complete(qc);
660}
661
662/**
663 * ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
664 * @qc: Command to retry
665 *
666 * Indicate to the mid and upper layers that an ATA command
667 * should be retried. To be used from EH.
668 *
669 * SCSI midlayer limits the number of retries to scmd->allowed.
670 * scmd->retries is decremented for commands which get retried
671 * due to unrelated failures (qc->err_mask is zero).
672 */
673void ata_eh_qc_retry(struct ata_queued_cmd *qc)
674{
675 struct scsi_cmnd *scmd = qc->scsicmd;
676 if (!qc->err_mask && scmd->retries)
677 scmd->retries--;
678 __ata_eh_qc_complete(qc);
679}
680
681/**
682 * ata_eh_detach_dev - detach ATA device
683 * @dev: ATA device to detach
684 *
685 * Detach @dev.
686 *
687 * LOCKING:
688 * None.
689 */
690static void ata_eh_detach_dev(struct ata_device *dev)
691{
692 struct ata_port *ap = dev->ap;
693 unsigned long flags;
694
695 ata_dev_disable(dev);
696
697 spin_lock_irqsave(&ap->host_set->lock, flags);
698
699 dev->flags &= ~ATA_DFLAG_DETACH;
700
701 if (ata_scsi_offline_dev(dev)) {
702 dev->flags |= ATA_DFLAG_DETACHED;
703 ap->flags |= ATA_FLAG_SCSI_HOTPLUG;
704 }
705
706 spin_unlock_irqrestore(&ap->host_set->lock, flags);
707}
708
709/**
710 * ata_eh_about_to_do - about to perform eh_action
711 * @ap: target ATA port
712 * @action: action about to be performed
713 *
714 * Called just before performing EH actions to clear related bits
715 * in @ap->eh_info such that eh actions are not unnecessarily
716 * repeated.
717 *
718 * LOCKING:
719 * None.
720 */
721static void ata_eh_about_to_do(struct ata_port *ap, unsigned int action)
722{
723 unsigned long flags;
724
725 spin_lock_irqsave(&ap->host_set->lock, flags);
726 ap->eh_info.action &= ~action;
727 ap->flags |= ATA_FLAG_RECOVERED;
728 spin_unlock_irqrestore(&ap->host_set->lock, flags);
729}
730
731/**
732 * ata_err_string - convert err_mask to descriptive string
733 * @err_mask: error mask to convert to string
734 *
735 * Convert @err_mask to descriptive string. Errors are
736 * prioritized according to severity and only the most severe
737 * error is reported.
738 *
739 * LOCKING:
740 * None.
741 *
742 * RETURNS:
743 * Descriptive string for @err_mask
744 */
745static const char * ata_err_string(unsigned int err_mask)
746{
747 if (err_mask & AC_ERR_HOST_BUS)
748 return "host bus error";
749 if (err_mask & AC_ERR_ATA_BUS)
750 return "ATA bus error";
751 if (err_mask & AC_ERR_TIMEOUT)
752 return "timeout";
753 if (err_mask & AC_ERR_HSM)
754 return "HSM violation";
755 if (err_mask & AC_ERR_SYSTEM)
756 return "internal error";
757 if (err_mask & AC_ERR_MEDIA)
758 return "media error";
759 if (err_mask & AC_ERR_INVALID)
760 return "invalid argument";
761 if (err_mask & AC_ERR_DEV)
762 return "device error";
763 return "unknown error";
764}
765
766/**
767 * ata_read_log_page - read a specific log page
768 * @dev: target device
769 * @page: page to read
770 * @buf: buffer to store read page
771 * @sectors: number of sectors to read
772 *
773 * Read log page using READ_LOG_EXT command.
774 *
775 * LOCKING:
776 * Kernel thread context (may sleep).
777 *
778 * RETURNS:
779 * 0 on success, AC_ERR_* mask otherwise.
780 */
781static unsigned int ata_read_log_page(struct ata_device *dev,
782 u8 page, void *buf, unsigned int sectors)
783{
784 struct ata_taskfile tf;
785 unsigned int err_mask;
786
787 DPRINTK("read log page - page %d\n", page);
788
789 ata_tf_init(dev, &tf);
790 tf.command = ATA_CMD_READ_LOG_EXT;
791 tf.lbal = page;
792 tf.nsect = sectors;
793 tf.hob_nsect = sectors >> 8;
794 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;
795 tf.protocol = ATA_PROT_PIO;
796
797 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
798 buf, sectors * ATA_SECT_SIZE);
799
800 DPRINTK("EXIT, err_mask=%x\n", err_mask);
801 return err_mask;
802}
803
804/**
805 * ata_eh_read_log_10h - Read log page 10h for NCQ error details
806 * @dev: Device to read log page 10h from
807 * @tag: Resulting tag of the failed command
808 * @tf: Resulting taskfile registers of the failed command
809 *
810 * Read log page 10h to obtain NCQ error details and clear error
811 * condition.
812 *
813 * LOCKING:
814 * Kernel thread context (may sleep).
815 *
816 * RETURNS:
817 * 0 on success, -errno otherwise.
818 */
819static int ata_eh_read_log_10h(struct ata_device *dev,
820 int *tag, struct ata_taskfile *tf)
821{
822 u8 *buf = dev->ap->sector_buf;
823 unsigned int err_mask;
824 u8 csum;
825 int i;
826
827 err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, buf, 1);
828 if (err_mask)
829 return -EIO;
830
831 csum = 0;
832 for (i = 0; i < ATA_SECT_SIZE; i++)
833 csum += buf[i];
834 if (csum)
835 ata_dev_printk(dev, KERN_WARNING,
836 "invalid checksum 0x%x on log page 10h\n", csum);
837
838 if (buf[0] & 0x80)
839 return -ENOENT;
840
841 *tag = buf[0] & 0x1f;
842
843 tf->command = buf[2];
844 tf->feature = buf[3];
845 tf->lbal = buf[4];
846 tf->lbam = buf[5];
847 tf->lbah = buf[6];
848 tf->device = buf[7];
849 tf->hob_lbal = buf[8];
850 tf->hob_lbam = buf[9];
851 tf->hob_lbah = buf[10];
852 tf->nsect = buf[12];
853 tf->hob_nsect = buf[13];
854
855 return 0;
856}
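
/* For reference, the log page 10h byte layout consumed above (an
 * illustrative sketch derived from the code, not a struct defined by
 * libata):
 *
 *	buf[0]    bit 7 set = error was not for a queued command
 *	          (the function returns -ENOENT); bits 4:0 = failed tag
 *	buf[2]    status    buf[3]   error
 *	buf[4]    lbal      buf[5]   lbam       buf[6]   lbah
 *	buf[7]    device
 *	buf[8]    hob_lbal  buf[9]   hob_lbam   buf[10]  hob_lbah
 *	buf[12]   nsect     buf[13]  hob_nsect
 *	buf[511]  checksum: the byte-wise sum of the whole 512-byte
 *	          sector must be 0 mod 256, which is what the csum
 *	          loop above verifies.
 */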
857
858/**
859 * atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
860 * @dev: device to perform REQUEST_SENSE to
861 * @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
862 *
863 * Perform ATAPI REQUEST_SENSE after the device reported CHECK
864 * CONDITION.  This function is an EH helper.
865 *
866 * LOCKING:
867 * Kernel thread context (may sleep).
868 *
869 * RETURNS:
870 * 0 on success, AC_ERR_* mask on failure
871 */
872static unsigned int atapi_eh_request_sense(struct ata_device *dev,
873 unsigned char *sense_buf)
874{
875 struct ata_port *ap = dev->ap;
876 struct ata_taskfile tf;
877 u8 cdb[ATAPI_CDB_LEN];
878
879 DPRINTK("ATAPI request sense\n");
880
881 ata_tf_init(dev, &tf);
882
883 /* FIXME: is this needed? */
884 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
885
886 /* XXX: why tf_read here? */
887 ap->ops->tf_read(ap, &tf);
888
889 /* fill these in, for the case where they are -not- overwritten */
890 sense_buf[0] = 0x70;
891 sense_buf[2] = tf.feature >> 4;
892
893 memset(cdb, 0, ATAPI_CDB_LEN);
894 cdb[0] = REQUEST_SENSE;
895 cdb[4] = SCSI_SENSE_BUFFERSIZE;
896
897 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
898 tf.command = ATA_CMD_PACKET;
899
900 /* is it pointless to prefer PIO for "safety reasons"? */
901 if (ap->flags & ATA_FLAG_PIO_DMA) {
902 tf.protocol = ATA_PROT_ATAPI_DMA;
903 tf.feature |= ATAPI_PKT_DMA;
904 } else {
905 tf.protocol = ATA_PROT_ATAPI;
906 tf.lbam = (8 * 1024) & 0xff;
907 tf.lbah = (8 * 1024) >> 8;
908 }
909
910 return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
911 sense_buf, SCSI_SENSE_BUFFERSIZE);
912}
913
914/**
915 * ata_eh_analyze_serror - analyze SError for a failed port
916 * @ap: ATA port to analyze SError for
917 *
918 * Analyze SError if available and further determine cause of
919 * failure.
920 *
921 * LOCKING:
922 * None.
923 */
924static void ata_eh_analyze_serror(struct ata_port *ap)
925{
926 struct ata_eh_context *ehc = &ap->eh_context;
927 u32 serror = ehc->i.serror;
928 unsigned int err_mask = 0, action = 0;
929
930 if (serror & SERR_PERSISTENT) {
931 err_mask |= AC_ERR_ATA_BUS;
932 action |= ATA_EH_HARDRESET;
933 }
934 if (serror &
935 (SERR_DATA_RECOVERED | SERR_COMM_RECOVERED | SERR_DATA)) {
936 err_mask |= AC_ERR_ATA_BUS;
937 action |= ATA_EH_SOFTRESET;
938 }
939 if (serror & SERR_PROTOCOL) {
940 err_mask |= AC_ERR_HSM;
941 action |= ATA_EH_SOFTRESET;
942 }
943 if (serror & SERR_INTERNAL) {
944 err_mask |= AC_ERR_SYSTEM;
945 action |= ATA_EH_SOFTRESET;
946 }
947 if (serror & (SERR_PHYRDY_CHG | SERR_DEV_XCHG))
948 ata_ehi_hotplugged(&ehc->i);
949
950 ehc->i.err_mask |= err_mask;
951 ehc->i.action |= action;
952}
953
954/**
955 * ata_eh_analyze_ncq_error - analyze NCQ error
956 * @ap: ATA port to analyze NCQ error for
957 *
958 * Read log page 10h, determine the offending qc and acquire
959 * the error status TF.  For NCQ device errors, all an LLDD has
960 * to do is set AC_ERR_DEV in ehi->err_mask; this function takes
961 * care of the rest.
962 *
963 * LOCKING:
964 * Kernel thread context (may sleep).
965 */
966static void ata_eh_analyze_ncq_error(struct ata_port *ap)
967{
968 struct ata_eh_context *ehc = &ap->eh_context;
969 struct ata_device *dev = ap->device;
970 struct ata_queued_cmd *qc;
971 struct ata_taskfile tf;
972 int tag, rc;
973
974 /* if frozen, we can't do much */
975 if (ap->flags & ATA_FLAG_FROZEN)
976 return;
977
978 /* is it NCQ device error? */
979 if (!ap->sactive || !(ehc->i.err_mask & AC_ERR_DEV))
980 return;
981
982 /* has LLDD analyzed already? */
983 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
984 qc = __ata_qc_from_tag(ap, tag);
985
986 if (!(qc->flags & ATA_QCFLAG_FAILED))
987 continue;
988
989 if (qc->err_mask)
990 return;
991 }
992
993 /* okay, this error is ours */
994 rc = ata_eh_read_log_10h(dev, &tag, &tf);
995 if (rc) {
996 ata_port_printk(ap, KERN_ERR, "failed to read log page 10h "
997 "(errno=%d)\n", rc);
998 return;
999 }
1000
1001 if (!(ap->sactive & (1 << tag))) {
1002 ata_port_printk(ap, KERN_ERR, "log page 10h reported "
1003 "inactive tag %d\n", tag);
1004 return;
1005 }
1006
1007 /* we've got the perpetrator, condemn it */
1008 qc = __ata_qc_from_tag(ap, tag);
1009 memcpy(&qc->result_tf, &tf, sizeof(tf));
1010 qc->err_mask |= AC_ERR_DEV;
1011 ehc->i.err_mask &= ~AC_ERR_DEV;
1012}
1013
1014/**
1015 * ata_eh_analyze_tf - analyze taskfile of a failed qc
1016 * @qc: qc to analyze
1017 * @tf: Taskfile registers to analyze
1018 *
1019 * Analyze the taskfile of @qc and further determine the cause
1020 * of failure.  This function also requests ATAPI sense data if
1021 * available.
1022 *
1023 * LOCKING:
1024 * Kernel thread context (may sleep).
1025 *
1026 * RETURNS:
1027 * Determined recovery action
1028 */
1029static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
1030 const struct ata_taskfile *tf)
1031{
1032 unsigned int tmp, action = 0;
1033 u8 stat = tf->command, err = tf->feature;
1034
1035 if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) {
1036 qc->err_mask |= AC_ERR_HSM;
1037 return ATA_EH_SOFTRESET;
1038 }
1039
1040 if (!(qc->err_mask & AC_ERR_DEV))
1041 return 0;
1042
1043 switch (qc->dev->class) {
1044 case ATA_DEV_ATA:
1045 if (err & ATA_ICRC)
1046 qc->err_mask |= AC_ERR_ATA_BUS;
1047 if (err & ATA_UNC)
1048 qc->err_mask |= AC_ERR_MEDIA;
1049 if (err & ATA_IDNF)
1050 qc->err_mask |= AC_ERR_INVALID;
1051 break;
1052
1053 case ATA_DEV_ATAPI:
1054 tmp = atapi_eh_request_sense(qc->dev,
1055 qc->scsicmd->sense_buffer);
1056 if (!tmp) {
1057 /* ATA_QCFLAG_SENSE_VALID is used to tell
1058 * atapi_qc_complete() that sense data is
1059 * already valid.
1060 *
1061 * TODO: interpret sense data and set
1062 * appropriate err_mask.
1063 */
1064 qc->flags |= ATA_QCFLAG_SENSE_VALID;
1065 } else
1066 qc->err_mask |= tmp;
1067 }
1068
1069 if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS))
1070 action |= ATA_EH_SOFTRESET;
1071
1072 return action;
1073}
1074
1075static int ata_eh_categorize_ering_entry(struct ata_ering_entry *ent)
1076{
1077 if (ent->err_mask & (AC_ERR_ATA_BUS | AC_ERR_TIMEOUT))
1078 return 1;
1079
1080 if (ent->is_io) {
1081 if (ent->err_mask & AC_ERR_HSM)
1082 return 1;
1083 if ((ent->err_mask &
1084 (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV)
1085 return 2;
1086 }
1087
1088 return 0;
1089}
1090
1091struct speed_down_needed_arg {
1092 u64 since;
1093 int nr_errors[3];
1094};
1095
1096static int speed_down_needed_cb(struct ata_ering_entry *ent, void *void_arg)
1097{
1098 struct speed_down_needed_arg *arg = void_arg;
1099
1100 if (ent->timestamp < arg->since)
1101 return -1;
1102
1103 arg->nr_errors[ata_eh_categorize_ering_entry(ent)]++;
1104 return 0;
1105}
1106
1107/**
1108 * ata_eh_speed_down_needed - Determine whether speed down is necessary
1109 * @dev: Device of interest
1110 *
1111 * This function examines the error ring of @dev and determines
1112 * whether speed down is necessary.  Speed down is necessary if
1113 * there have been more than 3 Cat-1 errors or 10 Cat-2 errors
1114 * during the last 15 minutes.
1115 *
1116 * Cat-1 errors are ATA_BUS or TIMEOUT for any command, or an HSM
1117 * violation for a known supported command.
1118 *
1119 * Cat-2 errors are unclassified DEV errors for known supported
1120 * commands.
1121 *
1122 * LOCKING:
1123 * Inherited from caller.
1124 *
1125 * RETURNS:
1126 * 1 if speed down is necessary, 0 otherwise
1127 */
1128static int ata_eh_speed_down_needed(struct ata_device *dev)
1129{
1130 const u64 interval = 15LLU * 60 * HZ;
1131 static const int err_limits[3] = { -1, 3, 10 };
1132 struct speed_down_needed_arg arg;
1133 struct ata_ering_entry *ent;
1134 int err_cat;
1135 u64 j64;
1136
1137 ent = ata_ering_top(&dev->ering);
1138 if (!ent)
1139 return 0;
1140
1141 err_cat = ata_eh_categorize_ering_entry(ent);
1142 if (err_cat == 0)
1143 return 0;
1144
1145 memset(&arg, 0, sizeof(arg));
1146
1147 j64 = get_jiffies_64();
1148 if (j64 >= interval)
1149 arg.since = j64 - interval;
1150 else
1151 arg.since = 0;
1152
1153 ata_ering_map(&dev->ering, speed_down_needed_cb, &arg);
1154
1155 return arg.nr_errors[err_cat] > err_limits[err_cat];
1156}
1157
1158/**
1159 * ata_eh_speed_down - record error and speed down if necessary
1160 * @dev: Failed device
1161 * @is_io: Did the device fail during normal IO?
1162 * @err_mask: err_mask of the error
1163 *
1164 * Record error and examine error history to determine whether
1165 * adjusting transmission speed is necessary. It also sets
1166 * transmission limits appropriately if such adjustment is
1167 * necessary.
1168 *
1169 * LOCKING:
1170 * Kernel thread context (may sleep).
1171 *
1172 * RETURNS:
1173 * 0 on success, -errno otherwise
1174 */
1175static int ata_eh_speed_down(struct ata_device *dev, int is_io,
1176 unsigned int err_mask)
1177{
1178 if (!err_mask)
1179 return 0;
1180
1181 /* record error and determine whether speed down is necessary */
1182 ata_ering_record(&dev->ering, is_io, err_mask);
1183
1184 if (!ata_eh_speed_down_needed(dev))
1185 return 0;
1186
1187 /* speed down SATA link speed if possible */
1188 if (sata_down_spd_limit(dev->ap) == 0)
1189 return ATA_EH_HARDRESET;
1190
1191 /* lower transfer mode */
1192 if (ata_down_xfermask_limit(dev, 0) == 0)
1193 return ATA_EH_SOFTRESET;
1194
1195 ata_dev_printk(dev, KERN_ERR,
1196 "speed down requested but no transfer mode left\n");
1197 return 0;
1198}
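
/* A worked example of the policy above (illustration only): with
 * err_limits[] = { -1, 3, 10 }, a fourth Cat-1 error (e.g. an ATA bus
 * error) inside the 15 minute window trips the limit of 3.  EH then
 * prefers lowering the SATA link speed (sata_down_spd_limit(), which
 * needs a hardreset to take effect) and only falls back to
 * ata_down_xfermask_limit() - e.g. stepping UDMA down - when the link
 * speed cannot be reduced further; that path needs just a softreset.
 */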
1199
1200/**
1201 * ata_eh_autopsy - analyze error and determine recovery action
1202 * @ap: ATA port to perform autopsy on
1203 *
1204 * Analyze why @ap failed and determine which recovery action is
1205 * needed. This function also sets more detailed AC_ERR_* values
1206 * and fills sense data for ATAPI CHECK SENSE.
1207 *
1208 * LOCKING:
1209 * Kernel thread context (may sleep).
1210 */
1211static void ata_eh_autopsy(struct ata_port *ap)
1212{
1213 struct ata_eh_context *ehc = &ap->eh_context;
1214 unsigned int action = ehc->i.action;
1215 struct ata_device *failed_dev = NULL;
1216 unsigned int all_err_mask = 0;
1217 int tag, is_io = 0;
1218 u32 serror;
1219 int rc;
1220
1221 DPRINTK("ENTER\n");
1222
1223 /* obtain and analyze SError */
1224 rc = sata_scr_read(ap, SCR_ERROR, &serror);
1225 if (rc == 0) {
1226 ehc->i.serror |= serror;
1227 ata_eh_analyze_serror(ap);
1228 } else if (rc != -EOPNOTSUPP)
1229 action |= ATA_EH_HARDRESET;
1230
1231 /* analyze NCQ failure */
1232 ata_eh_analyze_ncq_error(ap);
1233
1234 /* any real error trumps AC_ERR_OTHER */
1235 if (ehc->i.err_mask & ~AC_ERR_OTHER)
1236 ehc->i.err_mask &= ~AC_ERR_OTHER;
1237
1238 all_err_mask |= ehc->i.err_mask;
1239
1240 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1241 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
1242
1243 if (!(qc->flags & ATA_QCFLAG_FAILED))
1244 continue;
1245
1246 /* inherit upper level err_mask */
1247 qc->err_mask |= ehc->i.err_mask;
1248
1249 /* analyze TF */
1250 action |= ata_eh_analyze_tf(qc, &qc->result_tf);
1251
1252 /* DEV errors are probably spurious in case of ATA_BUS error */
1253 if (qc->err_mask & AC_ERR_ATA_BUS)
1254 qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA |
1255 AC_ERR_INVALID);
1256
1257 /* any real error trumps unknown error */
1258 if (qc->err_mask & ~AC_ERR_OTHER)
1259 qc->err_mask &= ~AC_ERR_OTHER;
1260
1261 /* SENSE_VALID trumps dev/unknown error and revalidation */
1262 if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
1263 qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);
1264 action &= ~ATA_EH_REVALIDATE;
1265 }
1266
1267 /* accumulate error info */
1268 failed_dev = qc->dev;
1269 all_err_mask |= qc->err_mask;
1270 if (qc->flags & ATA_QCFLAG_IO)
1271 is_io = 1;
1272 }
1273
1274 /* speed down iff command was in progress */
1275 if (failed_dev)
1276 action |= ata_eh_speed_down(failed_dev, is_io, all_err_mask);
1277
1278 /* enforce default EH actions */
1279 if (ap->flags & ATA_FLAG_FROZEN ||
1280 all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT))
1281 action |= ATA_EH_SOFTRESET;
1282 else if (all_err_mask)
1283 action |= ATA_EH_REVALIDATE;
1284
1285 /* record autopsy result */
1286 ehc->i.dev = failed_dev;
1287 ehc->i.action = action;
1288
1289 DPRINTK("EXIT\n");
1290}
1291
1292/**
1293 * ata_eh_report - report error handling to user
1294 * @ap: ATA port EH is going on
1295 *
1296 * Report EH to user.
1297 *
1298 * LOCKING:
1299 * None.
1300 */
1301static void ata_eh_report(struct ata_port *ap)
1302{
1303 struct ata_eh_context *ehc = &ap->eh_context;
1304 const char *frozen, *desc;
1305 int tag, nr_failed = 0;
1306
1307 desc = NULL;
1308 if (ehc->i.desc[0] != '\0')
1309 desc = ehc->i.desc;
1310
1311 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1312 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
1313
1314 if (!(qc->flags & ATA_QCFLAG_FAILED))
1315 continue;
1316 if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask)
1317 continue;
1318
1319 nr_failed++;
1320 }
1321
1322 if (!nr_failed && !ehc->i.err_mask)
1323 return;
1324
1325 frozen = "";
1326 if (ap->flags & ATA_FLAG_FROZEN)
1327 frozen = " frozen";
1328
1329 if (ehc->i.dev) {
1330 ata_dev_printk(ehc->i.dev, KERN_ERR, "exception Emask 0x%x "
1331 "SAct 0x%x SErr 0x%x action 0x%x%s\n",
1332 ehc->i.err_mask, ap->sactive, ehc->i.serror,
1333 ehc->i.action, frozen);
1334 if (desc)
1335 ata_dev_printk(ehc->i.dev, KERN_ERR, "(%s)\n", desc);
1336 } else {
1337 ata_port_printk(ap, KERN_ERR, "exception Emask 0x%x "
1338 "SAct 0x%x SErr 0x%x action 0x%x%s\n",
1339 ehc->i.err_mask, ap->sactive, ehc->i.serror,
1340 ehc->i.action, frozen);
1341 if (desc)
1342 ata_port_printk(ap, KERN_ERR, "(%s)\n", desc);
1343 }
1344
1345 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1346 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
1347
1348 if (!(qc->flags & ATA_QCFLAG_FAILED) || !qc->err_mask)
1349 continue;
1350
1351 ata_dev_printk(qc->dev, KERN_ERR, "tag %d cmd 0x%x "
1352 "Emask 0x%x stat 0x%x err 0x%x (%s)\n",
1353 qc->tag, qc->tf.command, qc->err_mask,
1354 qc->result_tf.command, qc->result_tf.feature,
1355 ata_err_string(qc->err_mask));
1356 }
1357}
1358
1359static int ata_do_reset(struct ata_port *ap, ata_reset_fn_t reset,
1360 unsigned int *classes)
1361{
1362 int i, rc;
1363
1364 for (i = 0; i < ATA_MAX_DEVICES; i++)
1365 classes[i] = ATA_DEV_UNKNOWN;
1366
1367 rc = reset(ap, classes);
1368 if (rc)
1369 return rc;
1370
1371 /* If any class isn't ATA_DEV_UNKNOWN, consider classification
1372 * is complete and convert all ATA_DEV_UNKNOWN to
1373 * ATA_DEV_NONE.
1374 */
1375 for (i = 0; i < ATA_MAX_DEVICES; i++)
1376 if (classes[i] != ATA_DEV_UNKNOWN)
1377 break;
1378
1379 if (i < ATA_MAX_DEVICES)
1380 for (i = 0; i < ATA_MAX_DEVICES; i++)
1381 if (classes[i] == ATA_DEV_UNKNOWN)
1382 classes[i] = ATA_DEV_NONE;
1383
1384 return 0;
1385}
1386
1387static int ata_eh_followup_srst_needed(int rc, int classify,
1388 const unsigned int *classes)
1389{
1390 if (rc == -EAGAIN)
1391 return 1;
1392 if (rc != 0)
1393 return 0;
1394 if (classify && classes[0] == ATA_DEV_UNKNOWN)
1395 return 1;
1396 return 0;
1397}
1398
1399static int ata_eh_reset(struct ata_port *ap, int classify,
1400 ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
1401 ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
1402{
1403 struct ata_eh_context *ehc = &ap->eh_context;
1404 unsigned int *classes = ehc->classes;
1405 int tries = ATA_EH_RESET_TRIES;
1406 int verbose = !(ap->flags & ATA_FLAG_LOADING);
1407 unsigned int action;
1408 ata_reset_fn_t reset;
1409 int i, did_followup_srst, rc;
1410
1411 /* Determine which reset to use and record in ehc->i.action.
1412 * prereset() may examine and modify it.
1413 */
1414 action = ehc->i.action;
1415 ehc->i.action &= ~ATA_EH_RESET_MASK;
1416 if (softreset && (!hardreset || (!sata_set_spd_needed(ap) &&
1417 !(action & ATA_EH_HARDRESET))))
1418 ehc->i.action |= ATA_EH_SOFTRESET;
1419 else
1420 ehc->i.action |= ATA_EH_HARDRESET;
1421
1422 if (prereset) {
1423 rc = prereset(ap);
1424 if (rc) {
1425 ata_port_printk(ap, KERN_ERR,
1426 "prereset failed (errno=%d)\n", rc);
1427 return rc;
1428 }
1429 }
1430
1431 /* prereset() might have modified ehc->i.action */
1432 if (ehc->i.action & ATA_EH_HARDRESET)
1433 reset = hardreset;
1434 else if (ehc->i.action & ATA_EH_SOFTRESET)
1435 reset = softreset;
1436 else {
1437 /* prereset told us not to reset, bang classes and return */
1438 for (i = 0; i < ATA_MAX_DEVICES; i++)
1439 classes[i] = ATA_DEV_NONE;
1440 return 0;
1441 }
1442
1443 /* did prereset() screw up? if so, fix up to avoid oopsing */
1444 if (!reset) {
1445 ata_port_printk(ap, KERN_ERR, "BUG: prereset() requested "
1446 "invalid reset type\n");
1447 if (softreset)
1448 reset = softreset;
1449 else
1450 reset = hardreset;
1451 }
1452
1453 retry:
1454 /* shut up during boot probing */
1455 if (verbose)
1456 ata_port_printk(ap, KERN_INFO, "%s resetting port\n",
1457 reset == softreset ? "soft" : "hard");
1458
1459 /* reset */
1460 ata_eh_about_to_do(ap, ATA_EH_RESET_MASK);
1461 ehc->i.flags |= ATA_EHI_DID_RESET;
1462
1463 rc = ata_do_reset(ap, reset, classes);
1464
1465 did_followup_srst = 0;
1466 if (reset == hardreset &&
1467 ata_eh_followup_srst_needed(rc, classify, classes)) {
1468 /* okay, let's do follow-up softreset */
1469 did_followup_srst = 1;
1470 reset = softreset;
1471
1472 if (!reset) {
1473 ata_port_printk(ap, KERN_ERR,
1474 "follow-up softreset required "
1475 "but no softreset avaliable\n");
1476 return -EINVAL;
1477 }
1478
1479 ata_eh_about_to_do(ap, ATA_EH_RESET_MASK);
1480 rc = ata_do_reset(ap, reset, classes);
1481
1482 if (rc == 0 && classify &&
1483 classes[0] == ATA_DEV_UNKNOWN) {
1484 ata_port_printk(ap, KERN_ERR,
1485 "classification failed\n");
1486 return -EINVAL;
1487 }
1488 }
1489
1490 if (rc && --tries) {
1491 const char *type;
1492
1493 if (reset == softreset) {
1494 if (did_followup_srst)
1495 type = "follow-up soft";
1496 else
1497 type = "soft";
1498 } else
1499 type = "hard";
1500
1501 ata_port_printk(ap, KERN_WARNING,
1502 "%sreset failed, retrying in 5 secs\n", type);
1503 ssleep(5);
1504
1505 if (reset == hardreset)
1506 sata_down_spd_limit(ap);
1507 if (hardreset)
1508 reset = hardreset;
1509 goto retry;
1510 }
1511
1512 if (rc == 0) {
1513 /* After the reset, the device state is PIO 0 and the
1514 * controller state is undefined. Record the mode.
1515 */
1516 for (i = 0; i < ATA_MAX_DEVICES; i++)
1517 ap->device[i].pio_mode = XFER_PIO_0;
1518
1519 if (postreset)
1520 postreset(ap, classes);
1521
1522 /* reset successful, schedule revalidation */
1523 ehc->i.dev = NULL;
1524 ehc->i.action &= ~ATA_EH_RESET_MASK;
1525 ehc->i.action |= ATA_EH_REVALIDATE;
1526 }
1527
1528 return rc;
1529}
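
/* A sketch (illustration only) of the prereset() contract used above:
 * prereset may rewrite ehc->i.action to steer reset selection before
 * ata_eh_reset() picks between softreset and hardreset.  my_prereset
 * is a hypothetical LLDD callback, not a libata symbol.
 */
static int my_prereset(struct ata_port *ap)
{
	/* say this controller's softreset is unreliable after a PHY
	 * event, so insist on a hardreset */
	ap->eh_context.i.action |= ATA_EH_HARDRESET;
	return 0;	/* non-zero aborts the whole reset sequence */
}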
1530
1531static int ata_eh_revalidate_and_attach(struct ata_port *ap,
1532 struct ata_device **r_failed_dev)
1533{
1534 struct ata_eh_context *ehc = &ap->eh_context;
1535 struct ata_device *dev;
1536 unsigned long flags;
1537 int i, rc = 0;
1538
1539 DPRINTK("ENTER\n");
1540
1541 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1542 dev = &ap->device[i];
1543
1544 if (ehc->i.action & ATA_EH_REVALIDATE && ata_dev_enabled(dev) &&
1545 (!ehc->i.dev || ehc->i.dev == dev)) {
1546 if (ata_port_offline(ap)) {
1547 rc = -EIO;
1548 break;
1549 }
1550
1551 ata_eh_about_to_do(ap, ATA_EH_REVALIDATE);
1552 rc = ata_dev_revalidate(dev,
1553 ehc->i.flags & ATA_EHI_DID_RESET);
1554 if (rc)
1555 break;
1556
1557 /* schedule the scsi_rescan_device() here */
1558 queue_work(ata_aux_wq, &(ap->scsi_rescan_task));
1559 } else if (dev->class == ATA_DEV_UNKNOWN &&
1560 ehc->tries[dev->devno] &&
1561 ata_class_enabled(ehc->classes[dev->devno])) {
1562 dev->class = ehc->classes[dev->devno];
1563
1564 rc = ata_dev_read_id(dev, &dev->class, 1, dev->id);
1565 if (rc == 0)
1566 rc = ata_dev_configure(dev, 1);
1567
1568 if (rc) {
1569 dev->class = ATA_DEV_UNKNOWN;
1570 break;
1571 }
1572
1573 spin_lock_irqsave(&ap->host_set->lock, flags);
1574 ap->flags |= ATA_FLAG_SCSI_HOTPLUG;
1575 spin_unlock_irqrestore(&ap->host_set->lock, flags);
1576 }
1577 }
1578
1579 if (rc == 0)
1580 ehc->i.action &= ~ATA_EH_REVALIDATE;
1581 else
1582 *r_failed_dev = dev;
1583
1584 DPRINTK("EXIT\n");
1585 return rc;
1586}
1587
1588static int ata_port_nr_enabled(struct ata_port *ap)
1589{
1590 int i, cnt = 0;
1591
1592 for (i = 0; i < ATA_MAX_DEVICES; i++)
1593 if (ata_dev_enabled(&ap->device[i]))
1594 cnt++;
1595 return cnt;
1596}
1597
1598static int ata_port_nr_vacant(struct ata_port *ap)
1599{
1600 int i, cnt = 0;
1601
1602 for (i = 0; i < ATA_MAX_DEVICES; i++)
1603 if (ap->device[i].class == ATA_DEV_UNKNOWN)
1604 cnt++;
1605 return cnt;
1606}
1607
1608static int ata_eh_skip_recovery(struct ata_port *ap)
1609{
1610 struct ata_eh_context *ehc = &ap->eh_context;
1611 int i;
1612
1613 if (ap->flags & ATA_FLAG_FROZEN || ata_port_nr_enabled(ap))
1614 return 0;
1615
1616 /* skip if class codes for all vacant slots are ATA_DEV_NONE */
1617 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1618 struct ata_device *dev = &ap->device[i];
1619
1620 if (dev->class == ATA_DEV_UNKNOWN &&
1621 ehc->classes[dev->devno] != ATA_DEV_NONE)
1622 return 0;
1623 }
1624
1625 return 1;
1626}
1627
1628/**
1629 * ata_eh_recover - recover host port after error
1630 * @ap: host port to recover
1631 * @prereset: prereset method (can be NULL)
1632 * @softreset: softreset method (can be NULL)
1633 * @hardreset: hardreset method (can be NULL)
1634 * @postreset: postreset method (can be NULL)
1635 *
1636 * This is the alpha and omega, yin and yang, heart and soul of
1637 * libata exception handling. On entry, actions required to
1638 * recover the port and hotplug requests are recorded in
1639 * eh_context. This function executes all the operations with
1640 * appropriate retries and fallbacks to resurrect failed
1641 * devices, detach goners and greet newcomers.
1642 *
1643 * LOCKING:
1644 * Kernel thread context (may sleep).
1645 *
1646 * RETURNS:
1647 * 0 on success, -errno on failure.
1648 */
1649static int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
1650 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
1651 ata_postreset_fn_t postreset)
1652{
1653 struct ata_eh_context *ehc = &ap->eh_context;
1654 struct ata_device *dev;
1655 int down_xfermask, i, rc;
1656
1657 DPRINTK("ENTER\n");
1658
1659 /* prep for recovery */
1660 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1661 dev = &ap->device[i];
1662
1663 ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
1664
1665 /* process hotplug request */
1666 if (dev->flags & ATA_DFLAG_DETACH)
1667 ata_eh_detach_dev(dev);
1668
1669 if (!ata_dev_enabled(dev) &&
1670 ((ehc->i.probe_mask & (1 << dev->devno)) &&
1671 !(ehc->did_probe_mask & (1 << dev->devno)))) {
1672 ata_eh_detach_dev(dev);
1673 ata_dev_init(dev);
1674 ehc->did_probe_mask |= (1 << dev->devno);
1675 ehc->i.action |= ATA_EH_SOFTRESET;
1676 }
1677 }
1678
1679 retry:
1680 down_xfermask = 0;
1681 rc = 0;
1682
1683 /* if UNLOADING, finish immediately */
1684 if (ap->flags & ATA_FLAG_UNLOADING)
1685 goto out;
1686
1687 /* skip EH if possible. */
1688 if (ata_eh_skip_recovery(ap))
1689 ehc->i.action = 0;
1690
1691 for (i = 0; i < ATA_MAX_DEVICES; i++)
1692 ehc->classes[i] = ATA_DEV_UNKNOWN;
1693
1694 /* reset */
1695 if (ehc->i.action & ATA_EH_RESET_MASK) {
1696 ata_eh_freeze_port(ap);
1697
1698 rc = ata_eh_reset(ap, ata_port_nr_vacant(ap), prereset,
1699 softreset, hardreset, postreset);
1700 if (rc) {
1701 ata_port_printk(ap, KERN_ERR,
1702 "reset failed, giving up\n");
1703 goto out;
1704 }
1705
1706 ata_eh_thaw_port(ap);
1707 }
1708
1709 /* revalidate existing devices and attach new ones */
1710 rc = ata_eh_revalidate_and_attach(ap, &dev);
1711 if (rc)
1712 goto dev_fail;
1713
1714 /* configure transfer mode if the port has been reset */
1715 if (ehc->i.flags & ATA_EHI_DID_RESET) {
1716 rc = ata_set_mode(ap, &dev);
1717 if (rc) {
1718 down_xfermask = 1;
1719 goto dev_fail;
1720 }
1721 }
1722
1723 goto out;
1724
1725 dev_fail:
1726 switch (rc) {
1727 case -ENODEV:
1728 /* device missing, schedule probing */
1729 ehc->i.probe_mask |= (1 << dev->devno);
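		/* fall through */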
1730 case -EINVAL:
1731 ehc->tries[dev->devno] = 0;
1732 break;
1733 case -EIO:
1734 sata_down_spd_limit(ap);
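		/* fall through */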
1735 default:
1736 ehc->tries[dev->devno]--;
1737 if (down_xfermask &&
1738 ata_down_xfermask_limit(dev, ehc->tries[dev->devno] == 1))
1739 ehc->tries[dev->devno] = 0;
1740 }
1741
1742 if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) {
1743 /* disable device if it has used up all its chances */
1744 ata_dev_disable(dev);
1745
1746 /* detach if offline */
1747 if (ata_port_offline(ap))
1748 ata_eh_detach_dev(dev);
1749
1750 /* probe if requested */
1751 if ((ehc->i.probe_mask & (1 << dev->devno)) &&
1752 !(ehc->did_probe_mask & (1 << dev->devno))) {
1753 ata_eh_detach_dev(dev);
1754 ata_dev_init(dev);
1755
1756 ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
1757 ehc->did_probe_mask |= (1 << dev->devno);
1758 ehc->i.action |= ATA_EH_SOFTRESET;
1759 }
1760 } else {
1761 /* soft didn't work? be haaaaard */
1762 if (ehc->i.flags & ATA_EHI_DID_RESET)
1763 ehc->i.action |= ATA_EH_HARDRESET;
1764 else
1765 ehc->i.action |= ATA_EH_SOFTRESET;
1766 }
1767
1768 if (ata_port_nr_enabled(ap)) {
1769 ata_port_printk(ap, KERN_WARNING, "failed to recover some "
1770 "devices, retrying in 5 secs\n");
1771 ssleep(5);
1772 } else {
1773 /* no device left, repeat fast */
1774 msleep(500);
1775 }
1776
1777 goto retry;
1778
1779 out:
1780 if (rc) {
1781 for (i = 0; i < ATA_MAX_DEVICES; i++)
1782 ata_dev_disable(&ap->device[i]);
1783 }
1784
1785 DPRINTK("EXIT, rc=%d\n", rc);
1786 return rc;
1787}
1788
1789/**
1790 * ata_eh_finish - finish up EH
1791 * @ap: host port to finish EH for
1792 *
1793 * Recovery is complete. Clean up EH states and retry or finish
1794 * failed qcs.
1795 *
1796 * LOCKING:
1797 * None.
1798 */
1799static void ata_eh_finish(struct ata_port *ap)
1800{
1801 int tag;
1802
1803 /* retry or finish qcs */
1804 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1805 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
1806
1807 if (!(qc->flags & ATA_QCFLAG_FAILED))
1808 continue;
1809
1810 if (qc->err_mask) {
1811 /* FIXME: Once EH migration is complete,
1812 * generate sense data in this function,
1813 * considering both err_mask and tf.
1814 */
1815 if (qc->err_mask & AC_ERR_INVALID)
1816 ata_eh_qc_complete(qc);
1817 else
1818 ata_eh_qc_retry(qc);
1819 } else {
1820 if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
1821 ata_eh_qc_complete(qc);
1822 } else {
1823 /* feed zero TF to sense generation */
1824 memset(&qc->result_tf, 0, sizeof(qc->result_tf));
1825 ata_eh_qc_retry(qc);
1826 }
1827 }
1828 }
1829}
1830
1831/**
1832 * ata_do_eh - do standard error handling
1833 * @ap: host port to handle error for
1834 * @prereset: prereset method (can be NULL)
1835 * @softreset: softreset method (can be NULL)
1836 * @hardreset: hardreset method (can be NULL)
1837 * @postreset: postreset method (can be NULL)
1838 *
1839 * Perform standard error handling sequence.
1840 *
1841 * LOCKING:
1842 * Kernel thread context (may sleep).
1843 */
1844void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
1845 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
1846 ata_postreset_fn_t postreset)
1847{
1848 if (!(ap->flags & ATA_FLAG_LOADING)) {
1849 ata_eh_autopsy(ap);
1850 ata_eh_report(ap);
1851 }
1852
1853 ata_eh_recover(ap, prereset, softreset, hardreset, postreset);
1854 ata_eh_finish(ap);
1855}
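
/* How an LLDD typically hooks into the sequence above (a sketch for
 * illustration; my_error_handler and the my_* reset methods are
 * hypothetical names - many drivers pass libata's stock prereset and
 * reset helpers here instead):
 *
 *	static void my_error_handler(struct ata_port *ap)
 *	{
 *		ata_do_eh(ap, my_prereset, my_softreset,
 *			  my_hardreset, my_postreset);
 *	}
 *
 * Any of the four reset methods may be NULL; ata_eh_reset() chooses
 * between soft and hard reset from ehc->i.action and what is given.
 */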
diff --git a/drivers/scsi/libata-scsi.c b/drivers/scsi/libata-scsi.c
index a0289ec3e283..32c1df69091b 100644
--- a/drivers/scsi/libata-scsi.c
+++ b/drivers/scsi/libata-scsi.c
@@ -41,6 +41,7 @@
 #include <scsi/scsi_eh.h>
 #include <scsi/scsi_device.h>
 #include <scsi/scsi_request.h>
+#include <scsi/scsi_tcq.h>
 #include <scsi/scsi_transport.h>
 #include <linux/libata.h>
 #include <linux/hdreg.h>
@@ -51,10 +52,14 @@
 #define SECTOR_SIZE	512
 
 typedef unsigned int (*ata_xlat_func_t)(struct ata_queued_cmd *qc, const u8 *scsicmd);
-static struct ata_device *
-ata_scsi_find_dev(struct ata_port *ap, const struct scsi_device *scsidev);
-static void ata_scsi_error(struct Scsi_Host *host);
-enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd);
+
+static struct ata_device * __ata_scsi_find_dev(struct ata_port *ap,
+					const struct scsi_device *scsidev);
+static struct ata_device * ata_scsi_find_dev(struct ata_port *ap,
+					const struct scsi_device *scsidev);
+static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
+			      unsigned int id, unsigned int lun);
+
 
 #define RW_RECOVERY_MPAGE 0x1
 #define RW_RECOVERY_MPAGE_LEN 12
@@ -102,6 +107,7 @@ static const u8 def_control_mpage[CONTROL_MPAGE_LEN] = {
 struct scsi_transport_template ata_scsi_transport_template = {
 	.eh_strategy_handler	= ata_scsi_error,
 	.eh_timed_out		= ata_scsi_timed_out,
+	.user_scan		= ata_scsi_user_scan,
 };
 
 
@@ -304,7 +310,6 @@ int ata_scsi_ioctl(struct scsi_device *scsidev, int cmd, void __user *arg)
 
 /**
  *	ata_scsi_qc_new - acquire new ata_queued_cmd reference
- *	@ap: ATA port to which the new command is attached
  *	@dev: ATA device to which the new command is attached
  *	@cmd: SCSI command that originated this ATA command
  *	@done: SCSI command completion function
@@ -323,14 +328,13 @@ int ata_scsi_ioctl(struct scsi_device *scsidev, int cmd, void __user *arg)
  *	RETURNS:
  *	Command allocated, or %NULL if none available.
  */
-struct ata_queued_cmd *ata_scsi_qc_new(struct ata_port *ap,
-				       struct ata_device *dev,
+struct ata_queued_cmd *ata_scsi_qc_new(struct ata_device *dev,
 				       struct scsi_cmnd *cmd,
 				       void (*done)(struct scsi_cmnd *))
 {
 	struct ata_queued_cmd *qc;
 
-	qc = ata_qc_new_init(ap, dev);
+	qc = ata_qc_new_init(dev);
 	if (qc) {
 		qc->scsicmd = cmd;
 		qc->scsidone = done;
@@ -397,18 +401,18 @@ void ata_dump_status(unsigned id, struct ata_taskfile *tf)
 
 int ata_scsi_device_resume(struct scsi_device *sdev)
 {
-	struct ata_port *ap = (struct ata_port *) &sdev->host->hostdata[0];
-	struct ata_device *dev = &ap->device[sdev->id];
+	struct ata_port *ap = ata_shost_to_port(sdev->host);
+	struct ata_device *dev = __ata_scsi_find_dev(ap, sdev);
 
-	return ata_device_resume(ap, dev);
+	return ata_device_resume(dev);
 }
 
 int ata_scsi_device_suspend(struct scsi_device *sdev, pm_message_t state)
 {
-	struct ata_port *ap = (struct ata_port *) &sdev->host->hostdata[0];
-	struct ata_device *dev = &ap->device[sdev->id];
+	struct ata_port *ap = ata_shost_to_port(sdev->host);
+	struct ata_device *dev = __ata_scsi_find_dev(ap, sdev);
 
-	return ata_device_suspend(ap, dev, state);
+	return ata_device_suspend(dev, state);
 }
 
 /**
@@ -419,6 +423,7 @@ int ata_scsi_device_suspend(struct scsi_device *sdev, pm_message_t state)
  *	@sk: the sense key we'll fill out
  *	@asc: the additional sense code we'll fill out
  *	@ascq: the additional sense code qualifier we'll fill out
+ *	@verbose: be verbose
  *
  *	Converts an ATA error into a SCSI error. Fill out pointers to
  *	SK, ASC, and ASCQ bytes for later use in fixed or descriptor
@@ -428,7 +433,7 @@ int ata_scsi_device_suspend(struct scsi_device *sdev, pm_message_t state)
  *	spin_lock_irqsave(host_set lock)
  */
 void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk, u8 *asc,
-			u8 *ascq)
+			u8 *ascq, int verbose)
 {
 	int i;
 
@@ -493,8 +498,9 @@ void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk, u8 *asc,
 			}
 		}
 		/* No immediate match */
-		printk(KERN_WARNING "ata%u: no sense translation for "
-		       "error 0x%02x\n", id, drv_err);
+		if (verbose)
+			printk(KERN_WARNING "ata%u: no sense translation for "
+			       "error 0x%02x\n", id, drv_err);
 	}
 
 	/* Fall back to interpreting status bits */
@@ -507,8 +513,9 @@ void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk, u8 *asc,
 		}
 	}
 	/* No error?  Undecoded? */
-	printk(KERN_WARNING "ata%u: no sense translation for status: 0x%02x\n",
-	       id, drv_stat);
+	if (verbose)
+		printk(KERN_WARNING "ata%u: no sense translation for "
+		       "status: 0x%02x\n", id, drv_stat);
 
 	/* We need a sensible error return here, which is tricky, and one
 	   that won't cause people to do things like return a disk wrongly */
@@ -517,9 +524,10 @@ void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk, u8 *asc,
 	*asc = 0x00;
 	*ascq = 0x00;
 
  translate_done:
-	printk(KERN_ERR "ata%u: translated ATA stat/err 0x%02x/%02x to "
-	       "SCSI SK/ASC/ASCQ 0x%x/%02x/%02x\n", id, drv_stat, drv_err,
-	       *sk, *asc, *ascq);
+	if (verbose)
+		printk(KERN_ERR "ata%u: translated ATA stat/err 0x%02x/%02x "
+		       "to SCSI SK/ASC/ASCQ 0x%x/%02x/%02x\n",
+		       id, drv_stat, drv_err, *sk, *asc, *ascq);
 	return;
 }
 
@@ -539,27 +547,23 @@ void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk, u8 *asc,
 void ata_gen_ata_desc_sense(struct ata_queued_cmd *qc)
 {
 	struct scsi_cmnd *cmd = qc->scsicmd;
-	struct ata_taskfile *tf = &qc->tf;
+	struct ata_taskfile *tf = &qc->result_tf;
 	unsigned char *sb = cmd->sense_buffer;
 	unsigned char *desc = sb + 8;
+	int verbose = qc->ap->ops->error_handler == NULL;
 
 	memset(sb, 0, SCSI_SENSE_BUFFERSIZE);
 
 	cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
 
 	/*
-	 * Read the controller registers.
-	 */
-	WARN_ON(qc->ap->ops->tf_read == NULL);
-	qc->ap->ops->tf_read(qc->ap, tf);
-
-	/*
 	 * Use ata_to_sense_error() to map status register bits
 	 * onto sense key, asc & ascq.
 	 */
-	if (tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
+	if (qc->err_mask ||
+	    tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
 		ata_to_sense_error(qc->ap->id, tf->command, tf->feature,
-				   &sb[1], &sb[2], &sb[3]);
+				   &sb[1], &sb[2], &sb[3], verbose);
 		sb[1] &= 0x0f;
 	}
 
@@ -615,26 +619,22 @@ void ata_gen_ata_desc_sense(struct ata_queued_cmd *qc)
 void ata_gen_fixed_sense(struct ata_queued_cmd *qc)
 {
 	struct scsi_cmnd *cmd = qc->scsicmd;
-	struct ata_taskfile *tf = &qc->tf;
+	struct ata_taskfile *tf = &qc->result_tf;
 	unsigned char *sb = cmd->sense_buffer;
+	int verbose = qc->ap->ops->error_handler == NULL;
 
 	memset(sb, 0, SCSI_SENSE_BUFFERSIZE);
 
 	cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
 
 	/*
-	 * Read the controller registers.
-	 */
-	WARN_ON(qc->ap->ops->tf_read == NULL);
-	qc->ap->ops->tf_read(qc->ap, tf);
-
-	/*
 	 * Use ata_to_sense_error() to map status register bits
 	 * onto sense key, asc & ascq.
 	 */
-	if (tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
+	if (qc->err_mask ||
+	    tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
 		ata_to_sense_error(qc->ap->id, tf->command, tf->feature,
-				   &sb[2], &sb[12], &sb[13]);
+				   &sb[2], &sb[12], &sb[13], verbose);
 		sb[2] &= 0x0f;
 	}
 
@@ -677,7 +677,7 @@ static void ata_scsi_dev_config(struct scsi_device *sdev,
 	 */
 	max_sectors = ATA_MAX_SECTORS;
 	if (dev->flags & ATA_DFLAG_LBA48)
-		max_sectors = 2048;
+		max_sectors = ATA_MAX_SECTORS_LBA48;
 	if (dev->max_sectors)
 		max_sectors = dev->max_sectors;
 
@@ -692,6 +692,14 @@ static void ata_scsi_dev_config(struct scsi_device *sdev,
 		request_queue_t *q = sdev->request_queue;
 		blk_queue_max_hw_segments(q, q->max_hw_segments - 1);
 	}
+
+	if (dev->flags & ATA_DFLAG_NCQ) {
+		int depth;
+
+		depth = min(sdev->host->can_queue, ata_id_queue_depth(dev->id));
+		depth = min(ATA_MAX_QUEUE - 1, depth);
+		scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, depth);
+	}
 }
 
 /**
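
/* Worked example for the NCQ depth clamp above (illustration,
 * assuming ATA_MAX_QUEUE is 32 as in this series): a drive
 * advertising queue depth 32 on a host with can_queue = 31 gets
 * min(31, 32) = 31, further clamped to ATA_MAX_QUEUE - 1 = 31, so
 * the highest tag is never handed to SCSI - this series keeps that
 * tag back for libata-internal commands.
 */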
@@ -708,152 +716,88 @@ static void ata_scsi_dev_config(struct scsi_device *sdev,
 
 int ata_scsi_slave_config(struct scsi_device *sdev)
 {
+	struct ata_port *ap = ata_shost_to_port(sdev->host);
+	struct ata_device *dev = __ata_scsi_find_dev(ap, sdev);
+
 	ata_scsi_sdev_config(sdev);
 
 	blk_queue_max_phys_segments(sdev->request_queue, LIBATA_MAX_PRD);
 
-	if (sdev->id < ATA_MAX_DEVICES) {
-		struct ata_port *ap;
-		struct ata_device *dev;
-
-		ap = (struct ata_port *) &sdev->host->hostdata[0];
-		dev = &ap->device[sdev->id];
-
+	if (dev)
 		ata_scsi_dev_config(sdev, dev);
-	}
 
 	return 0;	/* scsi layer doesn't check return value, sigh */
 }
 
 /**
- *	ata_scsi_timed_out - SCSI layer time out callback
- *	@cmd: timed out SCSI command
+ *	ata_scsi_slave_destroy - SCSI device is about to be destroyed
+ *	@sdev: SCSI device to be destroyed
  *
- *	Handles SCSI layer timeout.  We race with normal completion of
- *	the qc for @cmd.  If the qc is already gone, we lose and let
- *	the scsi command finish (EH_HANDLED).  Otherwise, the qc has
- *	timed out and EH should be invoked.  Prevent ata_qc_complete()
- *	from finishing it by setting EH_SCHEDULED and return
- *	EH_NOT_HANDLED.
+ *	@sdev is about to be destroyed for hot/warm unplugging.  If
+ *	this unplugging was initiated by libata as indicated by NULL
+ *	dev->sdev, this function doesn't have to do anything.
+ *	Otherwise, SCSI layer initiated warm-unplug is in progress.
+ *	Clear dev->sdev, schedule the device for ATA detach and invoke
+ *	EH.
  *
 *	LOCKING:
- *	Called from timer context
- *
- *	RETURNS:
- *	EH_HANDLED or EH_NOT_HANDLED
+ *	Defined by SCSI layer.  We don't really care.
  */
-enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
+void ata_scsi_slave_destroy(struct scsi_device *sdev)
 {
-	struct Scsi_Host *host = cmd->device->host;
-	struct ata_port *ap = (struct ata_port *) &host->hostdata[0];
+	struct ata_port *ap = ata_shost_to_port(sdev->host);
 	unsigned long flags;
-	struct ata_queued_cmd *qc;
-	enum scsi_eh_timer_return ret = EH_HANDLED;
+	struct ata_device *dev;
 
-	DPRINTK("ENTER\n");
+	if (!ap->ops->error_handler)
+		return;
 
 	spin_lock_irqsave(&ap->host_set->lock, flags);
-	qc = ata_qc_from_tag(ap, ap->active_tag);
-	if (qc) {
-		WARN_ON(qc->scsicmd != cmd);
-		qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
-		qc->err_mask |= AC_ERR_TIMEOUT;
-		ret = EH_NOT_HANDLED;
+	dev = __ata_scsi_find_dev(ap, sdev);
+	if (dev && dev->sdev) {
+		/* SCSI device already in CANCEL state, no need to offline it */
+		dev->sdev = NULL;
+		dev->flags |= ATA_DFLAG_DETACH;
+		ata_port_schedule_eh(ap);
 	}
 	spin_unlock_irqrestore(&ap->host_set->lock, flags);
-
-	DPRINTK("EXIT, ret=%d\n", ret);
-	return ret;
 }
 
 /**
- *	ata_scsi_error - SCSI layer error handler callback
- *	@host: SCSI host on which error occurred
+ *	ata_scsi_change_queue_depth - SCSI callback for queue depth config
+ *	@sdev: SCSI device to configure queue depth for
+ *	@queue_depth: new queue depth
 *
- *	Handles SCSI-layer-thrown error events.
+ *	This is libata standard hostt->change_queue_depth callback.
+ *	SCSI will call into this callback when user tries to set queue
+ *	depth via sysfs.
 *
 *	LOCKING:
- *	Inherited from SCSI layer (none, can sleep)
+ *	SCSI layer (we don't care)
+ *
+ *	RETURNS:
+ *	Newly configured queue depth.
 */
-
-static void ata_scsi_error(struct Scsi_Host *host)
+int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth)
 {
-	struct ata_port *ap;
-	unsigned long flags;
-
-	DPRINTK("ENTER\n");
-
-	ap = (struct ata_port *) &host->hostdata[0];
-
-	spin_lock_irqsave(&ap->host_set->lock, flags);
-	WARN_ON(ap->flags & ATA_FLAG_IN_EH);
-	ap->flags |= ATA_FLAG_IN_EH;
-	WARN_ON(ata_qc_from_tag(ap, ap->active_tag) == NULL);
-	spin_unlock_irqrestore(&ap->host_set->lock, flags);
-
-	ata_port_flush_task(ap);
-
-	ap->ops->eng_timeout(ap);
-
-	WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q));
-
-	scsi_eh_flush_done_q(&ap->eh_done_q);
-
-	spin_lock_irqsave(&ap->host_set->lock, flags);
-	ap->flags &= ~ATA_FLAG_IN_EH;
-	spin_unlock_irqrestore(&ap->host_set->lock, flags);
-
-	DPRINTK("EXIT\n");
-}
-
-static void ata_eh_scsidone(struct scsi_cmnd *scmd)
-{
-	/* nada */
-}
+	struct ata_port *ap = ata_shost_to_port(sdev->host);
+	struct ata_device *dev;
+	int max_depth;
 
-static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
-{
-	struct ata_port *ap = qc->ap;
-	struct scsi_cmnd *scmd = qc->scsicmd;
-	unsigned long flags;
+	if (queue_depth < 1)
+		return sdev->queue_depth;
 
-	spin_lock_irqsave(&ap->host_set->lock, flags);
-	qc->scsidone = ata_eh_scsidone;
-	__ata_qc_complete(qc);
-	WARN_ON(ata_tag_valid(qc->tag));
-	spin_unlock_irqrestore(&ap->host_set->lock, flags);
+	dev = ata_scsi_find_dev(ap, sdev);
+	if (!dev || !ata_dev_enabled(dev))
+		return sdev->queue_depth;
 
-	scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
-}
+	max_depth = min(sdev->host->can_queue, ata_id_queue_depth(dev->id));
+	max_depth = min(ATA_MAX_QUEUE - 1, max_depth);
+	if (queue_depth > max_depth)
+		queue_depth = max_depth;
 
-/**
- *	ata_eh_qc_complete - Complete an active ATA command from EH
- *	@qc: Command to complete
- *
- *	Indicate to the mid and upper layers that an ATA command has
- *	completed.  To be used from EH.
- */
-void ata_eh_qc_complete(struct ata_queued_cmd *qc)
-{
-	struct scsi_cmnd *scmd = qc->scsicmd;
-	scmd->retries = scmd->allowed;
-	__ata_eh_qc_complete(qc);
-}
-
-/**
- *	ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
- *	@qc: Command to retry
- *
- *	Indicate to the mid and upper layers that an ATA command
- *	should be retried.  To be used from EH.
- *
- *	SCSI midlayer limits the number of retries to scmd->allowed.
- *	This function might need to adjust scmd->retries for commands
- *	which get retried due to unrelated NCQ failures.
- */
-void ata_eh_qc_retry(struct ata_queued_cmd *qc)
-{
-	__ata_eh_qc_complete(qc);
+	scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, queue_depth);
+	return queue_depth;
 }
 
 /**
@@ -891,7 +835,7 @@ static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc,
 	tf->nsect = 1;	/* 1 sector, lba=0 */
 
 	if (qc->dev->flags & ATA_DFLAG_LBA) {
-		qc->tf.flags |= ATA_TFLAG_LBA;
+		tf->flags |= ATA_TFLAG_LBA;
 
 		tf->lbah = 0x0;
 		tf->lbam = 0x0;
@@ -1195,6 +1139,7 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, const u8 *scsicm
 	u64 block;
 	u32 n_block;
 
+	qc->flags |= ATA_QCFLAG_IO;
 	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
 
 	if (scsicmd[0] == WRITE_10 || scsicmd[0] == WRITE_6 ||
@@ -1241,7 +1186,36 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, const u8 *scsicm
 		 */
 		goto nothing_to_do;
 
-	if (dev->flags & ATA_DFLAG_LBA) {
+	if ((dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ)) == ATA_DFLAG_NCQ) {
+		/* yay, NCQ */
+		if (!lba_48_ok(block, n_block))
+			goto out_of_range;
+
+		tf->protocol = ATA_PROT_NCQ;
+		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
+
+		if (tf->flags & ATA_TFLAG_WRITE)
+			tf->command = ATA_CMD_FPDMA_WRITE;
+		else
+			tf->command = ATA_CMD_FPDMA_READ;
+
+		qc->nsect = n_block;
+
+		tf->nsect = qc->tag << 3;
+		tf->hob_feature = (n_block >> 8) & 0xff;
+		tf->feature = n_block & 0xff;
+
+		tf->hob_lbah = (block >> 40) & 0xff;
+		tf->hob_lbam = (block >> 32) & 0xff;
+		tf->hob_lbal = (block >> 24) & 0xff;
+		tf->lbah = (block >> 16) & 0xff;
+		tf->lbam = (block >> 8) & 0xff;
+		tf->lbal = block & 0xff;
+
+		tf->device = 1 << 6;
+		if (tf->flags & ATA_TFLAG_FUA)
+			tf->device |= 1 << 7;
+	} else if (dev->flags & ATA_DFLAG_LBA) {
 		tf->flags |= ATA_TFLAG_LBA;
 
 		if (lba_28_ok(block, n_block)) {
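
/* Worked example of the FPDMA encoding added above (illustration):
 * a READ of n_block = 8 sectors at block = 0x12345678 with tag 5
 * yields tf->command = ATA_CMD_FPDMA_READ, the sector count in the
 * FEATURE pair (feature = 0x08, hob_feature = 0x00), the tag in
 * bits 7:3 of nsect (5 << 3 = 0x28), and the LBA split as
 * lbal/lbam/lbah = 0x78/0x56/0x34 with hob_lbal = 0x12.
 */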
@@ -1332,6 +1306,17 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
 	u8 *cdb = cmd->cmnd;
 	int need_sense = (qc->err_mask != 0);
 
+	/* We snoop the SET_FEATURES - Write Cache ON/OFF command, and
+	 * schedule EH_REVALIDATE operation to update the IDENTIFY DEVICE
+	 * cache
+	 */
+	if (!need_sense && (qc->tf.command == ATA_CMD_SET_FEATURES) &&
+	    ((qc->tf.feature == SETFEATURES_WC_ON) ||
+	     (qc->tf.feature == SETFEATURES_WC_OFF))) {
+		qc->ap->eh_info.action |= ATA_EH_REVALIDATE;
+		ata_port_schedule_eh(qc->ap);
+	}
+
 	/* For ATA pass thru (SAT) commands, generate a sense block if
 	 * user mandated it or if there's an error.  Note that if we
 	 * generate because the user forced us to, a check condition
@@ -1356,10 +1341,8 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
 		}
 	}
 
-	if (need_sense) {
-		/* The ata_gen_..._sense routines fill in tf */
-		ata_dump_status(qc->ap->id, &qc->tf);
-	}
+	if (need_sense && !qc->ap->ops->error_handler)
+		ata_dump_status(qc->ap->id, &qc->result_tf);
 
 	qc->scsidone(cmd);
 
@@ -1367,8 +1350,40 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
1367} 1350}
1368 1351
1369/** 1352/**
1353 * ata_scmd_need_defer - Check whether we need to defer scmd
1354 * @dev: ATA device to which the command is addressed
1355 * @is_io: Is the command IO (and thus possibly NCQ)?
1356 *
1357 * NCQ and non-NCQ commands cannot run together. As upper layer
1358 * only knows the queue depth, we are responsible for maintaining
1359 * exclusion. This function checks whether a new command can be
1360 * issued to @dev.
1361 *
1362 * LOCKING:
1363 * spin_lock_irqsave(host_set lock)
1364 *
1365 * RETURNS:
1366 * 1 if deferring is needed, 0 otherwise.
1367 */
1368static int ata_scmd_need_defer(struct ata_device *dev, int is_io)
1369{
1370 struct ata_port *ap = dev->ap;
1371
1372 if (!(dev->flags & ATA_DFLAG_NCQ))
1373 return 0;
1374
1375 if (is_io) {
1376 if (!ata_tag_valid(ap->active_tag))
1377 return 0;
1378 } else {
1379 if (!ata_tag_valid(ap->active_tag) && !ap->sactive)
1380 return 0;
1381 }
1382 return 1;
1383}
1384
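The exclusion rule ata_scmd_need_defer() implements is easy to restate as a pure function: an NCQ-capable device may not mix queued and non-queued commands, so an I/O command (which would go out as NCQ) defers while a non-NCQ command owns active_tag, and a non-I/O command defers while anything at all is in flight. A sketch:

#include <stdbool.h>
#include <stdint.h>

static bool need_defer(bool dev_is_ncq, bool is_io,
		       bool active_tag_valid, uint32_t sactive)
{
	if (!dev_is_ncq)
		return false;		/* queue depth 1; midlayer handles it */
	if (is_io)
		return active_tag_valid;	/* NCQ vs. non-NCQ in flight */
	return active_tag_valid || sactive != 0; /* non-NCQ vs. anything */
}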
1385/**
1370 * ata_scsi_translate - Translate then issue SCSI command to ATA device 1386 * ata_scsi_translate - Translate then issue SCSI command to ATA device
1371 * @ap: ATA port to which the command is addressed
1372 * @dev: ATA device to which the command is addressed 1387 * @dev: ATA device to which the command is addressed
1373 * @cmd: SCSI command to execute 1388 * @cmd: SCSI command to execute
1374 * @done: SCSI command completion function 1389 * @done: SCSI command completion function
@@ -1389,19 +1404,25 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
1389 * 1404 *
1390 * LOCKING: 1405 * LOCKING:
1391 * spin_lock_irqsave(host_set lock) 1406 * spin_lock_irqsave(host_set lock)
1407 *
1408 * RETURNS:
 1409 * 0 on success, SCSI_MLQUEUE_DEVICE_BUSY if the command
1410 * needs to be deferred.
1392 */ 1411 */
1393 1412static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd,
1394static void ata_scsi_translate(struct ata_port *ap, struct ata_device *dev,
1395 struct scsi_cmnd *cmd,
1396 void (*done)(struct scsi_cmnd *), 1413 void (*done)(struct scsi_cmnd *),
1397 ata_xlat_func_t xlat_func) 1414 ata_xlat_func_t xlat_func)
1398{ 1415{
1399 struct ata_queued_cmd *qc; 1416 struct ata_queued_cmd *qc;
1400 u8 *scsicmd = cmd->cmnd; 1417 u8 *scsicmd = cmd->cmnd;
1418 int is_io = xlat_func == ata_scsi_rw_xlat;
1401 1419
1402 VPRINTK("ENTER\n"); 1420 VPRINTK("ENTER\n");
1403 1421
1404 qc = ata_scsi_qc_new(ap, dev, cmd, done); 1422 if (unlikely(ata_scmd_need_defer(dev, is_io)))
1423 goto defer;
1424
1425 qc = ata_scsi_qc_new(dev, cmd, done);
1405 if (!qc) 1426 if (!qc)
1406 goto err_mem; 1427 goto err_mem;
1407 1428
@@ -1409,8 +1430,8 @@ static void ata_scsi_translate(struct ata_port *ap, struct ata_device *dev,
1409 if (cmd->sc_data_direction == DMA_FROM_DEVICE || 1430 if (cmd->sc_data_direction == DMA_FROM_DEVICE ||
1410 cmd->sc_data_direction == DMA_TO_DEVICE) { 1431 cmd->sc_data_direction == DMA_TO_DEVICE) {
1411 if (unlikely(cmd->request_bufflen < 1)) { 1432 if (unlikely(cmd->request_bufflen < 1)) {
1412 printk(KERN_WARNING "ata%u(%u): WARNING: zero len r/w req\n", 1433 ata_dev_printk(dev, KERN_WARNING,
1413 ap->id, dev->devno); 1434 "WARNING: zero len r/w req\n");
1414 goto err_did; 1435 goto err_did;
1415 } 1436 }
1416 1437
@@ -1432,13 +1453,13 @@ static void ata_scsi_translate(struct ata_port *ap, struct ata_device *dev,
1432 ata_qc_issue(qc); 1453 ata_qc_issue(qc);
1433 1454
1434 VPRINTK("EXIT\n"); 1455 VPRINTK("EXIT\n");
1435 return; 1456 return 0;
1436 1457
1437early_finish: 1458early_finish:
1438 ata_qc_free(qc); 1459 ata_qc_free(qc);
1439 done(cmd); 1460 done(cmd);
1440 DPRINTK("EXIT - early finish (good or error)\n"); 1461 DPRINTK("EXIT - early finish (good or error)\n");
1441 return; 1462 return 0;
1442 1463
1443err_did: 1464err_did:
1444 ata_qc_free(qc); 1465 ata_qc_free(qc);
@@ -1446,7 +1467,11 @@ err_mem:
1446 cmd->result = (DID_ERROR << 16); 1467 cmd->result = (DID_ERROR << 16);
1447 done(cmd); 1468 done(cmd);
1448 DPRINTK("EXIT - internal\n"); 1469 DPRINTK("EXIT - internal\n");
1449 return; 1470 return 0;
1471
1472defer:
1473 DPRINTK("EXIT - defer\n");
1474 return SCSI_MLQUEUE_DEVICE_BUSY;
1450} 1475}
1451 1476
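With deferral in the picture, ata_scsi_translate() joins the standard queuecommand contract: return 0 when the command was consumed (issued, or completed early with an error), or SCSI_MLQUEUE_DEVICE_BUSY to make the midlayer hold the command and retry once an outstanding one completes. The shape of it, with stand-in callbacks (the BUSY value is the one from <scsi/scsi.h> of this era):

#include <stdbool.h>

#define SCSI_MLQUEUE_DEVICE_BUSY 0x1056

static int translate_and_queue(void *dev, void *cmd,
			       bool (*need_defer)(void *dev),
			       void (*issue)(void *dev, void *cmd))
{
	if (need_defer(dev))
		return SCSI_MLQUEUE_DEVICE_BUSY;	/* midlayer requeues */
	issue(dev, cmd);
	return 0;			/* consumed, one way or another */
}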
1452/** 1477/**
@@ -1944,7 +1969,7 @@ unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf,
1944 return 0; 1969 return 0;
1945 1970
1946 dpofua = 0; 1971 dpofua = 0;
1947 if (ata_dev_supports_fua(args->id) && dev->flags & ATA_DFLAG_LBA48 && 1972 if (ata_dev_supports_fua(args->id) && (dev->flags & ATA_DFLAG_LBA48) &&
1948 (!(dev->flags & ATA_DFLAG_PIO) || dev->multi_count)) 1973 (!(dev->flags & ATA_DFLAG_PIO) || dev->multi_count))
1949 dpofua = 1 << 4; 1974 dpofua = 1 << 4;
1950 1975
@@ -2137,13 +2162,14 @@ void ata_scsi_badcmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *), u8
2137 2162
2138static void atapi_sense_complete(struct ata_queued_cmd *qc) 2163static void atapi_sense_complete(struct ata_queued_cmd *qc)
2139{ 2164{
2140 if (qc->err_mask && ((qc->err_mask & AC_ERR_DEV) == 0)) 2165 if (qc->err_mask && ((qc->err_mask & AC_ERR_DEV) == 0)) {
2141 /* FIXME: not quite right; we don't want the 2166 /* FIXME: not quite right; we don't want the
2142 * translation of taskfile registers into 2167 * translation of taskfile registers into
 2143 * sense descriptors, since that's only 2168 * sense descriptors, since that's only
2144 * correct for ATA, not ATAPI 2169 * correct for ATA, not ATAPI
2145 */ 2170 */
2146 ata_gen_ata_desc_sense(qc); 2171 ata_gen_ata_desc_sense(qc);
2172 }
2147 2173
2148 qc->scsidone(qc->scsicmd); 2174 qc->scsidone(qc->scsicmd);
2149 ata_qc_free(qc); 2175 ata_qc_free(qc);
@@ -2207,21 +2233,38 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc)
2207 2233
2208 VPRINTK("ENTER, err_mask 0x%X\n", err_mask); 2234 VPRINTK("ENTER, err_mask 0x%X\n", err_mask);
2209 2235
2236 /* handle completion from new EH */
2237 if (unlikely(qc->ap->ops->error_handler &&
2238 (err_mask || qc->flags & ATA_QCFLAG_SENSE_VALID))) {
2239
2240 if (!(qc->flags & ATA_QCFLAG_SENSE_VALID)) {
2241 /* FIXME: not quite right; we don't want the
2242 * translation of taskfile registers into a
2243 * sense descriptors, since that's only
2244 * correct for ATA, not ATAPI
2245 */
2246 ata_gen_ata_desc_sense(qc);
2247 }
2248
2249 qc->scsicmd->result = SAM_STAT_CHECK_CONDITION;
2250 qc->scsidone(cmd);
2251 ata_qc_free(qc);
2252 return;
2253 }
2254
2255 /* successful completion or old EH failure path */
2210 if (unlikely(err_mask & AC_ERR_DEV)) { 2256 if (unlikely(err_mask & AC_ERR_DEV)) {
2211 cmd->result = SAM_STAT_CHECK_CONDITION; 2257 cmd->result = SAM_STAT_CHECK_CONDITION;
2212 atapi_request_sense(qc); 2258 atapi_request_sense(qc);
2213 return; 2259 return;
2214 } 2260 } else if (unlikely(err_mask)) {
2215
2216 else if (unlikely(err_mask))
2217 /* FIXME: not quite right; we don't want the 2261 /* FIXME: not quite right; we don't want the
2218 * translation of taskfile registers into 2262 * translation of taskfile registers into
 2219 * sense descriptors, since that's only 2263 * sense descriptors, since that's only
2220 * correct for ATA, not ATAPI 2264 * correct for ATA, not ATAPI
2221 */ 2265 */
2222 ata_gen_ata_desc_sense(qc); 2266 ata_gen_ata_desc_sense(qc);
2223 2267 } else {
2224 else {
2225 u8 *scsicmd = cmd->cmnd; 2268 u8 *scsicmd = cmd->cmnd;
2226 2269
2227 if ((scsicmd[0] == INQUIRY) && ((scsicmd[1] & 0x03) == 0)) { 2270 if ((scsicmd[0] == INQUIRY) && ((scsicmd[1] & 0x03) == 0)) {
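atapi_qc_complete() now has two disjoint regimes, and which one runs depends on only a few flags: new-style EH ports never issue REQUEST SENSE from the completion path, since EH owns sense gathering. Modeled as a pure decision function (names are stand-ins):

#include <stdbool.h>

enum atapi_done_path {
	FINISH_CHECK_CONDITION,	/* new EH: sense already valid or faked */
	REQUEST_SENSE,		/* old EH: fetch sense ourselves, async */
	FAKE_SENSE,		/* old EH: non-device error, translate tf */
	FINISH_OK,
};

static enum atapi_done_path atapi_done_path(bool new_eh, bool any_err,
					    bool dev_err, bool sense_valid)
{
	if (new_eh && (any_err || sense_valid))
		return FINISH_CHECK_CONDITION;
	if (dev_err)
		return REQUEST_SENSE;
	if (any_err)
		return FAKE_SENSE;
	return FINISH_OK;
}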
@@ -2303,11 +2346,9 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc, const u8 *scsicmd)
2303 qc->tf.protocol = ATA_PROT_ATAPI_DMA; 2346 qc->tf.protocol = ATA_PROT_ATAPI_DMA;
2304 qc->tf.feature |= ATAPI_PKT_DMA; 2347 qc->tf.feature |= ATAPI_PKT_DMA;
2305 2348
2306#ifdef ATAPI_ENABLE_DMADIR 2349 if (atapi_dmadir && (cmd->sc_data_direction != DMA_TO_DEVICE))
2307 /* some SATA bridges need us to indicate data xfer direction */ 2350 /* some SATA bridges need us to indicate data xfer direction */
2308 if (cmd->sc_data_direction != DMA_TO_DEVICE)
2309 qc->tf.feature |= ATAPI_DMADIR; 2351 qc->tf.feature |= ATAPI_DMADIR;
2310#endif
2311 } 2352 }
2312 2353
2313 qc->nbytes = cmd->bufflen; 2354 qc->nbytes = cmd->bufflen;
@@ -2315,6 +2356,23 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc, const u8 *scsicmd)
2315 return 0; 2356 return 0;
2316} 2357}
2317 2358
2359static struct ata_device * ata_find_dev(struct ata_port *ap, int id)
2360{
2361 if (likely(id < ATA_MAX_DEVICES))
2362 return &ap->device[id];
2363 return NULL;
2364}
2365
2366static struct ata_device * __ata_scsi_find_dev(struct ata_port *ap,
2367 const struct scsi_device *scsidev)
2368{
2369 /* skip commands not addressed to targets we simulate */
2370 if (unlikely(scsidev->channel || scsidev->lun))
2371 return NULL;
2372
2373 return ata_find_dev(ap, scsidev->id);
2374}
2375
2318/** 2376/**
2319 * ata_scsi_find_dev - lookup ata_device from scsi_cmnd 2377 * ata_scsi_find_dev - lookup ata_device from scsi_cmnd
2320 * @ap: ATA port to which the device is attached 2378 * @ap: ATA port to which the device is attached
@@ -2331,29 +2389,19 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc, const u8 *scsicmd)
2331 * RETURNS: 2389 * RETURNS:
2332 * Associated ATA device, or %NULL if not found. 2390 * Associated ATA device, or %NULL if not found.
2333 */ 2391 */
2334
2335static struct ata_device * 2392static struct ata_device *
2336ata_scsi_find_dev(struct ata_port *ap, const struct scsi_device *scsidev) 2393ata_scsi_find_dev(struct ata_port *ap, const struct scsi_device *scsidev)
2337{ 2394{
2338 struct ata_device *dev; 2395 struct ata_device *dev = __ata_scsi_find_dev(ap, scsidev);
2339
2340 /* skip commands not addressed to targets we simulate */
2341 if (likely(scsidev->id < ATA_MAX_DEVICES))
2342 dev = &ap->device[scsidev->id];
2343 else
2344 return NULL;
2345 2396
2346 if (unlikely((scsidev->channel != 0) || 2397 if (unlikely(!dev || !ata_dev_enabled(dev)))
2347 (scsidev->lun != 0)))
2348 return NULL;
2349
2350 if (unlikely(!ata_dev_present(dev)))
2351 return NULL; 2398 return NULL;
2352 2399
2353 if (!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) { 2400 if (!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) {
2354 if (unlikely(dev->class == ATA_DEV_ATAPI)) { 2401 if (unlikely(dev->class == ATA_DEV_ATAPI)) {
2355 printk(KERN_WARNING "ata%u(%u): WARNING: ATAPI is %s, device ignored.\n", 2402 ata_dev_printk(dev, KERN_WARNING,
2356 ap->id, dev->devno, atapi_enabled ? "not supported with this driver" : "disabled"); 2403 "WARNING: ATAPI is %s, device ignored.\n",
2404 atapi_enabled ? "not supported with this driver" : "disabled");
2357 return NULL; 2405 return NULL;
2358 } 2406 }
2359 } 2407 }
@@ -2414,10 +2462,15 @@ ata_scsi_pass_thru(struct ata_queued_cmd *qc, const u8 *scsicmd)
2414{ 2462{
2415 struct ata_taskfile *tf = &(qc->tf); 2463 struct ata_taskfile *tf = &(qc->tf);
2416 struct scsi_cmnd *cmd = qc->scsicmd; 2464 struct scsi_cmnd *cmd = qc->scsicmd;
2465 struct ata_device *dev = qc->dev;
2417 2466
2418 if ((tf->protocol = ata_scsi_map_proto(scsicmd[1])) == ATA_PROT_UNKNOWN) 2467 if ((tf->protocol = ata_scsi_map_proto(scsicmd[1])) == ATA_PROT_UNKNOWN)
2419 goto invalid_fld; 2468 goto invalid_fld;
2420 2469
2470 /* We may not issue DMA commands if no DMA mode is set */
2471 if (tf->protocol == ATA_PROT_DMA && dev->dma_mode == 0)
2472 goto invalid_fld;
2473
2421 if (scsicmd[1] & 0xe0) 2474 if (scsicmd[1] & 0xe0)
2422 /* PIO multi not supported yet */ 2475 /* PIO multi not supported yet */
2423 goto invalid_fld; 2476 goto invalid_fld;
@@ -2502,6 +2555,9 @@ ata_scsi_pass_thru(struct ata_queued_cmd *qc, const u8 *scsicmd)
2502 */ 2555 */
2503 qc->nsect = cmd->bufflen / ATA_SECT_SIZE; 2556 qc->nsect = cmd->bufflen / ATA_SECT_SIZE;
2504 2557
2558 /* request result TF */
2559 qc->flags |= ATA_QCFLAG_RESULT_TF;
2560
2505 return 0; 2561 return 0;
2506 2562
2507 invalid_fld: 2563 invalid_fld:
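Setting ATA_QCFLAG_RESULT_TF here is what makes the completed register values available afterwards; the new EH only reads the result taskfile back when a command asks for it, and SAT pass-thru needs it to build the ATA return descriptor in the sense data. A sketch of that descriptor fill (layout per SAT descriptor code 0x09; the struct is a hypothetical stand-in holding result register values, with status in place of the command field):

#include <stdint.h>
#include <string.h>

struct result_tf {
	uint8_t error, nsect, lbal, lbam, lbah, device, status;
};

static void fill_ata_return_desc(uint8_t desc[14], const struct result_tf *tf)
{
	memset(desc, 0, 14);
	desc[0] = 0x09;		/* descriptor code: ATA return */
	desc[1] = 12;		/* additional descriptor length */
	desc[3] = tf->error;
	desc[5] = tf->nsect;
	desc[7] = tf->lbal;
	desc[9] = tf->lbam;
	desc[11] = tf->lbah;
	desc[12] = tf->device;
	desc[13] = tf->status;
}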
@@ -2578,19 +2634,24 @@ static inline void ata_scsi_dump_cdb(struct ata_port *ap,
2578#endif 2634#endif
2579} 2635}
2580 2636
2581static inline void __ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *), 2637static inline int __ata_scsi_queuecmd(struct scsi_cmnd *cmd,
2582 struct ata_port *ap, struct ata_device *dev) 2638 void (*done)(struct scsi_cmnd *),
2639 struct ata_device *dev)
2583{ 2640{
2641 int rc = 0;
2642
2584 if (dev->class == ATA_DEV_ATA) { 2643 if (dev->class == ATA_DEV_ATA) {
2585 ata_xlat_func_t xlat_func = ata_get_xlat_func(dev, 2644 ata_xlat_func_t xlat_func = ata_get_xlat_func(dev,
2586 cmd->cmnd[0]); 2645 cmd->cmnd[0]);
2587 2646
2588 if (xlat_func) 2647 if (xlat_func)
2589 ata_scsi_translate(ap, dev, cmd, done, xlat_func); 2648 rc = ata_scsi_translate(dev, cmd, done, xlat_func);
2590 else 2649 else
2591 ata_scsi_simulate(ap, dev, cmd, done); 2650 ata_scsi_simulate(dev, cmd, done);
2592 } else 2651 } else
2593 ata_scsi_translate(ap, dev, cmd, done, atapi_xlat); 2652 rc = ata_scsi_translate(dev, cmd, done, atapi_xlat);
2653
2654 return rc;
2594} 2655}
2595 2656
2596/** 2657/**
@@ -2609,17 +2670,18 @@ static inline void __ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struc
2609 * Releases scsi-layer-held lock, and obtains host_set lock. 2670 * Releases scsi-layer-held lock, and obtains host_set lock.
2610 * 2671 *
2611 * RETURNS: 2672 * RETURNS:
2612 * Zero. 2673 * Return value from __ata_scsi_queuecmd() if @cmd can be queued,
2674 * 0 otherwise.
2613 */ 2675 */
2614
2615int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) 2676int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
2616{ 2677{
2617 struct ata_port *ap; 2678 struct ata_port *ap;
2618 struct ata_device *dev; 2679 struct ata_device *dev;
2619 struct scsi_device *scsidev = cmd->device; 2680 struct scsi_device *scsidev = cmd->device;
2620 struct Scsi_Host *shost = scsidev->host; 2681 struct Scsi_Host *shost = scsidev->host;
2682 int rc = 0;
2621 2683
2622 ap = (struct ata_port *) &shost->hostdata[0]; 2684 ap = ata_shost_to_port(shost);
2623 2685
2624 spin_unlock(shost->host_lock); 2686 spin_unlock(shost->host_lock);
2625 spin_lock(&ap->host_set->lock); 2687 spin_lock(&ap->host_set->lock);
@@ -2628,7 +2690,7 @@ int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
2628 2690
2629 dev = ata_scsi_find_dev(ap, scsidev); 2691 dev = ata_scsi_find_dev(ap, scsidev);
2630 if (likely(dev)) 2692 if (likely(dev))
2631 __ata_scsi_queuecmd(cmd, done, ap, dev); 2693 rc = __ata_scsi_queuecmd(cmd, done, dev);
2632 else { 2694 else {
2633 cmd->result = (DID_BAD_TARGET << 16); 2695 cmd->result = (DID_BAD_TARGET << 16);
2634 done(cmd); 2696 done(cmd);
@@ -2636,12 +2698,11 @@ int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
2636 2698
2637 spin_unlock(&ap->host_set->lock); 2699 spin_unlock(&ap->host_set->lock);
2638 spin_lock(shost->host_lock); 2700 spin_lock(shost->host_lock);
2639 return 0; 2701 return rc;
2640} 2702}
2641 2703
2642/** 2704/**
2643 * ata_scsi_simulate - simulate SCSI command on ATA device 2705 * ata_scsi_simulate - simulate SCSI command on ATA device
2644 * @ap: port the device is connected to
2645 * @dev: the target device 2706 * @dev: the target device
2646 * @cmd: SCSI command being sent to device. 2707 * @cmd: SCSI command being sent to device.
2647 * @done: SCSI command completion function. 2708 * @done: SCSI command completion function.
@@ -2653,14 +2714,12 @@ int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
2653 * spin_lock_irqsave(host_set lock) 2714 * spin_lock_irqsave(host_set lock)
2654 */ 2715 */
2655 2716
2656void ata_scsi_simulate(struct ata_port *ap, struct ata_device *dev, 2717void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd,
2657 struct scsi_cmnd *cmd,
2658 void (*done)(struct scsi_cmnd *)) 2718 void (*done)(struct scsi_cmnd *))
2659{ 2719{
2660 struct ata_scsi_args args; 2720 struct ata_scsi_args args;
2661 const u8 *scsicmd = cmd->cmnd; 2721 const u8 *scsicmd = cmd->cmnd;
2662 2722
2663 args.ap = ap;
2664 args.dev = dev; 2723 args.dev = dev;
2665 args.id = dev->id; 2724 args.id = dev->id;
2666 args.cmd = cmd; 2725 args.cmd = cmd;
@@ -2732,17 +2791,241 @@ void ata_scsi_simulate(struct ata_port *ap, struct ata_device *dev,
2732 2791
2733void ata_scsi_scan_host(struct ata_port *ap) 2792void ata_scsi_scan_host(struct ata_port *ap)
2734{ 2793{
2735 struct ata_device *dev;
2736 unsigned int i; 2794 unsigned int i;
2737 2795
2738 if (ap->flags & ATA_FLAG_PORT_DISABLED) 2796 if (ap->flags & ATA_FLAG_DISABLED)
2739 return; 2797 return;
2740 2798
2741 for (i = 0; i < ATA_MAX_DEVICES; i++) { 2799 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2742 dev = &ap->device[i]; 2800 struct ata_device *dev = &ap->device[i];
2801 struct scsi_device *sdev;
2802
2803 if (!ata_dev_enabled(dev) || dev->sdev)
2804 continue;
2805
2806 sdev = __scsi_add_device(ap->host, 0, i, 0, NULL);
2807 if (!IS_ERR(sdev)) {
2808 dev->sdev = sdev;
2809 scsi_device_put(sdev);
2810 }
2811 }
2812}
2813
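The attach step above leans on a midlayer refcount convention worth spelling out: __scsi_add_device() hands back a scsi_device with a reference held (or an ERR_PTR), and libata keeps only the bare pointer, dev->sdev, protected by the host_set lock rather than by a reference. Restated as a sketch of the same calls:

static void attach_one(struct ata_port *ap, struct ata_device *dev, int devno)
{
	struct scsi_device *sdev;

	sdev = __scsi_add_device(ap->host, 0, devno, 0, NULL);
	if (!IS_ERR(sdev)) {
		dev->sdev = sdev;	/* bare pointer, host_set lock rules */
		scsi_device_put(sdev);	/* drop the ref add_device returned */
	}
}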
2814/**
2815 * ata_scsi_offline_dev - offline attached SCSI device
2816 * @dev: ATA device to offline attached SCSI device for
2817 *
 2818 * This function is called from ata_eh_hotplug() and is responsible
2819 * for taking the SCSI device attached to @dev offline. This
2820 * function is called with host_set lock which protects dev->sdev
2821 * against clearing.
2822 *
2823 * LOCKING:
2824 * spin_lock_irqsave(host_set lock)
2825 *
2826 * RETURNS:
2827 * 1 if attached SCSI device exists, 0 otherwise.
2828 */
2829int ata_scsi_offline_dev(struct ata_device *dev)
2830{
2831 if (dev->sdev) {
2832 scsi_device_set_state(dev->sdev, SDEV_OFFLINE);
2833 return 1;
2834 }
2835 return 0;
2836}
2837
2838/**
2839 * ata_scsi_remove_dev - remove attached SCSI device
2840 * @dev: ATA device to remove attached SCSI device for
2841 *
 2842 * This function is called from ata_eh_scsi_hotplug() and is
2843 * responsible for removing the SCSI device attached to @dev.
2844 *
2845 * LOCKING:
2846 * Kernel thread context (may sleep).
2847 */
2848static void ata_scsi_remove_dev(struct ata_device *dev)
2849{
2850 struct ata_port *ap = dev->ap;
2851 struct scsi_device *sdev;
2852 unsigned long flags;
2853
2854 /* Alas, we need to grab scan_mutex to ensure SCSI device
2855 * state doesn't change underneath us and thus
2856 * scsi_device_get() always succeeds. The mutex locking can
 2857 * be removed if there were a __scsi_device_get() interface that
 2858 * increments the reference count regardless of device state.
2859 */
2860 mutex_lock(&ap->host->scan_mutex);
2861 spin_lock_irqsave(&ap->host_set->lock, flags);
2862
2863 /* clearing dev->sdev is protected by host_set lock */
2864 sdev = dev->sdev;
2865 dev->sdev = NULL;
2866
2867 if (sdev) {
 2868 /* If a user-initiated unplug races with us, sdev can go
2869 * away underneath us after the host_set lock and
2870 * scan_mutex are released. Hold onto it.
2871 */
2872 if (scsi_device_get(sdev) == 0) {
2873 /* The following ensures the attached sdev is
2874 * offline on return from ata_scsi_offline_dev()
 2875 * regardless of whether it wins or loses the race
2876 * against this function.
2877 */
2878 scsi_device_set_state(sdev, SDEV_OFFLINE);
2879 } else {
2880 WARN_ON(1);
2881 sdev = NULL;
2882 }
2883 }
2884
2885 spin_unlock_irqrestore(&ap->host_set->lock, flags);
2886 mutex_unlock(&ap->host->scan_mutex);
2887
2888 if (sdev) {
2889 ata_dev_printk(dev, KERN_INFO, "detaching (SCSI %s)\n",
2890 sdev->sdev_gendev.bus_id);
2891
2892 scsi_remove_device(sdev);
2893 scsi_device_put(sdev);
2894 }
2895}
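The core of ata_scsi_remove_dev() is a claim-or-forget step: steal the cached pointer under the lock, then try to pin the device with a real reference; if a user-initiated unplug already won, scsi_device_get() fails and the pointer must be forgotten. The race resolution, modeled in isolation:

#include <stddef.h>

struct sdev;	/* opaque stand-in for struct scsi_device */

/* Must be called with the lock(s) that protect *cached held;
 * returns the pinned device to remove, or NULL if we lost. */
static struct sdev *claim_for_removal(struct sdev **cached,
				      int (*try_get)(struct sdev *))
{
	struct sdev *sdev = *cached;

	*cached = NULL;			/* nobody else sees it again */
	if (sdev && try_get(sdev) != 0)
		sdev = NULL;		/* unplug beat us; drop the claim */
	return sdev;			/* caller removes + puts, unlocked */
}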
2743 2896
2744 if (ata_dev_present(dev)) 2897/**
2745 scsi_scan_target(&ap->host->shost_gendev, 0, i, 0, 0); 2898 * ata_scsi_hotplug - SCSI part of hotplug
2899 * @data: Pointer to ATA port to perform SCSI hotplug on
2900 *
2901 * Perform SCSI part of hotplug. It's executed from a separate
2902 * workqueue after EH completes. This is necessary because SCSI
 2903 * hot plugging requires working EH, and hot unplugging is
 2904 * synchronized with hot plugging via a mutex.
2905 *
2906 * LOCKING:
2907 * Kernel thread context (may sleep).
2908 */
2909void ata_scsi_hotplug(void *data)
2910{
2911 struct ata_port *ap = data;
2912 int i;
2913
2914 if (ap->flags & ATA_FLAG_UNLOADING) {
2915 DPRINTK("ENTER/EXIT - unloading\n");
2916 return;
2746 } 2917 }
2918
2919 DPRINTK("ENTER\n");
2920
2921 /* unplug detached devices */
2922 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2923 struct ata_device *dev = &ap->device[i];
2924 unsigned long flags;
2925
2926 if (!(dev->flags & ATA_DFLAG_DETACHED))
2927 continue;
2928
2929 spin_lock_irqsave(&ap->host_set->lock, flags);
2930 dev->flags &= ~ATA_DFLAG_DETACHED;
2931 spin_unlock_irqrestore(&ap->host_set->lock, flags);
2932
2933 ata_scsi_remove_dev(dev);
2934 }
2935
2936 /* scan for new ones */
2937 ata_scsi_scan_host(ap);
2938
2939 /* If we scanned while EH was in progress, scan would have
2940 * failed silently. Requeue if there are enabled but
2941 * unattached devices.
2942 */
2943 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2944 struct ata_device *dev = &ap->device[i];
2945 if (ata_dev_enabled(dev) && !dev->sdev) {
2946 queue_delayed_work(ata_aux_wq, &ap->hotplug_task, HZ);
2947 break;
2948 }
2949 }
2950
2951 DPRINTK("EXIT\n");
2747} 2952}
2748 2953
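The requeue loop at the end of ata_scsi_hotplug() encodes a simple retry rule: a scan attempted while EH was running fails silently, so if any device ends up enabled but without an attached scsi_device, the whole hotplug work is rescheduled a second later. The rule in isolation:

#include <stdbool.h>

static bool scan_needs_retry(int n_devices, const bool enabled[],
			     const bool attached[])
{
	int i;

	for (i = 0; i < n_devices; i++)
		if (enabled[i] && !attached[i])
			return true;	/* queue_delayed_work(..., HZ) */
	return false;
}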
2954/**
2955 * ata_scsi_user_scan - indication for user-initiated bus scan
2956 * @shost: SCSI host to scan
2957 * @channel: Channel to scan
2958 * @id: ID to scan
2959 * @lun: LUN to scan
2960 *
 2961 * This function is called when the user explicitly requests a
 2962 * bus scan. Sets the probe pending flag and invokes EH.
2963 *
2964 * LOCKING:
2965 * SCSI layer (we don't care)
2966 *
2967 * RETURNS:
2968 * Zero.
2969 */
2970static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
2971 unsigned int id, unsigned int lun)
2972{
2973 struct ata_port *ap = ata_shost_to_port(shost);
2974 unsigned long flags;
2975 int rc = 0;
2976
2977 if (!ap->ops->error_handler)
2978 return -EOPNOTSUPP;
2979
2980 if ((channel != SCAN_WILD_CARD && channel != 0) ||
2981 (lun != SCAN_WILD_CARD && lun != 0))
2982 return -EINVAL;
2983
2984 spin_lock_irqsave(&ap->host_set->lock, flags);
2985
2986 if (id == SCAN_WILD_CARD) {
2987 ap->eh_info.probe_mask |= (1 << ATA_MAX_DEVICES) - 1;
2988 ap->eh_info.action |= ATA_EH_SOFTRESET;
2989 } else {
2990 struct ata_device *dev = ata_find_dev(ap, id);
2991
2992 if (dev) {
2993 ap->eh_info.probe_mask |= 1 << dev->devno;
2994 ap->eh_info.action |= ATA_EH_SOFTRESET;
2995 } else
2996 rc = -EINVAL;
2997 }
2998
2999 if (rc == 0)
3000 ata_port_schedule_eh(ap);
3001
3002 spin_unlock_irqrestore(&ap->host_set->lock, flags);
3003
3004 return rc;
3005}
3006
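The probe mask ata_scsi_user_scan() hands to EH is a plain bitmap over device numbers: a wildcard selects every device on the port, otherwise only the addressed one. As a pure function (ATA_MAX_DEVICES is 2 on these master/slave-style ports):

#include <stdint.h>

#define ATA_MAX_DEVICES 2

static uint32_t user_scan_probe_mask(int wildcard, unsigned int id)
{
	if (wildcard)
		return (1u << ATA_MAX_DEVICES) - 1;	/* all devices */
	if (id < ATA_MAX_DEVICES)
		return 1u << id;			/* just this one */
	return 0;					/* -EINVAL case */
}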
3007/**
3008 * ata_scsi_dev_rescan - initiate scsi_rescan_device()
3009 * @data: Pointer to ATA port to perform scsi_rescan_device()
3010 *
3011 * After ATA pass thru (SAT) commands are executed successfully,
 3012 * libata needs to propagate the changes to the SCSI layer. This
3013 * function must be executed from ata_aux_wq such that sdev
3014 * attach/detach don't race with rescan.
3015 *
3016 * LOCKING:
3017 * Kernel thread context (may sleep).
3018 */
3019void ata_scsi_dev_rescan(void *data)
3020{
3021 struct ata_port *ap = data;
3022 struct ata_device *dev;
3023 unsigned int i;
3024
3025 for (i = 0; i < ATA_MAX_DEVICES; i++) {
3026 dev = &ap->device[i];
3027
3028 if (ata_dev_enabled(dev) && dev->sdev)
3029 scsi_rescan_device(&(dev->sdev->sdev_gendev));
3030 }
3031}
diff --git a/drivers/scsi/libata.h b/drivers/scsi/libata.h
index bac8cbae06fe..bdd488897096 100644
--- a/drivers/scsi/libata.h
+++ b/drivers/scsi/libata.h
@@ -29,10 +29,9 @@
29#define __LIBATA_H__ 29#define __LIBATA_H__
30 30
31#define DRV_NAME "libata" 31#define DRV_NAME "libata"
32#define DRV_VERSION "1.20" /* must be exactly four chars */ 32#define DRV_VERSION "1.30" /* must be exactly four chars */
33 33
34struct ata_scsi_args { 34struct ata_scsi_args {
35 struct ata_port *ap;
36 struct ata_device *dev; 35 struct ata_device *dev;
37 u16 *id; 36 u16 *id;
38 struct scsi_cmnd *cmd; 37 struct scsi_cmnd *cmd;
@@ -40,18 +39,32 @@ struct ata_scsi_args {
40}; 39};
41 40
42/* libata-core.c */ 41/* libata-core.c */
42extern struct workqueue_struct *ata_aux_wq;
43extern int atapi_enabled; 43extern int atapi_enabled;
44extern int atapi_dmadir;
44extern int libata_fua; 45extern int libata_fua;
45extern struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap, 46extern struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev);
46 struct ata_device *dev);
47extern int ata_rwcmd_protocol(struct ata_queued_cmd *qc); 47extern int ata_rwcmd_protocol(struct ata_queued_cmd *qc);
48extern void ata_dev_disable(struct ata_device *dev);
48extern void ata_port_flush_task(struct ata_port *ap); 49extern void ata_port_flush_task(struct ata_port *ap);
50extern unsigned ata_exec_internal(struct ata_device *dev,
51 struct ata_taskfile *tf, const u8 *cdb,
52 int dma_dir, void *buf, unsigned int buflen);
53extern int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
54 int post_reset, u16 *id);
55extern int ata_dev_configure(struct ata_device *dev, int print_info);
56extern int sata_down_spd_limit(struct ata_port *ap);
57extern int sata_set_spd_needed(struct ata_port *ap);
58extern int ata_down_xfermask_limit(struct ata_device *dev, int force_pio0);
59extern int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev);
49extern void ata_qc_free(struct ata_queued_cmd *qc); 60extern void ata_qc_free(struct ata_queued_cmd *qc);
50extern void ata_qc_issue(struct ata_queued_cmd *qc); 61extern void ata_qc_issue(struct ata_queued_cmd *qc);
62extern void __ata_qc_complete(struct ata_queued_cmd *qc);
51extern int ata_check_atapi_dma(struct ata_queued_cmd *qc); 63extern int ata_check_atapi_dma(struct ata_queued_cmd *qc);
52extern void ata_dev_select(struct ata_port *ap, unsigned int device, 64extern void ata_dev_select(struct ata_port *ap, unsigned int device,
53 unsigned int wait, unsigned int can_sleep); 65 unsigned int wait, unsigned int can_sleep);
54extern void swap_buf_le16(u16 *buf, unsigned int buf_words); 66extern void swap_buf_le16(u16 *buf, unsigned int buf_words);
67extern void ata_dev_init(struct ata_device *dev);
55extern int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg); 68extern int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg);
56extern int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg); 69extern int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg);
57 70
@@ -60,6 +73,8 @@ extern int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg);
60extern struct scsi_transport_template ata_scsi_transport_template; 73extern struct scsi_transport_template ata_scsi_transport_template;
61 74
62extern void ata_scsi_scan_host(struct ata_port *ap); 75extern void ata_scsi_scan_host(struct ata_port *ap);
76extern int ata_scsi_offline_dev(struct ata_device *dev);
77extern void ata_scsi_hotplug(void *data);
63extern unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf, 78extern unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf,
64 unsigned int buflen); 79 unsigned int buflen);
65 80
@@ -88,5 +103,13 @@ extern void ata_scsi_set_sense(struct scsi_cmnd *cmd,
88extern void ata_scsi_rbuf_fill(struct ata_scsi_args *args, 103extern void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
89 unsigned int (*actor) (struct ata_scsi_args *args, 104 unsigned int (*actor) (struct ata_scsi_args *args,
90 u8 *rbuf, unsigned int buflen)); 105 u8 *rbuf, unsigned int buflen));
106extern void ata_schedule_scsi_eh(struct Scsi_Host *shost);
107extern void ata_scsi_dev_rescan(void *data);
108
109/* libata-eh.c */
110extern enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd);
111extern void ata_scsi_error(struct Scsi_Host *host);
112extern void ata_port_wait_eh(struct ata_port *ap);
113extern void ata_qc_schedule_eh(struct ata_queued_cmd *qc);
91 114
92#endif /* __LIBATA_H__ */ 115#endif /* __LIBATA_H__ */
diff --git a/drivers/scsi/pdc_adma.c b/drivers/scsi/pdc_adma.c
index 5cda16cfacb0..7ebe8e03aa96 100644
--- a/drivers/scsi/pdc_adma.c
+++ b/drivers/scsi/pdc_adma.c
@@ -46,7 +46,7 @@
46#include <linux/libata.h> 46#include <linux/libata.h>
47 47
48#define DRV_NAME "pdc_adma" 48#define DRV_NAME "pdc_adma"
49#define DRV_VERSION "0.03" 49#define DRV_VERSION "0.04"
50 50
51/* macro to calculate base address for ATA regs */ 51/* macro to calculate base address for ATA regs */
52#define ADMA_ATA_REGS(base,port_no) ((base) + ((port_no) * 0x40)) 52#define ADMA_ATA_REGS(base,port_no) ((base) + ((port_no) * 0x40))
@@ -152,6 +152,7 @@ static struct scsi_host_template adma_ata_sht = {
152 .proc_name = DRV_NAME, 152 .proc_name = DRV_NAME,
153 .dma_boundary = ADMA_DMA_BOUNDARY, 153 .dma_boundary = ADMA_DMA_BOUNDARY,
154 .slave_configure = ata_scsi_slave_config, 154 .slave_configure = ata_scsi_slave_config,
155 .slave_destroy = ata_scsi_slave_destroy,
155 .bios_param = ata_std_bios_param, 156 .bios_param = ata_std_bios_param,
156}; 157};
157 158
@@ -167,6 +168,7 @@ static const struct ata_port_operations adma_ata_ops = {
167 .qc_prep = adma_qc_prep, 168 .qc_prep = adma_qc_prep,
168 .qc_issue = adma_qc_issue, 169 .qc_issue = adma_qc_issue,
169 .eng_timeout = adma_eng_timeout, 170 .eng_timeout = adma_eng_timeout,
171 .data_xfer = ata_mmio_data_xfer,
170 .irq_handler = adma_intr, 172 .irq_handler = adma_intr,
171 .irq_clear = adma_irq_clear, 173 .irq_clear = adma_irq_clear,
172 .port_start = adma_port_start, 174 .port_start = adma_port_start,
@@ -455,13 +457,13 @@ static inline unsigned int adma_intr_pkt(struct ata_host_set *host_set)
455 continue; 457 continue;
456 handled = 1; 458 handled = 1;
457 adma_enter_reg_mode(ap); 459 adma_enter_reg_mode(ap);
458 if (ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR)) 460 if (ap->flags & ATA_FLAG_DISABLED)
459 continue; 461 continue;
460 pp = ap->private_data; 462 pp = ap->private_data;
461 if (!pp || pp->state != adma_state_pkt) 463 if (!pp || pp->state != adma_state_pkt)
462 continue; 464 continue;
463 qc = ata_qc_from_tag(ap, ap->active_tag); 465 qc = ata_qc_from_tag(ap, ap->active_tag);
464 if (qc && (!(qc->tf.ctl & ATA_NIEN))) { 466 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
465 if ((status & (aPERR | aPSD | aUIRQ))) 467 if ((status & (aPERR | aPSD | aUIRQ)))
466 qc->err_mask |= AC_ERR_OTHER; 468 qc->err_mask |= AC_ERR_OTHER;
467 else if (pp->pkt[0] != cDONE) 469 else if (pp->pkt[0] != cDONE)
@@ -480,13 +482,13 @@ static inline unsigned int adma_intr_mmio(struct ata_host_set *host_set)
480 for (port_no = 0; port_no < host_set->n_ports; ++port_no) { 482 for (port_no = 0; port_no < host_set->n_ports; ++port_no) {
481 struct ata_port *ap; 483 struct ata_port *ap;
482 ap = host_set->ports[port_no]; 484 ap = host_set->ports[port_no];
483 if (ap && (!(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR)))) { 485 if (ap && (!(ap->flags & ATA_FLAG_DISABLED))) {
484 struct ata_queued_cmd *qc; 486 struct ata_queued_cmd *qc;
485 struct adma_port_priv *pp = ap->private_data; 487 struct adma_port_priv *pp = ap->private_data;
486 if (!pp || pp->state != adma_state_mmio) 488 if (!pp || pp->state != adma_state_mmio)
487 continue; 489 continue;
488 qc = ata_qc_from_tag(ap, ap->active_tag); 490 qc = ata_qc_from_tag(ap, ap->active_tag);
489 if (qc && (!(qc->tf.ctl & ATA_NIEN))) { 491 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
490 492
491 /* check main status, clearing INTRQ */ 493 /* check main status, clearing INTRQ */
492 u8 status = ata_check_status(ap); 494 u8 status = ata_check_status(ap);
diff --git a/drivers/scsi/sata_mv.c b/drivers/scsi/sata_mv.c
index f16f92a6ec0f..4a71578df3c1 100644
--- a/drivers/scsi/sata_mv.c
+++ b/drivers/scsi/sata_mv.c
@@ -93,7 +93,7 @@ enum {
93 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */ 93 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
94 MV_COMMON_FLAGS = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 94 MV_COMMON_FLAGS = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
95 ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO | 95 ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
96 ATA_FLAG_NO_ATAPI), 96 ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING),
97 MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE, 97 MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,
98 98
99 CRQB_FLAG_READ = (1 << 0), 99 CRQB_FLAG_READ = (1 << 0),
@@ -272,33 +272,33 @@ enum chip_type {
272 272
273/* Command ReQuest Block: 32B */ 273/* Command ReQuest Block: 32B */
274struct mv_crqb { 274struct mv_crqb {
275 u32 sg_addr; 275 __le32 sg_addr;
276 u32 sg_addr_hi; 276 __le32 sg_addr_hi;
277 u16 ctrl_flags; 277 __le16 ctrl_flags;
278 u16 ata_cmd[11]; 278 __le16 ata_cmd[11];
279}; 279};
280 280
281struct mv_crqb_iie { 281struct mv_crqb_iie {
282 u32 addr; 282 __le32 addr;
283 u32 addr_hi; 283 __le32 addr_hi;
284 u32 flags; 284 __le32 flags;
285 u32 len; 285 __le32 len;
286 u32 ata_cmd[4]; 286 __le32 ata_cmd[4];
287}; 287};
288 288
289/* Command ResPonse Block: 8B */ 289/* Command ResPonse Block: 8B */
290struct mv_crpb { 290struct mv_crpb {
291 u16 id; 291 __le16 id;
292 u16 flags; 292 __le16 flags;
293 u32 tmstmp; 293 __le32 tmstmp;
294}; 294};
295 295
296/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */ 296/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
297struct mv_sg { 297struct mv_sg {
298 u32 addr; 298 __le32 addr;
299 u32 flags_size; 299 __le32 flags_size;
300 u32 addr_hi; 300 __le32 addr_hi;
301 u32 reserved; 301 __le32 reserved;
302}; 302};
303 303
304struct mv_port_priv { 304struct mv_port_priv {
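The u32-to-__le32 changes above are annotation, not behavior, on little-endian hosts: the Marvell EDMA engine reads these descriptors as little-endian from memory, so big-endian hosts must byte-swap on every store, and the __le* types let sparse catch a missed conversion. A kernel-style sketch of a correctly annotated store (assumes the usual <linux/types.h> and byteorder helpers):

static void mv_fill_sg_entry(struct mv_sg *sg, u64 dma_addr, u32 len)
{
	sg->addr       = cpu_to_le32(dma_addr & 0xffffffff);
	sg->addr_hi    = cpu_to_le32(dma_addr >> 32);
	sg->flags_size = cpu_to_le32(len & 0xffff);
	sg->reserved   = 0;	/* zero needs no swap */
}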
@@ -390,6 +390,7 @@ static struct scsi_host_template mv_sht = {
390 .proc_name = DRV_NAME, 390 .proc_name = DRV_NAME,
391 .dma_boundary = MV_DMA_BOUNDARY, 391 .dma_boundary = MV_DMA_BOUNDARY,
392 .slave_configure = ata_scsi_slave_config, 392 .slave_configure = ata_scsi_slave_config,
393 .slave_destroy = ata_scsi_slave_destroy,
393 .bios_param = ata_std_bios_param, 394 .bios_param = ata_std_bios_param,
394}; 395};
395 396
@@ -406,6 +407,7 @@ static const struct ata_port_operations mv5_ops = {
406 407
407 .qc_prep = mv_qc_prep, 408 .qc_prep = mv_qc_prep,
408 .qc_issue = mv_qc_issue, 409 .qc_issue = mv_qc_issue,
410 .data_xfer = ata_mmio_data_xfer,
409 411
410 .eng_timeout = mv_eng_timeout, 412 .eng_timeout = mv_eng_timeout,
411 413
@@ -433,6 +435,7 @@ static const struct ata_port_operations mv6_ops = {
433 435
434 .qc_prep = mv_qc_prep, 436 .qc_prep = mv_qc_prep,
435 .qc_issue = mv_qc_issue, 437 .qc_issue = mv_qc_issue,
438 .data_xfer = ata_mmio_data_xfer,
436 439
437 .eng_timeout = mv_eng_timeout, 440 .eng_timeout = mv_eng_timeout,
438 441
@@ -683,7 +686,7 @@ static void mv_stop_dma(struct ata_port *ap)
683 } 686 }
684 687
685 if (EDMA_EN & reg) { 688 if (EDMA_EN & reg) {
686 printk(KERN_ERR "ata%u: Unable to stop eDMA\n", ap->id); 689 ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
687 /* FIXME: Consider doing a reset here to recover */ 690 /* FIXME: Consider doing a reset here to recover */
688 } 691 }
689} 692}
@@ -1028,7 +1031,7 @@ static inline unsigned mv_inc_q_index(unsigned index)
1028 return (index + 1) & MV_MAX_Q_DEPTH_MASK; 1031 return (index + 1) & MV_MAX_Q_DEPTH_MASK;
1029} 1032}
1030 1033
1031static inline void mv_crqb_pack_cmd(u16 *cmdw, u8 data, u8 addr, unsigned last) 1034static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
1032{ 1035{
1033 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS | 1036 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
1034 (last ? CRQB_CMD_LAST : 0); 1037 (last ? CRQB_CMD_LAST : 0);
@@ -1051,7 +1054,7 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
1051{ 1054{
1052 struct ata_port *ap = qc->ap; 1055 struct ata_port *ap = qc->ap;
1053 struct mv_port_priv *pp = ap->private_data; 1056 struct mv_port_priv *pp = ap->private_data;
1054 u16 *cw; 1057 __le16 *cw;
1055 struct ata_taskfile *tf; 1058 struct ata_taskfile *tf;
1056 u16 flags = 0; 1059 u16 flags = 0;
1057 unsigned in_index; 1060 unsigned in_index;
@@ -1307,8 +1310,8 @@ static void mv_err_intr(struct ata_port *ap, int reset_allowed)
1307 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); 1310 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1308 1311
1309 if (EDMA_ERR_SERR & edma_err_cause) { 1312 if (EDMA_ERR_SERR & edma_err_cause) {
1310 serr = scr_read(ap, SCR_ERROR); 1313 sata_scr_read(ap, SCR_ERROR, &serr);
1311 scr_write_flush(ap, SCR_ERROR, serr); 1314 sata_scr_write_flush(ap, SCR_ERROR, serr);
1312 } 1315 }
1313 if (EDMA_ERR_SELF_DIS & edma_err_cause) { 1316 if (EDMA_ERR_SELF_DIS & edma_err_cause) {
1314 struct mv_port_priv *pp = ap->private_data; 1317 struct mv_port_priv *pp = ap->private_data;
@@ -1377,7 +1380,7 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
1377 /* Note that DEV_IRQ might happen spuriously during EDMA, 1380 /* Note that DEV_IRQ might happen spuriously during EDMA,
1378 * and should be ignored in such cases. 1381 * and should be ignored in such cases.
1379 * The cause of this is still under investigation. 1382 * The cause of this is still under investigation.
1380 */ 1383 */
1381 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) { 1384 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1382 /* EDMA: check for response queue interrupt */ 1385 /* EDMA: check for response queue interrupt */
1383 if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause) { 1386 if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause) {
@@ -1398,7 +1401,7 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
1398 } 1401 }
1399 } 1402 }
1400 1403
1401 if (ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR)) 1404 if (ap && (ap->flags & ATA_FLAG_DISABLED))
1402 continue; 1405 continue;
1403 1406
1404 err_mask = ac_err_mask(ata_status); 1407 err_mask = ac_err_mask(ata_status);
@@ -1419,7 +1422,7 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
1419 VPRINTK("port %u IRQ found for qc, " 1422 VPRINTK("port %u IRQ found for qc, "
1420 "ata_status 0x%x\n", port,ata_status); 1423 "ata_status 0x%x\n", port,ata_status);
1421 /* mark qc status appropriately */ 1424 /* mark qc status appropriately */
1422 if (!(qc->tf.ctl & ATA_NIEN)) { 1425 if (!(qc->tf.flags & ATA_TFLAG_POLLING)) {
1423 qc->err_mask |= err_mask; 1426 qc->err_mask |= err_mask;
1424 ata_qc_complete(qc); 1427 ata_qc_complete(qc);
1425 } 1428 }
@@ -1949,15 +1952,16 @@ static void __mv_phy_reset(struct ata_port *ap, int can_sleep)
1949 1952
1950 /* Issue COMRESET via SControl */ 1953 /* Issue COMRESET via SControl */
1951comreset_retry: 1954comreset_retry:
1952 scr_write_flush(ap, SCR_CONTROL, 0x301); 1955 sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
1953 __msleep(1, can_sleep); 1956 __msleep(1, can_sleep);
1954 1957
1955 scr_write_flush(ap, SCR_CONTROL, 0x300); 1958 sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
1956 __msleep(20, can_sleep); 1959 __msleep(20, can_sleep);
1957 1960
1958 timeout = jiffies + msecs_to_jiffies(200); 1961 timeout = jiffies + msecs_to_jiffies(200);
1959 do { 1962 do {
1960 sstatus = scr_read(ap, SCR_STATUS) & 0x3; 1963 sata_scr_read(ap, SCR_STATUS, &sstatus);
1964 sstatus &= 0x3;
1961 if ((sstatus == 3) || (sstatus == 0)) 1965 if ((sstatus == 3) || (sstatus == 0))
1962 break; 1966 break;
1963 1967
@@ -1974,11 +1978,12 @@ comreset_retry:
1974 "SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS), 1978 "SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
1975 mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL)); 1979 mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));
1976 1980
1977 if (sata_dev_present(ap)) { 1981 if (ata_port_online(ap)) {
1978 ata_port_probe(ap); 1982 ata_port_probe(ap);
1979 } else { 1983 } else {
1980 printk(KERN_INFO "ata%u: no device found (phy stat %08x)\n", 1984 sata_scr_read(ap, SCR_STATUS, &sstatus);
1981 ap->id, scr_read(ap, SCR_STATUS)); 1985 ata_port_printk(ap, KERN_INFO,
1986 "no device found (phy stat %08x)\n", sstatus);
1982 ata_port_disable(ap); 1987 ata_port_disable(ap);
1983 return; 1988 return;
1984 } 1989 }
@@ -2005,7 +2010,7 @@ comreset_retry:
2005 tf.nsect = readb((void __iomem *) ap->ioaddr.nsect_addr); 2010 tf.nsect = readb((void __iomem *) ap->ioaddr.nsect_addr);
2006 2011
2007 dev->class = ata_dev_classify(&tf); 2012 dev->class = ata_dev_classify(&tf);
2008 if (!ata_dev_present(dev)) { 2013 if (!ata_dev_enabled(dev)) {
2009 VPRINTK("Port disabled post-sig: No device present.\n"); 2014 VPRINTK("Port disabled post-sig: No device present.\n");
2010 ata_port_disable(ap); 2015 ata_port_disable(ap);
2011 } 2016 }
@@ -2037,7 +2042,7 @@ static void mv_eng_timeout(struct ata_port *ap)
2037 struct ata_queued_cmd *qc; 2042 struct ata_queued_cmd *qc;
2038 unsigned long flags; 2043 unsigned long flags;
2039 2044
2040 printk(KERN_ERR "ata%u: Entering mv_eng_timeout\n",ap->id); 2045 ata_port_printk(ap, KERN_ERR, "Entering mv_eng_timeout\n");
2041 DPRINTK("All regs @ start of eng_timeout\n"); 2046 DPRINTK("All regs @ start of eng_timeout\n");
2042 mv_dump_all_regs(ap->host_set->mmio_base, ap->port_no, 2047 mv_dump_all_regs(ap->host_set->mmio_base, ap->port_no,
2043 to_pci_dev(ap->host_set->dev)); 2048 to_pci_dev(ap->host_set->dev));
diff --git a/drivers/scsi/sata_nv.c b/drivers/scsi/sata_nv.c
index 9f553081b5e8..90551246aa46 100644
--- a/drivers/scsi/sata_nv.c
+++ b/drivers/scsi/sata_nv.c
@@ -44,7 +44,7 @@
44#include <linux/libata.h> 44#include <linux/libata.h>
45 45
46#define DRV_NAME "sata_nv" 46#define DRV_NAME "sata_nv"
47#define DRV_VERSION "0.8" 47#define DRV_VERSION "0.9"
48 48
49enum { 49enum {
50 NV_PORTS = 2, 50 NV_PORTS = 2,
@@ -140,6 +140,12 @@ static const struct pci_device_id nv_pci_tbl[] = {
140 PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC }, 140 PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
141 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2, 141 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2,
142 PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC }, 142 PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
143 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA,
144 PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
145 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2,
146 PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
147 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3,
148 PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
143 { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, 149 { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
144 PCI_ANY_ID, PCI_ANY_ID, 150 PCI_ANY_ID, PCI_ANY_ID,
145 PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC }, 151 PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC },
@@ -210,6 +216,7 @@ static struct scsi_host_template nv_sht = {
210 .proc_name = DRV_NAME, 216 .proc_name = DRV_NAME,
211 .dma_boundary = ATA_DMA_BOUNDARY, 217 .dma_boundary = ATA_DMA_BOUNDARY,
212 .slave_configure = ata_scsi_slave_config, 218 .slave_configure = ata_scsi_slave_config,
219 .slave_destroy = ata_scsi_slave_destroy,
213 .bios_param = ata_std_bios_param, 220 .bios_param = ata_std_bios_param,
214}; 221};
215 222
@@ -228,6 +235,7 @@ static const struct ata_port_operations nv_ops = {
228 .qc_prep = ata_qc_prep, 235 .qc_prep = ata_qc_prep,
229 .qc_issue = ata_qc_issue_prot, 236 .qc_issue = ata_qc_issue_prot,
230 .eng_timeout = ata_eng_timeout, 237 .eng_timeout = ata_eng_timeout,
238 .data_xfer = ata_pio_data_xfer,
231 .irq_handler = nv_interrupt, 239 .irq_handler = nv_interrupt,
232 .irq_clear = ata_bmdma_irq_clear, 240 .irq_clear = ata_bmdma_irq_clear,
233 .scr_read = nv_scr_read, 241 .scr_read = nv_scr_read,
@@ -279,11 +287,11 @@ static irqreturn_t nv_interrupt (int irq, void *dev_instance,
279 287
280 ap = host_set->ports[i]; 288 ap = host_set->ports[i];
281 if (ap && 289 if (ap &&
282 !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) { 290 !(ap->flags & ATA_FLAG_DISABLED)) {
283 struct ata_queued_cmd *qc; 291 struct ata_queued_cmd *qc;
284 292
285 qc = ata_qc_from_tag(ap, ap->active_tag); 293 qc = ata_qc_from_tag(ap, ap->active_tag);
286 if (qc && (!(qc->tf.ctl & ATA_NIEN))) 294 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
287 handled += ata_host_intr(ap, qc); 295 handled += ata_host_intr(ap, qc);
288 else 296 else
289 // No request pending? Clear interrupt status 297 // No request pending? Clear interrupt status
diff --git a/drivers/scsi/sata_promise.c b/drivers/scsi/sata_promise.c
index 7eb67a6bdc64..b2b6ed5216e0 100644
--- a/drivers/scsi/sata_promise.c
+++ b/drivers/scsi/sata_promise.c
@@ -76,7 +76,8 @@ enum {
76 PDC_RESET = (1 << 11), /* HDMA reset */ 76 PDC_RESET = (1 << 11), /* HDMA reset */
77 77
78 PDC_COMMON_FLAGS = ATA_FLAG_NO_LEGACY | ATA_FLAG_SRST | 78 PDC_COMMON_FLAGS = ATA_FLAG_NO_LEGACY | ATA_FLAG_SRST |
79 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI, 79 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
80 ATA_FLAG_PIO_POLLING,
80}; 81};
81 82
82 83
@@ -120,6 +121,7 @@ static struct scsi_host_template pdc_ata_sht = {
120 .proc_name = DRV_NAME, 121 .proc_name = DRV_NAME,
121 .dma_boundary = ATA_DMA_BOUNDARY, 122 .dma_boundary = ATA_DMA_BOUNDARY,
122 .slave_configure = ata_scsi_slave_config, 123 .slave_configure = ata_scsi_slave_config,
124 .slave_destroy = ata_scsi_slave_destroy,
123 .bios_param = ata_std_bios_param, 125 .bios_param = ata_std_bios_param,
124}; 126};
125 127
@@ -136,6 +138,7 @@ static const struct ata_port_operations pdc_sata_ops = {
136 .qc_prep = pdc_qc_prep, 138 .qc_prep = pdc_qc_prep,
137 .qc_issue = pdc_qc_issue_prot, 139 .qc_issue = pdc_qc_issue_prot,
138 .eng_timeout = pdc_eng_timeout, 140 .eng_timeout = pdc_eng_timeout,
141 .data_xfer = ata_mmio_data_xfer,
139 .irq_handler = pdc_interrupt, 142 .irq_handler = pdc_interrupt,
140 .irq_clear = pdc_irq_clear, 143 .irq_clear = pdc_irq_clear,
141 144
@@ -158,6 +161,7 @@ static const struct ata_port_operations pdc_pata_ops = {
158 161
159 .qc_prep = pdc_qc_prep, 162 .qc_prep = pdc_qc_prep,
160 .qc_issue = pdc_qc_issue_prot, 163 .qc_issue = pdc_qc_issue_prot,
164 .data_xfer = ata_mmio_data_xfer,
161 .eng_timeout = pdc_eng_timeout, 165 .eng_timeout = pdc_eng_timeout,
162 .irq_handler = pdc_interrupt, 166 .irq_handler = pdc_interrupt,
163 .irq_clear = pdc_irq_clear, 167 .irq_clear = pdc_irq_clear,
@@ -363,12 +367,23 @@ static void pdc_sata_phy_reset(struct ata_port *ap)
363 sata_phy_reset(ap); 367 sata_phy_reset(ap);
364} 368}
365 369
366static void pdc_pata_phy_reset(struct ata_port *ap) 370static void pdc_pata_cbl_detect(struct ata_port *ap)
367{ 371{
368 /* FIXME: add cable detect. Don't assume 40-pin cable */ 372 u8 tmp;
369 ap->cbl = ATA_CBL_PATA40; 373 void __iomem *mmio = (void *) ap->ioaddr.cmd_addr + PDC_CTLSTAT + 0x03;
370 ap->udma_mask &= ATA_UDMA_MASK_40C; 374
375 tmp = readb(mmio);
376
377 if (tmp & 0x01) {
378 ap->cbl = ATA_CBL_PATA40;
379 ap->udma_mask &= ATA_UDMA_MASK_40C;
380 } else
381 ap->cbl = ATA_CBL_PATA80;
382}
371 383
384static void pdc_pata_phy_reset(struct ata_port *ap)
385{
386 pdc_pata_cbl_detect(ap);
372 pdc_reset_port(ap); 387 pdc_reset_port(ap);
373 ata_port_probe(ap); 388 ata_port_probe(ap);
374 ata_bus_reset(ap); 389 ata_bus_reset(ap);
@@ -435,7 +450,7 @@ static void pdc_eng_timeout(struct ata_port *ap)
435 switch (qc->tf.protocol) { 450 switch (qc->tf.protocol) {
436 case ATA_PROT_DMA: 451 case ATA_PROT_DMA:
437 case ATA_PROT_NODATA: 452 case ATA_PROT_NODATA:
438 printk(KERN_ERR "ata%u: command timeout\n", ap->id); 453 ata_port_printk(ap, KERN_ERR, "command timeout\n");
439 drv_stat = ata_wait_idle(ap); 454 drv_stat = ata_wait_idle(ap);
440 qc->err_mask |= __ac_err_mask(drv_stat); 455 qc->err_mask |= __ac_err_mask(drv_stat);
441 break; 456 break;
@@ -443,8 +458,9 @@ static void pdc_eng_timeout(struct ata_port *ap)
443 default: 458 default:
444 drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000); 459 drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
445 460
446 printk(KERN_ERR "ata%u: unknown timeout, cmd 0x%x stat 0x%x\n", 461 ata_port_printk(ap, KERN_ERR,
447 ap->id, qc->tf.command, drv_stat); 462 "unknown timeout, cmd 0x%x stat 0x%x\n",
463 qc->tf.command, drv_stat);
448 464
449 qc->err_mask |= ac_err_mask(drv_stat); 465 qc->err_mask |= ac_err_mask(drv_stat);
450 break; 466 break;
@@ -533,11 +549,11 @@ static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *r
533 ap = host_set->ports[i]; 549 ap = host_set->ports[i];
534 tmp = mask & (1 << (i + 1)); 550 tmp = mask & (1 << (i + 1));
535 if (tmp && ap && 551 if (tmp && ap &&
536 !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) { 552 !(ap->flags & ATA_FLAG_DISABLED)) {
537 struct ata_queued_cmd *qc; 553 struct ata_queued_cmd *qc;
538 554
539 qc = ata_qc_from_tag(ap, ap->active_tag); 555 qc = ata_qc_from_tag(ap, ap->active_tag);
540 if (qc && (!(qc->tf.ctl & ATA_NIEN))) 556 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
541 handled += pdc_host_intr(ap, qc); 557 handled += pdc_host_intr(ap, qc);
542 } 558 }
543 } 559 }
@@ -676,10 +692,6 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
676 if (!printed_version++) 692 if (!printed_version++)
677 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); 693 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
678 694
679 /*
680 * If this driver happens to only be useful on Apple's K2, then
681 * we should check that here as it has a normal Serverworks ID
682 */
683 rc = pci_enable_device(pdev); 695 rc = pci_enable_device(pdev);
684 if (rc) 696 if (rc)
685 return rc; 697 return rc;
diff --git a/drivers/scsi/sata_qstor.c b/drivers/scsi/sata_qstor.c
index 886f3447dd48..98ddc25655f0 100644
--- a/drivers/scsi/sata_qstor.c
+++ b/drivers/scsi/sata_qstor.c
@@ -41,7 +41,7 @@
41#include <linux/libata.h> 41#include <linux/libata.h>
42 42
43#define DRV_NAME "sata_qstor" 43#define DRV_NAME "sata_qstor"
44#define DRV_VERSION "0.05" 44#define DRV_VERSION "0.06"
45 45
46enum { 46enum {
47 QS_PORTS = 4, 47 QS_PORTS = 4,
@@ -142,6 +142,7 @@ static struct scsi_host_template qs_ata_sht = {
142 .proc_name = DRV_NAME, 142 .proc_name = DRV_NAME,
143 .dma_boundary = QS_DMA_BOUNDARY, 143 .dma_boundary = QS_DMA_BOUNDARY,
144 .slave_configure = ata_scsi_slave_config, 144 .slave_configure = ata_scsi_slave_config,
145 .slave_destroy = ata_scsi_slave_destroy,
145 .bios_param = ata_std_bios_param, 146 .bios_param = ata_std_bios_param,
146}; 147};
147 148
@@ -156,6 +157,7 @@ static const struct ata_port_operations qs_ata_ops = {
156 .phy_reset = qs_phy_reset, 157 .phy_reset = qs_phy_reset,
157 .qc_prep = qs_qc_prep, 158 .qc_prep = qs_qc_prep,
158 .qc_issue = qs_qc_issue, 159 .qc_issue = qs_qc_issue,
160 .data_xfer = ata_mmio_data_xfer,
159 .eng_timeout = qs_eng_timeout, 161 .eng_timeout = qs_eng_timeout,
160 .irq_handler = qs_intr, 162 .irq_handler = qs_intr,
161 .irq_clear = qs_irq_clear, 163 .irq_clear = qs_irq_clear,
@@ -175,7 +177,7 @@ static const struct ata_port_info qs_port_info[] = {
175 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 177 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
176 ATA_FLAG_SATA_RESET | 178 ATA_FLAG_SATA_RESET |
177 //FIXME ATA_FLAG_SRST | 179 //FIXME ATA_FLAG_SRST |
178 ATA_FLAG_MMIO, 180 ATA_FLAG_MMIO | ATA_FLAG_PIO_POLLING,
179 .pio_mask = 0x10, /* pio4 */ 181 .pio_mask = 0x10, /* pio4 */
180 .udma_mask = 0x7f, /* udma0-6 */ 182 .udma_mask = 0x7f, /* udma0-6 */
181 .port_ops = &qs_ata_ops, 183 .port_ops = &qs_ata_ops,
@@ -394,14 +396,13 @@ static inline unsigned int qs_intr_pkt(struct ata_host_set *host_set)
394 DPRINTK("SFF=%08x%08x: sCHAN=%u sHST=%d sDST=%02x\n", 396 DPRINTK("SFF=%08x%08x: sCHAN=%u sHST=%d sDST=%02x\n",
395 sff1, sff0, port_no, sHST, sDST); 397 sff1, sff0, port_no, sHST, sDST);
396 handled = 1; 398 handled = 1;
397 if (ap && !(ap->flags & 399 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
398 (ATA_FLAG_PORT_DISABLED|ATA_FLAG_NOINTR))) {
399 struct ata_queued_cmd *qc; 400 struct ata_queued_cmd *qc;
400 struct qs_port_priv *pp = ap->private_data; 401 struct qs_port_priv *pp = ap->private_data;
401 if (!pp || pp->state != qs_state_pkt) 402 if (!pp || pp->state != qs_state_pkt)
402 continue; 403 continue;
403 qc = ata_qc_from_tag(ap, ap->active_tag); 404 qc = ata_qc_from_tag(ap, ap->active_tag);
404 if (qc && (!(qc->tf.ctl & ATA_NIEN))) { 405 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
405 switch (sHST) { 406 switch (sHST) {
406 case 0: /* successful CPB */ 407 case 0: /* successful CPB */
407 case 3: /* device error */ 408 case 3: /* device error */
@@ -428,13 +429,13 @@ static inline unsigned int qs_intr_mmio(struct ata_host_set *host_set)
428 struct ata_port *ap; 429 struct ata_port *ap;
429 ap = host_set->ports[port_no]; 430 ap = host_set->ports[port_no];
430 if (ap && 431 if (ap &&
431 !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) { 432 !(ap->flags & ATA_FLAG_DISABLED)) {
432 struct ata_queued_cmd *qc; 433 struct ata_queued_cmd *qc;
433 struct qs_port_priv *pp = ap->private_data; 434 struct qs_port_priv *pp = ap->private_data;
434 if (!pp || pp->state != qs_state_mmio) 435 if (!pp || pp->state != qs_state_mmio)
435 continue; 436 continue;
436 qc = ata_qc_from_tag(ap, ap->active_tag); 437 qc = ata_qc_from_tag(ap, ap->active_tag);
437 if (qc && (!(qc->tf.ctl & ATA_NIEN))) { 438 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
438 439
439 /* check main status, clearing INTRQ */ 440 /* check main status, clearing INTRQ */
440 u8 status = ata_check_status(ap); 441 u8 status = ata_check_status(ap);
diff --git a/drivers/scsi/sata_sil.c b/drivers/scsi/sata_sil.c
index 106627299d55..bc9f918a7f28 100644
--- a/drivers/scsi/sata_sil.c
+++ b/drivers/scsi/sata_sil.c
@@ -46,7 +46,7 @@
46#include <linux/libata.h> 46#include <linux/libata.h>
47 47
48#define DRV_NAME "sata_sil" 48#define DRV_NAME "sata_sil"
49#define DRV_VERSION "0.9" 49#define DRV_VERSION "1.0"
50 50
51enum { 51enum {
52 /* 52 /*
@@ -54,8 +54,9 @@ enum {
54 */ 54 */
55 SIL_FLAG_RERR_ON_DMA_ACT = (1 << 29), 55 SIL_FLAG_RERR_ON_DMA_ACT = (1 << 29),
56 SIL_FLAG_MOD15WRITE = (1 << 30), 56 SIL_FLAG_MOD15WRITE = (1 << 30),
57
57 SIL_DFL_HOST_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 58 SIL_DFL_HOST_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
58 ATA_FLAG_MMIO, 59 ATA_FLAG_MMIO | ATA_FLAG_HRST_TO_RESUME,
59 60
60 /* 61 /*
61 * Controller IDs 62 * Controller IDs
@@ -84,6 +85,20 @@ enum {
84 /* BMDMA/BMDMA2 */ 85 /* BMDMA/BMDMA2 */
85 SIL_INTR_STEERING = (1 << 1), 86 SIL_INTR_STEERING = (1 << 1),
86 87
88 SIL_DMA_ENABLE = (1 << 0), /* DMA run switch */
89 SIL_DMA_RDWR = (1 << 3), /* DMA Rd-Wr */
90 SIL_DMA_SATA_IRQ = (1 << 4), /* OR of all SATA IRQs */
91 SIL_DMA_ACTIVE = (1 << 16), /* DMA running */
92 SIL_DMA_ERROR = (1 << 17), /* PCI bus error */
93 SIL_DMA_COMPLETE = (1 << 18), /* cmd complete / IRQ pending */
94 SIL_DMA_N_SATA_IRQ = (1 << 6), /* SATA_IRQ for the next channel */
95 SIL_DMA_N_ACTIVE = (1 << 24), /* ACTIVE for the next channel */
96 SIL_DMA_N_ERROR = (1 << 25), /* ERROR for the next channel */
97 SIL_DMA_N_COMPLETE = (1 << 26), /* COMPLETE for the next channel */
98
99 /* SIEN */
100 SIL_SIEN_N = (1 << 16), /* triggered by SError.N */
101
87 /* 102 /*
88 * Others 103 * Others
89 */ 104 */
@@ -96,6 +111,10 @@ static void sil_dev_config(struct ata_port *ap, struct ata_device *dev);
96static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg); 111static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg);
97static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); 112static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
98static void sil_post_set_mode (struct ata_port *ap); 113static void sil_post_set_mode (struct ata_port *ap);
114static irqreturn_t sil_interrupt(int irq, void *dev_instance,
115 struct pt_regs *regs);
116static void sil_freeze(struct ata_port *ap);
117static void sil_thaw(struct ata_port *ap);
99 118
100 119
101static const struct pci_device_id sil_pci_tbl[] = { 120static const struct pci_device_id sil_pci_tbl[] = {
@@ -155,6 +174,7 @@ static struct scsi_host_template sil_sht = {
155 .proc_name = DRV_NAME, 174 .proc_name = DRV_NAME,
156 .dma_boundary = ATA_DMA_BOUNDARY, 175 .dma_boundary = ATA_DMA_BOUNDARY,
157 .slave_configure = ata_scsi_slave_config, 176 .slave_configure = ata_scsi_slave_config,
177 .slave_destroy = ata_scsi_slave_destroy,
158 .bios_param = ata_std_bios_param, 178 .bios_param = ata_std_bios_param,
159}; 179};
160 180
@@ -166,7 +186,6 @@ static const struct ata_port_operations sil_ops = {
166 .check_status = ata_check_status, 186 .check_status = ata_check_status,
167 .exec_command = ata_exec_command, 187 .exec_command = ata_exec_command,
168 .dev_select = ata_std_dev_select, 188 .dev_select = ata_std_dev_select,
169 .probe_reset = ata_std_probe_reset,
170 .post_set_mode = sil_post_set_mode, 189 .post_set_mode = sil_post_set_mode,
171 .bmdma_setup = ata_bmdma_setup, 190 .bmdma_setup = ata_bmdma_setup,
172 .bmdma_start = ata_bmdma_start, 191 .bmdma_start = ata_bmdma_start,
@@ -174,8 +193,12 @@ static const struct ata_port_operations sil_ops = {
174 .bmdma_status = ata_bmdma_status, 193 .bmdma_status = ata_bmdma_status,
175 .qc_prep = ata_qc_prep, 194 .qc_prep = ata_qc_prep,
176 .qc_issue = ata_qc_issue_prot, 195 .qc_issue = ata_qc_issue_prot,
177 .eng_timeout = ata_eng_timeout, 196 .data_xfer = ata_mmio_data_xfer,
178 .irq_handler = ata_interrupt, 197 .freeze = sil_freeze,
198 .thaw = sil_thaw,
199 .error_handler = ata_bmdma_error_handler,
200 .post_internal_cmd = ata_bmdma_post_internal_cmd,
201 .irq_handler = sil_interrupt,
179 .irq_clear = ata_bmdma_irq_clear, 202 .irq_clear = ata_bmdma_irq_clear,
180 .scr_read = sil_scr_read, 203 .scr_read = sil_scr_read,
181 .scr_write = sil_scr_write, 204 .scr_write = sil_scr_write,
@@ -220,6 +243,7 @@ static const struct {
220 unsigned long tf; /* ATA taskfile register block */ 243 unsigned long tf; /* ATA taskfile register block */
221 unsigned long ctl; /* ATA control/altstatus register block */ 244 unsigned long ctl; /* ATA control/altstatus register block */
222 unsigned long bmdma; /* DMA register block */ 245 unsigned long bmdma; /* DMA register block */
246 unsigned long bmdma2; /* DMA register block #2 */
223 unsigned long fifo_cfg; /* FIFO Valid Byte Count and Control */ 247 unsigned long fifo_cfg; /* FIFO Valid Byte Count and Control */
224 unsigned long scr; /* SATA control register block */ 248 unsigned long scr; /* SATA control register block */
225 unsigned long sien; /* SATA Interrupt Enable register */ 249 unsigned long sien; /* SATA Interrupt Enable register */
@@ -227,10 +251,10 @@ static const struct {
227 unsigned long sfis_cfg; /* SATA FIS reception config register */ 251 unsigned long sfis_cfg; /* SATA FIS reception config register */
228} sil_port[] = { 252} sil_port[] = {
229 /* port 0 ... */ 253 /* port 0 ... */
230 { 0x80, 0x8A, 0x00, 0x40, 0x100, 0x148, 0xb4, 0x14c }, 254 { 0x80, 0x8A, 0x00, 0x10, 0x40, 0x100, 0x148, 0xb4, 0x14c },
231 { 0xC0, 0xCA, 0x08, 0x44, 0x180, 0x1c8, 0xf4, 0x1cc }, 255 { 0xC0, 0xCA, 0x08, 0x18, 0x44, 0x180, 0x1c8, 0xf4, 0x1cc },
232 { 0x280, 0x28A, 0x200, 0x240, 0x300, 0x348, 0x2b4, 0x34c }, 256 { 0x280, 0x28A, 0x200, 0x210, 0x240, 0x300, 0x348, 0x2b4, 0x34c },
233 { 0x2C0, 0x2CA, 0x208, 0x244, 0x380, 0x3c8, 0x2f4, 0x3cc }, 257 { 0x2C0, 0x2CA, 0x208, 0x218, 0x244, 0x380, 0x3c8, 0x2f4, 0x3cc },
234 /* ... port 3 */ 258 /* ... port 3 */
235}; 259};
236 260
@@ -263,7 +287,7 @@ static void sil_post_set_mode (struct ata_port *ap)
263 287
264 for (i = 0; i < 2; i++) { 288 for (i = 0; i < 2; i++) {
265 dev = &ap->device[i]; 289 dev = &ap->device[i];
266 if (!ata_dev_present(dev)) 290 if (!ata_dev_enabled(dev))
267 dev_mode[i] = 0; /* PIO0/1/2 */ 291 dev_mode[i] = 0; /* PIO0/1/2 */
268 else if (dev->flags & ATA_DFLAG_PIO) 292 else if (dev->flags & ATA_DFLAG_PIO)
269 dev_mode[i] = 1; /* PIO3/4 */ 293 dev_mode[i] = 1; /* PIO3/4 */
@@ -314,6 +338,151 @@ static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
314 writel(val, mmio); 338 writel(val, mmio);
315} 339}
316 340
341static void sil_host_intr(struct ata_port *ap, u32 bmdma2)
342{
343 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
344 u8 status;
345
346 if (unlikely(bmdma2 & SIL_DMA_SATA_IRQ)) {
347 u32 serror;
348
349 /* SIEN doesn't mask SATA IRQs on some 3112s. Those
350 * controllers continue to assert IRQ as long as
351 * SError bits are pending. Clear SError immediately.
352 */
353 serror = sil_scr_read(ap, SCR_ERROR);
354 sil_scr_write(ap, SCR_ERROR, serror);
355
356 /* Trigger hotplug and accumulate SError only if the
357 * port isn't already frozen. Otherwise, PHY events
 358 * during hardreset make controllers with broken SIEN
359 * repeat probing needlessly.
360 */
361 if (!(ap->flags & ATA_FLAG_FROZEN)) {
362 ata_ehi_hotplugged(&ap->eh_info);
363 ap->eh_info.serror |= serror;
364 }
365
366 goto freeze;
367 }
368
369 if (unlikely(!qc || qc->tf.ctl & ATA_NIEN))
370 goto freeze;
371
 372 /* Check whether we are expecting an interrupt in this state */
373 switch (ap->hsm_task_state) {
374 case HSM_ST_FIRST:
 375 /* Some pre-ATAPI-4 devices assert INTRQ
 376 * in this state when ready to receive a CDB.
377 */
378
 379 /* Checking the ATA_DFLAG_CDB_INTR flag is enough here:
 380 * it is set only for ATAPI devices, so there is no
 381 * need to check is_atapi_taskfile(&qc->tf) again.
382 */
383 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
384 goto err_hsm;
385 break;
386 case HSM_ST_LAST:
387 if (qc->tf.protocol == ATA_PROT_DMA ||
388 qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
389 /* clear DMA-Start bit */
390 ap->ops->bmdma_stop(qc);
391
392 if (bmdma2 & SIL_DMA_ERROR) {
393 qc->err_mask |= AC_ERR_HOST_BUS;
394 ap->hsm_task_state = HSM_ST_ERR;
395 }
396 }
397 break;
398 case HSM_ST:
399 break;
400 default:
401 goto err_hsm;
402 }
403
404 /* check main status, clearing INTRQ */
405 status = ata_chk_status(ap);
406 if (unlikely(status & ATA_BUSY))
407 goto err_hsm;
408
409 /* ack bmdma irq events */
410 ata_bmdma_irq_clear(ap);
411
412 /* kick HSM in the ass */
413 ata_hsm_move(ap, qc, status, 0);
414
415 return;
416
417 err_hsm:
418 qc->err_mask |= AC_ERR_HSM;
419 freeze:
420 ata_port_freeze(ap);
421}
422
423static irqreturn_t sil_interrupt(int irq, void *dev_instance,
424 struct pt_regs *regs)
425{
426 struct ata_host_set *host_set = dev_instance;
427 void __iomem *mmio_base = host_set->mmio_base;
428 int handled = 0;
429 int i;
430
431 spin_lock(&host_set->lock);
432
433 for (i = 0; i < host_set->n_ports; i++) {
 434 struct ata_port *ap = host_set->ports[i];
 435 u32 bmdma2;
 436
 437 if (unlikely(!ap || ap->flags & ATA_FLAG_DISABLED))
 438 continue;
 439 bmdma2 = readl(mmio_base + sil_port[ap->port_no].bmdma2);
440 if (bmdma2 == 0xffffffff ||
441 !(bmdma2 & (SIL_DMA_COMPLETE | SIL_DMA_SATA_IRQ)))
442 continue;
443
444 sil_host_intr(ap, bmdma2);
445 handled = 1;
446 }
447
448 spin_unlock(&host_set->lock);
449
450 return IRQ_RETVAL(handled);
451}
452
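
The bmdma2 == 0xffffffff test in the loop above guards against reading a powered-down or detached function: a PCI master abort returns all-ones, and decoding that as status would misfire. The guard, isolated as a sketch:

/* sketch only: MMIO reads from a dead PCI function return all-ones,
 * which must be skipped rather than decoded as event bits */
static inline int sil_mmio_dead(u32 val)
{
	return val == 0xffffffff;
}
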
453static void sil_freeze(struct ata_port *ap)
454{
455 void __iomem *mmio_base = ap->host_set->mmio_base;
456 u32 tmp;
457
458 /* global IRQ mask doesn't block SATA IRQ, turn off explicitly */
459 writel(0, mmio_base + sil_port[ap->port_no].sien);
460
461 /* plug IRQ */
462 tmp = readl(mmio_base + SIL_SYSCFG);
463 tmp |= SIL_MASK_IDE0_INT << ap->port_no;
464 writel(tmp, mmio_base + SIL_SYSCFG);
465 readl(mmio_base + SIL_SYSCFG); /* flush */
466}
467
468static void sil_thaw(struct ata_port *ap)
469{
470 void __iomem *mmio_base = ap->host_set->mmio_base;
471 u32 tmp;
472
473 /* clear IRQ */
474 ata_chk_status(ap);
475 ata_bmdma_irq_clear(ap);
476
477 /* turn on SATA IRQ */
478 writel(SIL_SIEN_N, mmio_base + sil_port[ap->port_no].sien);
479
480 /* turn on IRQ */
481 tmp = readl(mmio_base + SIL_SYSCFG);
482 tmp &= ~(SIL_MASK_IDE0_INT << ap->port_no);
483 writel(tmp, mmio_base + SIL_SYSCFG);
484}
485
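
sil_freeze() and sil_thaw() are symmetric read-modify-write updates of SIL_SYSCFG; freeze additionally reads the register back because MMIO writes are posted and the mask must reach the chip before the caller relies on it. The idiom in isolation, as a hypothetical helper:

static inline void sil_syscfg_rmw(void __iomem *mmio_base, u32 set, u32 clear)
{
	/* hypothetical helper showing the freeze/thaw idiom: RMW the
	 * global config, then flush the posted write with a read-back */
	u32 tmp = readl(mmio_base + SIL_SYSCFG);

	tmp = (tmp | set) & ~clear;
	writel(tmp, mmio_base + SIL_SYSCFG);
	readl(mmio_base + SIL_SYSCFG);	/* flush */
}
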
317/** 486/**
318 * sil_dev_config - Apply device/host-specific errata fixups 487 * sil_dev_config - Apply device/host-specific errata fixups
319 * @ap: Port containing device to be examined 488 * @ap: Port containing device to be examined
@@ -360,16 +529,16 @@ static void sil_dev_config(struct ata_port *ap, struct ata_device *dev)
360 if (slow_down || 529 if (slow_down ||
361 ((ap->flags & SIL_FLAG_MOD15WRITE) && 530 ((ap->flags & SIL_FLAG_MOD15WRITE) &&
362 (quirks & SIL_QUIRK_MOD15WRITE))) { 531 (quirks & SIL_QUIRK_MOD15WRITE))) {
363 printk(KERN_INFO "ata%u(%u): applying Seagate errata fix (mod15write workaround)\n", 532 ata_dev_printk(dev, KERN_INFO, "applying Seagate errata fix "
364 ap->id, dev->devno); 533 "(mod15write workaround)\n");
365 dev->max_sectors = 15; 534 dev->max_sectors = 15;
366 return; 535 return;
367 } 536 }
368 537
369 /* limit to udma5 */ 538 /* limit to udma5 */
370 if (quirks & SIL_QUIRK_UDMA5MAX) { 539 if (quirks & SIL_QUIRK_UDMA5MAX) {
371 printk(KERN_INFO "ata%u(%u): applying Maxtor errata fix %s\n", 540 ata_dev_printk(dev, KERN_INFO,
372 ap->id, dev->devno, model_num); 541 "applying Maxtor errata fix %s\n", model_num);
373 dev->udma_mask &= ATA_UDMA5; 542 dev->udma_mask &= ATA_UDMA5;
374 return; 543 return;
375 } 544 }
@@ -384,16 +553,12 @@ static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
384 int rc; 553 int rc;
385 unsigned int i; 554 unsigned int i;
386 int pci_dev_busy = 0; 555 int pci_dev_busy = 0;
387 u32 tmp, irq_mask; 556 u32 tmp;
388 u8 cls; 557 u8 cls;
389 558
390 if (!printed_version++) 559 if (!printed_version++)
391 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); 560 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
392 561
393 /*
394 * If this driver happens to only be useful on Apple's K2, then
395 * we should check that here as it has a normal Serverworks ID
396 */
397 rc = pci_enable_device(pdev); 562 rc = pci_enable_device(pdev);
398 if (rc) 563 if (rc)
399 return rc; 564 return rc;
@@ -478,31 +643,13 @@ static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
478 } 643 }
479 644
480 if (ent->driver_data == sil_3114) { 645 if (ent->driver_data == sil_3114) {
481 irq_mask = SIL_MASK_4PORT;
482
483 /* flip the magic "make 4 ports work" bit */ 646 /* flip the magic "make 4 ports work" bit */
484 tmp = readl(mmio_base + sil_port[2].bmdma); 647 tmp = readl(mmio_base + sil_port[2].bmdma);
485 if ((tmp & SIL_INTR_STEERING) == 0) 648 if ((tmp & SIL_INTR_STEERING) == 0)
486 writel(tmp | SIL_INTR_STEERING, 649 writel(tmp | SIL_INTR_STEERING,
487 mmio_base + sil_port[2].bmdma); 650 mmio_base + sil_port[2].bmdma);
488
489 } else {
490 irq_mask = SIL_MASK_2PORT;
491 }
492
493 /* make sure IDE0/1/2/3 interrupts are not masked */
494 tmp = readl(mmio_base + SIL_SYSCFG);
495 if (tmp & irq_mask) {
496 tmp &= ~irq_mask;
497 writel(tmp, mmio_base + SIL_SYSCFG);
498 readl(mmio_base + SIL_SYSCFG); /* flush */
499 } 651 }
500 652
501 /* mask all SATA phy-related interrupts */
502 /* TODO: unmask bit 6 (SError N bit) for hotplug */
503 for (i = 0; i < probe_ent->n_ports; i++)
504 writel(0, mmio_base + sil_port[i].sien);
505
506 pci_set_master(pdev); 653 pci_set_master(pdev);
507 654
508 /* FIXME: check ata_device_add return value */ 655 /* FIXME: check ata_device_add return value */
diff --git a/drivers/scsi/sata_sil24.c b/drivers/scsi/sata_sil24.c
index cb9082fd7e2f..c8b477c67247 100644
--- a/drivers/scsi/sata_sil24.c
+++ b/drivers/scsi/sata_sil24.c
@@ -31,15 +31,15 @@
31#include <asm/io.h> 31#include <asm/io.h>
32 32
33#define DRV_NAME "sata_sil24" 33#define DRV_NAME "sata_sil24"
34#define DRV_VERSION "0.23" 34#define DRV_VERSION "0.24"
35 35
36/* 36/*
37 * Port request block (PRB) 32 bytes 37 * Port request block (PRB) 32 bytes
38 */ 38 */
39struct sil24_prb { 39struct sil24_prb {
40 u16 ctrl; 40 __le16 ctrl;
41 u16 prot; 41 __le16 prot;
42 u32 rx_cnt; 42 __le32 rx_cnt;
43 u8 fis[6 * 4]; 43 u8 fis[6 * 4];
44}; 44};
45 45
@@ -47,17 +47,17 @@ struct sil24_prb {
47 * Scatter gather entry (SGE) 16 bytes 47 * Scatter gather entry (SGE) 16 bytes
48 */ 48 */
49struct sil24_sge { 49struct sil24_sge {
50 u64 addr; 50 __le64 addr;
51 u32 cnt; 51 __le32 cnt;
52 u32 flags; 52 __le32 flags;
53}; 53};
54 54
55/* 55/*
56 * Port multiplier 56 * Port multiplier
57 */ 57 */
58struct sil24_port_multiplier { 58struct sil24_port_multiplier {
59 u32 diag; 59 __le32 diag;
60 u32 sactive; 60 __le32 sactive;
61}; 61};
62 62
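
The u16/u32 to __le16/__le32 switch changes no generated code; it tells sparse that these DMA-visible structures are little-endian on the wire, so every CPU-endian store must pass through cpu_to_le*(), as the PRB setup later in this patch does. A minimal sketch of the rule:

static inline void prb_set_ctrl(struct sil24_prb *prb, u16 ctrl)
{
	/* illustrative only: __le16 fields hold little-endian values;
	 * a bare "prb->ctrl = ctrl" would break on big-endian hosts
	 * and now draws a sparse warning */
	prb->ctrl = cpu_to_le16(ctrl);
}
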
63enum { 63enum {
@@ -86,12 +86,21 @@ enum {
86 /* HOST_SLOT_STAT bits */ 86 /* HOST_SLOT_STAT bits */
87 HOST_SSTAT_ATTN = (1 << 31), 87 HOST_SSTAT_ATTN = (1 << 31),
88 88
89 /* HOST_CTRL bits */
90 HOST_CTRL_M66EN = (1 << 16), /* M66EN PCI bus signal */
91 HOST_CTRL_TRDY = (1 << 17), /* latched PCI TRDY */
92 HOST_CTRL_STOP = (1 << 18), /* latched PCI STOP */
93 HOST_CTRL_DEVSEL = (1 << 19), /* latched PCI DEVSEL */
94 HOST_CTRL_REQ64 = (1 << 20), /* latched PCI REQ64 */
95
89 /* 96 /*
90 * Port registers 97 * Port registers
91 * (8192 bytes @ +0x0000, +0x2000, +0x4000 and +0x6000 @ BAR2) 98 * (8192 bytes @ +0x0000, +0x2000, +0x4000 and +0x6000 @ BAR2)
92 */ 99 */
93 PORT_REGS_SIZE = 0x2000, 100 PORT_REGS_SIZE = 0x2000,
94 PORT_PRB = 0x0000, /* (32 bytes PRB + 16 bytes SGEs * 6) * 31 (3968 bytes) */ 101
102 PORT_LRAM = 0x0000, /* 31 LRAM slots and PM regs */
103 PORT_LRAM_SLOT_SZ = 0x0080, /* 32 bytes PRB + 2 SGE, ACT... */
95 104
96 PORT_PM = 0x0f80, /* 8 bytes PM * 16 (128 bytes) */ 105 PORT_PM = 0x0f80, /* 8 bytes PM * 16 (128 bytes) */
97 /* 32 bit regs */ 106 /* 32 bit regs */
@@ -142,8 +151,16 @@ enum {
142 PORT_IRQ_PWR_CHG = (1 << 3), /* power management change */ 151 PORT_IRQ_PWR_CHG = (1 << 3), /* power management change */
143 PORT_IRQ_PHYRDY_CHG = (1 << 4), /* PHY ready change */ 152 PORT_IRQ_PHYRDY_CHG = (1 << 4), /* PHY ready change */
144 PORT_IRQ_COMWAKE = (1 << 5), /* COMWAKE received */ 153 PORT_IRQ_COMWAKE = (1 << 5), /* COMWAKE received */
145 PORT_IRQ_UNK_FIS = (1 << 6), /* Unknown FIS received */ 154 PORT_IRQ_UNK_FIS = (1 << 6), /* unknown FIS received */
146 PORT_IRQ_SDB_FIS = (1 << 11), /* SDB FIS received */ 155 PORT_IRQ_DEV_XCHG = (1 << 7), /* device exchanged */
156 PORT_IRQ_8B10B = (1 << 8), /* 8b/10b decode error threshold */
157 PORT_IRQ_CRC = (1 << 9), /* CRC error threshold */
158 PORT_IRQ_HANDSHAKE = (1 << 10), /* handshake error threshold */
159 PORT_IRQ_SDB_NOTIFY = (1 << 11), /* SDB notify received */
160
161 DEF_PORT_IRQ = PORT_IRQ_COMPLETE | PORT_IRQ_ERROR |
162 PORT_IRQ_PHYRDY_CHG | PORT_IRQ_DEV_XCHG |
163 PORT_IRQ_UNK_FIS,
147 164
148 /* bits[27:16] are unmasked (raw) */ 165 /* bits[27:16] are unmasked (raw) */
149 PORT_IRQ_RAW_SHIFT = 16, 166 PORT_IRQ_RAW_SHIFT = 16,
@@ -174,7 +191,7 @@ enum {
174 PORT_CERR_CMD_PCIPERR = 27, /* ctrl[15:13] 110 - PCI parity err while fetching PRB */ 191 PORT_CERR_CMD_PCIPERR = 27, /* ctrl[15:13] 110 - PCI parity err while fetching PRB */
175 PORT_CERR_XFR_UNDEF = 32, /* PSD ecode 00 - undefined */ 192 PORT_CERR_XFR_UNDEF = 32, /* PSD ecode 00 - undefined */
176 PORT_CERR_XFR_TGTABRT = 33, /* PSD ecode 01 - target abort */ 193 PORT_CERR_XFR_TGTABRT = 33, /* PSD ecode 01 - target abort */
177 PORT_CERR_XFR_MSGABRT = 34, /* PSD ecode 10 - master abort */ 194 PORT_CERR_XFR_MSTABRT = 34, /* PSD ecode 10 - master abort */
 178 PORT_CERR_XFR_PCIPERR = 35, /* PSD ecode 11 - PCI parity err during transfer */ 195 PORT_CERR_XFR_PCIPERR = 35, /* PSD ecode 11 - PCI parity err during transfer */
179 PORT_CERR_SENDSERVICE = 36, /* FIS received while sending service */ 196 PORT_CERR_SENDSERVICE = 36, /* FIS received while sending service */
180 197
@@ -202,11 +219,19 @@ enum {
202 SGE_DRD = (1 << 29), /* discard data read (/dev/null) 219 SGE_DRD = (1 << 29), /* discard data read (/dev/null)
203 data address ignored */ 220 data address ignored */
204 221
222 SIL24_MAX_CMDS = 31,
223
205 /* board id */ 224 /* board id */
206 BID_SIL3124 = 0, 225 BID_SIL3124 = 0,
207 BID_SIL3132 = 1, 226 BID_SIL3132 = 1,
208 BID_SIL3131 = 2, 227 BID_SIL3131 = 2,
209 228
229 /* host flags */
230 SIL24_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
231 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
232 ATA_FLAG_NCQ | ATA_FLAG_SKIP_D2H_BSY,
233 SIL24_FLAG_PCIX_IRQ_WOC = (1 << 24), /* IRQ loss errata on PCI-X */
234
210 IRQ_STAT_4PORTS = 0xf, 235 IRQ_STAT_4PORTS = 0xf,
211}; 236};
212 237
@@ -226,6 +251,58 @@ union sil24_cmd_block {
226 struct sil24_atapi_block atapi; 251 struct sil24_atapi_block atapi;
227}; 252};
228 253
254static struct sil24_cerr_info {
255 unsigned int err_mask, action;
256 const char *desc;
257} sil24_cerr_db[] = {
258 [0] = { AC_ERR_DEV, ATA_EH_REVALIDATE,
259 "device error" },
260 [PORT_CERR_DEV] = { AC_ERR_DEV, ATA_EH_REVALIDATE,
261 "device error via D2H FIS" },
262 [PORT_CERR_SDB] = { AC_ERR_DEV, ATA_EH_REVALIDATE,
263 "device error via SDB FIS" },
264 [PORT_CERR_DATA] = { AC_ERR_ATA_BUS, ATA_EH_SOFTRESET,
265 "error in data FIS" },
266 [PORT_CERR_SEND] = { AC_ERR_ATA_BUS, ATA_EH_SOFTRESET,
267 "failed to transmit command FIS" },
268 [PORT_CERR_INCONSISTENT] = { AC_ERR_HSM, ATA_EH_SOFTRESET,
269 "protocol mismatch" },
270 [PORT_CERR_DIRECTION] = { AC_ERR_HSM, ATA_EH_SOFTRESET,
271 "data directon mismatch" },
272 [PORT_CERR_UNDERRUN] = { AC_ERR_HSM, ATA_EH_SOFTRESET,
273 "ran out of SGEs while writing" },
274 [PORT_CERR_OVERRUN] = { AC_ERR_HSM, ATA_EH_SOFTRESET,
275 "ran out of SGEs while reading" },
276 [PORT_CERR_PKT_PROT] = { AC_ERR_HSM, ATA_EH_SOFTRESET,
277 "invalid data directon for ATAPI CDB" },
278 [PORT_CERR_SGT_BOUNDARY] = { AC_ERR_SYSTEM, ATA_EH_SOFTRESET,
279 "SGT no on qword boundary" },
280 [PORT_CERR_SGT_TGTABRT] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
281 "PCI target abort while fetching SGT" },
282 [PORT_CERR_SGT_MSTABRT] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
283 "PCI master abort while fetching SGT" },
284 [PORT_CERR_SGT_PCIPERR] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
285 "PCI parity error while fetching SGT" },
286 [PORT_CERR_CMD_BOUNDARY] = { AC_ERR_SYSTEM, ATA_EH_SOFTRESET,
287 "PRB not on qword boundary" },
288 [PORT_CERR_CMD_TGTABRT] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
289 "PCI target abort while fetching PRB" },
290 [PORT_CERR_CMD_MSTABRT] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
291 "PCI master abort while fetching PRB" },
292 [PORT_CERR_CMD_PCIPERR] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
293 "PCI parity error while fetching PRB" },
294 [PORT_CERR_XFR_UNDEF] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
295 "undefined error while transferring data" },
296 [PORT_CERR_XFR_TGTABRT] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
297 "PCI target abort while transferring data" },
298 [PORT_CERR_XFR_MSTABRT] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
299 "PCI master abort while transferring data" },
300 [PORT_CERR_XFR_PCIPERR] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
301 "PCI parity error while transferring data" },
302 [PORT_CERR_SENDSERVICE] = { AC_ERR_HSM, ATA_EH_SOFTRESET,
303 "FIS received while sending service FIS" },
304};
305
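
The table is indexed directly by the PORT_CMD_ERR value; gaps are zero-initialized, so a NULL desc marks an unrecognized code. The bounds-checked lookup performed later in sil24_error_intr() reduces to this sketch:

	u32 cerr = readl(port + PORT_CMD_ERR);
	struct sil24_cerr_info *ci = NULL;

	if (cerr < ARRAY_SIZE(sil24_cerr_db))
		ci = &sil24_cerr_db[cerr];
	if (ci && ci->desc) {
		/* known code: take err_mask/action/desc from the table */
	} else {
		/* unknown: fall back to AC_ERR_OTHER plus a softreset */
	}
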
229/* 306/*
230 * ap->private_data 307 * ap->private_data
231 * 308 *
@@ -249,12 +326,14 @@ static u8 sil24_check_status(struct ata_port *ap);
249static u32 sil24_scr_read(struct ata_port *ap, unsigned sc_reg); 326static u32 sil24_scr_read(struct ata_port *ap, unsigned sc_reg);
250static void sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val); 327static void sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val);
251static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf); 328static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
252static int sil24_probe_reset(struct ata_port *ap, unsigned int *classes);
253static void sil24_qc_prep(struct ata_queued_cmd *qc); 329static void sil24_qc_prep(struct ata_queued_cmd *qc);
254static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc); 330static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc);
255static void sil24_irq_clear(struct ata_port *ap); 331static void sil24_irq_clear(struct ata_port *ap);
256static void sil24_eng_timeout(struct ata_port *ap);
257static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *regs); 332static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
333static void sil24_freeze(struct ata_port *ap);
334static void sil24_thaw(struct ata_port *ap);
335static void sil24_error_handler(struct ata_port *ap);
336static void sil24_post_internal_cmd(struct ata_queued_cmd *qc);
258static int sil24_port_start(struct ata_port *ap); 337static int sil24_port_start(struct ata_port *ap);
259static void sil24_port_stop(struct ata_port *ap); 338static void sil24_port_stop(struct ata_port *ap);
260static void sil24_host_stop(struct ata_host_set *host_set); 339static void sil24_host_stop(struct ata_host_set *host_set);
@@ -281,7 +360,8 @@ static struct scsi_host_template sil24_sht = {
281 .name = DRV_NAME, 360 .name = DRV_NAME,
282 .ioctl = ata_scsi_ioctl, 361 .ioctl = ata_scsi_ioctl,
283 .queuecommand = ata_scsi_queuecmd, 362 .queuecommand = ata_scsi_queuecmd,
284 .can_queue = ATA_DEF_QUEUE, 363 .change_queue_depth = ata_scsi_change_queue_depth,
364 .can_queue = SIL24_MAX_CMDS,
285 .this_id = ATA_SHT_THIS_ID, 365 .this_id = ATA_SHT_THIS_ID,
286 .sg_tablesize = LIBATA_MAX_PRD, 366 .sg_tablesize = LIBATA_MAX_PRD,
287 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 367 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
@@ -290,6 +370,7 @@ static struct scsi_host_template sil24_sht = {
290 .proc_name = DRV_NAME, 370 .proc_name = DRV_NAME,
291 .dma_boundary = ATA_DMA_BOUNDARY, 371 .dma_boundary = ATA_DMA_BOUNDARY,
292 .slave_configure = ata_scsi_slave_config, 372 .slave_configure = ata_scsi_slave_config,
373 .slave_destroy = ata_scsi_slave_destroy,
293 .bios_param = ata_std_bios_param, 374 .bios_param = ata_std_bios_param,
294}; 375};
295 376
@@ -304,19 +385,20 @@ static const struct ata_port_operations sil24_ops = {
304 385
305 .tf_read = sil24_tf_read, 386 .tf_read = sil24_tf_read,
306 387
307 .probe_reset = sil24_probe_reset,
308
309 .qc_prep = sil24_qc_prep, 388 .qc_prep = sil24_qc_prep,
310 .qc_issue = sil24_qc_issue, 389 .qc_issue = sil24_qc_issue,
311 390
312 .eng_timeout = sil24_eng_timeout,
313
314 .irq_handler = sil24_interrupt, 391 .irq_handler = sil24_interrupt,
315 .irq_clear = sil24_irq_clear, 392 .irq_clear = sil24_irq_clear,
316 393
317 .scr_read = sil24_scr_read, 394 .scr_read = sil24_scr_read,
318 .scr_write = sil24_scr_write, 395 .scr_write = sil24_scr_write,
319 396
397 .freeze = sil24_freeze,
398 .thaw = sil24_thaw,
399 .error_handler = sil24_error_handler,
400 .post_internal_cmd = sil24_post_internal_cmd,
401
320 .port_start = sil24_port_start, 402 .port_start = sil24_port_start,
321 .port_stop = sil24_port_stop, 403 .port_stop = sil24_port_stop,
322 .host_stop = sil24_host_stop, 404 .host_stop = sil24_host_stop,
@@ -333,9 +415,8 @@ static struct ata_port_info sil24_port_info[] = {
333 /* sil_3124 */ 415 /* sil_3124 */
334 { 416 {
335 .sht = &sil24_sht, 417 .sht = &sil24_sht,
336 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 418 .host_flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(4) |
337 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA | 419 SIL24_FLAG_PCIX_IRQ_WOC,
338 SIL24_NPORTS2FLAG(4),
339 .pio_mask = 0x1f, /* pio0-4 */ 420 .pio_mask = 0x1f, /* pio0-4 */
340 .mwdma_mask = 0x07, /* mwdma0-2 */ 421 .mwdma_mask = 0x07, /* mwdma0-2 */
341 .udma_mask = 0x3f, /* udma0-5 */ 422 .udma_mask = 0x3f, /* udma0-5 */
@@ -344,9 +425,7 @@ static struct ata_port_info sil24_port_info[] = {
344 /* sil_3132 */ 425 /* sil_3132 */
345 { 426 {
346 .sht = &sil24_sht, 427 .sht = &sil24_sht,
347 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 428 .host_flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(2),
348 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
349 SIL24_NPORTS2FLAG(2),
350 .pio_mask = 0x1f, /* pio0-4 */ 429 .pio_mask = 0x1f, /* pio0-4 */
351 .mwdma_mask = 0x07, /* mwdma0-2 */ 430 .mwdma_mask = 0x07, /* mwdma0-2 */
352 .udma_mask = 0x3f, /* udma0-5 */ 431 .udma_mask = 0x3f, /* udma0-5 */
@@ -355,9 +434,7 @@ static struct ata_port_info sil24_port_info[] = {
355 /* sil_3131/sil_3531 */ 434 /* sil_3131/sil_3531 */
356 { 435 {
357 .sht = &sil24_sht, 436 .sht = &sil24_sht,
358 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 437 .host_flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(1),
359 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
360 SIL24_NPORTS2FLAG(1),
361 .pio_mask = 0x1f, /* pio0-4 */ 438 .pio_mask = 0x1f, /* pio0-4 */
362 .mwdma_mask = 0x07, /* mwdma0-2 */ 439 .mwdma_mask = 0x07, /* mwdma0-2 */
363 .udma_mask = 0x3f, /* udma0-5 */ 440 .udma_mask = 0x3f, /* udma0-5 */
@@ -365,6 +442,13 @@ static struct ata_port_info sil24_port_info[] = {
365 }, 442 },
366}; 443};
367 444
445static int sil24_tag(int tag)
446{
447 if (unlikely(ata_tag_internal(tag)))
448 return 0;
449 return tag;
450}
451
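
libata's new error handling issues internal (probe and recovery) commands with a reserved tag, ATA_TAG_INTERNAL, one past the 31 hardware slots; sil24_tag() folds it onto slot 0, which is safe because internal commands only run while the port is otherwise quiescent. Illustratively, assuming ATA_MAX_QUEUE == 32:

static void sil24_check_tag_mapping(void)
{
	/* illustrative self-check, assuming ATA_MAX_QUEUE == 32 */
	BUILD_BUG_ON(SIL24_MAX_CMDS != ATA_MAX_QUEUE - 1);
	/* tags 0..30 map 1:1 onto hardware slots 0..30; tag 31
	 * (ATA_TAG_INTERNAL) is folded onto slot 0 by sil24_tag() */
}
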
368static void sil24_dev_config(struct ata_port *ap, struct ata_device *dev) 452static void sil24_dev_config(struct ata_port *ap, struct ata_device *dev)
369{ 453{
370 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr; 454 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
@@ -426,56 +510,65 @@ static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
426 *tf = pp->tf; 510 *tf = pp->tf;
427} 511}
428 512
429static int sil24_softreset(struct ata_port *ap, int verbose, 513static int sil24_init_port(struct ata_port *ap)
430 unsigned int *class) 514{
515 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
516 u32 tmp;
517
518 writel(PORT_CS_INIT, port + PORT_CTRL_STAT);
519 ata_wait_register(port + PORT_CTRL_STAT,
520 PORT_CS_INIT, PORT_CS_INIT, 10, 100);
521 tmp = ata_wait_register(port + PORT_CTRL_STAT,
522 PORT_CS_RDY, 0, 10, 100);
523
524 if ((tmp & (PORT_CS_INIT | PORT_CS_RDY)) != PORT_CS_RDY)
525 return -EIO;
526 return 0;
527}
528
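
sil24_init_port() leans on ata_wait_register(), which polls a register until the masked bits stop matching the given value and returns the last value read; the caller re-checks the bits to tell success from timeout. Roughly, assuming msleep-based polling, the helper behaves like this sketch (not the libata source):

static u32 wait_register_sketch(void __iomem *reg, u32 mask, u32 val,
				unsigned long interval_msec,
				unsigned long timeout_msec)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(timeout_msec);
	u32 tmp = readl(reg);

	/* poll while the masked bits still equal val */
	while ((tmp & mask) == val && time_before(jiffies, deadline)) {
		msleep(interval_msec);
		tmp = readl(reg);
	}
	return tmp;
}
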
529static int sil24_softreset(struct ata_port *ap, unsigned int *class)
431{ 530{
432 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr; 531 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
433 struct sil24_port_priv *pp = ap->private_data; 532 struct sil24_port_priv *pp = ap->private_data;
434 struct sil24_prb *prb = &pp->cmd_block[0].ata.prb; 533 struct sil24_prb *prb = &pp->cmd_block[0].ata.prb;
435 dma_addr_t paddr = pp->cmd_block_dma; 534 dma_addr_t paddr = pp->cmd_block_dma;
436 unsigned long timeout = jiffies + ATA_TMOUT_BOOT * HZ; 535 u32 mask, irq_stat;
437 u32 irq_enable, irq_stat; 536 const char *reason;
438 537
439 DPRINTK("ENTER\n"); 538 DPRINTK("ENTER\n");
440 539
441 if (!sata_dev_present(ap)) { 540 if (ata_port_offline(ap)) {
442 DPRINTK("PHY reports no device\n"); 541 DPRINTK("PHY reports no device\n");
443 *class = ATA_DEV_NONE; 542 *class = ATA_DEV_NONE;
444 goto out; 543 goto out;
445 } 544 }
446 545
 447 /* temporarily turn off IRQs during SRST */ 546 /* put the port into a known state */
 448 irq_enable = readl(port + PORT_IRQ_ENABLE_SET); 547 if (sil24_init_port(ap)) {
 449 writel(irq_enable, port + PORT_IRQ_ENABLE_CLR); 548 reason = "port not ready";
450 549 goto err;
451 /* 550 }
452 * XXX: Not sure whether the following sleep is needed or not.
453 * The original driver had it. So....
454 */
455 msleep(10);
456 551
552 /* do SRST */
457 prb->ctrl = cpu_to_le16(PRB_CTRL_SRST); 553 prb->ctrl = cpu_to_le16(PRB_CTRL_SRST);
458 prb->fis[1] = 0; /* no PM yet */ 554 prb->fis[1] = 0; /* no PM yet */
459 555
460 writel((u32)paddr, port + PORT_CMD_ACTIVATE); 556 writel((u32)paddr, port + PORT_CMD_ACTIVATE);
557 writel((u64)paddr >> 32, port + PORT_CMD_ACTIVATE + 4);
461 558
462 do { 559 mask = (PORT_IRQ_COMPLETE | PORT_IRQ_ERROR) << PORT_IRQ_RAW_SHIFT;
463 irq_stat = readl(port + PORT_IRQ_STAT); 560 irq_stat = ata_wait_register(port + PORT_IRQ_STAT, mask, 0x0,
464 writel(irq_stat, port + PORT_IRQ_STAT); /* clear irq */ 561 100, ATA_TMOUT_BOOT / HZ * 1000);
465
466 irq_stat >>= PORT_IRQ_RAW_SHIFT;
467 if (irq_stat & (PORT_IRQ_COMPLETE | PORT_IRQ_ERROR))
468 break;
469 562
470 msleep(100); 563 writel(irq_stat, port + PORT_IRQ_STAT); /* clear IRQs */
471 } while (time_before(jiffies, timeout)); 564 irq_stat >>= PORT_IRQ_RAW_SHIFT;
472
473 /* restore IRQs */
474 writel(irq_enable, port + PORT_IRQ_ENABLE_SET);
475 565
476 if (!(irq_stat & PORT_IRQ_COMPLETE)) { 566 if (!(irq_stat & PORT_IRQ_COMPLETE)) {
477 DPRINTK("EXIT, srst failed\n"); 567 if (irq_stat & PORT_IRQ_ERROR)
478 return -EIO; 568 reason = "SRST command error";
569 else
570 reason = "timeout";
571 goto err;
479 } 572 }
480 573
481 sil24_update_tf(ap); 574 sil24_update_tf(ap);
@@ -487,22 +580,57 @@ static int sil24_softreset(struct ata_port *ap, int verbose,
487 out: 580 out:
488 DPRINTK("EXIT, class=%u\n", *class); 581 DPRINTK("EXIT, class=%u\n", *class);
489 return 0; 582 return 0;
583
584 err:
585 ata_port_printk(ap, KERN_ERR, "softreset failed (%s)\n", reason);
586 return -EIO;
490} 587}
491 588
492static int sil24_hardreset(struct ata_port *ap, int verbose, 589static int sil24_hardreset(struct ata_port *ap, unsigned int *class)
493 unsigned int *class)
494{ 590{
495 unsigned int dummy_class; 591 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
592 const char *reason;
593 int tout_msec, rc;
594 u32 tmp;
496 595
497 /* sil24 doesn't report device signature after hard reset */ 596 /* sil24 does the right thing(tm) without any protection */
498 return sata_std_hardreset(ap, verbose, &dummy_class); 597 sata_set_spd(ap);
499}
500 598
501static int sil24_probe_reset(struct ata_port *ap, unsigned int *classes) 599 tout_msec = 100;
502{ 600 if (ata_port_online(ap))
503 return ata_drive_probe_reset(ap, ata_std_probeinit, 601 tout_msec = 5000;
504 sil24_softreset, sil24_hardreset, 602
505 ata_std_postreset, classes); 603 writel(PORT_CS_DEV_RST, port + PORT_CTRL_STAT);
604 tmp = ata_wait_register(port + PORT_CTRL_STAT,
605 PORT_CS_DEV_RST, PORT_CS_DEV_RST, 10, tout_msec);
606
607 /* SStatus oscillates between zero and valid status after
608 * DEV_RST, debounce it.
609 */
610 rc = sata_phy_debounce(ap, sata_deb_timing_before_fsrst);
611 if (rc) {
612 reason = "PHY debouncing failed";
613 goto err;
614 }
615
616 if (tmp & PORT_CS_DEV_RST) {
617 if (ata_port_offline(ap))
618 return 0;
619 reason = "link not ready";
620 goto err;
621 }
622
623 /* Sil24 doesn't store signature FIS after hardreset, so we
624 * can't wait for BSY to clear. Some devices take a long time
625 * to get ready and those devices will choke if we don't wait
626 * for BSY clearance here. Tell libata to perform follow-up
627 * softreset.
628 */
629 return -EAGAIN;
630
631 err:
632 ata_port_printk(ap, KERN_ERR, "hardreset failed (%s)\n", reason);
633 return -EIO;
506} 634}
507 635
508static inline void sil24_fill_sg(struct ata_queued_cmd *qc, 636static inline void sil24_fill_sg(struct ata_queued_cmd *qc,
@@ -528,17 +656,20 @@ static void sil24_qc_prep(struct ata_queued_cmd *qc)
528{ 656{
529 struct ata_port *ap = qc->ap; 657 struct ata_port *ap = qc->ap;
530 struct sil24_port_priv *pp = ap->private_data; 658 struct sil24_port_priv *pp = ap->private_data;
531 union sil24_cmd_block *cb = pp->cmd_block + qc->tag; 659 union sil24_cmd_block *cb;
532 struct sil24_prb *prb; 660 struct sil24_prb *prb;
533 struct sil24_sge *sge; 661 struct sil24_sge *sge;
662 u16 ctrl = 0;
663
664 cb = &pp->cmd_block[sil24_tag(qc->tag)];
534 665
535 switch (qc->tf.protocol) { 666 switch (qc->tf.protocol) {
536 case ATA_PROT_PIO: 667 case ATA_PROT_PIO:
537 case ATA_PROT_DMA: 668 case ATA_PROT_DMA:
669 case ATA_PROT_NCQ:
538 case ATA_PROT_NODATA: 670 case ATA_PROT_NODATA:
539 prb = &cb->ata.prb; 671 prb = &cb->ata.prb;
540 sge = cb->ata.sge; 672 sge = cb->ata.sge;
541 prb->ctrl = 0;
542 break; 673 break;
543 674
544 case ATA_PROT_ATAPI: 675 case ATA_PROT_ATAPI:
@@ -551,12 +682,10 @@ static void sil24_qc_prep(struct ata_queued_cmd *qc)
551 682
552 if (qc->tf.protocol != ATA_PROT_ATAPI_NODATA) { 683 if (qc->tf.protocol != ATA_PROT_ATAPI_NODATA) {
553 if (qc->tf.flags & ATA_TFLAG_WRITE) 684 if (qc->tf.flags & ATA_TFLAG_WRITE)
554 prb->ctrl = cpu_to_le16(PRB_CTRL_PACKET_WRITE); 685 ctrl = PRB_CTRL_PACKET_WRITE;
555 else 686 else
556 prb->ctrl = cpu_to_le16(PRB_CTRL_PACKET_READ); 687 ctrl = PRB_CTRL_PACKET_READ;
557 } else 688 }
558 prb->ctrl = 0;
559
560 break; 689 break;
561 690
562 default: 691 default:
@@ -565,6 +694,7 @@ static void sil24_qc_prep(struct ata_queued_cmd *qc)
565 BUG(); 694 BUG();
566 } 695 }
567 696
697 prb->ctrl = cpu_to_le16(ctrl);
568 ata_tf_to_fis(&qc->tf, prb->fis, 0); 698 ata_tf_to_fis(&qc->tf, prb->fis, 0);
569 699
570 if (qc->flags & ATA_QCFLAG_DMAMAP) 700 if (qc->flags & ATA_QCFLAG_DMAMAP)
@@ -574,11 +704,18 @@ static void sil24_qc_prep(struct ata_queued_cmd *qc)
574static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc) 704static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc)
575{ 705{
576 struct ata_port *ap = qc->ap; 706 struct ata_port *ap = qc->ap;
577 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
578 struct sil24_port_priv *pp = ap->private_data; 707 struct sil24_port_priv *pp = ap->private_data;
579 dma_addr_t paddr = pp->cmd_block_dma + qc->tag * sizeof(*pp->cmd_block); 708 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
709 unsigned int tag = sil24_tag(qc->tag);
710 dma_addr_t paddr;
711 void __iomem *activate;
712
713 paddr = pp->cmd_block_dma + tag * sizeof(*pp->cmd_block);
714 activate = port + PORT_CMD_ACTIVATE + tag * 8;
715
716 writel((u32)paddr, activate);
717 writel((u64)paddr >> 32, activate + 4);
580 718
581 writel((u32)paddr, port + PORT_CMD_ACTIVATE);
582 return 0; 719 return 0;
583} 720}
584 721
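
Command activation now writes the full 64-bit PRB address into the slot's activation registers. With 32-bit activation disabled in the init path below, the assumption here is that the controller latches the slot on the upper-dword write, so the low-then-high order matters. As a hypothetical helper:

static inline void sil24_activate_slot(void __iomem *activate, dma_addr_t paddr)
{
	/* hypothetical helper; assumes 64-bit activation mode, where
	 * the upper-dword write triggers the slot fetch */
	writel((u32)paddr, activate);		/* low dword first */
	writel((u64)paddr >> 32, activate + 4);	/* high dword last */
}
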
@@ -587,162 +724,139 @@ static void sil24_irq_clear(struct ata_port *ap)
587 /* unused */ 724 /* unused */
588} 725}
589 726
590static int __sil24_restart_controller(void __iomem *port) 727static void sil24_freeze(struct ata_port *ap)
591{ 728{
592 u32 tmp; 729 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
593 int cnt;
594
595 writel(PORT_CS_INIT, port + PORT_CTRL_STAT);
596
597 /* Max ~10ms */
598 for (cnt = 0; cnt < 10000; cnt++) {
599 tmp = readl(port + PORT_CTRL_STAT);
600 if (tmp & PORT_CS_RDY)
601 return 0;
602 udelay(1);
603 }
604 730
605 return -1; 731 /* Port-wide IRQ mask in HOST_CTRL doesn't really work, clear
732 * PORT_IRQ_ENABLE instead.
733 */
734 writel(0xffff, port + PORT_IRQ_ENABLE_CLR);
606} 735}
607 736
608static void sil24_restart_controller(struct ata_port *ap) 737static void sil24_thaw(struct ata_port *ap)
609{ 738{
610 if (__sil24_restart_controller((void __iomem *)ap->ioaddr.cmd_addr)) 739 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
611 printk(KERN_ERR DRV_NAME 740 u32 tmp;
612 " ata%u: failed to restart controller\n", ap->id); 741
742 /* clear IRQ */
743 tmp = readl(port + PORT_IRQ_STAT);
744 writel(tmp, port + PORT_IRQ_STAT);
745
746 /* turn IRQ back on */
747 writel(DEF_PORT_IRQ, port + PORT_IRQ_ENABLE_SET);
613} 748}
614 749
615static int __sil24_reset_controller(void __iomem *port) 750static void sil24_error_intr(struct ata_port *ap)
616{ 751{
617 int cnt; 752 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
618 u32 tmp; 753 struct ata_eh_info *ehi = &ap->eh_info;
754 int freeze = 0;
755 u32 irq_stat;
619 756
620 /* Reset controller state. Is this correct? */ 757 /* on error, we need to clear IRQ explicitly */
621 writel(PORT_CS_DEV_RST, port + PORT_CTRL_STAT); 758 irq_stat = readl(port + PORT_IRQ_STAT);
622 readl(port + PORT_CTRL_STAT); /* sync */ 759 writel(irq_stat, port + PORT_IRQ_STAT);
623 760
624 /* Max ~100ms */ 761 /* first, analyze and record host port events */
625 for (cnt = 0; cnt < 1000; cnt++) { 762 ata_ehi_clear_desc(ehi);
626 udelay(100);
627 tmp = readl(port + PORT_CTRL_STAT);
628 if (!(tmp & PORT_CS_DEV_RST))
629 break;
630 }
631 763
632 if (tmp & PORT_CS_DEV_RST) 764 ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);
633 return -1;
634 765
635 if (tmp & PORT_CS_RDY) 766 if (irq_stat & (PORT_IRQ_PHYRDY_CHG | PORT_IRQ_DEV_XCHG)) {
636 return 0; 767 ata_ehi_hotplugged(ehi);
768 ata_ehi_push_desc(ehi, ", %s",
769 irq_stat & PORT_IRQ_PHYRDY_CHG ?
770 "PHY RDY changed" : "device exchanged");
771 freeze = 1;
772 }
637 773
638 return __sil24_restart_controller(port); 774 if (irq_stat & PORT_IRQ_UNK_FIS) {
639} 775 ehi->err_mask |= AC_ERR_HSM;
776 ehi->action |= ATA_EH_SOFTRESET;
 777 ata_ehi_push_desc(ehi, ", unknown FIS");
778 freeze = 1;
779 }
640 780
641static void sil24_reset_controller(struct ata_port *ap) 781 /* deal with command error */
642{ 782 if (irq_stat & PORT_IRQ_ERROR) {
643 printk(KERN_NOTICE DRV_NAME 783 struct sil24_cerr_info *ci = NULL;
644 " ata%u: resetting controller...\n", ap->id); 784 unsigned int err_mask = 0, action = 0;
645 if (__sil24_reset_controller((void __iomem *)ap->ioaddr.cmd_addr)) 785 struct ata_queued_cmd *qc;
646 printk(KERN_ERR DRV_NAME 786 u32 cerr;
647 " ata%u: failed to reset controller\n", ap->id); 787
648} 788 /* analyze CMD_ERR */
789 cerr = readl(port + PORT_CMD_ERR);
790 if (cerr < ARRAY_SIZE(sil24_cerr_db))
791 ci = &sil24_cerr_db[cerr];
792
793 if (ci && ci->desc) {
794 err_mask |= ci->err_mask;
795 action |= ci->action;
796 ata_ehi_push_desc(ehi, ", %s", ci->desc);
797 } else {
798 err_mask |= AC_ERR_OTHER;
799 action |= ATA_EH_SOFTRESET;
800 ata_ehi_push_desc(ehi, ", unknown command error %d",
801 cerr);
802 }
649 803
650static void sil24_eng_timeout(struct ata_port *ap) 804 /* record error info */
651{ 805 qc = ata_qc_from_tag(ap, ap->active_tag);
652 struct ata_queued_cmd *qc; 806 if (qc) {
807 sil24_update_tf(ap);
808 qc->err_mask |= err_mask;
809 } else
810 ehi->err_mask |= err_mask;
653 811
654 qc = ata_qc_from_tag(ap, ap->active_tag); 812 ehi->action |= action;
813 }
655 814
656 printk(KERN_ERR "ata%u: command timeout\n", ap->id); 815 /* freeze or abort */
657 qc->err_mask |= AC_ERR_TIMEOUT; 816 if (freeze)
658 ata_eh_qc_complete(qc); 817 ata_port_freeze(ap);
818 else
819 ata_port_abort(ap);
820}
659 821
660 sil24_reset_controller(ap); 822static void sil24_finish_qc(struct ata_queued_cmd *qc)
823{
824 if (qc->flags & ATA_QCFLAG_RESULT_TF)
825 sil24_update_tf(qc->ap);
661} 826}
662 827
663static void sil24_error_intr(struct ata_port *ap, u32 slot_stat) 828static inline void sil24_host_intr(struct ata_port *ap)
664{ 829{
665 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
666 struct sil24_port_priv *pp = ap->private_data;
667 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr; 830 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
668 u32 irq_stat, cmd_err, sstatus, serror; 831 u32 slot_stat, qc_active;
669 unsigned int err_mask; 832 int rc;
670 833
671 irq_stat = readl(port + PORT_IRQ_STAT); 834 slot_stat = readl(port + PORT_SLOT_STAT);
672 writel(irq_stat, port + PORT_IRQ_STAT); /* clear irq */
673 835
674 if (!(irq_stat & PORT_IRQ_ERROR)) { 836 if (unlikely(slot_stat & HOST_SSTAT_ATTN)) {
675 /* ignore non-completion, non-error irqs for now */ 837 sil24_error_intr(ap);
676 printk(KERN_WARNING DRV_NAME
677 "ata%u: non-error exception irq (irq_stat %x)\n",
678 ap->id, irq_stat);
679 return; 838 return;
680 } 839 }
681 840
682 cmd_err = readl(port + PORT_CMD_ERR); 841 if (ap->flags & SIL24_FLAG_PCIX_IRQ_WOC)
683 sstatus = readl(port + PORT_SSTATUS); 842 writel(PORT_IRQ_COMPLETE, port + PORT_IRQ_STAT);
684 serror = readl(port + PORT_SERROR);
685 if (serror)
686 writel(serror, port + PORT_SERROR);
687 843
688 /* 844 qc_active = slot_stat & ~HOST_SSTAT_ATTN;
689 * Don't log ATAPI device errors. They're supposed to happen 845 rc = ata_qc_complete_multiple(ap, qc_active, sil24_finish_qc);
690 * and any serious errors will be logged using sense data by 846 if (rc > 0)
691 * the SCSI layer. 847 return;
692 */ 848 if (rc < 0) {
693 if (ap->device[0].class != ATA_DEV_ATAPI || cmd_err > PORT_CERR_SDB) 849 struct ata_eh_info *ehi = &ap->eh_info;
694 printk("ata%u: error interrupt on port%d\n" 850 ehi->err_mask |= AC_ERR_HSM;
695 " stat=0x%x irq=0x%x cmd_err=%d sstatus=0x%x serror=0x%x\n", 851 ehi->action |= ATA_EH_SOFTRESET;
696 ap->id, ap->port_no, slot_stat, irq_stat, cmd_err, sstatus, serror); 852 ata_port_freeze(ap);
697 853 return;
698 if (cmd_err == PORT_CERR_DEV || cmd_err == PORT_CERR_SDB) {
699 /*
700 * Device is reporting error, tf registers are valid.
701 */
702 sil24_update_tf(ap);
703 err_mask = ac_err_mask(pp->tf.command);
704 sil24_restart_controller(ap);
705 } else {
706 /*
707 * Other errors. libata currently doesn't have any
708 * mechanism to report these errors. Just turn on
709 * ATA_ERR.
710 */
711 err_mask = AC_ERR_OTHER;
712 sil24_reset_controller(ap);
713 } 854 }
714 855
715 if (qc) { 856 if (ata_ratelimit())
716 qc->err_mask |= err_mask; 857 ata_port_printk(ap, KERN_INFO, "spurious interrupt "
717 ata_qc_complete(qc); 858 "(slot_stat 0x%x active_tag %d sactive 0x%x)\n",
718 } 859 slot_stat, ap->active_tag, ap->sactive);
719}
720
721static inline void sil24_host_intr(struct ata_port *ap)
722{
723 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
724 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
725 u32 slot_stat;
726
727 slot_stat = readl(port + PORT_SLOT_STAT);
728 if (!(slot_stat & HOST_SSTAT_ATTN)) {
729 struct sil24_port_priv *pp = ap->private_data;
730 /*
 731 * !HOST_SSTAT_ATTN guarantees successful completion,
732 * so reading back tf registers is unnecessary for
733 * most commands. TODO: read tf registers for
734 * commands which require these values on successful
735 * completion (EXECUTE DEVICE DIAGNOSTIC, CHECK POWER,
 736 * DEVICE RESET and READ PORT MULTIPLIER; any more?).
737 */
738 sil24_update_tf(ap);
739
740 if (qc) {
741 qc->err_mask |= ac_err_mask(pp->tf.command);
742 ata_qc_complete(qc);
743 }
744 } else
745 sil24_error_intr(ap, slot_stat);
746} 860}
747 861
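
PORT_SLOT_STAT carries one bit per busy hardware slot. ata_qc_complete_multiple() diffs that against libata's record of issued tags and completes every command whose bit cleared, returning how many it finished, or a negative error if a bit appeared that was never issued. A simplified, illustrative restatement of that bookkeeping (not the libata source):

static int complete_done_slots(struct ata_port *ap, u32 qc_active,
			       void (*finish_qc)(struct ata_queued_cmd *))
{
	u32 done_mask = ap->qc_active ^ qc_active;
	int tag, nr_done = 0;

	if (unlikely(done_mask & qc_active))
		return -EINVAL;	/* a tag we never issued showed up */

	for (tag = 0; done_mask; tag++, done_mask >>= 1) {
		struct ata_queued_cmd *qc;

		if (!(done_mask & 1))
			continue;
		qc = ata_qc_from_tag(ap, tag);
		if (qc) {
			if (finish_qc)
				finish_qc(qc);
			ata_qc_complete(qc);
			nr_done++;
		}
	}
	return nr_done;
}
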
748static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *regs) 862static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
@@ -769,7 +883,7 @@ static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *
769 for (i = 0; i < host_set->n_ports; i++) 883 for (i = 0; i < host_set->n_ports; i++)
770 if (status & (1 << i)) { 884 if (status & (1 << i)) {
771 struct ata_port *ap = host_set->ports[i]; 885 struct ata_port *ap = host_set->ports[i];
772 if (ap && !(ap->flags & ATA_FLAG_PORT_DISABLED)) { 886 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
773 sil24_host_intr(host_set->ports[i]); 887 sil24_host_intr(host_set->ports[i]);
774 handled++; 888 handled++;
775 } else 889 } else
@@ -782,9 +896,35 @@ static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *
782 return IRQ_RETVAL(handled); 896 return IRQ_RETVAL(handled);
783} 897}
784 898
899static void sil24_error_handler(struct ata_port *ap)
900{
901 struct ata_eh_context *ehc = &ap->eh_context;
902
903 if (sil24_init_port(ap)) {
904 ata_eh_freeze_port(ap);
905 ehc->i.action |= ATA_EH_HARDRESET;
906 }
907
908 /* perform recovery */
909 ata_do_eh(ap, ata_std_prereset, sil24_softreset, sil24_hardreset,
910 ata_std_postreset);
911}
912
913static void sil24_post_internal_cmd(struct ata_queued_cmd *qc)
914{
915 struct ata_port *ap = qc->ap;
916
917 if (qc->flags & ATA_QCFLAG_FAILED)
918 qc->err_mask |= AC_ERR_OTHER;
919
920 /* make DMA engine forget about the failed command */
921 if (qc->err_mask)
922 sil24_init_port(ap);
923}
924
785static inline void sil24_cblk_free(struct sil24_port_priv *pp, struct device *dev) 925static inline void sil24_cblk_free(struct sil24_port_priv *pp, struct device *dev)
786{ 926{
787 const size_t cb_size = sizeof(*pp->cmd_block); 927 const size_t cb_size = sizeof(*pp->cmd_block) * SIL24_MAX_CMDS;
788 928
789 dma_free_coherent(dev, cb_size, pp->cmd_block, pp->cmd_block_dma); 929 dma_free_coherent(dev, cb_size, pp->cmd_block, pp->cmd_block_dma);
790} 930}
@@ -794,7 +934,7 @@ static int sil24_port_start(struct ata_port *ap)
794 struct device *dev = ap->host_set->dev; 934 struct device *dev = ap->host_set->dev;
795 struct sil24_port_priv *pp; 935 struct sil24_port_priv *pp;
796 union sil24_cmd_block *cb; 936 union sil24_cmd_block *cb;
797 size_t cb_size = sizeof(*cb); 937 size_t cb_size = sizeof(*cb) * SIL24_MAX_CMDS;
798 dma_addr_t cb_dma; 938 dma_addr_t cb_dma;
799 int rc = -ENOMEM; 939 int rc = -ENOMEM;
800 940
@@ -858,6 +998,7 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
858 void __iomem *host_base = NULL; 998 void __iomem *host_base = NULL;
859 void __iomem *port_base = NULL; 999 void __iomem *port_base = NULL;
860 int i, rc; 1000 int i, rc;
1001 u32 tmp;
861 1002
862 if (!printed_version++) 1003 if (!printed_version++)
863 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); 1004 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
@@ -910,37 +1051,53 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
910 /* 1051 /*
911 * Configure the device 1052 * Configure the device
912 */ 1053 */
913 /* 1054 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
914 * FIXME: This device is certainly 64-bit capable. We just 1055 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
915 * don't know how to use it. After fixing 32bit activation in 1056 if (rc) {
916 * this function, enable 64bit masks here. 1057 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
917 */ 1058 if (rc) {
918 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK); 1059 dev_printk(KERN_ERR, &pdev->dev,
919 if (rc) { 1060 "64-bit DMA enable failed\n");
920 dev_printk(KERN_ERR, &pdev->dev, 1061 goto out_free;
921 "32-bit DMA enable failed\n"); 1062 }
922 goto out_free; 1063 }
923 } 1064 } else {
924 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); 1065 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
925 if (rc) { 1066 if (rc) {
926 dev_printk(KERN_ERR, &pdev->dev, 1067 dev_printk(KERN_ERR, &pdev->dev,
927 "32-bit consistent DMA enable failed\n"); 1068 "32-bit DMA enable failed\n");
928 goto out_free; 1069 goto out_free;
1070 }
1071 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
1072 if (rc) {
1073 dev_printk(KERN_ERR, &pdev->dev,
1074 "32-bit consistent DMA enable failed\n");
1075 goto out_free;
1076 }
929 } 1077 }
930 1078
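
The new mask negotiation prefers full 64-bit DMA, degrades to 64-bit streaming with 32-bit consistent allocations, and finally to fully 32-bit. The same policy distilled into a hypothetical helper (the patch open-codes it inline):

static int sil24_set_dma_masks(struct pci_dev *pdev)
{
	int rc;

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		/* streaming DMA is 64-bit capable; consistent memory
		 * may still have to fall back to 32 bits */
		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (rc)
			rc = pci_set_consistent_dma_mask(pdev,
							 DMA_32BIT_MASK);
		return rc;
	}

	rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
	if (rc)
		return rc;
	return pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
}
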
931 /* GPIO off */ 1079 /* GPIO off */
932 writel(0, host_base + HOST_FLASH_CMD); 1080 writel(0, host_base + HOST_FLASH_CMD);
933 1081
934 /* Mask interrupts during initialization */ 1082 /* Apply workaround for completion IRQ loss on PCI-X errata */
1083 if (probe_ent->host_flags & SIL24_FLAG_PCIX_IRQ_WOC) {
1084 tmp = readl(host_base + HOST_CTRL);
1085 if (tmp & (HOST_CTRL_TRDY | HOST_CTRL_STOP | HOST_CTRL_DEVSEL))
1086 dev_printk(KERN_INFO, &pdev->dev,
1087 "Applying completion IRQ loss on PCI-X "
1088 "errata fix\n");
1089 else
1090 probe_ent->host_flags &= ~SIL24_FLAG_PCIX_IRQ_WOC;
1091 }
1092
1093 /* clear global reset & mask interrupts during initialization */
935 writel(0, host_base + HOST_CTRL); 1094 writel(0, host_base + HOST_CTRL);
936 1095
937 for (i = 0; i < probe_ent->n_ports; i++) { 1096 for (i = 0; i < probe_ent->n_ports; i++) {
938 void __iomem *port = port_base + i * PORT_REGS_SIZE; 1097 void __iomem *port = port_base + i * PORT_REGS_SIZE;
939 unsigned long portu = (unsigned long)port; 1098 unsigned long portu = (unsigned long)port;
940 u32 tmp;
941 int cnt;
942 1099
943 probe_ent->port[i].cmd_addr = portu + PORT_PRB; 1100 probe_ent->port[i].cmd_addr = portu;
944 probe_ent->port[i].scr_addr = portu + PORT_SCONTROL; 1101 probe_ent->port[i].scr_addr = portu + PORT_SCONTROL;
945 1102
946 ata_std_ports(&probe_ent->port[i]); 1103 ata_std_ports(&probe_ent->port[i]);
@@ -952,18 +1109,20 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
952 tmp = readl(port + PORT_CTRL_STAT); 1109 tmp = readl(port + PORT_CTRL_STAT);
953 if (tmp & PORT_CS_PORT_RST) { 1110 if (tmp & PORT_CS_PORT_RST) {
954 writel(PORT_CS_PORT_RST, port + PORT_CTRL_CLR); 1111 writel(PORT_CS_PORT_RST, port + PORT_CTRL_CLR);
955 readl(port + PORT_CTRL_STAT); /* sync */ 1112 tmp = ata_wait_register(port + PORT_CTRL_STAT,
956 for (cnt = 0; cnt < 10; cnt++) { 1113 PORT_CS_PORT_RST,
957 msleep(10); 1114 PORT_CS_PORT_RST, 10, 100);
958 tmp = readl(port + PORT_CTRL_STAT);
959 if (!(tmp & PORT_CS_PORT_RST))
960 break;
961 }
962 if (tmp & PORT_CS_PORT_RST) 1115 if (tmp & PORT_CS_PORT_RST)
963 dev_printk(KERN_ERR, &pdev->dev, 1116 dev_printk(KERN_ERR, &pdev->dev,
964 "failed to clear port RST\n"); 1117 "failed to clear port RST\n");
965 } 1118 }
966 1119
1120 /* Configure IRQ WoC */
1121 if (probe_ent->host_flags & SIL24_FLAG_PCIX_IRQ_WOC)
1122 writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_STAT);
1123 else
1124 writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_CLR);
1125
967 /* Zero error counters. */ 1126 /* Zero error counters. */
968 writel(0x8000, port + PORT_DECODE_ERR_THRESH); 1127 writel(0x8000, port + PORT_DECODE_ERR_THRESH);
969 writel(0x8000, port + PORT_CRC_ERR_THRESH); 1128 writel(0x8000, port + PORT_CRC_ERR_THRESH);
@@ -972,26 +1131,11 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
972 writel(0x0000, port + PORT_CRC_ERR_CNT); 1131 writel(0x0000, port + PORT_CRC_ERR_CNT);
973 writel(0x0000, port + PORT_HSHK_ERR_CNT); 1132 writel(0x0000, port + PORT_HSHK_ERR_CNT);
974 1133
975 /* FIXME: 32bit activation? */ 1134 /* Always use 64bit activation */
976 writel(0, port + PORT_ACTIVATE_UPPER_ADDR); 1135 writel(PORT_CS_32BIT_ACTV, port + PORT_CTRL_CLR);
977 writel(PORT_CS_32BIT_ACTV, port + PORT_CTRL_STAT);
978
979 /* Configure interrupts */
980 writel(0xffff, port + PORT_IRQ_ENABLE_CLR);
981 writel(PORT_IRQ_COMPLETE | PORT_IRQ_ERROR | PORT_IRQ_SDB_FIS,
982 port + PORT_IRQ_ENABLE_SET);
983
984 /* Clear interrupts */
985 writel(0x0fff0fff, port + PORT_IRQ_STAT);
986 writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_CLR);
987 1136
988 /* Clear port multiplier enable and resume bits */ 1137 /* Clear port multiplier enable and resume bits */
989 writel(PORT_CS_PM_EN | PORT_CS_RESUME, port + PORT_CTRL_CLR); 1138 writel(PORT_CS_PM_EN | PORT_CS_RESUME, port + PORT_CTRL_CLR);
990
991 /* Reset itself */
992 if (__sil24_reset_controller(port))
993 dev_printk(KERN_ERR, &pdev->dev,
994 "failed to reset controller\n");
995 } 1139 }
996 1140
997 /* Turn on interrupts */ 1141 /* Turn on interrupts */
diff --git a/drivers/scsi/sata_sis.c b/drivers/scsi/sata_sis.c
index 728530df2e07..a07e6e525173 100644
--- a/drivers/scsi/sata_sis.c
+++ b/drivers/scsi/sata_sis.c
@@ -43,7 +43,7 @@
43#include <linux/libata.h> 43#include <linux/libata.h>
44 44
45#define DRV_NAME "sata_sis" 45#define DRV_NAME "sata_sis"
46#define DRV_VERSION "0.5" 46#define DRV_VERSION "0.6"
47 47
48enum { 48enum {
49 sis_180 = 0, 49 sis_180 = 0,
@@ -96,6 +96,7 @@ static struct scsi_host_template sis_sht = {
96 .proc_name = DRV_NAME, 96 .proc_name = DRV_NAME,
97 .dma_boundary = ATA_DMA_BOUNDARY, 97 .dma_boundary = ATA_DMA_BOUNDARY,
98 .slave_configure = ata_scsi_slave_config, 98 .slave_configure = ata_scsi_slave_config,
99 .slave_destroy = ata_scsi_slave_destroy,
99 .bios_param = ata_std_bios_param, 100 .bios_param = ata_std_bios_param,
100}; 101};
101 102
@@ -113,6 +114,7 @@ static const struct ata_port_operations sis_ops = {
113 .bmdma_status = ata_bmdma_status, 114 .bmdma_status = ata_bmdma_status,
114 .qc_prep = ata_qc_prep, 115 .qc_prep = ata_qc_prep,
115 .qc_issue = ata_qc_issue_prot, 116 .qc_issue = ata_qc_issue_prot,
117 .data_xfer = ata_pio_data_xfer,
116 .eng_timeout = ata_eng_timeout, 118 .eng_timeout = ata_eng_timeout,
117 .irq_handler = ata_interrupt, 119 .irq_handler = ata_interrupt,
118 .irq_clear = ata_bmdma_irq_clear, 120 .irq_clear = ata_bmdma_irq_clear,
diff --git a/drivers/scsi/sata_svw.c b/drivers/scsi/sata_svw.c
index 53b0d5c0a61f..d9b516836486 100644
--- a/drivers/scsi/sata_svw.c
+++ b/drivers/scsi/sata_svw.c
@@ -54,7 +54,7 @@
54#endif /* CONFIG_PPC_OF */ 54#endif /* CONFIG_PPC_OF */
55 55
56#define DRV_NAME "sata_svw" 56#define DRV_NAME "sata_svw"
57#define DRV_VERSION "1.07" 57#define DRV_VERSION "1.8"
58 58
59enum { 59enum {
60 /* Taskfile registers offsets */ 60 /* Taskfile registers offsets */
@@ -257,7 +257,7 @@ static int k2_sata_proc_info(struct Scsi_Host *shost, char *page, char **start,
257 int len, index; 257 int len, index;
258 258
259 /* Find the ata_port */ 259 /* Find the ata_port */
260 ap = (struct ata_port *) &shost->hostdata[0]; 260 ap = ata_shost_to_port(shost);
261 if (ap == NULL) 261 if (ap == NULL)
262 return 0; 262 return 0;
263 263
@@ -299,6 +299,7 @@ static struct scsi_host_template k2_sata_sht = {
299 .proc_name = DRV_NAME, 299 .proc_name = DRV_NAME,
300 .dma_boundary = ATA_DMA_BOUNDARY, 300 .dma_boundary = ATA_DMA_BOUNDARY,
301 .slave_configure = ata_scsi_slave_config, 301 .slave_configure = ata_scsi_slave_config,
302 .slave_destroy = ata_scsi_slave_destroy,
302#ifdef CONFIG_PPC_OF 303#ifdef CONFIG_PPC_OF
303 .proc_info = k2_sata_proc_info, 304 .proc_info = k2_sata_proc_info,
304#endif 305#endif
@@ -320,6 +321,7 @@ static const struct ata_port_operations k2_sata_ops = {
320 .bmdma_status = ata_bmdma_status, 321 .bmdma_status = ata_bmdma_status,
321 .qc_prep = ata_qc_prep, 322 .qc_prep = ata_qc_prep,
322 .qc_issue = ata_qc_issue_prot, 323 .qc_issue = ata_qc_issue_prot,
324 .data_xfer = ata_mmio_data_xfer,
323 .eng_timeout = ata_eng_timeout, 325 .eng_timeout = ata_eng_timeout,
324 .irq_handler = ata_interrupt, 326 .irq_handler = ata_interrupt,
325 .irq_clear = ata_bmdma_irq_clear, 327 .irq_clear = ata_bmdma_irq_clear,
diff --git a/drivers/scsi/sata_sx4.c b/drivers/scsi/sata_sx4.c
index 4139ad4b1df0..7f864410f7c2 100644
--- a/drivers/scsi/sata_sx4.c
+++ b/drivers/scsi/sata_sx4.c
@@ -46,7 +46,7 @@
46#include "sata_promise.h" 46#include "sata_promise.h"
47 47
48#define DRV_NAME "sata_sx4" 48#define DRV_NAME "sata_sx4"
49#define DRV_VERSION "0.8" 49#define DRV_VERSION "0.9"
50 50
51 51
52enum { 52enum {
@@ -191,6 +191,7 @@ static struct scsi_host_template pdc_sata_sht = {
191 .proc_name = DRV_NAME, 191 .proc_name = DRV_NAME,
192 .dma_boundary = ATA_DMA_BOUNDARY, 192 .dma_boundary = ATA_DMA_BOUNDARY,
193 .slave_configure = ata_scsi_slave_config, 193 .slave_configure = ata_scsi_slave_config,
194 .slave_destroy = ata_scsi_slave_destroy,
194 .bios_param = ata_std_bios_param, 195 .bios_param = ata_std_bios_param,
195}; 196};
196 197
@@ -204,6 +205,7 @@ static const struct ata_port_operations pdc_20621_ops = {
204 .phy_reset = pdc_20621_phy_reset, 205 .phy_reset = pdc_20621_phy_reset,
205 .qc_prep = pdc20621_qc_prep, 206 .qc_prep = pdc20621_qc_prep,
206 .qc_issue = pdc20621_qc_issue_prot, 207 .qc_issue = pdc20621_qc_issue_prot,
208 .data_xfer = ata_mmio_data_xfer,
207 .eng_timeout = pdc_eng_timeout, 209 .eng_timeout = pdc_eng_timeout,
208 .irq_handler = pdc20621_interrupt, 210 .irq_handler = pdc20621_interrupt,
209 .irq_clear = pdc20621_irq_clear, 211 .irq_clear = pdc20621_irq_clear,
@@ -218,7 +220,7 @@ static const struct ata_port_info pdc_port_info[] = {
218 .sht = &pdc_sata_sht, 220 .sht = &pdc_sata_sht,
219 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 221 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
220 ATA_FLAG_SRST | ATA_FLAG_MMIO | 222 ATA_FLAG_SRST | ATA_FLAG_MMIO |
221 ATA_FLAG_NO_ATAPI, 223 ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING,
222 .pio_mask = 0x1f, /* pio0-4 */ 224 .pio_mask = 0x1f, /* pio0-4 */
223 .mwdma_mask = 0x07, /* mwdma0-2 */ 225 .mwdma_mask = 0x07, /* mwdma0-2 */
224 .udma_mask = 0x7f, /* udma0-6 ; FIXME */ 226 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
@@ -833,11 +835,11 @@ static irqreturn_t pdc20621_interrupt (int irq, void *dev_instance, struct pt_re
833 tmp = mask & (1 << i); 835 tmp = mask & (1 << i);
834 VPRINTK("seq %u, port_no %u, ap %p, tmp %x\n", i, port_no, ap, tmp); 836 VPRINTK("seq %u, port_no %u, ap %p, tmp %x\n", i, port_no, ap, tmp);
835 if (tmp && ap && 837 if (tmp && ap &&
836 !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) { 838 !(ap->flags & ATA_FLAG_DISABLED)) {
837 struct ata_queued_cmd *qc; 839 struct ata_queued_cmd *qc;
838 840
839 qc = ata_qc_from_tag(ap, ap->active_tag); 841 qc = ata_qc_from_tag(ap, ap->active_tag);
840 if (qc && (!(qc->tf.ctl & ATA_NIEN))) 842 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
841 handled += pdc20621_host_intr(ap, qc, (i > 4), 843 handled += pdc20621_host_intr(ap, qc, (i > 4),
842 mmio_base); 844 mmio_base);
843 } 845 }
@@ -868,15 +870,16 @@ static void pdc_eng_timeout(struct ata_port *ap)
868 switch (qc->tf.protocol) { 870 switch (qc->tf.protocol) {
869 case ATA_PROT_DMA: 871 case ATA_PROT_DMA:
870 case ATA_PROT_NODATA: 872 case ATA_PROT_NODATA:
871 printk(KERN_ERR "ata%u: command timeout\n", ap->id); 873 ata_port_printk(ap, KERN_ERR, "command timeout\n");
872 qc->err_mask |= __ac_err_mask(ata_wait_idle(ap)); 874 qc->err_mask |= __ac_err_mask(ata_wait_idle(ap));
873 break; 875 break;
874 876
875 default: 877 default:
876 drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000); 878 drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
877 879
878 printk(KERN_ERR "ata%u: unknown timeout, cmd 0x%x stat 0x%x\n", 880 ata_port_printk(ap, KERN_ERR,
879 ap->id, qc->tf.command, drv_stat); 881 "unknown timeout, cmd 0x%x stat 0x%x\n",
882 qc->tf.command, drv_stat);
880 883
881 qc->err_mask |= ac_err_mask(drv_stat); 884 qc->err_mask |= ac_err_mask(drv_stat);
882 break; 885 break;
@@ -1375,10 +1378,6 @@ static int pdc_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *
1375 if (!printed_version++) 1378 if (!printed_version++)
1376 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); 1379 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
1377 1380
1378 /*
1379 * If this driver happens to only be useful on Apple's K2, then
1380 * we should check that here as it has a normal Serverworks ID
1381 */
1382 rc = pci_enable_device(pdev); 1381 rc = pci_enable_device(pdev);
1383 if (rc) 1382 if (rc)
1384 return rc; 1383 return rc;
diff --git a/drivers/scsi/sata_uli.c b/drivers/scsi/sata_uli.c
index 38b52bd3fa3f..e69ba229adca 100644
--- a/drivers/scsi/sata_uli.c
+++ b/drivers/scsi/sata_uli.c
@@ -37,7 +37,7 @@
37#include <linux/libata.h> 37#include <linux/libata.h>
38 38
39#define DRV_NAME "sata_uli" 39#define DRV_NAME "sata_uli"
40#define DRV_VERSION "0.5" 40#define DRV_VERSION "0.6"
41 41
42enum { 42enum {
43 uli_5289 = 0, 43 uli_5289 = 0,
@@ -90,6 +90,7 @@ static struct scsi_host_template uli_sht = {
90 .proc_name = DRV_NAME, 90 .proc_name = DRV_NAME,
91 .dma_boundary = ATA_DMA_BOUNDARY, 91 .dma_boundary = ATA_DMA_BOUNDARY,
92 .slave_configure = ata_scsi_slave_config, 92 .slave_configure = ata_scsi_slave_config,
93 .slave_destroy = ata_scsi_slave_destroy,
93 .bios_param = ata_std_bios_param, 94 .bios_param = ata_std_bios_param,
94}; 95};
95 96
@@ -110,6 +111,7 @@ static const struct ata_port_operations uli_ops = {
110 .bmdma_status = ata_bmdma_status, 111 .bmdma_status = ata_bmdma_status,
111 .qc_prep = ata_qc_prep, 112 .qc_prep = ata_qc_prep,
112 .qc_issue = ata_qc_issue_prot, 113 .qc_issue = ata_qc_issue_prot,
114 .data_xfer = ata_pio_data_xfer,
113 115
114 .eng_timeout = ata_eng_timeout, 116 .eng_timeout = ata_eng_timeout,
115 117
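
The sata_uli changes are the same two-hook pattern the series applies to sata_via and sata_vsc below: each driver's scsi_host_template gains .slave_destroy and each ata_port_operations gains .data_xfer, both wired to stock libata helpers. A sketch of the resulting shape, with the hypothetical mydrv driver standing in for any of the three:

#include <linux/libata.h>
#include <scsi/scsi_host.h>

static struct scsi_host_template mydrv_sht = {
	/* ...existing fields unchanged... */
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,	/* new hook */
	.bios_param		= ata_std_bios_param,
};

static const struct ata_port_operations mydrv_ops = {
	/* ...existing fields unchanged... */
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.data_xfer		= ata_pio_data_xfer,		/* new hook */
	.eng_timeout		= ata_eng_timeout,
};
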
diff --git a/drivers/scsi/sata_via.c b/drivers/scsi/sata_via.c
index 9e7ae4e0db32..c6975c5580ef 100644
--- a/drivers/scsi/sata_via.c
+++ b/drivers/scsi/sata_via.c
@@ -47,7 +47,7 @@
47#include <asm/io.h> 47#include <asm/io.h>
48 48
49#define DRV_NAME "sata_via" 49#define DRV_NAME "sata_via"
50#define DRV_VERSION "1.1" 50#define DRV_VERSION "1.2"
51 51
52enum board_ids_enum { 52enum board_ids_enum {
53 vt6420, 53 vt6420,
@@ -103,6 +103,7 @@ static struct scsi_host_template svia_sht = {
103 .proc_name = DRV_NAME, 103 .proc_name = DRV_NAME,
104 .dma_boundary = ATA_DMA_BOUNDARY, 104 .dma_boundary = ATA_DMA_BOUNDARY,
105 .slave_configure = ata_scsi_slave_config, 105 .slave_configure = ata_scsi_slave_config,
106 .slave_destroy = ata_scsi_slave_destroy,
106 .bios_param = ata_std_bios_param, 107 .bios_param = ata_std_bios_param,
107}; 108};
108 109
@@ -124,6 +125,7 @@ static const struct ata_port_operations svia_sata_ops = {
124 125
125 .qc_prep = ata_qc_prep, 126 .qc_prep = ata_qc_prep,
126 .qc_issue = ata_qc_issue_prot, 127 .qc_issue = ata_qc_issue_prot,
128 .data_xfer = ata_pio_data_xfer,
127 129
128 .eng_timeout = ata_eng_timeout, 130 .eng_timeout = ata_eng_timeout,
129 131
diff --git a/drivers/scsi/sata_vsc.c b/drivers/scsi/sata_vsc.c
index 8a29ce340b47..22ca7b848cf6 100644
--- a/drivers/scsi/sata_vsc.c
+++ b/drivers/scsi/sata_vsc.c
@@ -221,14 +221,21 @@ static irqreturn_t vsc_sata_interrupt (int irq, void *dev_instance,
221 221
222 ap = host_set->ports[i]; 222 ap = host_set->ports[i];
223 223
224 if (ap && !(ap->flags & 224 if (is_vsc_sata_int_err(i, int_status)) {
225 (ATA_FLAG_PORT_DISABLED|ATA_FLAG_NOINTR))) { 225 u32 err_status;
226 printk(KERN_DEBUG "%s: ignoring interrupt(s)\n", __FUNCTION__);
227 err_status = ap ? vsc_sata_scr_read(ap, SCR_ERROR) : 0;
228 vsc_sata_scr_write(ap, SCR_ERROR, err_status);
229 handled++;
230 }
231
232 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
226 struct ata_queued_cmd *qc; 233 struct ata_queued_cmd *qc;
227 234
228 qc = ata_qc_from_tag(ap, ap->active_tag); 235 qc = ata_qc_from_tag(ap, ap->active_tag);
229 if (qc && (!(qc->tf.ctl & ATA_NIEN))) { 236 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
230 handled += ata_host_intr(ap, qc); 237 handled += ata_host_intr(ap, qc);
231 } else if (is_vsc_sata_int_err(i, int_status)) { 238 else if (is_vsc_sata_int_err(i, int_status)) {
232 /* 239 /*
233 * On some chips (i.e. Intel 31244), an error 240 * On some chips (i.e. Intel 31244), an error
234 * interrupt will sneak in at initialization 241 * interrupt will sneak in at initialization
@@ -272,6 +279,7 @@ static struct scsi_host_template vsc_sata_sht = {
272 .proc_name = DRV_NAME, 279 .proc_name = DRV_NAME,
273 .dma_boundary = ATA_DMA_BOUNDARY, 280 .dma_boundary = ATA_DMA_BOUNDARY,
274 .slave_configure = ata_scsi_slave_config, 281 .slave_configure = ata_scsi_slave_config,
282 .slave_destroy = ata_scsi_slave_destroy,
275 .bios_param = ata_std_bios_param, 283 .bios_param = ata_std_bios_param,
276}; 284};
277 285
@@ -290,6 +298,7 @@ static const struct ata_port_operations vsc_sata_ops = {
290 .bmdma_status = ata_bmdma_status, 298 .bmdma_status = ata_bmdma_status,
291 .qc_prep = ata_qc_prep, 299 .qc_prep = ata_qc_prep,
292 .qc_issue = ata_qc_issue_prot, 300 .qc_issue = ata_qc_issue_prot,
301 .data_xfer = ata_pio_data_xfer,
293 .eng_timeout = ata_eng_timeout, 302 .eng_timeout = ata_eng_timeout,
294 .irq_handler = vsc_sata_interrupt, 303 .irq_handler = vsc_sata_interrupt,
295 .irq_clear = ata_bmdma_irq_clear, 304 .irq_clear = ata_bmdma_irq_clear,
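
Note how the reworked vsc_sata_interrupt() acknowledges stray error interrupts: it reads SCR_ERROR and writes the same value straight back. SError bits are write-one-to-clear, so the write-back clears exactly the bits that were latched. A sketch of the idiom, assuming a port whose ops provide SCR accessors (mydrv_ is illustrative; the driver itself uses its vsc_sata_scr_read/vsc_sata_scr_write wrappers):

#include <linux/libata.h>

static void mydrv_ack_serror(struct ata_port *ap)
{
	/* read the latched SError bits ... */
	u32 serr = ap->ops->scr_read(ap, SCR_ERROR);

	/* ... and write them back; RW1C semantics clear them */
	ap->ops->scr_write(ap, SCR_ERROR, serr);
}
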
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 73994e2ac2cb..dae4f08adde0 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -720,6 +720,24 @@ void scsi_init_cmd_from_req(struct scsi_cmnd *cmd, struct scsi_request *sreq)
720static DEFINE_PER_CPU(struct list_head, scsi_done_q); 720static DEFINE_PER_CPU(struct list_head, scsi_done_q);
721 721
722/** 722/**
723 * scsi_req_abort_cmd - Request command recovery for the specified command
724 * @cmd: pointer to the SCSI command of interest
725 *
726 * This function requests that SCSI Core start recovery for the
727 * command by deleting the timer and adding the command to the eh
728 * queue.  It can be called by either LLDDs or SCSI Core.  LLDDs who
729 * implement their own error recovery MAY ignore the timeout event if
730 * they initiated recovery with scsi_req_abort_cmd().
731 */
732void scsi_req_abort_cmd(struct scsi_cmnd *cmd)
733{
734 if (!scsi_delete_timer(cmd))
735 return;
736 scsi_times_out(cmd);
737}
738EXPORT_SYMBOL(scsi_req_abort_cmd);
739
740/**
723 * scsi_done - Enqueue the finished SCSI command into the done queue. 741 * scsi_done - Enqueue the finished SCSI command into the done queue.
724 * @cmd: The SCSI Command for which a low-level device driver (LLDD) gives 742 * @cmd: The SCSI Command for which a low-level device driver (LLDD) gives
725 * ownership back to SCSI Core -- i.e. the LLDD has finished with it. 743 * ownership back to SCSI Core -- i.e. the LLDD has finished with it.
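
The newly exported scsi_req_abort_cmd() lets a driver push a single command into error handling without waiting for its timer to expire. A hedged sketch of a caller, assuming the prototype added by this patch is in scope and with the mydrv_ name purely hypothetical:

#include <scsi/scsi_cmnd.h>

/* a driver path that notices a wedged command and routes it to EH now */
static void mydrv_fail_cmd(struct scsi_cmnd *cmd)
{
	/* scsi_req_abort_cmd() deletes the command timer; if the timer
	 * was still pending, scsi_times_out() queues the command for
	 * EH, and if it had already fired, the call is a no-op. */
	scsi_req_abort_cmd(cmd);
}
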
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 1c75646f9689..346ab72ebf86 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -58,6 +58,28 @@ void scsi_eh_wakeup(struct Scsi_Host *shost)
58} 58}
59 59
60/** 60/**
61 * scsi_schedule_eh - schedule EH for SCSI host
62 * @shost: SCSI host to invoke error handling on.
63 *
64 * Schedule SCSI EH without a specific scmd, i.e. host-wide recovery.
65 **/
66void scsi_schedule_eh(struct Scsi_Host *shost)
67{
68 unsigned long flags;
69
70 spin_lock_irqsave(shost->host_lock, flags);
71
72 if (scsi_host_set_state(shost, SHOST_RECOVERY) == 0 ||
73 scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY) == 0) {
74 shost->host_eh_scheduled++;
75 scsi_eh_wakeup(shost);
76 }
77
78 spin_unlock_irqrestore(shost->host_lock, flags);
79}
80EXPORT_SYMBOL_GPL(scsi_schedule_eh);
81
82/**
61 * scsi_eh_scmd_add - add scsi cmd to error handling. 83 * scsi_eh_scmd_add - add scsi cmd to error handling.
62 * @scmd: scmd to run eh on. 84 * @scmd: scmd to run eh on.
63 * @eh_flag: optional SCSI_EH flag. 85 * @eh_flag: optional SCSI_EH flag.
@@ -1517,7 +1539,7 @@ int scsi_error_handler(void *data)
1517 */ 1539 */
1518 set_current_state(TASK_INTERRUPTIBLE); 1540 set_current_state(TASK_INTERRUPTIBLE);
1519 while (!kthread_should_stop()) { 1541 while (!kthread_should_stop()) {
1520 if (shost->host_failed == 0 || 1542 if ((shost->host_failed == 0 && shost->host_eh_scheduled == 0) ||
1521 shost->host_failed != shost->host_busy) { 1543 shost->host_failed != shost->host_busy) {
1522 SCSI_LOG_ERROR_RECOVERY(1, 1544 SCSI_LOG_ERROR_RECOVERY(1,
1523 printk("Error handler scsi_eh_%d sleeping\n", 1545 printk("Error handler scsi_eh_%d sleeping\n",
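
The reworked sleep test means the EH thread now also wakes for host-wide requests made through scsi_schedule_eh(), but recovery still waits until every command counted busy has failed, i.e. until the host is quiet. Spelling out the negation of the test above as an illustrative helper:

#include <scsi/scsi_host.h>

static inline int mydrv_eh_may_run(struct Scsi_Host *shost)
{
	/* something must want EH: a failed command, or a host-wide
	 * request recorded in host_eh_scheduled */
	if (shost->host_failed == 0 && shost->host_eh_scheduled == 0)
		return 0;

	/* and all commands still counted busy must have failed, so no
	 * live I/O races with recovery */
	return shost->host_failed == shost->host_busy;
}
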
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index faee4757c03a..28befa7bb0e9 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -566,7 +566,7 @@ void scsi_device_unbusy(struct scsi_device *sdev)
566 spin_lock_irqsave(shost->host_lock, flags); 566 spin_lock_irqsave(shost->host_lock, flags);
567 shost->host_busy--; 567 shost->host_busy--;
568 if (unlikely(scsi_host_in_recovery(shost) && 568 if (unlikely(scsi_host_in_recovery(shost) &&
569 shost->host_failed)) 569 (shost->host_failed || shost->host_eh_scheduled)))
570 scsi_eh_wakeup(shost); 570 scsi_eh_wakeup(shost);
571 spin_unlock(shost->host_lock); 571 spin_unlock(shost->host_lock);
572 spin_lock(sdev->request_queue->queue_lock); 572 spin_lock(sdev->request_queue->queue_lock);
diff --git a/drivers/scsi/scsi_transport_api.h b/drivers/scsi/scsi_transport_api.h
new file mode 100644
index 000000000000..934f0e62bb5c
--- /dev/null
+++ b/drivers/scsi/scsi_transport_api.h
@@ -0,0 +1,6 @@
1#ifndef _SCSI_TRANSPORT_API_H
2#define _SCSI_TRANSPORT_API_H
3
4void scsi_schedule_eh(struct Scsi_Host *shost);
5
6#endif /* _SCSI_TRANSPORT_API_H */
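
scsi_transport_api.h is the whole contract: a transport class or LLDD includes it to request host-wide recovery when there is no single scmd to blame. A minimal caller sketch, with the mydrv_ name hypothetical; scsi_schedule_eh() takes the host lock itself with irqsave, so calling from interrupt context is fine:

#include <scsi/scsi_host.h>
#include "scsi_transport_api.h"

static void mydrv_request_host_eh(struct Scsi_Host *shost)
{
	/* moves the host to SHOST_RECOVERY (or SHOST_CANCEL_RECOVERY),
	 * bumps host_eh_scheduled and wakes the EH thread; recovery
	 * actually starts once outstanding commands quiesce */
	scsi_schedule_eh(shost);
}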