-rw-r--r--  drivers/scsi/ahci.c          |  346
-rw-r--r--  drivers/scsi/ata_piix.c      |   14
-rw-r--r--  drivers/scsi/libata-bmdma.c  |  143
-rw-r--r--  drivers/scsi/libata-core.c   | 1857
-rw-r--r--  drivers/scsi/libata-eh.c     | 1311
-rw-r--r--  drivers/scsi/libata-scsi.c   |  272
-rw-r--r--  drivers/scsi/libata.h        |   20
-rw-r--r--  drivers/scsi/pdc_adma.c      |    8
-rw-r--r--  drivers/scsi/sata_mv.c       |   28
-rw-r--r--  drivers/scsi/sata_nv.c       |    4
-rw-r--r--  drivers/scsi/sata_promise.c  |   14
-rw-r--r--  drivers/scsi/sata_qstor.c    |   11
-rw-r--r--  drivers/scsi/sata_sil.c      |   57
-rw-r--r--  drivers/scsi/sata_sil24.c    |  394
-rw-r--r--  drivers/scsi/sata_sx4.c      |   13
-rw-r--r--  drivers/scsi/sata_vsc.c      |   15
-rw-r--r--  drivers/scsi/scsi.c          |   18
-rw-r--r--  drivers/scsi/scsi_error.c    |    3
-rw-r--r--  drivers/scsi/scsi_lib.c      |    2
-rw-r--r--  drivers/scsi/scsi_priv.h     |    1
-rw-r--r--  include/linux/ata.h          |   34
-rw-r--r--  include/linux/libata.h       |  273
-rw-r--r--  include/scsi/scsi_cmnd.h     |    1
-rw-r--r--  include/scsi/scsi_eh.h       |    1
-rw-r--r--  include/scsi/scsi_host.h     |    1
25 files changed, 3680 insertions(+), 1161 deletions(-)
diff --git a/drivers/scsi/ahci.c b/drivers/scsi/ahci.c
index d23f00230a76..45fd71d80128 100644
--- a/drivers/scsi/ahci.c
+++ b/drivers/scsi/ahci.c
@@ -56,12 +56,15 @@ enum {
         AHCI_MAX_SG = 168, /* hardware max is 64K */
         AHCI_DMA_BOUNDARY = 0xffffffff,
         AHCI_USE_CLUSTERING = 0,
-        AHCI_CMD_SLOT_SZ = 32 * 32,
+        AHCI_MAX_CMDS = 32,
+        AHCI_CMD_SZ = 32,
+        AHCI_CMD_SLOT_SZ = AHCI_MAX_CMDS * AHCI_CMD_SZ,
         AHCI_RX_FIS_SZ = 256,
-        AHCI_CMD_TBL_HDR = 0x80,
         AHCI_CMD_TBL_CDB = 0x40,
-        AHCI_CMD_TBL_SZ = AHCI_CMD_TBL_HDR + (AHCI_MAX_SG * 16),
-        AHCI_PORT_PRIV_DMA_SZ = AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_SZ +
+        AHCI_CMD_TBL_HDR_SZ = 0x80,
+        AHCI_CMD_TBL_SZ = AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16),
+        AHCI_CMD_TBL_AR_SZ = AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS,
+        AHCI_PORT_PRIV_DMA_SZ = AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ +
                                 AHCI_RX_FIS_SZ,
         AHCI_IRQ_ON_SG = (1 << 31),
         AHCI_CMD_ATAPI = (1 << 5),
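The new constants replace the single shared command table with one slot entry and one command table per tag. A quick standalone check of the arithmetic (plain C, not driver code; it mirrors the enum above, each tag's table sitting at a fixed offset inside the table array):

```c
#include <stdio.h>
#include <assert.h>

/* mirror of the enum above */
#define AHCI_MAX_SG          168
#define AHCI_MAX_CMDS        32
#define AHCI_CMD_SZ          32
#define AHCI_CMD_SLOT_SZ     (AHCI_MAX_CMDS * AHCI_CMD_SZ)
#define AHCI_RX_FIS_SZ       256
#define AHCI_CMD_TBL_HDR_SZ  0x80
#define AHCI_CMD_TBL_SZ      (AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16))
#define AHCI_CMD_TBL_AR_SZ   (AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS)
#define AHCI_PORT_PRIV_DMA_SZ \
        (AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ + AHCI_RX_FIS_SZ)

int main(void)
{
        /* each tag owns a command table at a fixed offset in the array */
        for (int tag = 0; tag < AHCI_MAX_CMDS; tag++)
                printf("tag %2d: table offset 0x%05x\n",
                       tag, tag * AHCI_CMD_TBL_SZ);

        /* 1 KB of command slots + 32 tables of 2816 bytes + 256 B FIS area */
        assert(AHCI_PORT_PRIV_DMA_SZ == 1024 + 32 * 2816 + 256);
        return 0;
}
```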
@@ -71,6 +74,7 @@ enum {
         AHCI_CMD_CLR_BUSY = (1 << 10),
 
         RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */
+        RX_FIS_UNK = 0x60, /* offset of Unknown FIS data */
 
         board_ahci = 0,
         board_ahci_vt8251 = 1,
@@ -88,8 +92,9 @@ enum {
         HOST_AHCI_EN = (1 << 31), /* AHCI enabled */
 
         /* HOST_CAP bits */
-        HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */
         HOST_CAP_CLO = (1 << 24), /* Command List Override support */
+        HOST_CAP_NCQ = (1 << 30), /* Native Command Queueing */
+        HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */
 
         /* registers for each SATA port */
         PORT_LST_ADDR = 0x00, /* command list DMA addr */
@@ -128,15 +133,16 @@ enum {
         PORT_IRQ_PIOS_FIS = (1 << 1), /* PIO Setup FIS rx'd */
         PORT_IRQ_D2H_REG_FIS = (1 << 0), /* D2H Register FIS rx'd */
 
-        PORT_IRQ_FATAL = PORT_IRQ_TF_ERR |
-                         PORT_IRQ_HBUS_ERR |
-                         PORT_IRQ_HBUS_DATA_ERR |
-                         PORT_IRQ_IF_ERR,
-        DEF_PORT_IRQ = PORT_IRQ_FATAL | PORT_IRQ_PHYRDY |
-                       PORT_IRQ_CONNECT | PORT_IRQ_SG_DONE |
-                       PORT_IRQ_UNK_FIS | PORT_IRQ_SDB_FIS |
-                       PORT_IRQ_DMAS_FIS | PORT_IRQ_PIOS_FIS |
-                       PORT_IRQ_D2H_REG_FIS,
+        PORT_IRQ_FREEZE = PORT_IRQ_HBUS_ERR |
+                          PORT_IRQ_IF_ERR |
+                          PORT_IRQ_CONNECT |
+                          PORT_IRQ_UNK_FIS,
+        PORT_IRQ_ERROR = PORT_IRQ_FREEZE |
+                         PORT_IRQ_TF_ERR |
+                         PORT_IRQ_HBUS_DATA_ERR,
+        DEF_PORT_IRQ = PORT_IRQ_ERROR | PORT_IRQ_SG_DONE |
+                       PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS |
+                       PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS,
 
         /* PORT_CMD bits */
         PORT_CMD_ATAPI = (1 << 24), /* Device is ATAPI */
@@ -185,7 +191,6 @@ struct ahci_port_priv {
         dma_addr_t cmd_slot_dma;
         void *cmd_tbl;
         dma_addr_t cmd_tbl_dma;
-        struct ahci_sg *cmd_tbl_sg;
         void *rx_fis;
         dma_addr_t rx_fis_dma;
 };
@@ -197,13 +202,15 @@ static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
 static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *regs);
 static int ahci_probe_reset(struct ata_port *ap, unsigned int *classes);
 static void ahci_irq_clear(struct ata_port *ap);
-static void ahci_eng_timeout(struct ata_port *ap);
 static int ahci_port_start(struct ata_port *ap);
 static void ahci_port_stop(struct ata_port *ap);
 static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
 static void ahci_qc_prep(struct ata_queued_cmd *qc);
 static u8 ahci_check_status(struct ata_port *ap);
-static inline int ahci_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc);
+static void ahci_freeze(struct ata_port *ap);
+static void ahci_thaw(struct ata_port *ap);
+static void ahci_error_handler(struct ata_port *ap);
+static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
 static void ahci_remove_one (struct pci_dev *pdev);
 
 static struct scsi_host_template ahci_sht = {
@@ -211,7 +218,8 @@ static struct scsi_host_template ahci_sht = {
         .name = DRV_NAME,
         .ioctl = ata_scsi_ioctl,
         .queuecommand = ata_scsi_queuecmd,
-        .can_queue = ATA_DEF_QUEUE,
+        .change_queue_depth = ata_scsi_change_queue_depth,
+        .can_queue = AHCI_MAX_CMDS - 1,
         .this_id = ATA_SHT_THIS_ID,
         .sg_tablesize = AHCI_MAX_SG,
         .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
@@ -237,14 +245,18 @@ static const struct ata_port_operations ahci_ops = {
         .qc_prep = ahci_qc_prep,
         .qc_issue = ahci_qc_issue,
 
-        .eng_timeout = ahci_eng_timeout,
-
         .irq_handler = ahci_interrupt,
         .irq_clear = ahci_irq_clear,
 
         .scr_read = ahci_scr_read,
         .scr_write = ahci_scr_write,
 
+        .freeze = ahci_freeze,
+        .thaw = ahci_thaw,
+
+        .error_handler = ahci_error_handler,
+        .post_internal_cmd = ahci_post_internal_cmd,
+
         .port_start = ahci_port_start,
         .port_stop = ahci_port_stop,
 };
@@ -390,8 +402,6 @@ static int ahci_port_start(struct ata_port *ap)
         pp->cmd_tbl = mem;
         pp->cmd_tbl_dma = mem_dma;
 
-        pp->cmd_tbl_sg = mem + AHCI_CMD_TBL_HDR;
-
         ap->private_data = pp;
 
         if (hpriv->cap & HOST_CAP_64)
@@ -524,12 +534,17 @@ static unsigned int ahci_dev_classify(struct ata_port *ap)
         return ata_dev_classify(&tf);
 }
 
-static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, u32 opts)
+static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
+                               u32 opts)
 {
-        pp->cmd_slot[0].opts = cpu_to_le32(opts);
-        pp->cmd_slot[0].status = 0;
-        pp->cmd_slot[0].tbl_addr = cpu_to_le32(pp->cmd_tbl_dma & 0xffffffff);
-        pp->cmd_slot[0].tbl_addr_hi = cpu_to_le32((pp->cmd_tbl_dma >> 16) >> 16);
+        dma_addr_t cmd_tbl_dma;
+
+        cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ;
+
+        pp->cmd_slot[tag].opts = cpu_to_le32(opts);
+        pp->cmd_slot[tag].status = 0;
+        pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff);
+        pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
 }
 
 static int ahci_clo(struct ata_port *ap)
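Note the `(cmd_tbl_dma >> 16) >> 16` in the high word: shifting a `dma_addr_t` right by 32 in one step would be undefined behavior when the type is only 32 bits wide, so the value is shifted twice by 16. A minimal standalone illustration (assuming 64-bit DMA addresses purely for the demo):

```c
#include <stdint.h>
#include <stdio.h>

typedef uint64_t dma_addr_t;    /* assume 64-bit DMA for this demo */

int main(void)
{
        dma_addr_t cmd_tbl_dma = 0x12345678abcdef00ULL;

        /* low half, then high half via two 16-bit shifts: a single
         * ">> 32" would be undefined if dma_addr_t were 32 bits wide */
        uint32_t lo = (uint32_t)(cmd_tbl_dma & 0xffffffff);
        uint32_t hi = (uint32_t)((cmd_tbl_dma >> 16) >> 16);

        printf("tbl_addr=0x%08x tbl_addr_hi=0x%08x\n", lo, hi);
        return 0;
}
```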
@@ -567,7 +582,7 @@ static int ahci_softreset(struct ata_port *ap, unsigned int *class)
 
         DPRINTK("ENTER\n");
 
-        if (!sata_dev_present(ap)) {
+        if (ata_port_offline(ap)) {
                 DPRINTK("PHY reports no device\n");
                 *class = ATA_DEV_NONE;
                 return 0;
@@ -597,11 +612,12 @@ static int ahci_softreset(struct ata_port *ap, unsigned int *class)
         /* restart engine */
         ahci_start_engine(ap);
 
-        ata_tf_init(ap, &tf, 0);
+        ata_tf_init(ap->device, &tf);
         fis = pp->cmd_tbl;
 
         /* issue the first D2H Register FIS */
-        ahci_fill_cmd_slot(pp, cmd_fis_len | AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY);
+        ahci_fill_cmd_slot(pp, 0,
+                           cmd_fis_len | AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY);
 
         tf.ctl |= ATA_SRST;
         ata_tf_to_fis(&tf, fis, 0);
@@ -620,7 +636,7 @@ static int ahci_softreset(struct ata_port *ap, unsigned int *class)
         msleep(1);
 
         /* issue the second D2H Register FIS */
-        ahci_fill_cmd_slot(pp, cmd_fis_len);
+        ahci_fill_cmd_slot(pp, 0, cmd_fis_len);
 
         tf.ctl &= ~ATA_SRST;
         ata_tf_to_fis(&tf, fis, 0);
@@ -640,7 +656,7 @@ static int ahci_softreset(struct ata_port *ap, unsigned int *class)
         msleep(150);
 
         *class = ATA_DEV_NONE;
-        if (sata_dev_present(ap)) {
+        if (ata_port_online(ap)) {
                 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
                         rc = -EIO;
                         reason = "device not ready";
@@ -655,8 +671,7 @@ static int ahci_softreset(struct ata_port *ap, unsigned int *class)
  fail_restart:
         ahci_start_engine(ap);
  fail:
-        printk(KERN_ERR "ata%u: softreset failed (%s)\n",
-               ap->id, reason);
+        ata_port_printk(ap, KERN_ERR, "softreset failed (%s)\n", reason);
         return rc;
 }
 
@@ -670,7 +685,7 @@ static int ahci_hardreset(struct ata_port *ap, unsigned int *class)
         rc = sata_std_hardreset(ap, class);
         ahci_start_engine(ap);
 
-        if (rc == 0)
+        if (rc == 0 && ata_port_online(ap))
                 *class = ahci_dev_classify(ap);
         if (*class == ATA_DEV_UNKNOWN)
                 *class = ATA_DEV_NONE;
@@ -726,9 +741,8 @@ static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
         ata_tf_from_fis(d2h_fis, tf);
 }
 
-static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc)
+static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
 {
-        struct ahci_port_priv *pp = qc->ap->private_data;
         struct scatterlist *sg;
         struct ahci_sg *ahci_sg;
         unsigned int n_sg = 0;
@@ -738,7 +752,7 @@ static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc)
         /*
          * Next, the S/G list.
          */
-        ahci_sg = pp->cmd_tbl_sg;
+        ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
         ata_for_each_sg(sg, qc) {
                 dma_addr_t addr = sg_dma_address(sg);
                 u32 sg_len = sg_dma_len(sg);
@@ -759,6 +773,7 @@ static void ahci_qc_prep(struct ata_queued_cmd *qc)
         struct ata_port *ap = qc->ap;
         struct ahci_port_priv *pp = ap->private_data;
         int is_atapi = is_atapi_taskfile(&qc->tf);
+        void *cmd_tbl;
         u32 opts;
         const u32 cmd_fis_len = 5; /* five dwords */
         unsigned int n_elem;
@@ -767,16 +782,17 @@ static void ahci_qc_prep(struct ata_queued_cmd *qc)
          * Fill in command table information.  First, the header,
          * a SATA Register - Host to Device command FIS.
          */
-        ata_tf_to_fis(&qc->tf, pp->cmd_tbl, 0);
+        cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;
+
+        ata_tf_to_fis(&qc->tf, cmd_tbl, 0);
         if (is_atapi) {
-                memset(pp->cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
-                memcpy(pp->cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb,
-                       qc->dev->cdb_len);
+                memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
+                memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
         }
 
         n_elem = 0;
         if (qc->flags & ATA_QCFLAG_DMAMAP)
-                n_elem = ahci_fill_sg(qc);
+                n_elem = ahci_fill_sg(qc, cmd_tbl);
 
         /*
          * Fill in command slot information.
@@ -787,112 +803,123 @@ static void ahci_qc_prep(struct ata_queued_cmd *qc)
         if (is_atapi)
                 opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
 
-        ahci_fill_cmd_slot(pp, opts);
+        ahci_fill_cmd_slot(pp, qc->tag, opts);
 }
 
-static void ahci_restart_port(struct ata_port *ap, u32 irq_stat)
+static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
 {
-        void __iomem *mmio = ap->host_set->mmio_base;
-        void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
-        u32 tmp;
+        struct ahci_port_priv *pp = ap->private_data;
+        struct ata_eh_info *ehi = &ap->eh_info;
+        unsigned int err_mask = 0, action = 0;
+        struct ata_queued_cmd *qc;
+        u32 serror;
 
-        if ((ap->device[0].class != ATA_DEV_ATAPI) ||
-            ((irq_stat & PORT_IRQ_TF_ERR) == 0))
-                printk(KERN_WARNING "ata%u: port reset, "
-                       "p_is %x is %x pis %x cmd %x tf %x ss %x se %x\n",
-                       ap->id,
-                       irq_stat,
-                       readl(mmio + HOST_IRQ_STAT),
-                       readl(port_mmio + PORT_IRQ_STAT),
-                       readl(port_mmio + PORT_CMD),
-                       readl(port_mmio + PORT_TFDATA),
-                       readl(port_mmio + PORT_SCR_STAT),
-                       readl(port_mmio + PORT_SCR_ERR));
-
-        /* stop DMA */
-        ahci_stop_engine(ap);
+        ata_ehi_clear_desc(ehi);
 
-        /* clear SATA phy error, if any */
-        tmp = readl(port_mmio + PORT_SCR_ERR);
-        writel(tmp, port_mmio + PORT_SCR_ERR);
+        /* AHCI needs SError cleared; otherwise, it might lock up */
+        serror = ahci_scr_read(ap, SCR_ERROR);
+        ahci_scr_write(ap, SCR_ERROR, serror);
 
-        /* if DRQ/BSY is set, device needs to be reset.
-         * if so, issue COMRESET
-         */
-        tmp = readl(port_mmio + PORT_TFDATA);
-        if (tmp & (ATA_BUSY | ATA_DRQ)) {
-                writel(0x301, port_mmio + PORT_SCR_CTL);
-                readl(port_mmio + PORT_SCR_CTL); /* flush */
-                udelay(10);
-                writel(0x300, port_mmio + PORT_SCR_CTL);
-                readl(port_mmio + PORT_SCR_CTL); /* flush */
+        /* analyze @irq_stat */
+        ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);
+
+        if (irq_stat & PORT_IRQ_TF_ERR)
+                err_mask |= AC_ERR_DEV;
+
+        if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) {
+                err_mask |= AC_ERR_HOST_BUS;
+                action |= ATA_EH_SOFTRESET;
         }
 
-        /* re-start DMA */
-        ahci_start_engine(ap);
-}
+        if (irq_stat & PORT_IRQ_IF_ERR) {
+                err_mask |= AC_ERR_ATA_BUS;
+                action |= ATA_EH_SOFTRESET;
+                ata_ehi_push_desc(ehi, ", interface fatal error");
+        }
 
-static void ahci_eng_timeout(struct ata_port *ap)
-{
-        struct ata_host_set *host_set = ap->host_set;
-        void __iomem *mmio = host_set->mmio_base;
-        void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
-        struct ata_queued_cmd *qc;
-        unsigned long flags;
+        if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) {
+                err_mask |= AC_ERR_ATA_BUS;
+                action |= ATA_EH_SOFTRESET;
+                ata_ehi_push_desc(ehi, ", %s", irq_stat & PORT_IRQ_CONNECT ?
+                                  "connection status changed" : "PHY RDY changed");
+        }
 
-        printk(KERN_WARNING "ata%u: handling error/timeout\n", ap->id);
+        if (irq_stat & PORT_IRQ_UNK_FIS) {
+                u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK);
 
-        spin_lock_irqsave(&host_set->lock, flags);
+                err_mask |= AC_ERR_HSM;
+                action |= ATA_EH_SOFTRESET;
+                ata_ehi_push_desc(ehi, ", unknown FIS %08x %08x %08x %08x",
+                                  unk[0], unk[1], unk[2], unk[3]);
+        }
 
-        ahci_restart_port(ap, readl(port_mmio + PORT_IRQ_STAT));
-        qc = ata_qc_from_tag(ap, ap->active_tag);
-        qc->err_mask |= AC_ERR_TIMEOUT;
+        /* okay, let's hand over to EH */
+        ehi->serror |= serror;
+        ehi->action |= action;
 
-        spin_unlock_irqrestore(&host_set->lock, flags);
+        qc = ata_qc_from_tag(ap, ap->active_tag);
+        if (qc)
+                qc->err_mask |= err_mask;
+        else
+                ehi->err_mask |= err_mask;
 
-        ata_eh_qc_complete(qc);
+        if (irq_stat & PORT_IRQ_FREEZE)
+                ata_port_freeze(ap);
+        else
+                ata_port_abort(ap);
 }
 
-static inline int ahci_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
+static void ahci_host_intr(struct ata_port *ap)
 {
         void __iomem *mmio = ap->host_set->mmio_base;
         void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
-        u32 status, serr, ci;
-
-        serr = readl(port_mmio + PORT_SCR_ERR);
-        writel(serr, port_mmio + PORT_SCR_ERR);
+        struct ata_eh_info *ehi = &ap->eh_info;
+        u32 status, qc_active;
+        int rc;
 
         status = readl(port_mmio + PORT_IRQ_STAT);
         writel(status, port_mmio + PORT_IRQ_STAT);
 
-        ci = readl(port_mmio + PORT_CMD_ISSUE);
-        if (likely((ci & 0x1) == 0)) {
-                if (qc) {
-                        WARN_ON(qc->err_mask);
-                        ata_qc_complete(qc);
-                        qc = NULL;
-                }
+        if (unlikely(status & PORT_IRQ_ERROR)) {
+                ahci_error_intr(ap, status);
+                return;
         }
 
-        if (status & PORT_IRQ_FATAL) {
-                unsigned int err_mask;
-                if (status & PORT_IRQ_TF_ERR)
-                        err_mask = AC_ERR_DEV;
-                else if (status & PORT_IRQ_IF_ERR)
-                        err_mask = AC_ERR_ATA_BUS;
-                else
-                        err_mask = AC_ERR_HOST_BUS;
-
-                /* command processing has stopped due to error; restart */
-                ahci_restart_port(ap, status);
-
-                if (qc) {
-                        qc->err_mask |= err_mask;
-                        ata_qc_complete(qc);
-                }
+        if (ap->sactive)
+                qc_active = readl(port_mmio + PORT_SCR_ACT);
+        else
+                qc_active = readl(port_mmio + PORT_CMD_ISSUE);
+
+        rc = ata_qc_complete_multiple(ap, qc_active, NULL);
+        if (rc > 0)
+                return;
+        if (rc < 0) {
+                ehi->err_mask |= AC_ERR_HSM;
+                ehi->action |= ATA_EH_SOFTRESET;
+                ata_port_freeze(ap);
+                return;
+        }
+
+        /* hmmm... a spurious interrupt */
+
+        /* some devices send D2H reg with I bit set during NCQ command phase */
+        if (ap->sactive && status & PORT_IRQ_D2H_REG_FIS)
+                return;
+
+        /* ignore interim PIO setup fis interrupts */
+        if (ata_tag_valid(ap->active_tag)) {
+                struct ata_queued_cmd *qc =
+                        ata_qc_from_tag(ap, ap->active_tag);
+
+                if (qc && qc->tf.protocol == ATA_PROT_PIO &&
+                    (status & PORT_IRQ_PIOS_FIS))
+                        return;
         }
 
-        return 1;
+        if (ata_ratelimit())
+                ata_port_printk(ap, KERN_INFO, "spurious interrupt "
+                                "(irq_stat 0x%x active_tag %d sactive 0x%x)\n",
+                                status, ap->active_tag, ap->sactive);
 }
 
 static void ahci_irq_clear(struct ata_port *ap)
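The rewritten interrupt path no longer completes one active qc; it snapshots a hardware bitmask (SActive while NCQ commands are in flight, CI otherwise) and lets ata_qc_complete_multiple() finish every tag whose bit dropped. A hedged userspace sketch of that diffing step (simplified; the real helper also takes a completion callback and lives in the libata core):

```c
#include <stdint.h>
#include <stdio.h>

static uint32_t qc_active = 0x0000002d;  /* tags we believe are in flight */

/* complete every tag whose bit is set in qc_active but cleared in the
 * mask just read from SActive/CI; returns the number completed, or -1
 * if the hardware claims a tag that was never issued */
static int complete_multiple(uint32_t hw_active)
{
        uint32_t done_mask = qc_active ^ hw_active;
        int nr_done = 0;

        if (hw_active & done_mask)       /* bit set in hw but not in sw */
                return -1;

        for (int tag = 0; done_mask; tag++, done_mask >>= 1)
                if (done_mask & 1) {
                        printf("completing tag %d\n", tag);
                        nr_done++;
                }

        qc_active = hw_active;
        return nr_done;
}

int main(void)
{
        /* tags 0, 2, 3, 5 were issued; hardware says 0 and 5 remain */
        return complete_multiple(0x00000021) < 0;
}
```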
@@ -900,7 +927,7 @@ static void ahci_irq_clear(struct ata_port *ap)
         /* TODO */
 }
 
-static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
+static irqreturn_t ahci_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
 {
         struct ata_host_set *host_set = dev_instance;
         struct ahci_host_priv *hpriv;
@@ -929,14 +956,7 @@ static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *
 
                 ap = host_set->ports[i];
                 if (ap) {
-                        struct ata_queued_cmd *qc;
-                        qc = ata_qc_from_tag(ap, ap->active_tag);
-                        if (!ahci_host_intr(ap, qc))
-                                if (ata_ratelimit())
-                                        dev_printk(KERN_WARNING, host_set->dev,
-                                                   "unhandled interrupt on port %u\n",
-                                                   i);
-
+                        ahci_host_intr(ap);
                         VPRINTK("port %u\n", i);
                 } else {
                         VPRINTK("port %u (no irq)\n", i);
@@ -953,7 +973,7 @@ static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *
                 handled = 1;
         }
 
         spin_unlock(&host_set->lock);
 
         VPRINTK("EXIT\n");
 
@@ -965,12 +985,64 @@ static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
         struct ata_port *ap = qc->ap;
         void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
 
-        writel(1, port_mmio + PORT_CMD_ISSUE);
+        if (qc->tf.protocol == ATA_PROT_NCQ)
+                writel(1 << qc->tag, port_mmio + PORT_SCR_ACT);
+        writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE);
         readl(port_mmio + PORT_CMD_ISSUE); /* flush */
 
         return 0;
 }
 
+static void ahci_freeze(struct ata_port *ap)
+{
+        void __iomem *mmio = ap->host_set->mmio_base;
+        void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
+
+        /* turn IRQ off */
+        writel(0, port_mmio + PORT_IRQ_MASK);
+}
+
+static void ahci_thaw(struct ata_port *ap)
+{
+        void __iomem *mmio = ap->host_set->mmio_base;
+        void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
+        u32 tmp;
+
+        /* clear IRQ */
+        tmp = readl(port_mmio + PORT_IRQ_STAT);
+        writel(tmp, port_mmio + PORT_IRQ_STAT);
+        writel(1 << ap->id, mmio + HOST_IRQ_STAT);
+
+        /* turn IRQ back on */
+        writel(DEF_PORT_IRQ, port_mmio + PORT_IRQ_MASK);
+}
+
+static void ahci_error_handler(struct ata_port *ap)
+{
+        if (!(ap->flags & ATA_FLAG_FROZEN)) {
+                /* restart engine */
+                ahci_stop_engine(ap);
+                ahci_start_engine(ap);
+        }
+
+        /* perform recovery */
+        ata_do_eh(ap, ahci_softreset, ahci_hardreset, ahci_postreset);
+}
+
+static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
+{
+        struct ata_port *ap = qc->ap;
+
+        if (qc->flags & ATA_QCFLAG_FAILED)
+                qc->err_mask |= AC_ERR_OTHER;
+
+        if (qc->err_mask) {
+                /* make DMA engine forget about the failed command */
+                ahci_stop_engine(ap);
+                ahci_start_engine(ap);
+        }
+}
+
 static void ahci_setup_port(struct ata_ioports *port, unsigned long base,
                             unsigned int port_idx)
 {
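ahci_freeze()/ahci_thaw() implement the new EH contract purely with the port IRQ mask: freezing masks everything, thawing acks whatever arrived in the meantime and restores DEF_PORT_IRQ. A toy model of that contract (standalone C; the register indices and mask value are illustrative, not the AHCI layout):

```c
#include <stdint.h>
#include <stdio.h>

/* toy port register file standing in for the mmio window */
enum { PORT_IRQ_STAT = 0, PORT_IRQ_MASK = 1 };
#define DEF_PORT_IRQ 0x7f      /* illustrative value, not the real mask */

static uint32_t port_regs[2];

/* freeze: mask all port interrupts; a frozen port only makes
 * progress through the error handler */
static void port_freeze(void)
{
        port_regs[PORT_IRQ_MASK] = 0;
}

/* thaw: ack anything that arrived while frozen, then re-enable */
static void port_thaw(void)
{
        port_regs[PORT_IRQ_STAT] = 0;   /* write-1-to-clear in real hw */
        port_regs[PORT_IRQ_MASK] = DEF_PORT_IRQ;
}

int main(void)
{
        port_freeze();
        port_regs[PORT_IRQ_STAT] = 0x40;  /* stray event while frozen */
        port_thaw();
        printf("mask=0x%x stat=0x%x\n",
               port_regs[PORT_IRQ_MASK], port_regs[PORT_IRQ_STAT]);
        return 0;
}
```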
@@ -1115,9 +1187,6 @@ static int ahci_host_init(struct ata_probe_ent *probe_ent)
                 writel(tmp, port_mmio + PORT_IRQ_STAT);
 
                 writel(1 << i, mmio + HOST_IRQ_STAT);
-
-                /* set irq mask (enables interrupts) */
-                writel(DEF_PORT_IRQ, port_mmio + PORT_IRQ_MASK);
         }
 
         tmp = readl(mmio + HOST_CTL);
@@ -1215,6 +1284,8 @@ static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 
         VPRINTK("ENTER\n");
 
+        WARN_ON(ATA_MAX_QUEUE > AHCI_MAX_CMDS);
+
         if (!printed_version++)
                 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
 
@@ -1282,6 +1353,9 @@ static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
         if (rc)
                 goto err_out_hpriv;
 
+        if (hpriv->cap & HOST_CAP_NCQ)
+                probe_ent->host_flags |= ATA_FLAG_NCQ;
+
         ahci_print_info(probe_ent);
 
         /* FIXME: check ata_device_add return value */
diff --git a/drivers/scsi/ata_piix.c b/drivers/scsi/ata_piix.c
index 62dabf74188e..e3184a77a600 100644
--- a/drivers/scsi/ata_piix.c
+++ b/drivers/scsi/ata_piix.c
@@ -243,7 +243,10 @@ static const struct ata_port_operations piix_pata_ops = {
         .qc_prep = ata_qc_prep,
         .qc_issue = ata_qc_issue_prot,
 
-        .eng_timeout = ata_eng_timeout,
+        .freeze = ata_bmdma_freeze,
+        .thaw = ata_bmdma_thaw,
+        .error_handler = ata_bmdma_error_handler,
+        .post_internal_cmd = ata_bmdma_post_internal_cmd,
 
         .irq_handler = ata_interrupt,
         .irq_clear = ata_bmdma_irq_clear,
@@ -271,7 +274,10 @@ static const struct ata_port_operations piix_sata_ops = {
         .qc_prep = ata_qc_prep,
         .qc_issue = ata_qc_issue_prot,
 
-        .eng_timeout = ata_eng_timeout,
+        .freeze = ata_bmdma_freeze,
+        .thaw = ata_bmdma_thaw,
+        .error_handler = ata_bmdma_error_handler,
+        .post_internal_cmd = ata_bmdma_post_internal_cmd,
 
         .irq_handler = ata_interrupt,
         .irq_clear = ata_bmdma_irq_clear,
@@ -484,7 +490,7 @@ static int piix_pata_probe_reset(struct ata_port *ap, unsigned int *classes)
         struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
 
         if (!pci_test_config_bits(pdev, &piix_enable_bits[ap->hard_port_no])) {
-                printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id);
+                ata_port_printk(ap, KERN_INFO, "port disabled. ignoring.\n");
                 return 0;
         }
 
@@ -565,7 +571,7 @@ static unsigned int piix_sata_probe (struct ata_port *ap)
 static int piix_sata_probe_reset(struct ata_port *ap, unsigned int *classes)
 {
         if (!piix_sata_probe(ap)) {
-                printk(KERN_INFO "ata%u: SATA port has no device.\n", ap->id);
+                ata_port_printk(ap, KERN_INFO, "SATA port has no device.\n");
                 return 0;
         }
 
diff --git a/drivers/scsi/libata-bmdma.c b/drivers/scsi/libata-bmdma.c
index 835dff0bafdc..6d30d2c52960 100644
--- a/drivers/scsi/libata-bmdma.c
+++ b/drivers/scsi/libata-bmdma.c
@@ -652,6 +652,149 @@ void ata_bmdma_stop(struct ata_queued_cmd *qc)
         ata_altstatus(ap); /* dummy read */
 }
 
+/**
+ * ata_bmdma_freeze - Freeze BMDMA controller port
+ * @ap: port to freeze
+ *
+ * Freeze BMDMA controller port.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+void ata_bmdma_freeze(struct ata_port *ap)
+{
+        struct ata_ioports *ioaddr = &ap->ioaddr;
+
+        ap->ctl |= ATA_NIEN;
+        ap->last_ctl = ap->ctl;
+
+        if (ap->flags & ATA_FLAG_MMIO)
+                writeb(ap->ctl, (void __iomem *)ioaddr->ctl_addr);
+        else
+                outb(ap->ctl, ioaddr->ctl_addr);
+}
+
+/**
+ * ata_bmdma_thaw - Thaw BMDMA controller port
+ * @ap: port to thaw
+ *
+ * Thaw BMDMA controller port.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+void ata_bmdma_thaw(struct ata_port *ap)
+{
+        /* clear & re-enable interrupts */
+        ata_chk_status(ap);
+        ap->ops->irq_clear(ap);
+        if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
+                ata_irq_on(ap);
+}
+
+/**
+ * ata_bmdma_drive_eh - Perform EH with given methods for BMDMA controller
+ * @ap: port to handle error for
+ * @softreset: softreset method (can be NULL)
+ * @hardreset: hardreset method (can be NULL)
+ * @postreset: postreset method (can be NULL)
+ *
+ * Handle error for ATA BMDMA controller.  It can handle both
+ * PATA and SATA controllers.  Many controllers should be able to
+ * use this EH as-is or with some added handling before and
+ * after.
+ *
+ * This function is intended to be used for constructing
+ * ->error_handler callback by low level drivers.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep)
+ */
+void ata_bmdma_drive_eh(struct ata_port *ap, ata_reset_fn_t softreset,
+                        ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
+{
+        struct ata_host_set *host_set = ap->host_set;
+        struct ata_eh_context *ehc = &ap->eh_context;
+        struct ata_queued_cmd *qc;
+        unsigned long flags;
+        int thaw = 0;
+
+        qc = __ata_qc_from_tag(ap, ap->active_tag);
+        if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
+                qc = NULL;
+
+        /* reset PIO HSM and stop DMA engine */
+        spin_lock_irqsave(&host_set->lock, flags);
+
+        ap->hsm_task_state = HSM_ST_IDLE;
+
+        if (qc && (qc->tf.protocol == ATA_PROT_DMA ||
+                   qc->tf.protocol == ATA_PROT_ATAPI_DMA)) {
+                u8 host_stat;
+
+                host_stat = ata_bmdma_status(ap);
+
+                ata_ehi_push_desc(&ehc->i, "BMDMA stat 0x%x", host_stat);
+
+                /* BMDMA controllers indicate host bus error by
+                 * setting DMA_ERR bit and timing out.  As it wasn't
+                 * really a timeout event, adjust error mask and
+                 * cancel frozen state.
+                 */
+                if (qc->err_mask == AC_ERR_TIMEOUT && host_stat & ATA_DMA_ERR) {
+                        qc->err_mask = AC_ERR_HOST_BUS;
+                        thaw = 1;
+                }
+
+                ap->ops->bmdma_stop(qc);
+        }
+
+        ata_altstatus(ap);
+        ata_chk_status(ap);
+        ap->ops->irq_clear(ap);
+
+        spin_unlock_irqrestore(&host_set->lock, flags);
+
+        if (thaw)
+                ata_eh_thaw_port(ap);
+
+        /* PIO and DMA engines have been stopped, perform recovery */
+        ata_do_eh(ap, softreset, hardreset, postreset);
+}
+
+/**
+ * ata_bmdma_error_handler - Stock error handler for BMDMA controller
+ * @ap: port to handle error for
+ *
+ * Stock error handler for BMDMA controller.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep)
+ */
+void ata_bmdma_error_handler(struct ata_port *ap)
+{
+        ata_reset_fn_t hardreset;
+
+        hardreset = NULL;
+        if (sata_scr_valid(ap))
+                hardreset = sata_std_hardreset;
+
+        ata_bmdma_drive_eh(ap, ata_std_softreset, hardreset, ata_std_postreset);
+}
+
+/**
+ * ata_bmdma_post_internal_cmd - Stock post_internal_cmd for
+ * BMDMA controller
+ * @qc: internal command to clean up
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep)
+ */
+void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc)
+{
+        ata_bmdma_stop(qc);
+}
+
 #ifdef CONFIG_PCI
 static struct ata_probe_ent *
 ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
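One detail worth calling out in ata_bmdma_drive_eh(): a command that "timed out" with DMA_ERR set in the BMDMA status register was really a host bus error, so the error mask is rewritten and the port is thawed rather than left frozen. A standalone sketch of just that reclassification (the flag values here are illustrative, not libata's):

```c
#include <stdint.h>
#include <stdio.h>

#define AC_ERR_TIMEOUT   0x01   /* illustrative flag values */
#define AC_ERR_HOST_BUS  0x02
#define ATA_DMA_ERR      0x04

/* mirror of the logic above: a pure "timeout" whose BMDMA status
 * shows DMA_ERR was really a host bus error, and the port does not
 * need to stay frozen for it */
static unsigned int reclassify(unsigned int err_mask, uint8_t host_stat,
                               int *thaw)
{
        if (err_mask == AC_ERR_TIMEOUT && (host_stat & ATA_DMA_ERR)) {
                *thaw = 1;
                return AC_ERR_HOST_BUS;
        }
        return err_mask;
}

int main(void)
{
        int thaw = 0;
        unsigned int m = reclassify(AC_ERR_TIMEOUT, ATA_DMA_ERR, &thaw);

        printf("err_mask=0x%x thaw=%d\n", m, thaw);
        return 0;
}
```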
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index 509178c3700c..9051b6821c1c 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -61,13 +61,10 @@
 
 #include "libata.h"
 
-static unsigned int ata_dev_init_params(struct ata_port *ap,
-                                        struct ata_device *dev,
-                                        u16 heads,
-                                        u16 sectors);
-static unsigned int ata_dev_set_xfermode(struct ata_port *ap,
-                                         struct ata_device *dev);
-static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev);
+static unsigned int ata_dev_init_params(struct ata_device *dev,
+                                        u16 heads, u16 sectors);
+static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
+static void ata_dev_xfermask(struct ata_device *dev);
 
 static unsigned int ata_unique_id = 1;
 static struct workqueue_struct *ata_wq;
@@ -412,11 +409,10 @@ static const char *sata_spd_string(unsigned int spd)
         return spd_str[spd - 1];
 }
 
-void ata_dev_disable(struct ata_port *ap, struct ata_device *dev)
+void ata_dev_disable(struct ata_device *dev)
 {
         if (ata_dev_enabled(dev)) {
-                printk(KERN_WARNING "ata%u: dev %u disabled\n",
-                       ap->id, dev->devno);
+                ata_dev_printk(dev, KERN_WARNING, "disabled\n");
                 dev->class++;
         }
 }
@@ -955,13 +951,11 @@ void ata_qc_complete_internal(struct ata_queued_cmd *qc)
 {
         struct completion *waiting = qc->private_data;
 
-        qc->ap->ops->tf_read(qc->ap, &qc->tf);
         complete(waiting);
 }
 
 /**
  * ata_exec_internal - execute libata internal command
- * @ap: Port to which the command is sent
  * @dev: Device to which the command is sent
  * @tf: Taskfile registers for the command and the result
  * @cdb: CDB for packet command
@@ -979,24 +973,62 @@ void ata_qc_complete_internal(struct ata_queued_cmd *qc)
  * None.  Should be called with kernel context, might sleep.
  */
 
-unsigned ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
+unsigned ata_exec_internal(struct ata_device *dev,
                            struct ata_taskfile *tf, const u8 *cdb,
                            int dma_dir, void *buf, unsigned int buflen)
 {
+        struct ata_port *ap = dev->ap;
         u8 command = tf->command;
         struct ata_queued_cmd *qc;
+        unsigned int tag, preempted_tag;
+        u32 preempted_sactive, preempted_qc_active;
         DECLARE_COMPLETION(wait);
         unsigned long flags;
         unsigned int err_mask;
+        int rc;
 
         spin_lock_irqsave(&ap->host_set->lock, flags);
 
-        qc = ata_qc_new_init(ap, dev);
-        BUG_ON(qc == NULL);
+        /* no internal command while frozen */
+        if (ap->flags & ATA_FLAG_FROZEN) {
+                spin_unlock_irqrestore(&ap->host_set->lock, flags);
+                return AC_ERR_SYSTEM;
+        }
+
+        /* initialize internal qc */
+
+        /* XXX: Tag 0 is used for drivers with legacy EH as some
+         * drivers choke if any other tag is given.  This breaks
+         * ata_tag_internal() test for those drivers.  Don't use new
+         * EH stuff without converting to it.
+         */
+        if (ap->ops->error_handler)
+                tag = ATA_TAG_INTERNAL;
+        else
+                tag = 0;
+
+        if (test_and_set_bit(tag, &ap->qc_allocated))
+                BUG();
+        qc = __ata_qc_from_tag(ap, tag);
+
+        qc->tag = tag;
+        qc->scsicmd = NULL;
+        qc->ap = ap;
+        qc->dev = dev;
+        ata_qc_reinit(qc);
+
+        preempted_tag = ap->active_tag;
+        preempted_sactive = ap->sactive;
+        preempted_qc_active = ap->qc_active;
+        ap->active_tag = ATA_TAG_POISON;
+        ap->sactive = 0;
+        ap->qc_active = 0;
 
+        /* prepare & issue qc */
         qc->tf = *tf;
         if (cdb)
                 memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
+        qc->flags |= ATA_QCFLAG_RESULT_TF;
         qc->dma_dir = dma_dir;
         if (dma_dir != DMA_NONE) {
                 ata_sg_init_one(qc, buf, buflen);
@@ -1010,31 +1042,53 @@ unsigned ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
 
         spin_unlock_irqrestore(&ap->host_set->lock, flags);
 
-        if (!wait_for_completion_timeout(&wait, ATA_TMOUT_INTERNAL)) {
-                ata_port_flush_task(ap);
+        rc = wait_for_completion_timeout(&wait, ATA_TMOUT_INTERNAL);
 
+        ata_port_flush_task(ap);
+
+        if (!rc) {
                 spin_lock_irqsave(&ap->host_set->lock, flags);
 
                 /* We're racing with irq here.  If we lose, the
                  * following test prevents us from completing the qc
-                 * again.  If completion irq occurs after here but
-                 * before the caller cleans up, it will result in a
-                 * spurious interrupt.  We can live with that.
+                 * twice.  If we win, the port is frozen and will be
+                 * cleaned up by ->post_internal_cmd().
                  */
                 if (qc->flags & ATA_QCFLAG_ACTIVE) {
-                        qc->err_mask = AC_ERR_TIMEOUT;
-                        ata_qc_complete(qc);
-                        printk(KERN_WARNING "ata%u: qc timeout (cmd 0x%x)\n",
-                               ap->id, command);
+                        qc->err_mask |= AC_ERR_TIMEOUT;
+
+                        if (ap->ops->error_handler)
+                                ata_port_freeze(ap);
+                        else
+                                ata_qc_complete(qc);
+
+                        ata_dev_printk(dev, KERN_WARNING,
+                                       "qc timeout (cmd 0x%x)\n", command);
                 }
 
                 spin_unlock_irqrestore(&ap->host_set->lock, flags);
         }
 
-        *tf = qc->tf;
+        /* do post_internal_cmd */
+        if (ap->ops->post_internal_cmd)
+                ap->ops->post_internal_cmd(qc);
+
+        if (qc->flags & ATA_QCFLAG_FAILED && !qc->err_mask) {
+                ata_dev_printk(dev, KERN_WARNING, "zero err_mask for failed "
+                               "internal command, assuming AC_ERR_OTHER\n");
+                qc->err_mask |= AC_ERR_OTHER;
+        }
+
+        /* finish up */
+        spin_lock_irqsave(&ap->host_set->lock, flags);
+
+        *tf = qc->result_tf;
         err_mask = qc->err_mask;
 
         ata_qc_free(qc);
+        ap->active_tag = preempted_tag;
+        ap->sactive = preempted_sactive;
+        ap->qc_active = preempted_qc_active;
 
         /* XXX - Some LLDDs (sata_mv) disable port on command failure.
          * Until those drivers are fixed, we detect the condition
@@ -1052,6 +1106,8 @@ unsigned ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
                 ata_port_probe(ap);
         }
 
+        spin_unlock_irqrestore(&ap->host_set->lock, flags);
+
         return err_mask;
 }
 
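ata_exec_internal() now borrows the whole port for the internal command: the live queue state (active_tag, sactive, qc_active) is stashed in the preempted_* locals and restored once the command finishes. A minimal standalone model of that save/run/restore pattern (toy types, not the kernel structs):

```c
#include <stdint.h>
#include <stdio.h>

/* toy port state standing in for struct ata_port's queue fields */
struct port_state {
        unsigned int active_tag;
        uint32_t sactive;
        uint32_t qc_active;
};

/* an internal command borrows the whole port: stash the live queue
 * state, run with a clean slate, then put everything back */
static unsigned int run_internal(struct port_state *ap)
{
        struct port_state saved = *ap;  /* the preempted_* locals above */
        unsigned int err_mask = 0;

        ap->active_tag = ~0u;           /* stand-in for ATA_TAG_POISON */
        ap->sactive = 0;
        ap->qc_active = 0;

        /* ... issue the internal qc and wait for it here ... */

        *ap = saved;                    /* restore on the way out */
        return err_mask;
}

int main(void)
{
        struct port_state ap = { .active_tag = 3, .sactive = 0x8,
                                 .qc_active = 0x8 };

        run_internal(&ap);
        printf("restored: active_tag=%u sactive=0x%x\n",
               ap.active_tag, ap.sactive);
        return 0;
}
```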
@@ -1090,11 +1146,10 @@ unsigned int ata_pio_need_iordy(const struct ata_device *adev)
 
 /**
  * ata_dev_read_id - Read ID data from the specified device
- * @ap: port on which target device resides
  * @dev: target device
  * @p_class: pointer to class of the target device (may be changed)
  * @post_reset: is this read ID post-reset?
- * @p_id: read IDENTIFY page (newly allocated)
+ * @id: buffer to read IDENTIFY data into
  *
  * Read ID data from the specified device.  ATA_CMD_ID_ATA is
  * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
@@ -1107,13 +1162,13 @@ unsigned int ata_pio_need_iordy(const struct ata_device *adev)
  * RETURNS:
  * 0 on success, -errno otherwise.
  */
-static int ata_dev_read_id(struct ata_port *ap, struct ata_device *dev,
-                           unsigned int *p_class, int post_reset, u16 **p_id)
+static int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
+                           int post_reset, u16 *id)
 {
+        struct ata_port *ap = dev->ap;
         unsigned int class = *p_class;
         struct ata_taskfile tf;
         unsigned int err_mask = 0;
-        u16 *id;
         const char *reason;
         int rc;
 
@@ -1121,15 +1176,8 @@ static int ata_dev_read_id(struct ata_port *ap, struct ata_device *dev,
 
         ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
 
-        id = kmalloc(sizeof(id[0]) * ATA_ID_WORDS, GFP_KERNEL);
-        if (id == NULL) {
-                rc = -ENOMEM;
-                reason = "out of memory";
-                goto err_out;
-        }
-
  retry:
-        ata_tf_init(ap, &tf, dev->devno);
+        ata_tf_init(dev, &tf);
 
         switch (class) {
         case ATA_DEV_ATA:
@@ -1146,7 +1194,7 @@ static int ata_dev_read_id(struct ata_port *ap, struct ata_device *dev,
 
         tf.protocol = ATA_PROT_PIO;
 
-        err_mask = ata_exec_internal(ap, dev, &tf, NULL, DMA_FROM_DEVICE,
+        err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
                                      id, sizeof(id[0]) * ATA_ID_WORDS);
         if (err_mask) {
                 rc = -EIO;
@@ -1173,7 +1221,7 @@ static int ata_dev_read_id(struct ata_port *ap, struct ata_device *dev,
          * Some drives were very specific about that exact sequence.
          */
         if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
-                err_mask = ata_dev_init_params(ap, dev, id[3], id[6]);
+                err_mask = ata_dev_init_params(dev, id[3], id[6]);
                 if (err_mask) {
                         rc = -EIO;
                         reason = "INIT_DEV_PARAMS failed";
@@ -1189,25 +1237,44 @@ static int ata_dev_read_id(struct ata_port *ap, struct ata_device *dev,
         }
 
         *p_class = class;
-        *p_id = id;
+
         return 0;
 
  err_out:
-        printk(KERN_WARNING "ata%u: dev %u failed to IDENTIFY (%s)\n",
-               ap->id, dev->devno, reason);
-        kfree(id);
+        ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
+                       "(%s, err_mask=0x%x)\n", reason, err_mask);
         return rc;
 }
 
-static inline u8 ata_dev_knobble(const struct ata_port *ap,
-                                 struct ata_device *dev)
+static inline u8 ata_dev_knobble(struct ata_device *dev)
+{
+        return ((dev->ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
+}
+
+static void ata_dev_config_ncq(struct ata_device *dev,
+                               char *desc, size_t desc_sz)
 {
-        return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
+        struct ata_port *ap = dev->ap;
+        int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
+
+        if (!ata_id_has_ncq(dev->id)) {
+                desc[0] = '\0';
+                return;
+        }
+
+        if (ap->flags & ATA_FLAG_NCQ) {
+                hdepth = min(ap->host->can_queue, ATA_MAX_QUEUE - 1);
+                dev->flags |= ATA_DFLAG_NCQ;
+        }
+
+        if (hdepth >= ddepth)
+                snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
+        else
+                snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
 }
 
 /**
  * ata_dev_configure - Configure the specified ATA/ATAPI device
- * @ap: Port on which target device resides
  * @dev: Target device to configure
  * @print_info: Enable device info printout
  *
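ata_dev_config_ncq() negotiates the queue depth from two sides: what the host template will queue and what IDENTIFY word 75 advertises (bits 4:0 hold the depth minus one, which is what ata_id_queue_depth() decodes). A standalone sketch of the effective-depth computation (helper names here are assumptions for the demo; the kernel prints both numbers, but the usable depth is the smaller one):

```c
#include <stdio.h>

#define ATA_MAX_QUEUE 32

static int min_int(int a, int b) { return a < b ? a : b; }

/* effective depth = min(host queue depth, device NCQ depth);
 * IDENTIFY word 75 bits 4:0 encode the device depth minus one */
static int ncq_depth(int host_can_queue, unsigned short id75)
{
        int hdepth = min_int(host_can_queue, ATA_MAX_QUEUE - 1);
        int ddepth = (id75 & 0x1f) + 1;   /* ata_id_queue_depth() */

        return min_int(hdepth, ddepth);
}

int main(void)
{
        /* AHCI host (can_queue 31) with a typical 32-deep NCQ disk */
        printf("depth %d\n", ncq_depth(31, 0x1f));
        return 0;
}
```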
@@ -1220,9 +1287,9 @@ static inline u8 ata_dev_knobble(const struct ata_port *ap,
  * RETURNS:
  * 0 on success, -errno otherwise
  */
-static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
-                             int print_info)
+static int ata_dev_configure(struct ata_device *dev, int print_info)
 {
+        struct ata_port *ap = dev->ap;
         const u16 *id = dev->id;
         unsigned int xfer_mask;
         int i, rc;
@@ -1237,10 +1304,10 @@ static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
 
         /* print device capabilities */
         if (print_info)
-                printk(KERN_DEBUG "ata%u: dev %u cfg 49:%04x 82:%04x 83:%04x "
-                       "84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n",
-                       ap->id, dev->devno, id[49], id[82], id[83],
-                       id[84], id[85], id[86], id[87], id[88]);
+                ata_dev_printk(dev, KERN_DEBUG, "cfg 49:%04x 82:%04x 83:%04x "
+                               "84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n",
+                               id[49], id[82], id[83], id[84],
+                               id[85], id[86], id[87], id[88]);
 
         /* initialize to-be-configured parameters */
         dev->flags &= ~ATA_DFLAG_CFG_MASK;
@@ -1266,6 +1333,7 @@ static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
 
         if (ata_id_has_lba(id)) {
                 const char *lba_desc;
+                char ncq_desc[20];
 
                 lba_desc = "LBA";
                 dev->flags |= ATA_DFLAG_LBA;
@@ -1274,15 +1342,17 @@ static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
                         lba_desc = "LBA48";
                 }
 
+                /* config NCQ */
+                ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
+
                 /* print device info to dmesg */
                 if (print_info)
-                        printk(KERN_INFO "ata%u: dev %u ATA-%d, "
-                               "max %s, %Lu sectors: %s\n",
-                               ap->id, dev->devno,
-                               ata_id_major_version(id),
-                               ata_mode_string(xfer_mask),
-                               (unsigned long long)dev->n_sectors,
-                               lba_desc);
+                        ata_dev_printk(dev, KERN_INFO, "ATA-%d, "
+                                       "max %s, %Lu sectors: %s %s\n",
+                                       ata_id_major_version(id),
+                                       ata_mode_string(xfer_mask),
+                                       (unsigned long long)dev->n_sectors,
+                                       lba_desc, ncq_desc);
         } else {
                 /* CHS */
 
@@ -1300,13 +1370,18 @@ static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
 
                 /* print device info to dmesg */
                 if (print_info)
-                        printk(KERN_INFO "ata%u: dev %u ATA-%d, "
-                               "max %s, %Lu sectors: CHS %u/%u/%u\n",
-                               ap->id, dev->devno,
-                               ata_id_major_version(id),
-                               ata_mode_string(xfer_mask),
-                               (unsigned long long)dev->n_sectors,
-                               dev->cylinders, dev->heads, dev->sectors);
+                        ata_dev_printk(dev, KERN_INFO, "ATA-%d, "
+                                       "max %s, %Lu sectors: CHS %u/%u/%u\n",
+                                       ata_id_major_version(id),
+                                       ata_mode_string(xfer_mask),
+                                       (unsigned long long)dev->n_sectors,
+                                       dev->cylinders, dev->heads, dev->sectors);
+        }
+
+        if (dev->id[59] & 0x100) {
+                dev->multi_count = dev->id[59] & 0xff;
+                DPRINTK("ata%u: dev %u multi count %u\n",
+                        ap->id, dev->devno, dev->multi_count);
         }
 
         dev->cdb_len = 16;
@@ -1314,18 +1389,27 @@ static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
 
         /* ATAPI-specific feature tests */
         else if (dev->class == ATA_DEV_ATAPI) {
+                char *cdb_intr_string = "";
+
                 rc = atapi_cdb_len(id);
                 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
-                        printk(KERN_WARNING "ata%u: unsupported CDB len\n", ap->id);
+                        ata_dev_printk(dev, KERN_WARNING,
+                                       "unsupported CDB len\n");
                         rc = -EINVAL;
                         goto err_out_nosup;
                 }
                 dev->cdb_len = (unsigned int) rc;
 
+                if (ata_id_cdb_intr(dev->id)) {
+                        dev->flags |= ATA_DFLAG_CDB_INTR;
+                        cdb_intr_string = ", CDB intr";
+                }
+
                 /* print device info to dmesg */
                 if (print_info)
-                        printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n",
-                               ap->id, dev->devno, ata_mode_string(xfer_mask));
+                        ata_dev_printk(dev, KERN_INFO, "ATAPI, max %s%s\n",
+                                       ata_mode_string(xfer_mask),
+                                       cdb_intr_string);
         }
 
         ap->host->max_cmd_len = 0;
@@ -1335,10 +1419,10 @@ static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
                                          ap->device[i].cdb_len);
 
         /* limit bridge transfers to udma5, 200 sectors */
-        if (ata_dev_knobble(ap, dev)) {
+        if (ata_dev_knobble(dev)) {
                 if (print_info)
-                        printk(KERN_INFO "ata%u(%u): applying bridge limits\n",
-                               ap->id, dev->devno);
+                        ata_dev_printk(dev, KERN_INFO,
+                                       "applying bridge limits\n");
                 dev->udma_mask &= ATA_UDMA5;
                 dev->max_sectors = ATA_MAX_SECTORS;
         }
@@ -1391,15 +1475,18 @@ static int ata_bus_probe(struct ata_port *ap)
         if (ap->ops->probe_reset) {
                 rc = ap->ops->probe_reset(ap, classes);
                 if (rc) {
-                        printk("ata%u: reset failed (errno=%d)\n", ap->id, rc);
+                        ata_port_printk(ap, KERN_ERR,
+                                        "reset failed (errno=%d)\n", rc);
                         return rc;
                 }
         } else {
                 ap->ops->phy_reset(ap);
 
-                if (!(ap->flags & ATA_FLAG_DISABLED))
-                        for (i = 0; i < ATA_MAX_DEVICES; i++)
-                                classes[i] = ap->device[i].class;
+                for (i = 0; i < ATA_MAX_DEVICES; i++) {
+                        if (!(ap->flags & ATA_FLAG_DISABLED))
+                                classes[i] = ap->device[i].class;
+                        ap->device[i].class = ATA_DEV_UNKNOWN;
+                }
 
                 ata_port_probe(ap);
         }
@@ -1418,32 +1505,17 @@ static int ata_bus_probe(struct ata_port *ap)
                 if (!ata_dev_enabled(dev))
                         continue;
 
-                kfree(dev->id);
-                dev->id = NULL;
-                rc = ata_dev_read_id(ap, dev, &dev->class, 1, &dev->id);
+                rc = ata_dev_read_id(dev, &dev->class, 1, dev->id);
                 if (rc)
                         goto fail;
 
-                rc = ata_dev_configure(ap, dev, 1);
+                rc = ata_dev_configure(dev, 1);
                 if (rc)
                         goto fail;
         }
 
         /* configure transfer mode */
-        if (ap->ops->set_mode) {
-                /* FIXME: make ->set_mode handle no device case and
-                 * return error code and failing device on failure as
-                 * ata_set_mode() does.
-                 */
-                for (i = 0; i < ATA_MAX_DEVICES; i++)
-                        if (ata_dev_enabled(&ap->device[i])) {
-                                ap->ops->set_mode(ap);
-                                break;
-                        }
-                rc = 0;
-        } else
-                rc = ata_set_mode(ap, &dev);
-
+        rc = ata_set_mode(ap, &dev);
         if (rc) {
                 down_xfermask = 1;
                 goto fail;
@@ -1465,18 +1537,18 @@ static int ata_bus_probe(struct ata_port *ap)
                         tries[dev->devno] = 0;
                         break;
                 case -EIO:
-                        ata_down_sata_spd_limit(ap);
+                        sata_down_spd_limit(ap);
                         /* fall through */
                 default:
                         tries[dev->devno]--;
                         if (down_xfermask &&
-                            ata_down_xfermask_limit(ap, dev, tries[dev->devno] == 1))
+                            ata_down_xfermask_limit(dev, tries[dev->devno] == 1))
                                 tries[dev->devno] = 0;
                 }
 
                 if (!tries[dev->devno]) {
-                        ata_down_xfermask_limit(ap, dev, 1);
-                        ata_dev_disable(ap, dev);
+                        ata_down_xfermask_limit(dev, 1);
+                        ata_dev_disable(dev);
                 }
 
                 goto retry;
@@ -1511,21 +1583,19 @@ static void sata_print_link_status(struct ata_port *ap)
 {
         u32 sstatus, scontrol, tmp;
 
-        if (!ap->ops->scr_read)
+        if (sata_scr_read(ap, SCR_STATUS, &sstatus))
                 return;
+        sata_scr_read(ap, SCR_CONTROL, &scontrol);
 
-        sstatus = scr_read(ap, SCR_STATUS);
-        scontrol = scr_read(ap, SCR_CONTROL);
-
-        if (sata_dev_present(ap)) {
+        if (ata_port_online(ap)) {
                 tmp = (sstatus >> 4) & 0xf;
-                printk(KERN_INFO
-                       "ata%u: SATA link up %s (SStatus %X SControl %X)\n",
-                       ap->id, sata_spd_string(tmp), sstatus, scontrol);
+                ata_port_printk(ap, KERN_INFO,
+                                "SATA link up %s (SStatus %X SControl %X)\n",
+                                sata_spd_string(tmp), sstatus, scontrol);
         } else {
-                printk(KERN_INFO
-                       "ata%u: SATA link down (SStatus %X SControl %X)\n",
-                       ap->id, sstatus, scontrol);
+                ata_port_printk(ap, KERN_INFO,
+                                "SATA link down (SStatus %X SControl %X)\n",
+                                sstatus, scontrol);
         }
 }
 
@@ -1548,17 +1618,18 @@ void __sata_phy_reset(struct ata_port *ap)
 
         if (ap->flags & ATA_FLAG_SATA_RESET) {
                 /* issue phy wake/reset */
-                scr_write_flush(ap, SCR_CONTROL, 0x301);
+                sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
                 /* Couldn't find anything in SATA I/II specs, but
                  * AHCI-1.1 10.4.2 says at least 1 ms. */
                 mdelay(1);
         }
-        scr_write_flush(ap, SCR_CONTROL, 0x300); /* phy wake/clear reset */
+        /* phy wake/clear reset */
+        sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
 
         /* wait for phy to become ready, if necessary */
         do {
                 msleep(200);
-                sstatus = scr_read(ap, SCR_STATUS);
+                sata_scr_read(ap, SCR_STATUS, &sstatus);
                 if ((sstatus & 0xf) != 1)
                         break;
         } while (time_before(jiffies, timeout));
@@ -1567,7 +1638,7 @@ void __sata_phy_reset(struct ata_port *ap)
         sata_print_link_status(ap);
 
         /* TODO: phy layer with polling, timeouts, etc. */
-        if (sata_dev_present(ap))
+        if (!ata_port_offline(ap))
                 ata_port_probe(ap);
         else
                 ata_port_disable(ap);
@@ -1604,15 +1675,15 @@ void sata_phy_reset(struct ata_port *ap)
1604 1675
1605/** 1676/**
1606 * ata_dev_pair - return other device on cable 1677 * ata_dev_pair - return other device on cable
1607 * @ap: port
1608 * @adev: device 1678 * @adev: device
1609 * 1679 *
1610 * Obtain the other device on the same cable, or if none is 1680 * Obtain the other device on the same cable, or if none is
1611 * present NULL is returned 1681 * present NULL is returned
1612 */ 1682 */
1613 1683
1614struct ata_device *ata_dev_pair(struct ata_port *ap, struct ata_device *adev) 1684struct ata_device *ata_dev_pair(struct ata_device *adev)
1615{ 1685{
1686 struct ata_port *ap = adev->ap;
1616 struct ata_device *pair = &ap->device[1 - adev->devno]; 1687 struct ata_device *pair = &ap->device[1 - adev->devno];
1617 if (!ata_dev_enabled(pair)) 1688 if (!ata_dev_enabled(pair))
1618 return NULL; 1689 return NULL;
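A hypothetical caller showing the new convention: with the ata_device::ap back-pointer this patch introduces, helpers that used to take (ap, dev) pairs now take only the device and recover the port themselves.

static void example_report_pair(struct ata_device *dev)
{
	struct ata_port *ap = dev->ap;		/* back-pointer, new in this patch */
	struct ata_device *pair = ata_dev_pair(dev);	/* devno 0 <-> 1 */

	if (pair)
		ata_dev_printk(pair, KERN_INFO,
			       "shares the cable on port %u\n", ap->id);
}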
@@ -1640,12 +1711,12 @@ void ata_port_disable(struct ata_port *ap)
1640} 1711}
1641 1712
1642/** 1713/**
1643 * ata_down_sata_spd_limit - adjust SATA spd limit downward 1714 * sata_down_spd_limit - adjust SATA spd limit downward
1644 * @ap: Port to adjust SATA spd limit for 1715 * @ap: Port to adjust SATA spd limit for
1645 * 1716 *
1646 * Adjust SATA spd limit of @ap downward. Note that this 1717 * Adjust SATA spd limit of @ap downward. Note that this
1647 * function only adjusts the limit. The change must be applied 1718 * function only adjusts the limit. The change must be applied
1648 * using ata_set_sata_spd(). 1719 * using sata_set_spd().
1649 * 1720 *
1650 * LOCKING: 1721 * LOCKING:
1651 * Inherited from caller. 1722 * Inherited from caller.
@@ -1653,13 +1724,14 @@ void ata_port_disable(struct ata_port *ap)
1653 * RETURNS: 1724 * RETURNS:
1654 * 0 on success, negative errno on failure 1725 * 0 on success, negative errno on failure
1655 */ 1726 */
1656int ata_down_sata_spd_limit(struct ata_port *ap) 1727int sata_down_spd_limit(struct ata_port *ap)
1657{ 1728{
1658 u32 spd, mask; 1729 u32 sstatus, spd, mask;
1659 int highbit; 1730 int rc, highbit;
1660 1731
1661 if (ap->cbl != ATA_CBL_SATA || !ap->ops->scr_read) 1732 rc = sata_scr_read(ap, SCR_STATUS, &sstatus);
1662 return -EOPNOTSUPP; 1733 if (rc)
1734 return rc;
1663 1735
1664 mask = ap->sata_spd_limit; 1736 mask = ap->sata_spd_limit;
1665 if (mask <= 1) 1737 if (mask <= 1)
@@ -1667,7 +1739,7 @@ int ata_down_sata_spd_limit(struct ata_port *ap)
1667 highbit = fls(mask) - 1; 1739 highbit = fls(mask) - 1;
1668 mask &= ~(1 << highbit); 1740 mask &= ~(1 << highbit);
1669 1741
1670 spd = (scr_read(ap, SCR_STATUS) >> 4) & 0xf; 1742 spd = (sstatus >> 4) & 0xf;
1671 if (spd <= 1) 1743 if (spd <= 1)
1672 return -EINVAL; 1744 return -EINVAL;
1673 spd--; 1745 spd--;
@@ -1677,13 +1749,13 @@ int ata_down_sata_spd_limit(struct ata_port *ap)
1677 1749
1678 ap->sata_spd_limit = mask; 1750 ap->sata_spd_limit = mask;
1679 1751
1680 printk(KERN_WARNING "ata%u: limiting SATA link speed to %s\n", 1752 ata_port_printk(ap, KERN_WARNING, "limiting SATA link speed to %s\n",
1681 ap->id, sata_spd_string(fls(mask))); 1753 sata_spd_string(fls(mask)));
1682 1754
1683 return 0; 1755 return 0;
1684} 1756}
1685 1757
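The limit arithmetic above is simply "clear the highest set bit": sata_spd_limit keeps bit (spd - 1) set for each allowed speed, so dropping the top bit removes the fastest remaining generation. In isolation, assuming fls() from <linux/bitops.h>:

static unsigned int example_drop_top_spd(unsigned int mask)
{
	/* e.g. 0x3 (GEN1|GEN2 allowed) becomes 0x1 (GEN1 only) */
	int highbit = fls(mask) - 1;	/* fls() is 1-based */

	return mask & ~(1u << highbit);
}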
1686static int __ata_set_sata_spd_needed(struct ata_port *ap, u32 *scontrol) 1758static int __sata_set_spd_needed(struct ata_port *ap, u32 *scontrol)
1687{ 1759{
1688 u32 spd, limit; 1760 u32 spd, limit;
1689 1761
@@ -1699,7 +1771,7 @@ static int __ata_set_sata_spd_needed(struct ata_port *ap, u32 *scontrol)
1699} 1771}
1700 1772
1701/** 1773/**
1702 * ata_set_sata_spd_needed - is SATA spd configuration needed 1774 * sata_set_spd_needed - is SATA spd configuration needed
1703 * @ap: Port in question 1775 * @ap: Port in question
1704 * 1776 *
1705 * Test whether the spd limit in SControl matches 1777 * Test whether the spd limit in SControl matches
@@ -1713,20 +1785,18 @@ static int __ata_set_sata_spd_needed(struct ata_port *ap, u32 *scontrol)
1713 * RETURNS: 1785 * RETURNS:
1714 * 1 if SATA spd configuration is needed, 0 otherwise. 1786 * 1 if SATA spd configuration is needed, 0 otherwise.
1715 */ 1787 */
1716int ata_set_sata_spd_needed(struct ata_port *ap) 1788int sata_set_spd_needed(struct ata_port *ap)
1717{ 1789{
1718 u32 scontrol; 1790 u32 scontrol;
1719 1791
1720 if (ap->cbl != ATA_CBL_SATA || !ap->ops->scr_read) 1792 if (sata_scr_read(ap, SCR_CONTROL, &scontrol))
1721 return 0; 1793 return 0;
1722 1794
1723 scontrol = scr_read(ap, SCR_CONTROL); 1795 return __sata_set_spd_needed(ap, &scontrol);
1724
1725 return __ata_set_sata_spd_needed(ap, &scontrol);
1726} 1796}
1727 1797
1728/** 1798/**
1729 * ata_set_sata_spd - set SATA spd according to spd limit 1799 * sata_set_spd - set SATA spd according to spd limit
1730 * @ap: Port to set SATA spd for 1800 * @ap: Port to set SATA spd for
1731 * 1801 *
1732 * Set SATA spd of @ap according to sata_spd_limit. 1802 * Set SATA spd of @ap according to sata_spd_limit.
@@ -1736,20 +1806,22 @@ int ata_set_sata_spd_needed(struct ata_port *ap)
1736 * 1806 *
1737 * RETURNS: 1807 * RETURNS:
1738 * 0 if spd doesn't need to be changed, 1 if spd has been 1808 * 0 if spd doesn't need to be changed, 1 if spd has been
1739 * changed. -EOPNOTSUPP if SCR registers are inaccessible. 1809 * changed. Negative errno if SCR registers are inaccessible.
1740 */ 1810 */
1741int ata_set_sata_spd(struct ata_port *ap) 1811int sata_set_spd(struct ata_port *ap)
1742{ 1812{
1743 u32 scontrol; 1813 u32 scontrol;
1814 int rc;
1744 1815
1745 if (ap->cbl != ATA_CBL_SATA || !ap->ops->scr_read) 1816 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
1746 return -EOPNOTSUPP; 1817 return rc;
1747 1818
1748 scontrol = scr_read(ap, SCR_CONTROL); 1819 if (!__sata_set_spd_needed(ap, &scontrol))
1749 if (!__ata_set_sata_spd_needed(ap, &scontrol))
1750 return 0; 1820 return 0;
1751 1821
1752 scr_write(ap, SCR_CONTROL, scontrol); 1822 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
1823 return rc;
1824
1753 return 1; 1825 return 1;
1754} 1826}
1755 1827
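Both functions above now share the same read-modify-write shape with errno propagation; condensed into a sketch (the helper name is hypothetical):

static int example_rmw_scontrol(struct ata_port *ap, u32 clear, u32 set)
{
	u32 scontrol;
	int rc;

	if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
		return rc;	/* e.g. no SCR access on this port */

	scontrol = (scontrol & ~clear) | set;

	return sata_scr_write(ap, SCR_CONTROL, scontrol);
}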
@@ -1903,7 +1975,6 @@ int ata_timing_compute(struct ata_device *adev, unsigned short speed,
1903 1975
1904/** 1976/**
1905 * ata_down_xfermask_limit - adjust dev xfer masks downward 1977 * ata_down_xfermask_limit - adjust dev xfer masks downward
1906 * @ap: Port associated with device @dev
1907 * @dev: Device to adjust xfer masks 1978 * @dev: Device to adjust xfer masks
1908 * @force_pio0: Force PIO0 1979 * @force_pio0: Force PIO0
1909 * 1980 *
@@ -1917,8 +1988,7 @@ int ata_timing_compute(struct ata_device *adev, unsigned short speed,
1917 * RETURNS: 1988 * RETURNS:
1918 * 0 on success, negative errno on failure 1989 * 0 on success, negative errno on failure
1919 */ 1990 */
1920int ata_down_xfermask_limit(struct ata_port *ap, struct ata_device *dev, 1991int ata_down_xfermask_limit(struct ata_device *dev, int force_pio0)
1921 int force_pio0)
1922{ 1992{
1923 unsigned long xfer_mask; 1993 unsigned long xfer_mask;
1924 int highbit; 1994 int highbit;
@@ -1942,8 +2012,8 @@ int ata_down_xfermask_limit(struct ata_port *ap, struct ata_device *dev,
1942 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask, 2012 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
1943 &dev->udma_mask); 2013 &dev->udma_mask);
1944 2014
1945 printk(KERN_WARNING "ata%u: dev %u limiting speed to %s\n", 2015 ata_dev_printk(dev, KERN_WARNING, "limiting speed to %s\n",
1946 ap->id, dev->devno, ata_mode_string(xfer_mask)); 2016 ata_mode_string(xfer_mask));
1947 2017
1948 return 0; 2018 return 0;
1949 2019
@@ -1951,7 +2021,7 @@ int ata_down_xfermask_limit(struct ata_port *ap, struct ata_device *dev,
1951 return -EINVAL; 2021 return -EINVAL;
1952} 2022}
1953 2023
1954static int ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev) 2024static int ata_dev_set_mode(struct ata_device *dev)
1955{ 2025{
1956 unsigned int err_mask; 2026 unsigned int err_mask;
1957 int rc; 2027 int rc;
@@ -1960,24 +2030,22 @@ static int ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev)
1960 if (dev->xfer_shift == ATA_SHIFT_PIO) 2030 if (dev->xfer_shift == ATA_SHIFT_PIO)
1961 dev->flags |= ATA_DFLAG_PIO; 2031 dev->flags |= ATA_DFLAG_PIO;
1962 2032
1963 err_mask = ata_dev_set_xfermode(ap, dev); 2033 err_mask = ata_dev_set_xfermode(dev);
1964 if (err_mask) { 2034 if (err_mask) {
1965 printk(KERN_ERR 2035 ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
1966 "ata%u: failed to set xfermode (err_mask=0x%x)\n", 2036 "(err_mask=0x%x)\n", err_mask);
1967 ap->id, err_mask);
1968 return -EIO; 2037 return -EIO;
1969 } 2038 }
1970 2039
1971 rc = ata_dev_revalidate(ap, dev, 0); 2040 rc = ata_dev_revalidate(dev, 0);
1972 if (rc) 2041 if (rc)
1973 return rc; 2042 return rc;
1974 2043
1975 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n", 2044 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
1976 dev->xfer_shift, (int)dev->xfer_mode); 2045 dev->xfer_shift, (int)dev->xfer_mode);
1977 2046
1978 printk(KERN_INFO "ata%u: dev %u configured for %s\n", 2047 ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
1979 ap->id, dev->devno, 2048 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
1980 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
1981 return 0; 2049 return 0;
1982} 2050}
1983 2051
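The ata_dev_printk() used throughout this patch is presumably a macro along these lines (a sketch, not the actual libata.h definition), prefixing each message with port and device numbers so call sites stop passing ap->id and dev->devno by hand:

#define example_dev_printk(dev, lv, fmt, args...) \
	printk(lv "ata%u.%02u: " fmt, (dev)->ap->id, (dev)->devno, ##args)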
@@ -2001,6 +2069,20 @@ int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
2001 struct ata_device *dev; 2069 struct ata_device *dev;
2002 int i, rc = 0, used_dma = 0, found = 0; 2070 int i, rc = 0, used_dma = 0, found = 0;
2003 2071
2072 /* has private set_mode? */
2073 if (ap->ops->set_mode) {
2074 /* FIXME: make ->set_mode handle no device case and
2075 * return error code and failing device on failure.
2076 */
2077 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2078 if (ata_dev_enabled(&ap->device[i])) {
2079 ap->ops->set_mode(ap);
2080 break;
2081 }
2082 }
2083 return 0;
2084 }
2085
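A hypothetical LLDD-side counterpart to the hook above: a legacy controller that programs its own timings supplies ->set_mode and thereby opts out of the core xfermode negotiation entirely.

static void example_lld_set_mode(struct ata_port *ap)
{
	int i;

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];

		if (ata_dev_enabled(dev)) {
			/* chip-specific timing setup would go here */
			dev->xfer_mode = XFER_PIO_0;
			dev->xfer_shift = ATA_SHIFT_PIO;
			dev->flags |= ATA_DFLAG_PIO;
		}
	}
}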
2004 /* step 1: calculate xfer_mask */ 2086 /* step 1: calculate xfer_mask */
2005 for (i = 0; i < ATA_MAX_DEVICES; i++) { 2087 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2006 unsigned int pio_mask, dma_mask; 2088 unsigned int pio_mask, dma_mask;
@@ -2010,7 +2092,7 @@ int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
2010 if (!ata_dev_enabled(dev)) 2092 if (!ata_dev_enabled(dev))
2011 continue; 2093 continue;
2012 2094
2013 ata_dev_xfermask(ap, dev); 2095 ata_dev_xfermask(dev);
2014 2096
2015 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0); 2097 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
2016 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask); 2098 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
@@ -2031,8 +2113,7 @@ int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
2031 continue; 2113 continue;
2032 2114
2033 if (!dev->pio_mode) { 2115 if (!dev->pio_mode) {
2034 printk(KERN_WARNING "ata%u: dev %u no PIO support\n", 2116 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
2035 ap->id, dev->devno);
2036 rc = -EINVAL; 2117 rc = -EINVAL;
2037 goto out; 2118 goto out;
2038 } 2119 }
@@ -2063,7 +2144,7 @@ int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
2063 if (!ata_dev_enabled(dev)) 2144 if (!ata_dev_enabled(dev))
2064 continue; 2145 continue;
2065 2146
2066 rc = ata_dev_set_mode(ap, dev); 2147 rc = ata_dev_set_mode(dev);
2067 if (rc) 2148 if (rc)
2068 goto out; 2149 goto out;
2069 } 2150 }
@@ -2131,8 +2212,8 @@ unsigned int ata_busy_sleep (struct ata_port *ap,
2131 } 2212 }
2132 2213
2133 if (status & ATA_BUSY) 2214 if (status & ATA_BUSY)
2134 printk(KERN_WARNING "ata%u is slow to respond, " 2215 ata_port_printk(ap, KERN_WARNING,
2135 "please be patient\n", ap->id); 2216 "port is slow to respond, please be patient\n");
2136 2217
2137 timeout = timer_start + tmout; 2218 timeout = timer_start + tmout;
2138 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) { 2219 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
@@ -2141,8 +2222,8 @@ unsigned int ata_busy_sleep (struct ata_port *ap,
2141 } 2222 }
2142 2223
2143 if (status & ATA_BUSY) { 2224 if (status & ATA_BUSY) {
2144 printk(KERN_ERR "ata%u failed to respond (%lu secs)\n", 2225 ata_port_printk(ap, KERN_ERR, "port failed to respond "
2145 ap->id, tmout / HZ); 2226 "(%lu secs)\n", tmout / HZ);
2146 return 1; 2227 return 1;
2147 } 2228 }
2148 2229
@@ -2235,7 +2316,7 @@ static unsigned int ata_bus_softreset(struct ata_port *ap,
2235 * pulldown resistor. 2316 * pulldown resistor.
2236 */ 2317 */
2237 if (ata_check_status(ap) == 0xFF) { 2318 if (ata_check_status(ap) == 0xFF) {
2238 printk(KERN_ERR "ata%u: SRST failed (status 0xFF)\n", ap->id); 2319 ata_port_printk(ap, KERN_ERR, "SRST failed (status 0xFF)\n");
2239 return AC_ERR_OTHER; 2320 return AC_ERR_OTHER;
2240 } 2321 }
2241 2322
@@ -2329,7 +2410,7 @@ void ata_bus_reset(struct ata_port *ap)
2329 return; 2410 return;
2330 2411
2331err_out: 2412err_out:
2332 printk(KERN_ERR "ata%u: disabling port\n", ap->id); 2413 ata_port_printk(ap, KERN_ERR, "disabling port\n");
2333 ap->ops->port_disable(ap); 2414 ap->ops->port_disable(ap);
2334 2415
2335 DPRINTK("EXIT\n"); 2416 DPRINTK("EXIT\n");
@@ -2339,20 +2420,26 @@ static int sata_phy_resume(struct ata_port *ap)
2339{ 2420{
2340 unsigned long timeout = jiffies + (HZ * 5); 2421 unsigned long timeout = jiffies + (HZ * 5);
2341 u32 scontrol, sstatus; 2422 u32 scontrol, sstatus;
2423 int rc;
2424
2425 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2426 return rc;
2342 2427
2343 scontrol = scr_read(ap, SCR_CONTROL);
2344 scontrol = (scontrol & 0x0f0) | 0x300; 2428 scontrol = (scontrol & 0x0f0) | 0x300;
2345 scr_write_flush(ap, SCR_CONTROL, scontrol); 2429
2430 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2431 return rc;
2346 2432
2347 /* Wait for phy to become ready, if necessary. */ 2433 /* Wait for phy to become ready, if necessary. */
2348 do { 2434 do {
2349 msleep(200); 2435 msleep(200);
2350 sstatus = scr_read(ap, SCR_STATUS); 2436 if ((rc = sata_scr_read(ap, SCR_STATUS, &sstatus)))
2437 return rc;
2351 if ((sstatus & 0xf) != 1) 2438 if ((sstatus & 0xf) != 1)
2352 return 0; 2439 return 0;
2353 } while (time_before(jiffies, timeout)); 2440 } while (time_before(jiffies, timeout));
2354 2441
2355 return -1; 2442 return -EBUSY;
2356} 2443}
2357 2444
2358/** 2445/**
@@ -2370,22 +2457,20 @@ static int sata_phy_resume(struct ata_port *ap)
2370 */ 2457 */
2371void ata_std_probeinit(struct ata_port *ap) 2458void ata_std_probeinit(struct ata_port *ap)
2372{ 2459{
2373 if ((ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read) { 2460 u32 scontrol;
2374 u32 spd;
2375
2376 /* set cable type and resume link */
2377 ap->cbl = ATA_CBL_SATA;
2378 sata_phy_resume(ap);
2379 2461
2380 /* init sata_spd_limit to the current value */ 2462 /* resume link */
2381 spd = (scr_read(ap, SCR_CONTROL) & 0xf0) >> 4; 2463 sata_phy_resume(ap);
2382 if (spd)
2383 ap->sata_spd_limit &= (1 << spd) - 1;
2384 2464
2385 /* wait for device */ 2465 /* init sata_spd_limit to the current value */
2386 if (sata_dev_present(ap)) 2466 if (sata_scr_read(ap, SCR_CONTROL, &scontrol) == 0) {
2387 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT); 2467 int spd = (scontrol >> 4) & 0xf;
2468 ap->sata_spd_limit &= (1 << spd) - 1;
2388 } 2469 }
2470
2471 /* wait for device */
2472 if (ata_port_online(ap))
2473 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2389} 2474}
2390 2475
2391/** 2476/**
@@ -2410,7 +2495,7 @@ int ata_std_softreset(struct ata_port *ap, unsigned int *classes)
2410 2495
2411 DPRINTK("ENTER\n"); 2496 DPRINTK("ENTER\n");
2412 2497
2413 if (ap->ops->scr_read && !sata_dev_present(ap)) { 2498 if (ata_port_offline(ap)) {
2414 classes[0] = ATA_DEV_NONE; 2499 classes[0] = ATA_DEV_NONE;
2415 goto out; 2500 goto out;
2416 } 2501 }
@@ -2428,8 +2513,8 @@ int ata_std_softreset(struct ata_port *ap, unsigned int *classes)
2428 DPRINTK("about to softreset, devmask=%x\n", devmask); 2513 DPRINTK("about to softreset, devmask=%x\n", devmask);
2429 err_mask = ata_bus_softreset(ap, devmask); 2514 err_mask = ata_bus_softreset(ap, devmask);
2430 if (err_mask) { 2515 if (err_mask) {
2431 printk(KERN_ERR "ata%u: SRST failed (err_mask=0x%x)\n", 2516 ata_port_printk(ap, KERN_ERR, "SRST failed (err_mask=0x%x)\n",
2432 ap->id, err_mask); 2517 err_mask);
2433 return -EIO; 2518 return -EIO;
2434 } 2519 }
2435 2520
@@ -2461,26 +2546,35 @@ int ata_std_softreset(struct ata_port *ap, unsigned int *classes)
2461int sata_std_hardreset(struct ata_port *ap, unsigned int *class) 2546int sata_std_hardreset(struct ata_port *ap, unsigned int *class)
2462{ 2547{
2463 u32 scontrol; 2548 u32 scontrol;
2549 int rc;
2464 2550
2465 DPRINTK("ENTER\n"); 2551 DPRINTK("ENTER\n");
2466 2552
2467 if (ata_set_sata_spd_needed(ap)) { 2553 if (sata_set_spd_needed(ap)) {
2468 /* SATA spec says nothing about how to reconfigure 2554 /* SATA spec says nothing about how to reconfigure
2469 * spd. To be on the safe side, turn off phy during 2555 * spd. To be on the safe side, turn off phy during
2470 * reconfiguration. This works for at least ICH7 AHCI 2556 * reconfiguration. This works for at least ICH7 AHCI
2471 * and Sil3124. 2557 * and Sil3124.
2472 */ 2558 */
2473 scontrol = scr_read(ap, SCR_CONTROL); 2559 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2560 return rc;
2561
2474 scontrol = (scontrol & 0x0f0) | 0x302; 2562 scontrol = (scontrol & 0x0f0) | 0x302;
2475 scr_write_flush(ap, SCR_CONTROL, scontrol);
2476 2563
2477 ata_set_sata_spd(ap); 2564 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2565 return rc;
2566
2567 sata_set_spd(ap);
2478 } 2568 }
2479 2569
2480 /* issue phy wake/reset */ 2570 /* issue phy wake/reset */
2481 scontrol = scr_read(ap, SCR_CONTROL); 2571 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2572 return rc;
2573
2482 scontrol = (scontrol & 0x0f0) | 0x301; 2574 scontrol = (scontrol & 0x0f0) | 0x301;
2483 scr_write_flush(ap, SCR_CONTROL, scontrol); 2575
2576 if ((rc = sata_scr_write_flush(ap, SCR_CONTROL, scontrol)))
2577 return rc;
2484 2578
2485 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1 2579 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
2486 * 10.4.2 says at least 1 ms. 2580 * 10.4.2 says at least 1 ms.
@@ -2491,15 +2585,15 @@ int sata_std_hardreset(struct ata_port *ap, unsigned int *class)
2491 sata_phy_resume(ap); 2585 sata_phy_resume(ap);
2492 2586
2493 /* TODO: phy layer with polling, timeouts, etc. */ 2587 /* TODO: phy layer with polling, timeouts, etc. */
2494 if (!sata_dev_present(ap)) { 2588 if (ata_port_offline(ap)) {
2495 *class = ATA_DEV_NONE; 2589 *class = ATA_DEV_NONE;
2496 DPRINTK("EXIT, link offline\n"); 2590 DPRINTK("EXIT, link offline\n");
2497 return 0; 2591 return 0;
2498 } 2592 }
2499 2593
2500 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) { 2594 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
2501 printk(KERN_ERR 2595 ata_port_printk(ap, KERN_ERR,
2502 "ata%u: COMRESET failed (device not ready)\n", ap->id); 2596 "COMRESET failed (device not ready)\n");
2503 return -EIO; 2597 return -EIO;
2504 } 2598 }
2505 2599
@@ -2528,15 +2622,23 @@ int sata_std_hardreset(struct ata_port *ap, unsigned int *class)
2528 */ 2622 */
2529void ata_std_postreset(struct ata_port *ap, unsigned int *classes) 2623void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
2530{ 2624{
2625 u32 serror;
2626
2531 DPRINTK("ENTER\n"); 2627 DPRINTK("ENTER\n");
2532 2628
2533 /* print link status */ 2629 /* print link status */
2534 if (ap->cbl == ATA_CBL_SATA) 2630 sata_print_link_status(ap);
2535 sata_print_link_status(ap); 2631
2632 /* clear SError */
2633 if (sata_scr_read(ap, SCR_ERROR, &serror) == 0)
2634 sata_scr_write(ap, SCR_ERROR, serror);
2536 2635
2537 /* re-enable interrupts */ 2636 /* re-enable interrupts */
2538 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */ 2637 if (!ap->ops->error_handler) {
2539 ata_irq_on(ap); 2638 /* FIXME: hack. create a hook instead */
2639 if (ap->ioaddr.ctl_addr)
2640 ata_irq_on(ap);
2641 }
2540 2642
2541 /* is double-select really necessary? */ 2643 /* is double-select really necessary? */
2542 if (classes[0] != ATA_DEV_NONE) 2644 if (classes[0] != ATA_DEV_NONE)
@@ -2579,7 +2681,7 @@ int ata_std_probe_reset(struct ata_port *ap, unsigned int *classes)
2579 ata_reset_fn_t hardreset; 2681 ata_reset_fn_t hardreset;
2580 2682
2581 hardreset = NULL; 2683 hardreset = NULL;
2582 if (ap->cbl == ATA_CBL_SATA && ap->ops->scr_read) 2684 if (sata_scr_valid(ap))
2583 hardreset = sata_std_hardreset; 2685 hardreset = sata_std_hardreset;
2584 2686
2585 return ata_drive_probe_reset(ap, ata_std_probeinit, 2687 return ata_drive_probe_reset(ap, ata_std_probeinit,
@@ -2588,7 +2690,7 @@ int ata_std_probe_reset(struct ata_port *ap, unsigned int *classes)
2588} 2690}
2589 2691
2590int ata_do_reset(struct ata_port *ap, ata_reset_fn_t reset, 2692int ata_do_reset(struct ata_port *ap, ata_reset_fn_t reset,
2591 ata_postreset_fn_t postreset, unsigned int *classes) 2693 unsigned int *classes)
2592{ 2694{
2593 int i, rc; 2695 int i, rc;
2594 2696
@@ -2612,9 +2714,6 @@ int ata_do_reset(struct ata_port *ap, ata_reset_fn_t reset,
2612 if (classes[i] == ATA_DEV_UNKNOWN) 2714 if (classes[i] == ATA_DEV_UNKNOWN)
2613 classes[i] = ATA_DEV_NONE; 2715 classes[i] = ATA_DEV_NONE;
2614 2716
2615 if (postreset)
2616 postreset(ap, classes);
2617
2618 return 0; 2717 return 0;
2619} 2718}
2620 2719
@@ -2654,15 +2753,17 @@ int ata_drive_probe_reset(struct ata_port *ap, ata_probeinit_fn_t probeinit,
2654{ 2753{
2655 int rc = -EINVAL; 2754 int rc = -EINVAL;
2656 2755
2756 ata_eh_freeze_port(ap);
2757
2657 if (probeinit) 2758 if (probeinit)
2658 probeinit(ap); 2759 probeinit(ap);
2659 2760
2660 if (softreset && !ata_set_sata_spd_needed(ap)) { 2761 if (softreset && !sata_set_spd_needed(ap)) {
2661 rc = ata_do_reset(ap, softreset, postreset, classes); 2762 rc = ata_do_reset(ap, softreset, classes);
2662 if (rc == 0 && classes[0] != ATA_DEV_UNKNOWN) 2763 if (rc == 0 && classes[0] != ATA_DEV_UNKNOWN)
2663 goto done; 2764 goto done;
2664 printk(KERN_INFO "ata%u: softreset failed, will try " 2765 ata_port_printk(ap, KERN_INFO, "softreset failed, "
2665 "hardreset in 5 secs\n", ap->id); 2766 "will try hardreset in 5 secs\n");
2666 ssleep(5); 2767 ssleep(5);
2667 } 2768 }
2668 2769
@@ -2670,39 +2771,45 @@ int ata_drive_probe_reset(struct ata_port *ap, ata_probeinit_fn_t probeinit,
2670 goto done; 2771 goto done;
2671 2772
2672 while (1) { 2773 while (1) {
2673 rc = ata_do_reset(ap, hardreset, postreset, classes); 2774 rc = ata_do_reset(ap, hardreset, classes);
2674 if (rc == 0) { 2775 if (rc == 0) {
2675 if (classes[0] != ATA_DEV_UNKNOWN) 2776 if (classes[0] != ATA_DEV_UNKNOWN)
2676 goto done; 2777 goto done;
2677 break; 2778 break;
2678 } 2779 }
2679 2780
2680 if (ata_down_sata_spd_limit(ap)) 2781 if (sata_down_spd_limit(ap))
2681 goto done; 2782 goto done;
2682 2783
2683 printk(KERN_INFO "ata%u: hardreset failed, will retry " 2784 ata_port_printk(ap, KERN_INFO, "hardreset failed, "
2684 "in 5 secs\n", ap->id); 2785 "will retry in 5 secs\n");
2685 ssleep(5); 2786 ssleep(5);
2686 } 2787 }
2687 2788
2688 if (softreset) { 2789 if (softreset) {
2689 printk(KERN_INFO "ata%u: hardreset succeeded without " 2790 ata_port_printk(ap, KERN_INFO,
2690 "classification, will retry softreset in 5 secs\n", 2791 "hardreset succeeded without classification, "
2691 ap->id); 2792 "will retry softreset in 5 secs\n");
2692 ssleep(5); 2793 ssleep(5);
2693 2794
2694 rc = ata_do_reset(ap, softreset, postreset, classes); 2795 rc = ata_do_reset(ap, softreset, classes);
2695 } 2796 }
2696 2797
2697 done: 2798 done:
2698 if (rc == 0 && classes[0] == ATA_DEV_UNKNOWN) 2799 if (rc == 0) {
2699 rc = -ENODEV; 2800 if (postreset)
2801 postreset(ap, classes);
2802
2803 ata_eh_thaw_port(ap);
2804
2805 if (classes[0] == ATA_DEV_UNKNOWN)
2806 rc = -ENODEV;
2807 }
2700 return rc; 2808 return rc;
2701} 2809}
2702 2810
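Stripped of retries and fallbacks, the control flow above reduces to roughly the following sketch: the port stays frozen for the whole reset sequence, and postreset() now runs in the caller, only on success, instead of inside ata_do_reset().

static int example_probe_reset(struct ata_port *ap, unsigned int *classes)
{
	int rc;

	ata_eh_freeze_port(ap);		/* no completions during reset */

	rc = ata_do_reset(ap, sata_std_hardreset, classes);
	if (rc == 0) {
		ata_std_postreset(ap, classes);
		ata_eh_thaw_port(ap);

		if (classes[0] == ATA_DEV_UNKNOWN)
			rc = -ENODEV;	/* reset OK, nothing classified */
	}
	return rc;
}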
2703/** 2811/**
2704 * ata_dev_same_device - Determine whether new ID matches configured device 2812 * ata_dev_same_device - Determine whether new ID matches configured device
2705 * @ap: port on which the device to compare against resides
2706 * @dev: device to compare against 2813 * @dev: device to compare against
2707 * @new_class: class of the new device 2814 * @new_class: class of the new device
2708 * @new_id: IDENTIFY page of the new device 2815 * @new_id: IDENTIFY page of the new device
@@ -2717,17 +2824,16 @@ int ata_drive_probe_reset(struct ata_port *ap, ata_probeinit_fn_t probeinit,
2717 * RETURNS: 2824 * RETURNS:
2718 * 1 if @dev matches @new_class and @new_id, 0 otherwise. 2825 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
2719 */ 2826 */
2720static int ata_dev_same_device(struct ata_port *ap, struct ata_device *dev, 2827static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
2721 unsigned int new_class, const u16 *new_id) 2828 const u16 *new_id)
2722{ 2829{
2723 const u16 *old_id = dev->id; 2830 const u16 *old_id = dev->id;
2724 unsigned char model[2][41], serial[2][21]; 2831 unsigned char model[2][41], serial[2][21];
2725 u64 new_n_sectors; 2832 u64 new_n_sectors;
2726 2833
2727 if (dev->class != new_class) { 2834 if (dev->class != new_class) {
2728 printk(KERN_INFO 2835 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
2729 "ata%u: dev %u class mismatch %d != %d\n", 2836 dev->class, new_class);
2730 ap->id, dev->devno, dev->class, new_class);
2731 return 0; 2837 return 0;
2732 } 2838 }
2733 2839
@@ -2738,24 +2844,22 @@ static int ata_dev_same_device(struct ata_port *ap, struct ata_device *dev,
2738 new_n_sectors = ata_id_n_sectors(new_id); 2844 new_n_sectors = ata_id_n_sectors(new_id);
2739 2845
2740 if (strcmp(model[0], model[1])) { 2846 if (strcmp(model[0], model[1])) {
2741 printk(KERN_INFO 2847 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
2742 "ata%u: dev %u model number mismatch '%s' != '%s'\n", 2848 "'%s' != '%s'\n", model[0], model[1]);
2743 ap->id, dev->devno, model[0], model[1]);
2744 return 0; 2849 return 0;
2745 } 2850 }
2746 2851
2747 if (strcmp(serial[0], serial[1])) { 2852 if (strcmp(serial[0], serial[1])) {
2748 printk(KERN_INFO 2853 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
2749 "ata%u: dev %u serial number mismatch '%s' != '%s'\n", 2854 "'%s' != '%s'\n", serial[0], serial[1]);
2750 ap->id, dev->devno, serial[0], serial[1]);
2751 return 0; 2855 return 0;
2752 } 2856 }
2753 2857
2754 if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) { 2858 if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) {
2755 printk(KERN_INFO 2859 ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
2756 "ata%u: dev %u n_sectors mismatch %llu != %llu\n", 2860 "%llu != %llu\n",
2757 ap->id, dev->devno, (unsigned long long)dev->n_sectors, 2861 (unsigned long long)dev->n_sectors,
2758 (unsigned long long)new_n_sectors); 2862 (unsigned long long)new_n_sectors);
2759 return 0; 2863 return 0;
2760 } 2864 }
2761 2865
@@ -2764,7 +2868,6 @@ static int ata_dev_same_device(struct ata_port *ap, struct ata_device *dev,
2764 2868
2765/** 2869/**
2766 * ata_dev_revalidate - Revalidate ATA device 2870 * ata_dev_revalidate - Revalidate ATA device
2767 * @ap: port on which the device to revalidate resides
2768 * @dev: device to revalidate 2871 * @dev: device to revalidate
2769 * @post_reset: is this revalidation after reset? 2872 * @post_reset: is this revalidation after reset?
2770 * 2873 *
@@ -2777,11 +2880,10 @@ static int ata_dev_same_device(struct ata_port *ap, struct ata_device *dev,
2777 * RETURNS: 2880 * RETURNS:
2778 * 0 on success, negative errno otherwise 2881 * 0 on success, negative errno otherwise
2779 */ 2882 */
2780int ata_dev_revalidate(struct ata_port *ap, struct ata_device *dev, 2883int ata_dev_revalidate(struct ata_device *dev, int post_reset)
2781 int post_reset)
2782{ 2884{
2783 unsigned int class = dev->class; 2885 unsigned int class = dev->class;
2784 u16 *id = NULL; 2886 u16 *id = (void *)dev->ap->sector_buf;
2785 int rc; 2887 int rc;
2786 2888
2787 if (!ata_dev_enabled(dev)) { 2889 if (!ata_dev_enabled(dev)) {
@@ -2789,29 +2891,26 @@ int ata_dev_revalidate(struct ata_port *ap, struct ata_device *dev,
2789 goto fail; 2891 goto fail;
2790 } 2892 }
2791 2893
2792 /* allocate & read ID data */ 2894 /* read ID data */
2793 rc = ata_dev_read_id(ap, dev, &class, post_reset, &id); 2895 rc = ata_dev_read_id(dev, &class, post_reset, id);
2794 if (rc) 2896 if (rc)
2795 goto fail; 2897 goto fail;
2796 2898
2797 /* is the device still there? */ 2899 /* is the device still there? */
2798 if (!ata_dev_same_device(ap, dev, class, id)) { 2900 if (!ata_dev_same_device(dev, class, id)) {
2799 rc = -ENODEV; 2901 rc = -ENODEV;
2800 goto fail; 2902 goto fail;
2801 } 2903 }
2802 2904
2803 kfree(dev->id); 2905 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
2804 dev->id = id;
2805 2906
2806 /* configure device according to the new ID */ 2907 /* configure device according to the new ID */
2807 rc = ata_dev_configure(ap, dev, 0); 2908 rc = ata_dev_configure(dev, 0);
2808 if (rc == 0) 2909 if (rc == 0)
2809 return 0; 2910 return 0;
2810 2911
2811 fail: 2912 fail:
2812 printk(KERN_ERR "ata%u: dev %u revalidation failed (errno=%d)\n", 2913 ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
2813 ap->id, dev->devno, rc);
2814 kfree(id);
2815 return rc; 2914 return rc;
2816} 2915}
2817 2916
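Revalidation is now allocation-free: IDENTIFY data lands in the port's preallocated sector_buf and is copied over dev->id only once it is known to describe the same device. The happy path condensed into a sketch (ata_dev_same_device() is static above, so this is illustrative only):

static int example_revalidate(struct ata_device *dev, int post_reset)
{
	u16 *id = (void *)dev->ap->sector_buf;	/* per-port scratch buffer */
	unsigned int class = dev->class;
	int rc;

	rc = ata_dev_read_id(dev, &class, post_reset, id);
	if (rc)
		return rc;

	if (!ata_dev_same_device(dev, class, id))
		return -ENODEV;

	memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
	return ata_dev_configure(dev, 0);
}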
@@ -2887,7 +2986,6 @@ static int ata_dma_blacklisted(const struct ata_device *dev)
2887 2986
2888/** 2987/**
2889 * ata_dev_xfermask - Compute supported xfermask of the given device 2988 * ata_dev_xfermask - Compute supported xfermask of the given device
2890 * @ap: Port on which the device to compute xfermask for resides
2891 * @dev: Device to compute xfermask for 2989 * @dev: Device to compute xfermask for
2892 * 2990 *
2893 * Compute supported xfermask of @dev and store it in 2991 * Compute supported xfermask of @dev and store it in
@@ -2902,8 +3000,9 @@ static int ata_dma_blacklisted(const struct ata_device *dev)
2902 * LOCKING: 3000 * LOCKING:
2903 * None. 3001 * None.
2904 */ 3002 */
2905static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev) 3003static void ata_dev_xfermask(struct ata_device *dev)
2906{ 3004{
3005 struct ata_port *ap = dev->ap;
2907 struct ata_host_set *hs = ap->host_set; 3006 struct ata_host_set *hs = ap->host_set;
2908 unsigned long xfer_mask; 3007 unsigned long xfer_mask;
2909 int i; 3008 int i;
@@ -2939,8 +3038,8 @@ static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev)
2939 } 3038 }
2940 3039
2941 if (ata_dma_blacklisted(dev)) 3040 if (ata_dma_blacklisted(dev))
2942 printk(KERN_WARNING "ata%u: dev %u is on DMA blacklist, " 3041 ata_dev_printk(dev, KERN_WARNING,
2943 "disabling DMA\n", ap->id, dev->devno); 3042 "device is on DMA blacklist, disabling DMA\n");
2944 3043
2945 if (hs->flags & ATA_HOST_SIMPLEX) { 3044 if (hs->flags & ATA_HOST_SIMPLEX) {
2946 if (hs->simplex_claimed) 3045 if (hs->simplex_claimed)
@@ -2956,7 +3055,6 @@ static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev)
2956 3055
2957/** 3056/**
2958 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command 3057 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
2959 * @ap: Port associated with device @dev
2960 * @dev: Device to which command will be sent 3058 * @dev: Device to which command will be sent
2961 * 3059 *
2962 * Issue SET FEATURES - XFER MODE command to device @dev 3060 * Issue SET FEATURES - XFER MODE command to device @dev
@@ -2969,8 +3067,7 @@ static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev)
2969 * 0 on success, AC_ERR_* mask otherwise. 3067 * 0 on success, AC_ERR_* mask otherwise.
2970 */ 3068 */
2971 3069
2972static unsigned int ata_dev_set_xfermode(struct ata_port *ap, 3070static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
2973 struct ata_device *dev)
2974{ 3071{
2975 struct ata_taskfile tf; 3072 struct ata_taskfile tf;
2976 unsigned int err_mask; 3073 unsigned int err_mask;
@@ -2978,14 +3075,14 @@ static unsigned int ata_dev_set_xfermode(struct ata_port *ap,
2978 /* set up set-features taskfile */ 3075 /* set up set-features taskfile */
2979 DPRINTK("set features - xfer mode\n"); 3076 DPRINTK("set features - xfer mode\n");
2980 3077
2981 ata_tf_init(ap, &tf, dev->devno); 3078 ata_tf_init(dev, &tf);
2982 tf.command = ATA_CMD_SET_FEATURES; 3079 tf.command = ATA_CMD_SET_FEATURES;
2983 tf.feature = SETFEATURES_XFER; 3080 tf.feature = SETFEATURES_XFER;
2984 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 3081 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2985 tf.protocol = ATA_PROT_NODATA; 3082 tf.protocol = ATA_PROT_NODATA;
2986 tf.nsect = dev->xfer_mode; 3083 tf.nsect = dev->xfer_mode;
2987 3084
2988 err_mask = ata_exec_internal(ap, dev, &tf, NULL, DMA_NONE, NULL, 0); 3085 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
2989 3086
2990 DPRINTK("EXIT, err_mask=%x\n", err_mask); 3087 DPRINTK("EXIT, err_mask=%x\n", err_mask);
2991 return err_mask; 3088 return err_mask;
@@ -2993,8 +3090,9 @@ static unsigned int ata_dev_set_xfermode(struct ata_port *ap,
2993 3090
2994/** 3091/**
2995 * ata_dev_init_params - Issue INIT DEV PARAMS command 3092 * ata_dev_init_params - Issue INIT DEV PARAMS command
2996 * @ap: Port associated with device @dev
2997 * @dev: Device to which command will be sent 3093 * @dev: Device to which command will be sent
3094 * @heads: Number of heads
3095 * @sectors: Number of sectors
2998 * 3096 *
2999 * LOCKING: 3097 * LOCKING:
3000 * Kernel thread context (may sleep) 3098 * Kernel thread context (may sleep)
@@ -3002,11 +3100,8 @@ static unsigned int ata_dev_set_xfermode(struct ata_port *ap,
3002 * RETURNS: 3100 * RETURNS:
3003 * 0 on success, AC_ERR_* mask otherwise. 3101 * 0 on success, AC_ERR_* mask otherwise.
3004 */ 3102 */
3005 3103static unsigned int ata_dev_init_params(struct ata_device *dev,
3006static unsigned int ata_dev_init_params(struct ata_port *ap, 3104 u16 heads, u16 sectors)
3007 struct ata_device *dev,
3008 u16 heads,
3009 u16 sectors)
3010{ 3105{
3011 struct ata_taskfile tf; 3106 struct ata_taskfile tf;
3012 unsigned int err_mask; 3107 unsigned int err_mask;
@@ -3018,14 +3113,14 @@ static unsigned int ata_dev_init_params(struct ata_port *ap,
3018 /* set up init dev params taskfile */ 3113 /* set up init dev params taskfile */
3019 DPRINTK("init dev params \n"); 3114 DPRINTK("init dev params \n");
3020 3115
3021 ata_tf_init(ap, &tf, dev->devno); 3116 ata_tf_init(dev, &tf);
3022 tf.command = ATA_CMD_INIT_DEV_PARAMS; 3117 tf.command = ATA_CMD_INIT_DEV_PARAMS;
3023 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 3118 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3024 tf.protocol = ATA_PROT_NODATA; 3119 tf.protocol = ATA_PROT_NODATA;
3025 tf.nsect = sectors; 3120 tf.nsect = sectors;
3026 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */ 3121 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
3027 3122
3028 err_mask = ata_exec_internal(ap, dev, &tf, NULL, DMA_NONE, NULL, 0); 3123 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
3029 3124
3030 DPRINTK("EXIT, err_mask=%x\n", err_mask); 3125 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3031 return err_mask; 3126 return err_mask;
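With @ap gone, both helpers above share the same device-centric internal-command skeleton; as a sketch:

static unsigned int example_nodata_command(struct ata_device *dev, u8 cmd)
{
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);		/* dev->ap now supplies the port */
	tf.command = cmd;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;

	return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
}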
@@ -3163,6 +3258,15 @@ int ata_check_atapi_dma(struct ata_queued_cmd *qc)
3163 if (ap->ops->check_atapi_dma) 3258 if (ap->ops->check_atapi_dma)
3164 rc = ap->ops->check_atapi_dma(qc); 3259 rc = ap->ops->check_atapi_dma(qc);
3165 3260
3261 /* We don't support polling DMA.
3262 * Use PIO if the LLDD handles only interrupts in
3263 * the HSM_ST_LAST state and the ATAPI device
3264 * generates CDB interrupts.
3265 */
3266 if ((ap->flags & ATA_FLAG_PIO_POLLING) &&
3267 (qc->dev->flags & ATA_DFLAG_CDB_INTR))
3268 rc = 1;
3269
3166 return rc; 3270 return rc;
3167} 3271}
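The new test above, expressed as a standalone predicate (names as in this patch): when the LLDD polls instead of taking DRQ interrupts, yet the ATAPI device interrupts for its CDB, the command must fall back to PIO because that CDB interrupt would go unserviced.

static inline int example_atapi_dma_forbidden(const struct ata_port *ap,
					      const struct ata_queued_cmd *qc)
{
	return (ap->flags & ATA_FLAG_PIO_POLLING) &&
	       (qc->dev->flags & ATA_DFLAG_CDB_INTR);
}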
3168/** 3272/**
@@ -3391,126 +3495,6 @@ skip_map:
3391} 3495}
3392 3496
3393/** 3497/**
3394 * ata_poll_qc_complete - turn irq back on and finish qc
3395 * @qc: Command to complete
3396 * @err_mask: ATA status register content
3397 *
3398 * LOCKING:
3399 * None. (grabs host lock)
3400 */
3401
3402void ata_poll_qc_complete(struct ata_queued_cmd *qc)
3403{
3404 struct ata_port *ap = qc->ap;
3405 unsigned long flags;
3406
3407 spin_lock_irqsave(&ap->host_set->lock, flags);
3408 ap->flags &= ~ATA_FLAG_NOINTR;
3409 ata_irq_on(ap);
3410 ata_qc_complete(qc);
3411 spin_unlock_irqrestore(&ap->host_set->lock, flags);
3412}
3413
3414/**
3415 * ata_pio_poll - poll using PIO, depending on current state
3416 * @qc: qc in progress
3417 *
3418 * LOCKING:
3419 * None. (executing in kernel thread context)
3420 *
3421 * RETURNS:
3422 * timeout value to use
3423 */
3424static unsigned long ata_pio_poll(struct ata_queued_cmd *qc)
3425{
3426 struct ata_port *ap = qc->ap;
3427 u8 status;
3428 unsigned int poll_state = HSM_ST_UNKNOWN;
3429 unsigned int reg_state = HSM_ST_UNKNOWN;
3430
3431 switch (ap->hsm_task_state) {
3432 case HSM_ST:
3433 case HSM_ST_POLL:
3434 poll_state = HSM_ST_POLL;
3435 reg_state = HSM_ST;
3436 break;
3437 case HSM_ST_LAST:
3438 case HSM_ST_LAST_POLL:
3439 poll_state = HSM_ST_LAST_POLL;
3440 reg_state = HSM_ST_LAST;
3441 break;
3442 default:
3443 BUG();
3444 break;
3445 }
3446
3447 status = ata_chk_status(ap);
3448 if (status & ATA_BUSY) {
3449 if (time_after(jiffies, ap->pio_task_timeout)) {
3450 qc->err_mask |= AC_ERR_TIMEOUT;
3451 ap->hsm_task_state = HSM_ST_TMOUT;
3452 return 0;
3453 }
3454 ap->hsm_task_state = poll_state;
3455 return ATA_SHORT_PAUSE;
3456 }
3457
3458 ap->hsm_task_state = reg_state;
3459 return 0;
3460}
3461
3462/**
3463 * ata_pio_complete - check if drive is busy or idle
3464 * @qc: qc to complete
3465 *
3466 * LOCKING:
3467 * None. (executing in kernel thread context)
3468 *
3469 * RETURNS:
3470 * Non-zero if qc completed, zero otherwise.
3471 */
3472static int ata_pio_complete(struct ata_queued_cmd *qc)
3473{
3474 struct ata_port *ap = qc->ap;
3475 u8 drv_stat;
3476
3477 /*
3478 * This is purely heuristic. This is a fast path. Sometimes when
3479 * we enter, BSY will be cleared in a chk-status or two. If not,
3480 * the drive is probably seeking or something. Snooze for a couple
3481 * msecs, then chk-status again. If still busy, fall back to
3482 * HSM_ST_POLL state.
3483 */
3484 drv_stat = ata_busy_wait(ap, ATA_BUSY, 10);
3485 if (drv_stat & ATA_BUSY) {
3486 msleep(2);
3487 drv_stat = ata_busy_wait(ap, ATA_BUSY, 10);
3488 if (drv_stat & ATA_BUSY) {
3489 ap->hsm_task_state = HSM_ST_LAST_POLL;
3490 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
3491 return 0;
3492 }
3493 }
3494
3495 drv_stat = ata_wait_idle(ap);
3496 if (!ata_ok(drv_stat)) {
3497 qc->err_mask |= __ac_err_mask(drv_stat);
3498 ap->hsm_task_state = HSM_ST_ERR;
3499 return 0;
3500 }
3501
3502 ap->hsm_task_state = HSM_ST_IDLE;
3503
3504 WARN_ON(qc->err_mask);
3505 ata_poll_qc_complete(qc);
3506
3507 /* another command may start at this point */
3508
3509 return 1;
3510}
3511
3512
3513/**
3514 * swap_buf_le16 - swap halves of 16-bit words in place 3498 * swap_buf_le16 - swap halves of 16-bit words in place
3515 * @buf: Buffer to swap 3499 * @buf: Buffer to swap
3516 * @buf_words: Number of 16-bit words in buffer. 3500 * @buf_words: Number of 16-bit words in buffer.
@@ -3678,7 +3662,23 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
3678 page = nth_page(page, (offset >> PAGE_SHIFT)); 3662 page = nth_page(page, (offset >> PAGE_SHIFT));
3679 offset %= PAGE_SIZE; 3663 offset %= PAGE_SIZE;
3680 3664
3681 buf = kmap(page) + offset; 3665 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3666
3667 if (PageHighMem(page)) {
3668 unsigned long flags;
3669
3670 local_irq_save(flags);
3671 buf = kmap_atomic(page, KM_IRQ0);
3672
3673 /* do the actual data transfer */
3674 ata_data_xfer(ap, buf + offset, ATA_SECT_SIZE, do_write);
3675
3676 kunmap_atomic(buf, KM_IRQ0);
3677 local_irq_restore(flags);
3678 } else {
3679 buf = page_address(page);
3680 ata_data_xfer(ap, buf + offset, ATA_SECT_SIZE, do_write);
3681 }
3682 3682
3683 qc->cursect++; 3683 qc->cursect++;
3684 qc->cursg_ofs++; 3684 qc->cursg_ofs++;
@@ -3687,14 +3687,68 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
3687 qc->cursg++; 3687 qc->cursg++;
3688 qc->cursg_ofs = 0; 3688 qc->cursg_ofs = 0;
3689 } 3689 }
3690}
3690 3691
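The same highmem-aware transfer appears again in __atapi_pio_bytes() below; factored out, it would look like this sketch. kmap_atomic(KM_IRQ0) of this era must run with interrupts disabled, hence the local_irq_save() pair.

static void example_xfer_page(struct ata_port *ap, struct page *page,
			      unsigned int offset, unsigned int count,
			      int do_write)
{
	unsigned char *buf;

	if (PageHighMem(page)) {
		unsigned long flags;

		local_irq_save(flags);
		buf = kmap_atomic(page, KM_IRQ0);
		ata_data_xfer(ap, buf + offset, count, do_write);
		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		/* lowmem pages are permanently mapped */
		buf = page_address(page);
		ata_data_xfer(ap, buf + offset, count, do_write);
	}
}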
3691 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read"); 3692/**
3693 * ata_pio_sectors - Transfer one or many 512-byte sectors.
3694 * @qc: Command on going
3695 *
 3696 * Transfer one or more ATA_SECT_SIZE-byte sectors of data from/to
 3697 * the ATA device for the DRQ request.
3698 *
3699 * LOCKING:
3700 * Inherited from caller.
3701 */
3692 3702
3693 /* do the actual data transfer */ 3703static void ata_pio_sectors(struct ata_queued_cmd *qc)
3694 do_write = (qc->tf.flags & ATA_TFLAG_WRITE); 3704{
3695 ata_data_xfer(ap, buf, ATA_SECT_SIZE, do_write); 3705 if (is_multi_taskfile(&qc->tf)) {
3706 /* READ/WRITE MULTIPLE */
3707 unsigned int nsect;
3696 3708
3697 kunmap(page); 3709 WARN_ON(qc->dev->multi_count == 0);
3710
3711 nsect = min(qc->nsect - qc->cursect, qc->dev->multi_count);
3712 while (nsect--)
3713 ata_pio_sector(qc);
3714 } else
3715 ata_pio_sector(qc);
3716}
3717
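The per-DRQ sector count above is simply "whatever remains, capped at multi_count"; in isolation:

static unsigned int example_drq_block_sectors(unsigned int nsect,
					      unsigned int cursect,
					      unsigned int multi_count)
{
	unsigned int remaining = nsect - cursect;

	/* READ/WRITE MULTIPLE moves up to multi_count sectors per DRQ */
	return remaining < multi_count ? remaining : multi_count;
}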
3718/**
3719 * atapi_send_cdb - Write CDB bytes to hardware
3720 * @ap: Port to which ATAPI device is attached.
3721 * @qc: Taskfile currently active
3722 *
3723 * When device has indicated its readiness to accept
3724 * a CDB, this function is called. Send the CDB.
3725 *
3726 * LOCKING:
3727 * caller.
3728 */
3729
3730static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
3731{
3732 /* send SCSI cdb */
3733 DPRINTK("send cdb\n");
3734 WARN_ON(qc->dev->cdb_len < 12);
3735
3736 ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);
3737 ata_altstatus(ap); /* flush */
3738
3739 switch (qc->tf.protocol) {
3740 case ATA_PROT_ATAPI:
3741 ap->hsm_task_state = HSM_ST;
3742 break;
3743 case ATA_PROT_ATAPI_NODATA:
3744 ap->hsm_task_state = HSM_ST_LAST;
3745 break;
3746 case ATA_PROT_ATAPI_DMA:
3747 ap->hsm_task_state = HSM_ST_LAST;
3748 /* initiate bmdma */
3749 ap->ops->bmdma_start(qc);
3750 break;
3751 }
3698} 3752}
3699 3753
3700/** 3754/**
@@ -3735,8 +3789,8 @@ next_sg:
3735 unsigned int i; 3789 unsigned int i;
3736 3790
3737 if (words) /* warning if bytes > 1 */ 3791 if (words) /* warning if bytes > 1 */
3738 printk(KERN_WARNING "ata%u: %u bytes trailing data\n", 3792 ata_dev_printk(qc->dev, KERN_WARNING,
3739 ap->id, bytes); 3793 "%u bytes trailing data\n", bytes);
3740 3794
3741 for (i = 0; i < words; i++) 3795 for (i = 0; i < words; i++)
3742 ata_data_xfer(ap, (unsigned char*)pad_buf, 2, do_write); 3796 ata_data_xfer(ap, (unsigned char*)pad_buf, 2, do_write);
@@ -3760,7 +3814,23 @@ next_sg:
3760 /* don't cross page boundaries */ 3814 /* don't cross page boundaries */
3761 count = min(count, (unsigned int)PAGE_SIZE - offset); 3815 count = min(count, (unsigned int)PAGE_SIZE - offset);
3762 3816
3763 buf = kmap(page) + offset; 3817 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3818
3819 if (PageHighMem(page)) {
3820 unsigned long flags;
3821
3822 local_irq_save(flags);
3823 buf = kmap_atomic(page, KM_IRQ0);
3824
3825 /* do the actual data transfer */
3826 ata_data_xfer(ap, buf + offset, count, do_write);
3827
3828 kunmap_atomic(buf, KM_IRQ0);
3829 local_irq_restore(flags);
3830 } else {
3831 buf = page_address(page);
3832 ata_data_xfer(ap, buf + offset, count, do_write);
3833 }
3764 3834
3765 bytes -= count; 3835 bytes -= count;
3766 qc->curbytes += count; 3836 qc->curbytes += count;
@@ -3771,13 +3841,6 @@ next_sg:
3771 qc->cursg_ofs = 0; 3841 qc->cursg_ofs = 0;
3772 } 3842 }
3773 3843
3774 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3775
3776 /* do the actual data transfer */
3777 ata_data_xfer(ap, buf, count, do_write);
3778
3779 kunmap(page);
3780
3781 if (bytes) 3844 if (bytes)
3782 goto next_sg; 3845 goto next_sg;
3783} 3846}
@@ -3814,199 +3877,347 @@ static void atapi_pio_bytes(struct ata_queued_cmd *qc)
3814 if (do_write != i_write) 3877 if (do_write != i_write)
3815 goto err_out; 3878 goto err_out;
3816 3879
 3880 VPRINTK("ata%u: transferring %d bytes\n", ap->id, bytes);
3881
3817 __atapi_pio_bytes(qc, bytes); 3882 __atapi_pio_bytes(qc, bytes);
3818 3883
3819 return; 3884 return;
3820 3885
3821err_out: 3886err_out:
3822 printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n", 3887 ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
3823 ap->id, dev->devno);
3824 qc->err_mask |= AC_ERR_HSM; 3888 qc->err_mask |= AC_ERR_HSM;
3825 ap->hsm_task_state = HSM_ST_ERR; 3889 ap->hsm_task_state = HSM_ST_ERR;
3826} 3890}
3827 3891
3828/** 3892/**
3829 * ata_pio_block - start PIO on a block 3893 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
3830 * @qc: qc to transfer block for 3894 * @ap: the target ata_port
3895 * @qc: qc on going
3896 *
3897 * RETURNS:
3898 * 1 if ok in workqueue, 0 otherwise.
3899 */
3900
3901static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
3902{
3903 if (qc->tf.flags & ATA_TFLAG_POLLING)
3904 return 1;
3905
3906 if (ap->hsm_task_state == HSM_ST_FIRST) {
3907 if (qc->tf.protocol == ATA_PROT_PIO &&
3908 (qc->tf.flags & ATA_TFLAG_WRITE))
3909 return 1;
3910
3911 if (is_atapi_taskfile(&qc->tf) &&
3912 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
3913 return 1;
3914 }
3915
3916 return 0;
3917}
3918
3919/**
3920 * ata_hsm_qc_complete - finish a qc running on standard HSM
3921 * @qc: Command to complete
3922 * @in_wq: 1 if called from workqueue, 0 otherwise
3923 *
3924 * Finish @qc which is running on standard HSM.
3831 * 3925 *
3832 * LOCKING: 3926 * LOCKING:
3833 * None. (executing in kernel thread context) 3927 * If @in_wq is zero, spin_lock_irqsave(host_set lock).
3928 * Otherwise, none on entry and grabs host lock.
3834 */ 3929 */
3835static void ata_pio_block(struct ata_queued_cmd *qc) 3930static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
3836{ 3931{
3837 struct ata_port *ap = qc->ap; 3932 struct ata_port *ap = qc->ap;
3838 u8 status; 3933 unsigned long flags;
3839 3934
3840 /* 3935 if (ap->ops->error_handler) {
3841 * This is purely heuristic. This is a fast path. 3936 if (in_wq) {
3842 * Sometimes when we enter, BSY will be cleared in 3937 spin_lock_irqsave(&ap->host_set->lock, flags);
3843 * a chk-status or two. If not, the drive is probably seeking
3844 * or something. Snooze for a couple msecs, then
3845 * chk-status again. If still busy, fall back to
3846 * HSM_ST_POLL state.
3847 */
3848 status = ata_busy_wait(ap, ATA_BUSY, 5);
3849 if (status & ATA_BUSY) {
3850 msleep(2);
3851 status = ata_busy_wait(ap, ATA_BUSY, 10);
3852 if (status & ATA_BUSY) {
3853 ap->hsm_task_state = HSM_ST_POLL;
3854 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
3855 return;
3856 }
3857 }
3858 3938
3859 /* check error */ 3939 /* EH might have kicked in while host_set lock
3860 if (status & (ATA_ERR | ATA_DF)) { 3940 * is released.
3861 qc->err_mask |= AC_ERR_DEV; 3941 */
3862 ap->hsm_task_state = HSM_ST_ERR; 3942 qc = ata_qc_from_tag(ap, qc->tag);
3863 return; 3943 if (qc) {
3864 } 3944 if (likely(!(qc->err_mask & AC_ERR_HSM))) {
3945 ata_irq_on(ap);
3946 ata_qc_complete(qc);
3947 } else
3948 ata_port_freeze(ap);
3949 }
3865 3950
3866 /* transfer data if any */ 3951 spin_unlock_irqrestore(&ap->host_set->lock, flags);
3867 if (is_atapi_taskfile(&qc->tf)) { 3952 } else {
3868 /* DRQ=0 means no more data to transfer */ 3953 if (likely(!(qc->err_mask & AC_ERR_HSM)))
3869 if ((status & ATA_DRQ) == 0) { 3954 ata_qc_complete(qc);
3870 ap->hsm_task_state = HSM_ST_LAST; 3955 else
3871 return; 3956 ata_port_freeze(ap);
3872 } 3957 }
3873
3874 atapi_pio_bytes(qc);
3875 } else { 3958 } else {
3876 /* handle BSY=0, DRQ=0 as error */ 3959 if (in_wq) {
3877 if ((status & ATA_DRQ) == 0) { 3960 spin_lock_irqsave(&ap->host_set->lock, flags);
3878 qc->err_mask |= AC_ERR_HSM; 3961 ata_irq_on(ap);
3879 ap->hsm_task_state = HSM_ST_ERR; 3962 ata_qc_complete(qc);
3880 return; 3963 spin_unlock_irqrestore(&ap->host_set->lock, flags);
3881 } 3964 } else
3882 3965 ata_qc_complete(qc);
3883 ata_pio_sector(qc);
3884 } 3966 }
3885} 3967}
3886 3968
3887static void ata_pio_error(struct ata_queued_cmd *qc) 3969/**
3970 * ata_hsm_move - move the HSM to the next state.
3971 * @ap: the target ata_port
3972 * @qc: qc on going
3973 * @status: current device status
3974 * @in_wq: 1 if called from workqueue, 0 otherwise
3975 *
3976 * RETURNS:
3977 * 1 when poll next status needed, 0 otherwise.
3978 */
3979
3980static int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
3981 u8 status, int in_wq)
3888{ 3982{
3889 struct ata_port *ap = qc->ap; 3983 unsigned long flags = 0;
3984 int poll_next;
3890 3985
3891 if (qc->tf.command != ATA_CMD_PACKET) 3986 WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
3892 printk(KERN_WARNING "ata%u: dev %u PIO error\n",
3893 ap->id, qc->dev->devno);
3894 3987
3895 /* make sure qc->err_mask is available to 3988 /* Make sure ata_qc_issue_prot() does not throw things
3896 * know what's wrong and recover 3989 * like DMA polling into the workqueue. Notice that
3990 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
3897 */ 3991 */
3898 WARN_ON(qc->err_mask == 0); 3992 WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
3899 3993
3900 ap->hsm_task_state = HSM_ST_IDLE; 3994fsm_start:
3995 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
3996 ap->id, qc->tf.protocol, ap->hsm_task_state, status);
3901 3997
3902 ata_poll_qc_complete(qc); 3998 switch (ap->hsm_task_state) {
3903} 3999 case HSM_ST_FIRST:
4000 /* Send first data block or PACKET CDB */
3904 4001
3905static void ata_pio_task(void *_data) 4002 /* If polling, we will stay in the work queue after
3906{ 4003 * sending the data. Otherwise, interrupt handler
3907 struct ata_queued_cmd *qc = _data; 4004 * takes over after sending the data.
3908 struct ata_port *ap = qc->ap; 4005 */
3909 unsigned long timeout; 4006 poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
3910 int qc_completed;
3911 4007
3912fsm_start: 4008 /* check device status */
3913 timeout = 0; 4009 if (unlikely((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ)) {
3914 qc_completed = 0; 4010 /* Wrong status. Let EH handle this */
4011 qc->err_mask |= AC_ERR_HSM;
4012 ap->hsm_task_state = HSM_ST_ERR;
4013 goto fsm_start;
4014 }
3915 4015
3916 switch (ap->hsm_task_state) { 4016 /* Device should not ask for data transfer (DRQ=1)
3917 case HSM_ST_IDLE: 4017 * when it finds something wrong.
3918 return; 4018 * We ignore DRQ here and stop the HSM by
4019 * changing hsm_task_state to HSM_ST_ERR and
4020 * let the EH abort the command or reset the device.
4021 */
4022 if (unlikely(status & (ATA_ERR | ATA_DF))) {
 4023 printk(KERN_WARNING "ata%u: DRQ=1 with device error, dev_stat 0x%X\n",
4024 ap->id, status);
4025 qc->err_mask |= AC_ERR_DEV;
4026 ap->hsm_task_state = HSM_ST_ERR;
4027 goto fsm_start;
4028 }
4029
4030 /* Send the CDB (atapi) or the first data block (ata pio out).
4031 * During the state transition, interrupt handler shouldn't
4032 * be invoked before the data transfer is complete and
4033 * hsm_task_state is changed. Hence, the following locking.
4034 */
4035 if (in_wq)
4036 spin_lock_irqsave(&ap->host_set->lock, flags);
4037
4038 if (qc->tf.protocol == ATA_PROT_PIO) {
4039 /* PIO data out protocol.
4040 * send first data block.
4041 */
4042
4043 /* ata_pio_sectors() might change the state
4044 * to HSM_ST_LAST. so, the state is changed here
4045 * before ata_pio_sectors().
4046 */
4047 ap->hsm_task_state = HSM_ST;
4048 ata_pio_sectors(qc);
4049 ata_altstatus(ap); /* flush */
4050 } else
4051 /* send CDB */
4052 atapi_send_cdb(ap, qc);
4053
4054 if (in_wq)
4055 spin_unlock_irqrestore(&ap->host_set->lock, flags);
4056
4057 /* if polling, ata_pio_task() handles the rest.
4058 * otherwise, interrupt handler takes over from here.
4059 */
4060 break;
3919 4061
3920 case HSM_ST: 4062 case HSM_ST:
3921 ata_pio_block(qc); 4063 /* complete command or read/write the data register */
4064 if (qc->tf.protocol == ATA_PROT_ATAPI) {
4065 /* ATAPI PIO protocol */
4066 if ((status & ATA_DRQ) == 0) {
4067 /* no more data to transfer */
4068 ap->hsm_task_state = HSM_ST_LAST;
4069 goto fsm_start;
4070 }
4071
4072 /* Device should not ask for data transfer (DRQ=1)
4073 * when it finds something wrong.
4074 * We ignore DRQ here and stop the HSM by
4075 * changing hsm_task_state to HSM_ST_ERR and
4076 * let the EH abort the command or reset the device.
4077 */
4078 if (unlikely(status & (ATA_ERR | ATA_DF))) {
 4079 printk(KERN_WARNING "ata%u: DRQ=1 with device error, dev_stat 0x%X\n",
4080 ap->id, status);
4081 qc->err_mask |= AC_ERR_DEV;
4082 ap->hsm_task_state = HSM_ST_ERR;
4083 goto fsm_start;
4084 }
4085
4086 atapi_pio_bytes(qc);
4087
4088 if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
4089 /* bad ireason reported by device */
4090 goto fsm_start;
4091
4092 } else {
4093 /* ATA PIO protocol */
4094 if (unlikely((status & ATA_DRQ) == 0)) {
4095 /* handle BSY=0, DRQ=0 as error */
4096 qc->err_mask |= AC_ERR_HSM;
4097 ap->hsm_task_state = HSM_ST_ERR;
4098 goto fsm_start;
4099 }
4100
4101 /* For PIO reads, some devices may ask for
 4102 * data transfer (DRQ=1) along with ERR=1.
4103 * We respect DRQ here and transfer one
4104 * block of junk data before changing the
4105 * hsm_task_state to HSM_ST_ERR.
4106 *
4107 * For PIO writes, ERR=1 DRQ=1 doesn't make
4108 * sense since the data block has been
4109 * transferred to the device.
4110 */
4111 if (unlikely(status & (ATA_ERR | ATA_DF))) {
 4112 /* data might be corrupted */
4113 qc->err_mask |= AC_ERR_DEV;
4114
4115 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
4116 ata_pio_sectors(qc);
4117 ata_altstatus(ap);
4118 status = ata_wait_idle(ap);
4119 }
4120
4121 /* ata_pio_sectors() might change the
4122 * state to HSM_ST_LAST. so, the state
4123 * is changed after ata_pio_sectors().
4124 */
4125 ap->hsm_task_state = HSM_ST_ERR;
4126 goto fsm_start;
4127 }
4128
4129 ata_pio_sectors(qc);
4130
4131 if (ap->hsm_task_state == HSM_ST_LAST &&
4132 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
4133 /* all data read */
4134 ata_altstatus(ap);
4135 status = ata_wait_idle(ap);
4136 goto fsm_start;
4137 }
4138 }
4139
4140 ata_altstatus(ap); /* flush */
4141 poll_next = 1;
3922 break; 4142 break;
3923 4143
3924 case HSM_ST_LAST: 4144 case HSM_ST_LAST:
3925 qc_completed = ata_pio_complete(qc); 4145 if (unlikely(!ata_ok(status))) {
3926 break; 4146 qc->err_mask |= __ac_err_mask(status);
4147 ap->hsm_task_state = HSM_ST_ERR;
4148 goto fsm_start;
4149 }
4150
4151 /* no more data to transfer */
4152 DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
4153 ap->id, qc->dev->devno, status);
4154
4155 WARN_ON(qc->err_mask);
4156
4157 ap->hsm_task_state = HSM_ST_IDLE;
3927 4158
3928 case HSM_ST_POLL: 4159 /* complete taskfile transaction */
3929 case HSM_ST_LAST_POLL: 4160 ata_hsm_qc_complete(qc, in_wq);
3930 timeout = ata_pio_poll(qc); 4161
4162 poll_next = 0;
3931 break; 4163 break;
3932 4164
3933 case HSM_ST_TMOUT:
3934 case HSM_ST_ERR: 4165 case HSM_ST_ERR:
3935 ata_pio_error(qc); 4166 /* make sure qc->err_mask is available to
3936 return; 4167 * know what's wrong and recover
4168 */
4169 WARN_ON(qc->err_mask == 0);
4170
4171 ap->hsm_task_state = HSM_ST_IDLE;
4172
4173 /* complete taskfile transaction */
4174 ata_hsm_qc_complete(qc, in_wq);
4175
4176 poll_next = 0;
4177 break;
4178 default:
4179 poll_next = 0;
4180 BUG();
3937 } 4181 }
3938 4182
3939 if (timeout) 4183 return poll_next;
3940 ata_port_queue_task(ap, ata_pio_task, qc, timeout);
3941 else if (!qc_completed)
3942 goto fsm_start;
3943} 4184}
3944 4185
3945/** 4186static void ata_pio_task(void *_data)
3946 * atapi_packet_task - Write CDB bytes to hardware
3947 * @_data: qc in progress
3948 *
3949 * When device has indicated its readiness to accept
3950 * a CDB, this function is called. Send the CDB.
3951 * If DMA is to be performed, exit immediately.
3952 * Otherwise, we are in polling mode, so poll
3953 * status under operation succeeds or fails.
3954 *
3955 * LOCKING:
3956 * Kernel thread context (may sleep)
3957 */
3958static void atapi_packet_task(void *_data)
3959{ 4187{
3960 struct ata_queued_cmd *qc = _data; 4188 struct ata_queued_cmd *qc = _data;
3961 struct ata_port *ap = qc->ap; 4189 struct ata_port *ap = qc->ap;
3962 u8 status; 4190 u8 status;
4191 int poll_next;
3963 4192
3964 /* sleep-wait for BSY to clear */ 4193fsm_start:
3965 DPRINTK("busy wait\n"); 4194 WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
3966 if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB)) {
3967 qc->err_mask |= AC_ERR_TIMEOUT;
3968 goto err_out;
3969 }
3970
3971 /* make sure DRQ is set */
3972 status = ata_chk_status(ap);
3973 if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ) {
3974 qc->err_mask |= AC_ERR_HSM;
3975 goto err_out;
3976 }
3977
3978 /* send SCSI cdb */
3979 DPRINTK("send cdb\n");
3980 WARN_ON(qc->dev->cdb_len < 12);
3981
3982 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA ||
3983 qc->tf.protocol == ATA_PROT_ATAPI_NODATA) {
3984 unsigned long flags;
3985
3986 /* Once we're done issuing command and kicking bmdma,
3987 * irq handler takes over. To not lose irq, we need
3988 * to clear NOINTR flag before sending cdb, but
3989 * interrupt handler shouldn't be invoked before we're
3990 * finished. Hence, the following locking.
3991 */
3992 spin_lock_irqsave(&ap->host_set->lock, flags);
3993 ap->flags &= ~ATA_FLAG_NOINTR;
3994 ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);
3995 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA)
3996 ap->ops->bmdma_start(qc); /* initiate bmdma */
3997 spin_unlock_irqrestore(&ap->host_set->lock, flags);
3998 } else {
3999 ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);
4000 4195
4001 /* PIO commands are handled by polling */ 4196 /*
4002 ap->hsm_task_state = HSM_ST; 4197 * This is purely heuristic. This is a fast path.
4003 ata_port_queue_task(ap, ata_pio_task, qc, 0); 4198 * Sometimes when we enter, BSY will be cleared in
4199 * a chk-status or two. If not, the drive is probably seeking
4200 * or something. Snooze for a couple msecs, then
4201 * chk-status again. If still busy, queue delayed work.
4202 */
4203 status = ata_busy_wait(ap, ATA_BUSY, 5);
4204 if (status & ATA_BUSY) {
4205 msleep(2);
4206 status = ata_busy_wait(ap, ATA_BUSY, 10);
4207 if (status & ATA_BUSY) {
4208 ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
4209 return;
4210 }
4004 } 4211 }
4005 4212
4006 return; 4213 /* move the HSM */
4214 poll_next = ata_hsm_move(ap, qc, status, 1);
4007 4215
4008err_out: 4216 /* another command or interrupt handler
4009 ata_poll_qc_complete(qc); 4217 * may be running at this point.
4218 */
4219 if (poll_next)
4220 goto fsm_start;
4010} 4221}
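The tiered wait above is worth seeing in isolation: poll a few times, snooze 2 ms, poll again, and only then fall back to delayed work. A minimal sketch of the same strategy as a standalone helper (the helper name is hypothetical, not part of this patch; assumes the usual libata context):

	/* Illustrative only: the tiered BSY wait used by ata_pio_task().
	 * Returns the last status read; *requeue is set when the caller
	 * should back off to ata_port_queue_task() with a short pause.
	 */
	static u8 pio_busy_heuristic(struct ata_port *ap, int *requeue)
	{
		u8 status = ata_busy_wait(ap, ATA_BUSY, 5);	/* a few fast reads */

		if (status & ATA_BUSY) {
			msleep(2);				/* cheap snooze */
			status = ata_busy_wait(ap, ATA_BUSY, 10);
		}
		*requeue = !!(status & ATA_BUSY);		/* drive still busy */
		return status;
	}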
4011 4222
4012/** 4223/**
@@ -4023,9 +4234,14 @@ static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4023 struct ata_queued_cmd *qc = NULL; 4234 struct ata_queued_cmd *qc = NULL;
4024 unsigned int i; 4235 unsigned int i;
4025 4236
4026 for (i = 0; i < ATA_MAX_QUEUE; i++) 4237 /* no command while frozen */
4027 if (!test_and_set_bit(i, &ap->qactive)) { 4238 if (unlikely(ap->flags & ATA_FLAG_FROZEN))
4028 qc = ata_qc_from_tag(ap, i); 4239 return NULL;
4240
4241 /* the last tag is reserved for internal command. */
4242 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
4243 if (!test_and_set_bit(i, &ap->qc_allocated)) {
4244 qc = __ata_qc_from_tag(ap, i);
4029 break; 4245 break;
4030 } 4246 }
4031 4247
@@ -4037,16 +4253,15 @@ static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4037 4253
4038/** 4254/**
4039 * ata_qc_new_init - Request an available ATA command, and initialize it 4255 * ata_qc_new_init - Request an available ATA command, and initialize it
4040 * @ap: Port associated with device @dev
4041 * @dev: Device from whom we request an available command structure 4256 * @dev: Device from whom we request an available command structure
4042 * 4257 *
4043 * LOCKING: 4258 * LOCKING:
4044 * None. 4259 * None.
4045 */ 4260 */
4046 4261
4047struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap, 4262struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
4048 struct ata_device *dev)
4049{ 4263{
4264 struct ata_port *ap = dev->ap;
4050 struct ata_queued_cmd *qc; 4265 struct ata_queued_cmd *qc;
4051 4266
4052 qc = ata_qc_new(ap); 4267 qc = ata_qc_new(ap);
@@ -4081,36 +4296,153 @@ void ata_qc_free(struct ata_queued_cmd *qc)
4081 qc->flags = 0; 4296 qc->flags = 0;
4082 tag = qc->tag; 4297 tag = qc->tag;
4083 if (likely(ata_tag_valid(tag))) { 4298 if (likely(ata_tag_valid(tag))) {
4084 if (tag == ap->active_tag)
4085 ap->active_tag = ATA_TAG_POISON;
4086 qc->tag = ATA_TAG_POISON; 4299 qc->tag = ATA_TAG_POISON;
4087 clear_bit(tag, &ap->qactive); 4300 clear_bit(tag, &ap->qc_allocated);
4088 } 4301 }
4089} 4302}
4090 4303
4091void __ata_qc_complete(struct ata_queued_cmd *qc) 4304void __ata_qc_complete(struct ata_queued_cmd *qc)
4092{ 4305{
4306 struct ata_port *ap = qc->ap;
4307
4093 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ 4308 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4094 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE)); 4309 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
4095 4310
4096 if (likely(qc->flags & ATA_QCFLAG_DMAMAP)) 4311 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4097 ata_sg_clean(qc); 4312 ata_sg_clean(qc);
4098 4313
4314 /* command should be marked inactive atomically with qc completion */
4315 if (qc->tf.protocol == ATA_PROT_NCQ)
4316 ap->sactive &= ~(1 << qc->tag);
4317 else
4318 ap->active_tag = ATA_TAG_POISON;
4319
4099 /* atapi: mark qc as inactive to prevent the interrupt handler 4320 /* atapi: mark qc as inactive to prevent the interrupt handler
4100 * from completing the command twice later, before the error handler 4321 * from completing the command twice later, before the error handler
4101 * is called. (when rc != 0 and atapi request sense is needed) 4322 * is called. (when rc != 0 and atapi request sense is needed)
4102 */ 4323 */
4103 qc->flags &= ~ATA_QCFLAG_ACTIVE; 4324 qc->flags &= ~ATA_QCFLAG_ACTIVE;
4325 ap->qc_active &= ~(1 << qc->tag);
4104 4326
4105 /* call completion callback */ 4327 /* call completion callback */
4106 qc->complete_fn(qc); 4328 qc->complete_fn(qc);
4107} 4329}
4108 4330
4331/**
4332 * ata_qc_complete - Complete an active ATA command
4333 * @qc: Command to complete
4335 *
4336 * Indicate to the mid and upper layers that an ATA
4337 * command has completed, with either an ok or not-ok status.
4338 *
4339 * LOCKING:
4340 * spin_lock_irqsave(host_set lock)
4341 */
4342void ata_qc_complete(struct ata_queued_cmd *qc)
4343{
4344 struct ata_port *ap = qc->ap;
4345
4346 /* XXX: New EH and old EH use different mechanisms to
4347 * synchronize EH with regular execution path.
4348 *
4349 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
4350 * Normal execution path is responsible for not accessing a
4351 * failed qc. libata core enforces the rule by returning NULL
4352 * from ata_qc_from_tag() for failed qcs.
4353 *
4354 * Old EH depends on ata_qc_complete() nullifying completion
4355 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
4356 * not synchronize with interrupt handler. Only PIO task is
4357 * taken care of.
4358 */
4359 if (ap->ops->error_handler) {
4360 WARN_ON(ap->flags & ATA_FLAG_FROZEN);
4361
4362 if (unlikely(qc->err_mask))
4363 qc->flags |= ATA_QCFLAG_FAILED;
4364
4365 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
4366 if (!ata_tag_internal(qc->tag)) {
4367 /* always fill result TF for failed qc */
4368 ap->ops->tf_read(ap, &qc->result_tf);
4369 ata_qc_schedule_eh(qc);
4370 return;
4371 }
4372 }
4373
4374 /* read result TF if requested */
4375 if (qc->flags & ATA_QCFLAG_RESULT_TF)
4376 ap->ops->tf_read(ap, &qc->result_tf);
4377
4378 __ata_qc_complete(qc);
4379 } else {
4380 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
4381 return;
4382
4383 /* read result TF if failed or requested */
4384 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
4385 ap->ops->tf_read(ap, &qc->result_tf);
4386
4387 __ata_qc_complete(qc);
4388 }
4389}
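For a simple non-NCQ LLDD, the completion path from interrupt context reduces to filling err_mask and calling ata_qc_complete(); with new EH, failed qcs are then routed to the error handler automatically. A minimal sketch (the driver function name is hypothetical):

	/* Illustrative LLDD interrupt path; caller holds the host_set lock */
	static void my_chip_complete(struct ata_port *ap)
	{
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
		u8 status;

		if (!qc)
			return;

		status = ata_chk_status(ap);		/* read Status, clears INTRQ */
		qc->err_mask |= ac_err_mask(status);	/* 0 if ok, AC_ERR_* otherwise */
		ata_qc_complete(qc);			/* failed qc -> ata_qc_schedule_eh() */
	}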
4390
4391/**
4392 * ata_qc_complete_multiple - Complete multiple qcs successfully
4393 * @ap: port in question
4394 * @qc_active: new qc_active mask
4395 * @finish_qc: LLDD callback invoked before completing a qc
4396 *
4397 * Complete in-flight commands. This function is meant to be
4398 * called from low-level driver's interrupt routine to complete
4399 * requests normally. ap->qc_active and @qc_active are compared
4400 * and commands are completed accordingly.
4401 *
4402 * LOCKING:
4403 * spin_lock_irqsave(host_set lock)
4404 *
4405 * RETURNS:
4406 * Number of completed commands on success, -errno otherwise.
4407 */
4408int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
4409 void (*finish_qc)(struct ata_queued_cmd *))
4410{
4411 int nr_done = 0;
4412 u32 done_mask;
4413 int i;
4414
4415 done_mask = ap->qc_active ^ qc_active;
4416
4417 if (unlikely(done_mask & qc_active)) {
4418 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
4419 "(%08x->%08x)\n", ap->qc_active, qc_active);
4420 return -EINVAL;
4421 }
4422
4423 for (i = 0; i < ATA_MAX_QUEUE; i++) {
4424 struct ata_queued_cmd *qc;
4425
4426 if (!(done_mask & (1 << i)))
4427 continue;
4428
4429 if ((qc = ata_qc_from_tag(ap, i))) {
4430 if (finish_qc)
4431 finish_qc(qc);
4432 ata_qc_complete(qc);
4433 nr_done++;
4434 }
4435 }
4436
4437 return nr_done;
4438}
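Since done_mask = ap->qc_active ^ qc_active, an NCQ LLDD only reports which tags are still in flight and libata completes the rest. A worked example (values illustrative):

	/* ap->qc_active = 0x0b  -> tags 0, 1 and 3 in flight
	 * qc_active     = 0x08  -> controller says only tag 3 still active
	 * done_mask     = 0x03  -> tags 0 and 1 are completed
	 *
	 * qc_active = 0x0c would be rejected as an illegal transition:
	 * bit 2 is set in the new mask but was never in ap->qc_active.
	 */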
4439
4109static inline int ata_should_dma_map(struct ata_queued_cmd *qc) 4440static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
4110{ 4441{
4111 struct ata_port *ap = qc->ap; 4442 struct ata_port *ap = qc->ap;
4112 4443
4113 switch (qc->tf.protocol) { 4444 switch (qc->tf.protocol) {
4445 case ATA_PROT_NCQ:
4114 case ATA_PROT_DMA: 4446 case ATA_PROT_DMA:
4115 case ATA_PROT_ATAPI_DMA: 4447 case ATA_PROT_ATAPI_DMA:
4116 return 1; 4448 return 1;
@@ -4145,8 +4477,22 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
4145{ 4477{
4146 struct ata_port *ap = qc->ap; 4478 struct ata_port *ap = qc->ap;
4147 4479
4148 qc->ap->active_tag = qc->tag; 4480 /* Make sure only one non-NCQ command is outstanding. The
4481 * check is skipped for old EH because it reuses active qc to
4482 * request ATAPI sense.
4483 */
4484 WARN_ON(ap->ops->error_handler && ata_tag_valid(ap->active_tag));
4485
4486 if (qc->tf.protocol == ATA_PROT_NCQ) {
4487 WARN_ON(ap->sactive & (1 << qc->tag));
4488 ap->sactive |= 1 << qc->tag;
4489 } else {
4490 WARN_ON(ap->sactive);
4491 ap->active_tag = qc->tag;
4492 }
4493
4149 qc->flags |= ATA_QCFLAG_ACTIVE; 4494 qc->flags |= ATA_QCFLAG_ACTIVE;
4495 ap->qc_active |= 1 << qc->tag;
4150 4496
4151 if (ata_should_dma_map(qc)) { 4497 if (ata_should_dma_map(qc)) {
4152 if (qc->flags & ATA_QCFLAG_SG) { 4498 if (qc->flags & ATA_QCFLAG_SG) {
@@ -4196,43 +4542,105 @@ unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
4196{ 4542{
4197 struct ata_port *ap = qc->ap; 4543 struct ata_port *ap = qc->ap;
4198 4544
4545 /* Use polling pio if the LLD doesn't handle
4546 * interrupt driven pio and atapi CDB interrupt.
4547 */
4548 if (ap->flags & ATA_FLAG_PIO_POLLING) {
4549 switch (qc->tf.protocol) {
4550 case ATA_PROT_PIO:
4551 case ATA_PROT_ATAPI:
4552 case ATA_PROT_ATAPI_NODATA:
4553 qc->tf.flags |= ATA_TFLAG_POLLING;
4554 break;
4555 case ATA_PROT_ATAPI_DMA:
4556 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
4557 /* see ata_check_atapi_dma() */
4558 BUG();
4559 break;
4560 default:
4561 break;
4562 }
4563 }
4564
4565 /* select the device */
4199 ata_dev_select(ap, qc->dev->devno, 1, 0); 4566 ata_dev_select(ap, qc->dev->devno, 1, 0);
4200 4567
4568 /* start the command */
4201 switch (qc->tf.protocol) { 4569 switch (qc->tf.protocol) {
4202 case ATA_PROT_NODATA: 4570 case ATA_PROT_NODATA:
4571 if (qc->tf.flags & ATA_TFLAG_POLLING)
4572 ata_qc_set_polling(qc);
4573
4203 ata_tf_to_host(ap, &qc->tf); 4574 ata_tf_to_host(ap, &qc->tf);
4575 ap->hsm_task_state = HSM_ST_LAST;
4576
4577 if (qc->tf.flags & ATA_TFLAG_POLLING)
4578 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4579
4204 break; 4580 break;
4205 4581
4206 case ATA_PROT_DMA: 4582 case ATA_PROT_DMA:
4583 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
4584
4207 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */ 4585 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4208 ap->ops->bmdma_setup(qc); /* set up bmdma */ 4586 ap->ops->bmdma_setup(qc); /* set up bmdma */
4209 ap->ops->bmdma_start(qc); /* initiate bmdma */ 4587 ap->ops->bmdma_start(qc); /* initiate bmdma */
4588 ap->hsm_task_state = HSM_ST_LAST;
4210 break; 4589 break;
4211 4590
4212 case ATA_PROT_PIO: /* load tf registers, initiate polling pio */ 4591 case ATA_PROT_PIO:
4213 ata_qc_set_polling(qc); 4592 if (qc->tf.flags & ATA_TFLAG_POLLING)
4214 ata_tf_to_host(ap, &qc->tf); 4593 ata_qc_set_polling(qc);
4215 ap->hsm_task_state = HSM_ST;
4216 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4217 break;
4218 4594
4219 case ATA_PROT_ATAPI:
4220 ata_qc_set_polling(qc);
4221 ata_tf_to_host(ap, &qc->tf); 4595 ata_tf_to_host(ap, &qc->tf);
4222 ata_port_queue_task(ap, atapi_packet_task, qc, 0); 4596
4597 if (qc->tf.flags & ATA_TFLAG_WRITE) {
4598 /* PIO data out protocol */
4599 ap->hsm_task_state = HSM_ST_FIRST;
4600 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4601
4602 /* always send first data block using
4603 * the ata_pio_task() codepath.
4604 */
4605 } else {
4606 /* PIO data in protocol */
4607 ap->hsm_task_state = HSM_ST;
4608
4609 if (qc->tf.flags & ATA_TFLAG_POLLING)
4610 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4611
4612 /* if polling, ata_pio_task() handles the rest.
4613 * otherwise, interrupt handler takes over from here.
4614 */
4615 }
4616
4223 break; 4617 break;
4224 4618
4619 case ATA_PROT_ATAPI:
4225 case ATA_PROT_ATAPI_NODATA: 4620 case ATA_PROT_ATAPI_NODATA:
4226 ap->flags |= ATA_FLAG_NOINTR; 4621 if (qc->tf.flags & ATA_TFLAG_POLLING)
4622 ata_qc_set_polling(qc);
4623
4227 ata_tf_to_host(ap, &qc->tf); 4624 ata_tf_to_host(ap, &qc->tf);
4228 ata_port_queue_task(ap, atapi_packet_task, qc, 0); 4625
4626 ap->hsm_task_state = HSM_ST_FIRST;
4627
4628 /* send cdb by polling if no cdb interrupt */
4629 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
4630 (qc->tf.flags & ATA_TFLAG_POLLING))
4631 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4229 break; 4632 break;
4230 4633
4231 case ATA_PROT_ATAPI_DMA: 4634 case ATA_PROT_ATAPI_DMA:
4232 ap->flags |= ATA_FLAG_NOINTR; 4635 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
4636
4233 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */ 4637 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4234 ap->ops->bmdma_setup(qc); /* set up bmdma */ 4638 ap->ops->bmdma_setup(qc); /* set up bmdma */
4235 ata_port_queue_task(ap, atapi_packet_task, qc, 0); 4639 ap->hsm_task_state = HSM_ST_FIRST;
4640
4641 /* send cdb by polling if no cdb interrupt */
4642 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4643 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4236 break; 4644 break;
4237 4645
4238 default: 4646 default:
@@ -4262,52 +4670,66 @@ unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
4262inline unsigned int ata_host_intr (struct ata_port *ap, 4670inline unsigned int ata_host_intr (struct ata_port *ap,
4263 struct ata_queued_cmd *qc) 4671 struct ata_queued_cmd *qc)
4264{ 4672{
4265 u8 status, host_stat; 4673 u8 status, host_stat = 0;
4266 4674
4267 switch (qc->tf.protocol) { 4675 VPRINTK("ata%u: protocol %d task_state %d\n",
4676 ap->id, qc->tf.protocol, ap->hsm_task_state);
4268 4677
4269 case ATA_PROT_DMA: 4678 /* Check whether we are expecting interrupt in this state */
4270 case ATA_PROT_ATAPI_DMA: 4679 switch (ap->hsm_task_state) {
4271 case ATA_PROT_ATAPI: 4680 case HSM_ST_FIRST:
4272 /* check status of DMA engine */ 4681 /* Some pre-ATAPI-4 devices assert INTRQ
4273 host_stat = ap->ops->bmdma_status(ap); 4682 * at this state when ready to receive CDB.
4274 VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat); 4683 */
4275
4276 /* if it's not our irq... */
4277 if (!(host_stat & ATA_DMA_INTR))
4278 goto idle_irq;
4279
4280 /* before we do anything else, clear DMA-Start bit */
4281 ap->ops->bmdma_stop(qc);
4282
4283 /* fall through */
4284
4285 case ATA_PROT_ATAPI_NODATA:
4286 case ATA_PROT_NODATA:
4287 /* check altstatus */
4288 status = ata_altstatus(ap);
4289 if (status & ATA_BUSY)
4290 goto idle_irq;
4291 4684
4292 /* check main status, clearing INTRQ */ 4685 /* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
4293 status = ata_chk_status(ap); 4686 * The flag was turned on only for atapi devices.
4294 if (unlikely(status & ATA_BUSY)) 4687 * No need to check is_atapi_taskfile(&qc->tf) again.
4688 */
4689 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4295 goto idle_irq; 4690 goto idle_irq;
4296 DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
4297 ap->id, qc->tf.protocol, status);
4298
4299 /* ack bmdma irq events */
4300 ap->ops->irq_clear(ap);
4301
4302 /* complete taskfile transaction */
4303 qc->err_mask |= ac_err_mask(status);
4304 ata_qc_complete(qc);
4305 break; 4691 break;
4306 4692 case HSM_ST_LAST:
4693 if (qc->tf.protocol == ATA_PROT_DMA ||
4694 qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
4695 /* check status of DMA engine */
4696 host_stat = ap->ops->bmdma_status(ap);
4697 VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
4698
4699 /* if it's not our irq... */
4700 if (!(host_stat & ATA_DMA_INTR))
4701 goto idle_irq;
4702
4703 /* before we do anything else, clear DMA-Start bit */
4704 ap->ops->bmdma_stop(qc);
4705
4706 if (unlikely(host_stat & ATA_DMA_ERR)) {
4707 /* error when transferring data to/from memory */
4708 qc->err_mask |= AC_ERR_HOST_BUS;
4709 ap->hsm_task_state = HSM_ST_ERR;
4710 }
4711 }
4712 break;
4713 case HSM_ST:
4714 break;
4307 default: 4715 default:
4308 goto idle_irq; 4716 goto idle_irq;
4309 } 4717 }
4310 4718
4719 /* check altstatus */
4720 status = ata_altstatus(ap);
4721 if (status & ATA_BUSY)
4722 goto idle_irq;
4723
4724 /* check main status, clearing INTRQ */
4725 status = ata_chk_status(ap);
4726 if (unlikely(status & ATA_BUSY))
4727 goto idle_irq;
4728
4729 /* ack bmdma irq events */
4730 ap->ops->irq_clear(ap);
4731
4732 ata_hsm_move(ap, qc, status, 0);
4311 return 1; /* irq handled */ 4733 return 1; /* irq handled */
4312 4734
4313idle_irq: 4735idle_irq:
@@ -4316,7 +4738,7 @@ idle_irq:
4316#ifdef ATA_IRQ_TRAP 4738#ifdef ATA_IRQ_TRAP
4317 if ((ap->stats.idle_irq % 1000) == 0) { 4739 if ((ap->stats.idle_irq % 1000) == 0) {
4318 ata_irq_ack(ap, 0); /* debug trap */ 4740 ata_irq_ack(ap, 0); /* debug trap */
4319 printk(KERN_WARNING "ata%d: irq trap\n", ap->id); 4741 ata_port_printk(ap, KERN_WARNING, "irq trap\n");
4320 return 1; 4742 return 1;
4321 } 4743 }
4322#endif 4744#endif
@@ -4354,11 +4776,11 @@ irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
4354 4776
4355 ap = host_set->ports[i]; 4777 ap = host_set->ports[i];
4356 if (ap && 4778 if (ap &&
4357 !(ap->flags & (ATA_FLAG_DISABLED | ATA_FLAG_NOINTR))) { 4779 !(ap->flags & ATA_FLAG_DISABLED)) {
4358 struct ata_queued_cmd *qc; 4780 struct ata_queued_cmd *qc;
4359 4781
4360 qc = ata_qc_from_tag(ap, ap->active_tag); 4782 qc = ata_qc_from_tag(ap, ap->active_tag);
4361 if (qc && (!(qc->tf.ctl & ATA_NIEN)) && 4783 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
4362 (qc->flags & ATA_QCFLAG_ACTIVE)) 4784 (qc->flags & ATA_QCFLAG_ACTIVE))
4363 handled |= ata_host_intr(ap, qc); 4785 handled |= ata_host_intr(ap, qc);
4364 } 4786 }
@@ -4369,32 +4791,168 @@ irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
4369 return IRQ_RETVAL(handled); 4791 return IRQ_RETVAL(handled);
4370} 4792}
4371 4793
4794/**
4795 * sata_scr_valid - test whether SCRs are accessible
4796 * @ap: ATA port to test SCR accessibility for
4797 *
4798 * Test whether SCRs are accessible for @ap.
4799 *
4800 * LOCKING:
4801 * None.
4802 *
4803 * RETURNS:
4804 * 1 if SCRs are accessible, 0 otherwise.
4805 */
4806int sata_scr_valid(struct ata_port *ap)
4807{
4808 return ap->cbl == ATA_CBL_SATA && ap->ops->scr_read;
4809}
4810
4811/**
4812 * sata_scr_read - read SCR register of the specified port
4813 * @ap: ATA port to read SCR for
4814 * @reg: SCR to read
4815 * @val: Place to store read value
4816 *
4817 * Read SCR register @reg of @ap into *@val. This function is
4818 * guaranteed to succeed if the cable type of the port is SATA
4819 * and the port implements ->scr_read.
4820 *
4821 * LOCKING:
4822 * None.
4823 *
4824 * RETURNS:
4825 * 0 on success, negative errno on failure.
4826 */
4827int sata_scr_read(struct ata_port *ap, int reg, u32 *val)
4828{
4829 if (sata_scr_valid(ap)) {
4830 *val = ap->ops->scr_read(ap, reg);
4831 return 0;
4832 }
4833 return -EOPNOTSUPP;
4834}
4835
4836/**
4837 * sata_scr_write - write SCR register of the specified port
4838 * @ap: ATA port to write SCR for
4839 * @reg: SCR to write
4840 * @val: value to write
4841 *
4842 * Write @val to SCR register @reg of @ap. This function is
4843 * guaranteed to succeed if the cable type of the port is SATA
4844 * and the port implements ->scr_read.
4845 *
4846 * LOCKING:
4847 * None.
4848 *
4849 * RETURNS:
4850 * 0 on success, negative errno on failure.
4851 */
4852int sata_scr_write(struct ata_port *ap, int reg, u32 val)
4853{
4854 if (sata_scr_valid(ap)) {
4855 ap->ops->scr_write(ap, reg, val);
4856 return 0;
4857 }
4858 return -EOPNOTSUPP;
4859}
4860
4861/**
4862 * sata_scr_write_flush - write SCR register of the specified port and flush
4863 * @ap: ATA port to write SCR for
4864 * @reg: SCR to write
4865 * @val: value to write
4866 *
4867 * This function is identical to sata_scr_write() except that this
4868 * function performs a flush after writing to the register.
4869 *
4870 * LOCKING:
4871 * None.
4872 *
4873 * RETURNS:
4874 * 0 on success, negative errno on failure.
4875 */
4876int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val)
4877{
4878 if (sata_scr_valid(ap)) {
4879 ap->ops->scr_write(ap, reg, val);
4880 ap->ops->scr_read(ap, reg);
4881 return 0;
4882 }
4883 return -EOPNOTSUPP;
4884}
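Together, these helpers let generic and EH code touch PHY registers without checking the port type first; on a PATA port the call simply fails with -EOPNOTSUPP. A sketch of the usual read-modify-write pattern on SControl, capping the link at Gen1 (the function name is hypothetical; the SPD field encoding is from the SATA spec, not this patch):

	static int my_limit_to_gen1(struct ata_port *ap)
	{
		u32 scontrol;
		int rc;

		rc = sata_scr_read(ap, SCR_CONTROL, &scontrol);
		if (rc)
			return rc;			/* PATA port or no ->scr_read */

		scontrol = (scontrol & ~0xf0) | 0x10;	/* SPD field: cap at Gen1 */
		return sata_scr_write_flush(ap, SCR_CONTROL, scontrol);
	}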
4885
4886/**
4887 * ata_port_online - test whether the given port is online
4888 * @ap: ATA port to test
4889 *
4890 * Test whether @ap is online. Note that this function returns 0
4891 * if online status of @ap cannot be obtained, so
4892 * ata_port_online(ap) != !ata_port_offline(ap).
4893 *
4894 * LOCKING:
4895 * None.
4896 *
4897 * RETURNS:
4898 * 1 if the port online status is available and online.
4899 */
4900int ata_port_online(struct ata_port *ap)
4901{
4902 u32 sstatus;
4903
4904 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) == 0x3)
4905 return 1;
4906 return 0;
4907}
4908
4909/**
4910 * ata_port_offline - test whether the given port is offline
4911 * @ap: ATA port to test
4912 *
4913 * Test whether @ap is offline. Note that this function returns
4914 * 0 if offline status of @ap cannot be obtained, so
4915 * ata_port_online(ap) != !ata_port_offline(ap).
4916 *
4917 * LOCKING:
4918 * None.
4919 *
4920 * RETURNS:
4921 * 1 if the port offline status is available and offline.
4922 */
4923int ata_port_offline(struct ata_port *ap)
4924{
4925 u32 sstatus;
4926
4927 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) != 0x3)
4928 return 1;
4929 return 0;
4930}
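The asymmetry called out in both kernel-docs matters to callers: on a port whose SCRs cannot be read, both tests return 0, so !ata_port_online() must not be read as "link is down":

	/* PATA port, or SATA port without ->scr_read:
	 *   ata_port_online(ap)  == 0
	 *   ata_port_offline(ap) == 0
	 * Use ata_port_offline() when a positive "link is gone" is needed.
	 */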
4372 4931
4373/* 4932/*
4374 * Execute a 'simple' command, that only consists of the opcode 'cmd' itself, 4933 * Execute a 'simple' command, that only consists of the opcode 'cmd' itself,
4375 * without filling any other registers 4934 * without filling any other registers
4376 */ 4935 */
4377static int ata_do_simple_cmd(struct ata_port *ap, struct ata_device *dev, 4936static int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
4378 u8 cmd)
4379{ 4937{
4380 struct ata_taskfile tf; 4938 struct ata_taskfile tf;
4381 int err; 4939 int err;
4382 4940
4383 ata_tf_init(ap, &tf, dev->devno); 4941 ata_tf_init(dev, &tf);
4384 4942
4385 tf.command = cmd; 4943 tf.command = cmd;
4386 tf.flags |= ATA_TFLAG_DEVICE; 4944 tf.flags |= ATA_TFLAG_DEVICE;
4387 tf.protocol = ATA_PROT_NODATA; 4945 tf.protocol = ATA_PROT_NODATA;
4388 4946
4389 err = ata_exec_internal(ap, dev, &tf, NULL, DMA_NONE, NULL, 0); 4947 err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
4390 if (err) 4948 if (err)
4391 printk(KERN_ERR "%s: ata command failed: %d\n", 4949 ata_dev_printk(dev, KERN_ERR, "%s: ata command failed: %d\n",
4392 __FUNCTION__, err); 4950 __FUNCTION__, err);
4393 4951
4394 return err; 4952 return err;
4395} 4953}
4396 4954
4397static int ata_flush_cache(struct ata_port *ap, struct ata_device *dev) 4955static int ata_flush_cache(struct ata_device *dev)
4398{ 4956{
4399 u8 cmd; 4957 u8 cmd;
4400 4958
@@ -4406,22 +4964,21 @@ static int ata_flush_cache(struct ata_port *ap, struct ata_device *dev)
4406 else 4964 else
4407 cmd = ATA_CMD_FLUSH; 4965 cmd = ATA_CMD_FLUSH;
4408 4966
4409 return ata_do_simple_cmd(ap, dev, cmd); 4967 return ata_do_simple_cmd(dev, cmd);
4410} 4968}
4411 4969
4412static int ata_standby_drive(struct ata_port *ap, struct ata_device *dev) 4970static int ata_standby_drive(struct ata_device *dev)
4413{ 4971{
4414 return ata_do_simple_cmd(ap, dev, ATA_CMD_STANDBYNOW1); 4972 return ata_do_simple_cmd(dev, ATA_CMD_STANDBYNOW1);
4415} 4973}
4416 4974
4417static int ata_start_drive(struct ata_port *ap, struct ata_device *dev) 4975static int ata_start_drive(struct ata_device *dev)
4418{ 4976{
4419 return ata_do_simple_cmd(ap, dev, ATA_CMD_IDLEIMMEDIATE); 4977 return ata_do_simple_cmd(dev, ATA_CMD_IDLEIMMEDIATE);
4420} 4978}
4421 4979
4422/** 4980/**
4423 * ata_device_resume - wake up a previously suspended device 4981 * ata_device_resume - wake up a previously suspended device
4424 * @ap: port the device is connected to
4425 * @dev: the device to resume 4982 * @dev: the device to resume
4426 * 4983 *
4427 * Kick the drive back into action, by sending it an idle immediate 4984 * Kick the drive back into action, by sending it an idle immediate
@@ -4429,39 +4986,42 @@ static int ata_start_drive(struct ata_port *ap, struct ata_device *dev)
4429 * and host. 4986 * and host.
4430 * 4987 *
4431 */ 4988 */
4432int ata_device_resume(struct ata_port *ap, struct ata_device *dev) 4989int ata_device_resume(struct ata_device *dev)
4433{ 4990{
4991 struct ata_port *ap = dev->ap;
4992
4434 if (ap->flags & ATA_FLAG_SUSPENDED) { 4993 if (ap->flags & ATA_FLAG_SUSPENDED) {
4435 struct ata_device *failed_dev; 4994 struct ata_device *failed_dev;
4436 ap->flags &= ~ATA_FLAG_SUSPENDED; 4995 ap->flags &= ~ATA_FLAG_SUSPENDED;
4437 while (ata_set_mode(ap, &failed_dev)) 4996 while (ata_set_mode(ap, &failed_dev))
4438 ata_dev_disable(ap, failed_dev); 4997 ata_dev_disable(failed_dev);
4439 } 4998 }
4440 if (!ata_dev_enabled(dev)) 4999 if (!ata_dev_enabled(dev))
4441 return 0; 5000 return 0;
4442 if (dev->class == ATA_DEV_ATA) 5001 if (dev->class == ATA_DEV_ATA)
4443 ata_start_drive(ap, dev); 5002 ata_start_drive(dev);
4444 5003
4445 return 0; 5004 return 0;
4446} 5005}
4447 5006
4448/** 5007/**
4449 * ata_device_suspend - prepare a device for suspend 5008 * ata_device_suspend - prepare a device for suspend
4450 * @ap: port the device is connected to
4451 * @dev: the device to suspend 5009 * @dev: the device to suspend
4452 * 5010 *
4453 * Flush the cache on the drive, if appropriate, then issue a 5011 * Flush the cache on the drive, if appropriate, then issue a
4454 * standbynow command. 5012 * standbynow command.
4455 */ 5013 */
4456int ata_device_suspend(struct ata_port *ap, struct ata_device *dev, pm_message_t state) 5014int ata_device_suspend(struct ata_device *dev, pm_message_t state)
4457{ 5015{
5016 struct ata_port *ap = dev->ap;
5017
4458 if (!ata_dev_enabled(dev)) 5018 if (!ata_dev_enabled(dev))
4459 return 0; 5019 return 0;
4460 if (dev->class == ATA_DEV_ATA) 5020 if (dev->class == ATA_DEV_ATA)
4461 ata_flush_cache(ap, dev); 5021 ata_flush_cache(dev);
4462 5022
4463 if (state.event != PM_EVENT_FREEZE) 5023 if (state.event != PM_EVENT_FREEZE)
4464 ata_standby_drive(ap, dev); 5024 ata_standby_drive(dev);
4465 ap->flags |= ATA_FLAG_SUSPENDED; 5025 ap->flags |= ATA_FLAG_SUSPENDED;
4466 return 0; 5026 return 0;
4467} 5027}
@@ -4589,7 +5149,6 @@ static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
4589 ap->udma_mask = ent->udma_mask; 5149 ap->udma_mask = ent->udma_mask;
4590 ap->flags |= ent->host_flags; 5150 ap->flags |= ent->host_flags;
4591 ap->ops = ent->port_ops; 5151 ap->ops = ent->port_ops;
4592 ap->cbl = ATA_CBL_NONE;
4593 ap->sata_spd_limit = UINT_MAX; 5152 ap->sata_spd_limit = UINT_MAX;
4594 ap->active_tag = ATA_TAG_POISON; 5153 ap->active_tag = ATA_TAG_POISON;
4595 ap->last_ctl = 0xFF; 5154 ap->last_ctl = 0xFF;
@@ -4597,8 +5156,14 @@ static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
4597 INIT_WORK(&ap->port_task, NULL, NULL); 5156 INIT_WORK(&ap->port_task, NULL, NULL);
4598 INIT_LIST_HEAD(&ap->eh_done_q); 5157 INIT_LIST_HEAD(&ap->eh_done_q);
4599 5158
5159 /* set cable type */
5160 ap->cbl = ATA_CBL_NONE;
5161 if (ap->flags & ATA_FLAG_SATA)
5162 ap->cbl = ATA_CBL_SATA;
5163
4600 for (i = 0; i < ATA_MAX_DEVICES; i++) { 5164 for (i = 0; i < ATA_MAX_DEVICES; i++) {
4601 struct ata_device *dev = &ap->device[i]; 5165 struct ata_device *dev = &ap->device[i];
5166 dev->ap = ap;
4602 dev->devno = i; 5167 dev->devno = i;
4603 dev->pio_mask = UINT_MAX; 5168 dev->pio_mask = UINT_MAX;
4604 dev->mwdma_mask = UINT_MAX; 5169 dev->mwdma_mask = UINT_MAX;
@@ -4722,18 +5287,18 @@ int ata_device_add(const struct ata_probe_ent *ent)
4722 (ap->pio_mask << ATA_SHIFT_PIO); 5287 (ap->pio_mask << ATA_SHIFT_PIO);
4723 5288
4724 /* print per-port info to dmesg */ 5289 /* print per-port info to dmesg */
4725 printk(KERN_INFO "ata%u: %cATA max %s cmd 0x%lX ctl 0x%lX " 5290 ata_port_printk(ap, KERN_INFO, "%cATA max %s cmd 0x%lX "
4726 "bmdma 0x%lX irq %lu\n", 5291 "ctl 0x%lX bmdma 0x%lX irq %lu\n",
4727 ap->id, 5292 ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
4728 ap->flags & ATA_FLAG_SATA ? 'S' : 'P', 5293 ata_mode_string(xfer_mode_mask),
4729 ata_mode_string(xfer_mode_mask), 5294 ap->ioaddr.cmd_addr,
4730 ap->ioaddr.cmd_addr, 5295 ap->ioaddr.ctl_addr,
4731 ap->ioaddr.ctl_addr, 5296 ap->ioaddr.bmdma_addr,
4732 ap->ioaddr.bmdma_addr, 5297 ent->irq);
4733 ent->irq);
4734 5298
4735 ata_chk_status(ap); 5299 ata_chk_status(ap);
4736 host_set->ops->irq_clear(ap); 5300 host_set->ops->irq_clear(ap);
5301 ata_eh_freeze_port(ap); /* freeze port before requesting IRQ */
4737 count++; 5302 count++;
4738 } 5303 }
4739 5304
@@ -4768,8 +5333,7 @@ int ata_device_add(const struct ata_probe_ent *ent)
4768 5333
4769 rc = scsi_add_host(ap->host, dev); 5334 rc = scsi_add_host(ap->host, dev);
4770 if (rc) { 5335 if (rc) {
4771 printk(KERN_ERR "ata%u: scsi_add_host failed\n", 5336 ata_port_printk(ap, KERN_ERR, "scsi_add_host failed\n");
4772 ap->id);
4773 /* FIXME: do something useful here */ 5337 /* FIXME: do something useful here */
4774 /* FIXME: handle unconditional calls to 5338 /* FIXME: handle unconditional calls to
4775 * scsi_scan_host and ata_host_remove, below, 5339 * scsi_scan_host and ata_host_remove, below,
@@ -4865,14 +5429,11 @@ void ata_host_set_remove(struct ata_host_set *host_set)
4865int ata_scsi_release(struct Scsi_Host *host) 5429int ata_scsi_release(struct Scsi_Host *host)
4866{ 5430{
4867 struct ata_port *ap = ata_shost_to_port(host); 5431 struct ata_port *ap = ata_shost_to_port(host);
4868 int i;
4869 5432
4870 DPRINTK("ENTER\n"); 5433 DPRINTK("ENTER\n");
4871 5434
4872 ap->ops->port_disable(ap); 5435 ap->ops->port_disable(ap);
4873 ata_host_remove(ap, 0); 5436 ata_host_remove(ap, 0);
4874 for (i = 0; i < ATA_MAX_DEVICES; i++)
4875 kfree(ap->device[i].id);
4876 5437
4877 DPRINTK("EXIT\n"); 5438 DPRINTK("EXIT\n");
4878 return 1; 5439 return 1;
@@ -5090,7 +5651,8 @@ EXPORT_SYMBOL_GPL(ata_device_add);
5090EXPORT_SYMBOL_GPL(ata_host_set_remove); 5651EXPORT_SYMBOL_GPL(ata_host_set_remove);
5091EXPORT_SYMBOL_GPL(ata_sg_init); 5652EXPORT_SYMBOL_GPL(ata_sg_init);
5092EXPORT_SYMBOL_GPL(ata_sg_init_one); 5653EXPORT_SYMBOL_GPL(ata_sg_init_one);
5093EXPORT_SYMBOL_GPL(__ata_qc_complete); 5654EXPORT_SYMBOL_GPL(ata_qc_complete);
5655EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
5094EXPORT_SYMBOL_GPL(ata_qc_issue_prot); 5656EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
5095EXPORT_SYMBOL_GPL(ata_tf_load); 5657EXPORT_SYMBOL_GPL(ata_tf_load);
5096EXPORT_SYMBOL_GPL(ata_tf_read); 5658EXPORT_SYMBOL_GPL(ata_tf_read);
@@ -5112,8 +5674,13 @@ EXPORT_SYMBOL_GPL(ata_bmdma_start);
5112EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear); 5674EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
5113EXPORT_SYMBOL_GPL(ata_bmdma_status); 5675EXPORT_SYMBOL_GPL(ata_bmdma_status);
5114EXPORT_SYMBOL_GPL(ata_bmdma_stop); 5676EXPORT_SYMBOL_GPL(ata_bmdma_stop);
5677EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
5678EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
5679EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
5680EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
5681EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
5115EXPORT_SYMBOL_GPL(ata_port_probe); 5682EXPORT_SYMBOL_GPL(ata_port_probe);
5116EXPORT_SYMBOL_GPL(ata_set_sata_spd); 5683EXPORT_SYMBOL_GPL(sata_set_spd);
5117EXPORT_SYMBOL_GPL(sata_phy_reset); 5684EXPORT_SYMBOL_GPL(sata_phy_reset);
5118EXPORT_SYMBOL_GPL(__sata_phy_reset); 5685EXPORT_SYMBOL_GPL(__sata_phy_reset);
5119EXPORT_SYMBOL_GPL(ata_bus_reset); 5686EXPORT_SYMBOL_GPL(ata_bus_reset);
@@ -5134,8 +5701,15 @@ EXPORT_SYMBOL_GPL(ata_port_queue_task);
5134EXPORT_SYMBOL_GPL(ata_scsi_ioctl); 5701EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
5135EXPORT_SYMBOL_GPL(ata_scsi_queuecmd); 5702EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
5136EXPORT_SYMBOL_GPL(ata_scsi_slave_config); 5703EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
5704EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
5137EXPORT_SYMBOL_GPL(ata_scsi_release); 5705EXPORT_SYMBOL_GPL(ata_scsi_release);
5138EXPORT_SYMBOL_GPL(ata_host_intr); 5706EXPORT_SYMBOL_GPL(ata_host_intr);
5707EXPORT_SYMBOL_GPL(sata_scr_valid);
5708EXPORT_SYMBOL_GPL(sata_scr_read);
5709EXPORT_SYMBOL_GPL(sata_scr_write);
5710EXPORT_SYMBOL_GPL(sata_scr_write_flush);
5711EXPORT_SYMBOL_GPL(ata_port_online);
5712EXPORT_SYMBOL_GPL(ata_port_offline);
5139EXPORT_SYMBOL_GPL(ata_id_string); 5713EXPORT_SYMBOL_GPL(ata_id_string);
5140EXPORT_SYMBOL_GPL(ata_id_c_string); 5714EXPORT_SYMBOL_GPL(ata_id_c_string);
5141EXPORT_SYMBOL_GPL(ata_scsi_simulate); 5715EXPORT_SYMBOL_GPL(ata_scsi_simulate);
@@ -5161,7 +5735,12 @@ EXPORT_SYMBOL_GPL(ata_device_resume);
5161EXPORT_SYMBOL_GPL(ata_scsi_device_suspend); 5735EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
5162EXPORT_SYMBOL_GPL(ata_scsi_device_resume); 5736EXPORT_SYMBOL_GPL(ata_scsi_device_resume);
5163 5737
5164EXPORT_SYMBOL_GPL(ata_scsi_error);
5165EXPORT_SYMBOL_GPL(ata_eng_timeout); 5738EXPORT_SYMBOL_GPL(ata_eng_timeout);
5739EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
5740EXPORT_SYMBOL_GPL(ata_port_abort);
5741EXPORT_SYMBOL_GPL(ata_port_freeze);
5742EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
5743EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
5166EXPORT_SYMBOL_GPL(ata_eh_qc_complete); 5744EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
5167EXPORT_SYMBOL_GPL(ata_eh_qc_retry); 5745EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
5746EXPORT_SYMBOL_GPL(ata_do_eh);
diff --git a/drivers/scsi/libata-eh.c b/drivers/scsi/libata-eh.c
index c31b13fd5307..7244caff13a0 100644
--- a/drivers/scsi/libata-eh.c
+++ b/drivers/scsi/libata-eh.c
@@ -44,6 +44,53 @@
44 44
45#include "libata.h" 45#include "libata.h"
46 46
47static void __ata_port_freeze(struct ata_port *ap);
48
49static void ata_ering_record(struct ata_ering *ering, int is_io,
50 unsigned int err_mask)
51{
52 struct ata_ering_entry *ent;
53
54 WARN_ON(!err_mask);
55
56 ering->cursor++;
57 ering->cursor %= ATA_ERING_SIZE;
58
59 ent = &ering->ring[ering->cursor];
60 ent->is_io = is_io;
61 ent->err_mask = err_mask;
62 ent->timestamp = get_jiffies_64();
63}
64
65static struct ata_ering_entry * ata_ering_top(struct ata_ering *ering)
66{
67 struct ata_ering_entry *ent = &ering->ring[ering->cursor];
68 if (!ent->err_mask)
69 return NULL;
70 return ent;
71}
72
73static int ata_ering_map(struct ata_ering *ering,
74 int (*map_fn)(struct ata_ering_entry *, void *),
75 void *arg)
76{
77 int idx, rc = 0;
78 struct ata_ering_entry *ent;
79
80 idx = ering->cursor;
81 do {
82 ent = &ering->ring[idx];
83 if (!ent->err_mask)
84 break;
85 rc = map_fn(ent, arg);
86 if (rc)
87 break;
88 idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE;
89 } while (idx != ering->cursor);
90
91 return rc;
92}
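The ring is written at failure time and replayed later, newest entry first, until the callback returns non-zero or an empty slot is hit. A sketch of the intended use inside libata-eh.c (the helpers are static to this file; the callback and wrapper are hypothetical):

	static int count_timeouts(struct ata_ering_entry *ent, void *arg)
	{
		if (ent->err_mask & AC_ERR_TIMEOUT)
			(*(int *)arg)++;
		return 0;				/* keep walking */
	}

	static int dev_recent_timeouts(struct ata_device *dev)
	{
		int nr = 0;

		/* entries were added earlier at completion time via
		 * ata_ering_record(&dev->ering, is_io, qc->err_mask);
		 */
		ata_ering_map(&dev->ering, count_timeouts, &nr);
		return nr;
	}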
93
47/** 94/**
48 * ata_scsi_timed_out - SCSI layer time out callback 95 * ata_scsi_timed_out - SCSI layer time out callback
49 * @cmd: timed out SCSI command 96 * @cmd: timed out SCSI command
@@ -55,6 +102,8 @@
55 * from finishing it by setting EH_SCHEDULED and return 102 * from finishing it by setting EH_SCHEDULED and return
56 * EH_NOT_HANDLED. 103 * EH_NOT_HANDLED.
57 * 104 *
105 * TODO: kill this function once old EH is gone.
106 *
58 * LOCKING: 107 * LOCKING:
59 * Called from timer context 108 * Called from timer context
60 * 109 *
@@ -67,10 +116,16 @@ enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
67 struct ata_port *ap = ata_shost_to_port(host); 116 struct ata_port *ap = ata_shost_to_port(host);
68 unsigned long flags; 117 unsigned long flags;
69 struct ata_queued_cmd *qc; 118 struct ata_queued_cmd *qc;
70 enum scsi_eh_timer_return ret = EH_HANDLED; 119 enum scsi_eh_timer_return ret;
71 120
72 DPRINTK("ENTER\n"); 121 DPRINTK("ENTER\n");
73 122
123 if (ap->ops->error_handler) {
124 ret = EH_NOT_HANDLED;
125 goto out;
126 }
127
128 ret = EH_HANDLED;
74 spin_lock_irqsave(&ap->host_set->lock, flags); 129 spin_lock_irqsave(&ap->host_set->lock, flags);
75 qc = ata_qc_from_tag(ap, ap->active_tag); 130 qc = ata_qc_from_tag(ap, ap->active_tag);
76 if (qc) { 131 if (qc) {
@@ -81,6 +136,7 @@ enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
81 } 136 }
82 spin_unlock_irqrestore(&ap->host_set->lock, flags); 137 spin_unlock_irqrestore(&ap->host_set->lock, flags);
83 138
139 out:
84 DPRINTK("EXIT, ret=%d\n", ret); 140 DPRINTK("EXIT, ret=%d\n", ret);
85 return ret; 141 return ret;
86} 142}
@@ -100,21 +156,141 @@ enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
100void ata_scsi_error(struct Scsi_Host *host) 156void ata_scsi_error(struct Scsi_Host *host)
101{ 157{
102 struct ata_port *ap = ata_shost_to_port(host); 158 struct ata_port *ap = ata_shost_to_port(host);
159 spinlock_t *hs_lock = &ap->host_set->lock;
160 int i, repeat_cnt = ATA_EH_MAX_REPEAT;
161 unsigned long flags;
103 162
104 DPRINTK("ENTER\n"); 163 DPRINTK("ENTER\n");
105 164
106 /* synchronize with IRQ handler and port task */ 165 /* synchronize with port task */
107 spin_unlock_wait(&ap->host_set->lock);
108 ata_port_flush_task(ap); 166 ata_port_flush_task(ap);
109 167
110 WARN_ON(ata_qc_from_tag(ap, ap->active_tag) == NULL); 168 /* synchronize with host_set lock and sort out timeouts */
169
170 /* For new EH, all qcs are finished in one of three ways -
171 * normal completion, error completion, and SCSI timeout.
172 * Both completions can race against SCSI timeout. When normal
173 * completion wins, the qc never reaches EH. When error
174 * completion wins, the qc has ATA_QCFLAG_FAILED set.
175 *
176 * When SCSI timeout wins, things are a bit more complex.
177 * Normal or error completion can occur after the timeout but
178 * before this point. In such cases, both types of
179 * completions are honored. A scmd is determined to have
180 * timed out iff its associated qc is active and not failed.
181 */
182 if (ap->ops->error_handler) {
183 struct scsi_cmnd *scmd, *tmp;
184 int nr_timedout = 0;
185
186 spin_lock_irqsave(hs_lock, flags);
187
188 list_for_each_entry_safe(scmd, tmp, &host->eh_cmd_q, eh_entry) {
189 struct ata_queued_cmd *qc;
190
191 for (i = 0; i < ATA_MAX_QUEUE; i++) {
192 qc = __ata_qc_from_tag(ap, i);
193 if (qc->flags & ATA_QCFLAG_ACTIVE &&
194 qc->scsicmd == scmd)
195 break;
196 }
197
198 if (i < ATA_MAX_QUEUE) {
199 /* the scmd has an associated qc */
200 if (!(qc->flags & ATA_QCFLAG_FAILED)) {
201 /* which hasn't failed yet, timeout */
202 qc->err_mask |= AC_ERR_TIMEOUT;
203 qc->flags |= ATA_QCFLAG_FAILED;
204 nr_timedout++;
205 }
206 } else {
207 /* Normal completion occurred after
208 * SCSI timeout but before this point.
209 * Successfully complete it.
210 */
211 scmd->retries = scmd->allowed;
212 scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
213 }
214 }
215
216 /* If we have timed out qcs, they belong to EH from
217 * this point but the state of the controller is
218 * unknown. Freeze the port to make sure the IRQ
219 * handler doesn't diddle with those qcs. This must
220 * be done atomically w.r.t. setting QCFLAG_FAILED.
221 */
222 if (nr_timedout)
223 __ata_port_freeze(ap);
224
225 spin_unlock_irqrestore(hs_lock, flags);
226 } else
227 spin_unlock_wait(hs_lock);
228
229 repeat:
230 /* invoke error handler */
231 if (ap->ops->error_handler) {
232 /* fetch & clear EH info */
233 spin_lock_irqsave(hs_lock, flags);
111 234
112 ap->ops->eng_timeout(ap); 235 memset(&ap->eh_context, 0, sizeof(ap->eh_context));
236 ap->eh_context.i = ap->eh_info;
237 memset(&ap->eh_info, 0, sizeof(ap->eh_info));
113 238
239 ap->flags &= ~ATA_FLAG_EH_PENDING;
240
241 spin_unlock_irqrestore(hs_lock, flags);
242
243 /* invoke EH */
244 ap->ops->error_handler(ap);
245
246 /* Exception might have happened after ->error_handler
247 * recovered the port but before this point. Repeat
248 * EH in such case.
249 */
250 spin_lock_irqsave(hs_lock, flags);
251
252 if (ap->flags & ATA_FLAG_EH_PENDING) {
253 if (--repeat_cnt) {
254 ata_port_printk(ap, KERN_INFO,
255 "EH pending after completion, "
256 "repeating EH (cnt=%d)\n", repeat_cnt);
257 spin_unlock_irqrestore(hs_lock, flags);
258 goto repeat;
259 }
260 ata_port_printk(ap, KERN_ERR, "EH pending after %d "
261 "tries, giving up\n", ATA_EH_MAX_REPEAT);
262 }
263
264 /* this run is complete, make sure EH info is clear */
265 memset(&ap->eh_info, 0, sizeof(ap->eh_info));
266
267 /* Clear host_eh_scheduled while holding hs_lock such
268 * that if exception occurs after this point but
269 * before EH completion, SCSI midlayer will
270 * re-initiate EH.
271 */
272 host->host_eh_scheduled = 0;
273
274 spin_unlock_irqrestore(hs_lock, flags);
275 } else {
276 WARN_ON(ata_qc_from_tag(ap, ap->active_tag) == NULL);
277 ap->ops->eng_timeout(ap);
278 }
279
280 /* finish or retry handled scmd's and clean up */
114 WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q)); 281 WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q));
115 282
116 scsi_eh_flush_done_q(&ap->eh_done_q); 283 scsi_eh_flush_done_q(&ap->eh_done_q);
117 284
285 /* clean up */
286 spin_lock_irqsave(hs_lock, flags);
287
288 if (ap->flags & ATA_FLAG_RECOVERED)
289 ata_port_printk(ap, KERN_INFO, "EH complete\n");
290 ap->flags &= ~ATA_FLAG_RECOVERED;
291
292 spin_unlock_irqrestore(hs_lock, flags);
293
118 DPRINTK("EXIT\n"); 294 DPRINTK("EXIT\n");
119} 295}
120 296
@@ -133,6 +309,8 @@ void ata_scsi_error(struct Scsi_Host *host)
133 * an interrupt was not delivered to the driver, even though the 309 * an interrupt was not delivered to the driver, even though the
134 * transaction completed successfully. 310 * transaction completed successfully.
135 * 311 *
312 * TODO: kill this function once old EH is gone.
313 *
136 * LOCKING: 314 * LOCKING:
137 * Inherited from SCSI layer (none, can sleep) 315 * Inherited from SCSI layer (none, can sleep)
138 */ 316 */
@@ -167,11 +345,12 @@ static void ata_qc_timeout(struct ata_queued_cmd *qc)
167 /* ack bmdma irq events */ 345 /* ack bmdma irq events */
168 ap->ops->irq_clear(ap); 346 ap->ops->irq_clear(ap);
169 347
170 printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x host_stat 0x%x\n", 348 ata_dev_printk(qc->dev, KERN_ERR, "command 0x%x timeout, "
171 ap->id, qc->tf.command, drv_stat, host_stat); 349 "stat 0x%x host_stat 0x%x\n",
350 qc->tf.command, drv_stat, host_stat);
172 351
173 /* complete taskfile transaction */ 352 /* complete taskfile transaction */
174 qc->err_mask |= ac_err_mask(drv_stat); 353 qc->err_mask |= AC_ERR_TIMEOUT;
175 break; 354 break;
176 } 355 }
177 356
@@ -197,6 +376,8 @@ static void ata_qc_timeout(struct ata_queued_cmd *qc)
197 * an interrupt was not delivered to the driver, even though the 376 * an interrupt was not delivered to the driver, even though the
198 * transaction completed successfully. 377 * transaction completed successfully.
199 * 378 *
379 * TODO: kill this function once old EH is gone.
380 *
200 * LOCKING: 381 * LOCKING:
201 * Inherited from SCSI layer (none, can sleep) 382 * Inherited from SCSI layer (none, can sleep)
202 */ 383 */
@@ -209,6 +390,190 @@ void ata_eng_timeout(struct ata_port *ap)
209 DPRINTK("EXIT\n"); 390 DPRINTK("EXIT\n");
210} 391}
211 392
393/**
394 * ata_qc_schedule_eh - schedule qc for error handling
395 * @qc: command to schedule error handling for
396 *
397 * Schedule error handling for @qc. EH will kick in as soon as
398 * other commands are drained.
399 *
400 * LOCKING:
401 * spin_lock_irqsave(host_set lock)
402 */
403void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
404{
405 struct ata_port *ap = qc->ap;
406
407 WARN_ON(!ap->ops->error_handler);
408
409 qc->flags |= ATA_QCFLAG_FAILED;
410 qc->ap->flags |= ATA_FLAG_EH_PENDING;
411
412 /* The following will fail if timeout has already expired.
413 * ata_scsi_error() takes care of such scmds on EH entry.
414 * Note that ATA_QCFLAG_FAILED is unconditionally set after
415 * this function completes.
416 */
417 scsi_req_abort_cmd(qc->scsicmd);
418}
419
420/**
421 * ata_port_schedule_eh - schedule error handling without a qc
422 * @ap: ATA port to schedule EH for
423 *
424 * Schedule error handling for @ap. EH will kick in as soon as
425 * all commands are drained.
426 *
427 * LOCKING:
428 * spin_lock_irqsave(host_set lock)
429 */
430void ata_port_schedule_eh(struct ata_port *ap)
431{
432 WARN_ON(!ap->ops->error_handler);
433
434 ap->flags |= ATA_FLAG_EH_PENDING;
435 ata_schedule_scsi_eh(ap->host);
436
437 DPRINTK("port EH scheduled\n");
438}
439
440/**
441 * ata_port_abort - abort all qc's on the port
442 * @ap: ATA port to abort qc's for
443 *
444 * Abort all active qc's of @ap and schedule EH.
445 *
446 * LOCKING:
447 * spin_lock_irqsave(host_set lock)
448 *
449 * RETURNS:
450 * Number of aborted qc's.
451 */
452int ata_port_abort(struct ata_port *ap)
453{
454 int tag, nr_aborted = 0;
455
456 WARN_ON(!ap->ops->error_handler);
457
458 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
459 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
460
461 if (qc) {
462 qc->flags |= ATA_QCFLAG_FAILED;
463 ata_qc_complete(qc);
464 nr_aborted++;
465 }
466 }
467
468 if (!nr_aborted)
469 ata_port_schedule_eh(ap);
470
471 return nr_aborted;
472}
473
474/**
475 * __ata_port_freeze - freeze port
476 * @ap: ATA port to freeze
477 *
478 * This function is called when HSM violation or some other
479 * condition disrupts normal operation of the port. A frozen port
480 * is not allowed to perform any operation until the port is
481 * thawed, which usually follows a successful reset.
482 *
483 * ap->ops->freeze() callback can be used for freezing the port
484 * hardware-wise (e.g. mask interrupt and stop DMA engine). If a
485 * port cannot be frozen hardware-wise, the interrupt handler
486 * must ack and clear interrupts unconditionally while the port
487 * is frozen.
488 *
489 * LOCKING:
490 * spin_lock_irqsave(host_set lock)
491 */
492static void __ata_port_freeze(struct ata_port *ap)
493{
494 WARN_ON(!ap->ops->error_handler);
495
496 if (ap->ops->freeze)
497 ap->ops->freeze(ap);
498
499 ap->flags |= ATA_FLAG_FROZEN;
500
501 DPRINTK("ata%u port frozen\n", ap->id);
502}
503
504/**
505 * ata_port_freeze - abort & freeze port
506 * @ap: ATA port to freeze
507 *
508 * Abort and freeze @ap.
509 *
510 * LOCKING:
511 * spin_lock_irqsave(host_set lock)
512 *
513 * RETURNS:
514 * Number of aborted commands.
515 */
516int ata_port_freeze(struct ata_port *ap)
517{
518 int nr_aborted;
519
520 WARN_ON(!ap->ops->error_handler);
521
522 nr_aborted = ata_port_abort(ap);
523 __ata_port_freeze(ap);
524
525 return nr_aborted;
526}
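From an LLDD's interrupt handler, a fatal condition is reported by noting it in ap->eh_info and calling ata_port_freeze(), which fails everything in flight and keeps the port quiet until EH thaws it. A minimal sketch (the chip status bit and function name are hypothetical; the interrupt path already holds the host_set lock):

	/* Illustrative only: react to a hypothetical fatal HW error bit */
	static void my_chip_handle_fatal(struct ata_port *ap, u32 irq_stat)
	{
		if (irq_stat & MY_CHIP_FATAL_ERR) {	/* hypothetical bit */
			struct ata_eh_info *ehi = &ap->eh_info;

			ehi->err_mask |= AC_ERR_HOST_BUS;
			ehi->action |= ATA_EH_SOFTRESET;
			ata_port_freeze(ap);		/* abort qcs + freeze */
		}
	}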
527
528/**
529 * ata_eh_freeze_port - EH helper to freeze port
530 * @ap: ATA port to freeze
531 *
532 * Freeze @ap.
533 *
534 * LOCKING:
535 * None.
536 */
537void ata_eh_freeze_port(struct ata_port *ap)
538{
539 unsigned long flags;
540
541 if (!ap->ops->error_handler)
542 return;
543
544 spin_lock_irqsave(&ap->host_set->lock, flags);
545 __ata_port_freeze(ap);
546 spin_unlock_irqrestore(&ap->host_set->lock, flags);
547}
548
549/**
550 * ata_eh_thaw_port - EH helper to thaw port
551 * @ap: ATA port to thaw
552 *
553 * Thaw frozen port @ap.
554 *
555 * LOCKING:
556 * None.
557 */
558void ata_eh_thaw_port(struct ata_port *ap)
559{
560 unsigned long flags;
561
562 if (!ap->ops->error_handler)
563 return;
564
565 spin_lock_irqsave(&ap->host_set->lock, flags);
566
567 ap->flags &= ~ATA_FLAG_FROZEN;
568
569 if (ap->ops->thaw)
570 ap->ops->thaw(ap);
571
572 spin_unlock_irqrestore(&ap->host_set->lock, flags);
573
574 DPRINTK("ata%u port thawed\n", ap->id);
575}
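Inside ->error_handler these two helpers bracket recovery: the port stays frozen while it is being reset and is thawed once the reset has brought it back. A sketch of the overall shape only (callback name hypothetical; a real driver would normally let ata_do_eh() drive this with its reset methods):

	static void my_error_handler(struct ata_port *ap)
	{
		ata_eh_freeze_port(ap);		/* no-op if already frozen */

		/* ... analyze failure, reset the link, revalidate devices ... */

		ata_eh_thaw_port(ap);		/* resume normal IRQ handling */
	}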
576
212static void ata_eh_scsidone(struct scsi_cmnd *scmd) 577static void ata_eh_scsidone(struct scsi_cmnd *scmd)
213{ 578{
214 /* nada */ 579 /* nada */
@@ -261,3 +626,933 @@ void ata_eh_qc_retry(struct ata_queued_cmd *qc)
261 scmd->retries--; 626 scmd->retries--;
262 __ata_eh_qc_complete(qc); 627 __ata_eh_qc_complete(qc);
263} 628}
629
630/**
631 * ata_eh_about_to_do - about to perform eh_action
632 * @ap: target ATA port
633 * @action: action about to be performed
634 *
635 * Called just before performing EH actions to clear related bits
636 * in @ap->eh_info such that eh actions are not unnecessarily
637 * repeated.
638 *
639 * LOCKING:
640 * None.
641 */
642static void ata_eh_about_to_do(struct ata_port *ap, unsigned int action)
643{
644 unsigned long flags;
645
646 spin_lock_irqsave(&ap->host_set->lock, flags);
647 ap->eh_info.action &= ~action;
648 ap->flags |= ATA_FLAG_RECOVERED;
649 spin_unlock_irqrestore(&ap->host_set->lock, flags);
650}
651
652/**
653 * ata_err_string - convert err_mask to descriptive string
654 * @err_mask: error mask to convert to string
655 *
656 * Convert @err_mask to descriptive string. Errors are
657 * prioritized according to severity and only the most severe
658 * error is reported.
659 *
660 * LOCKING:
661 * None.
662 *
663 * RETURNS:
664 * Descriptive string for @err_mask
665 */
666static const char * ata_err_string(unsigned int err_mask)
667{
668 if (err_mask & AC_ERR_HOST_BUS)
669 return "host bus error";
670 if (err_mask & AC_ERR_ATA_BUS)
671 return "ATA bus error";
672 if (err_mask & AC_ERR_TIMEOUT)
673 return "timeout";
674 if (err_mask & AC_ERR_HSM)
675 return "HSM violation";
676 if (err_mask & AC_ERR_SYSTEM)
677 return "internal error";
678 if (err_mask & AC_ERR_MEDIA)
679 return "media error";
680 if (err_mask & AC_ERR_INVALID)
681 return "invalid argument";
682 if (err_mask & AC_ERR_DEV)
683 return "device error";
684 return "unknown error";
685}
686
687/**
688 * ata_read_log_page - read a specific log page
689 * @dev: target device
690 * @page: page to read
691 * @buf: buffer to store read page
692 * @sectors: number of sectors to read
693 *
694 * Read log page using READ_LOG_EXT command.
695 *
696 * LOCKING:
697 * Kernel thread context (may sleep).
698 *
699 * RETURNS:
700 * 0 on success, AC_ERR_* mask otherwise.
701 */
702static unsigned int ata_read_log_page(struct ata_device *dev,
703 u8 page, void *buf, unsigned int sectors)
704{
705 struct ata_taskfile tf;
706 unsigned int err_mask;
707
708 DPRINTK("read log page - page %d\n", page);
709
710 ata_tf_init(dev, &tf);
711 tf.command = ATA_CMD_READ_LOG_EXT;
712 tf.lbal = page;
713 tf.nsect = sectors;
714 tf.hob_nsect = sectors >> 8;
715 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;
716 tf.protocol = ATA_PROT_PIO;
717
718 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
719 buf, sectors * ATA_SECT_SIZE);
720
721 DPRINTK("EXIT, err_mask=%x\n", err_mask);
722 return err_mask;
723}
724
725/**
726 * ata_eh_read_log_10h - Read log page 10h for NCQ error details
727 * @dev: Device to read log page 10h from
728 * @tag: Resulting tag of the failed command
729 * @tf: Resulting taskfile registers of the failed command
730 *
731 * Read log page 10h to obtain NCQ error details and clear error
732 * condition.
733 *
734 * LOCKING:
735 * Kernel thread context (may sleep).
736 *
737 * RETURNS:
738 * 0 on success, -errno otherwise.
739 */
740static int ata_eh_read_log_10h(struct ata_device *dev,
741 int *tag, struct ata_taskfile *tf)
742{
743 u8 *buf = dev->ap->sector_buf;
744 unsigned int err_mask;
745 u8 csum;
746 int i;
747
748 err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, buf, 1);
749 if (err_mask)
750 return -EIO;
751
752 csum = 0;
753 for (i = 0; i < ATA_SECT_SIZE; i++)
754 csum += buf[i];
755 if (csum)
756 ata_dev_printk(dev, KERN_WARNING,
757 "invalid checksum 0x%x on log page 10h\n", csum);
758
759 if (buf[0] & 0x80)
760 return -ENOENT;
761
762 *tag = buf[0] & 0x1f;
763
764 tf->command = buf[2];
765 tf->feature = buf[3];
766 tf->lbal = buf[4];
767 tf->lbam = buf[5];
768 tf->lbah = buf[6];
769 tf->device = buf[7];
770 tf->hob_lbal = buf[8];
771 tf->hob_lbam = buf[9];
772 tf->hob_lbah = buf[10];
773 tf->nsect = buf[12];
774 tf->hob_nsect = buf[13];
775
776 return 0;
777}
778
779/**
780 * atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
781 * @dev: device to perform REQUEST_SENSE to
782 * @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
783 *
784 * Perform ATAPI REQUEST_SENSE after the device reported CHECK
785 * SENSE. This function is an EH helper.
786 *
787 * LOCKING:
788 * Kernel thread context (may sleep).
789 *
790 * RETURNS:
791 * 0 on success, AC_ERR_* mask on failure
792 */
793static unsigned int atapi_eh_request_sense(struct ata_device *dev,
794 unsigned char *sense_buf)
795{
796 struct ata_port *ap = dev->ap;
797 struct ata_taskfile tf;
798 u8 cdb[ATAPI_CDB_LEN];
799
800 DPRINTK("ATAPI request sense\n");
801
802 ata_tf_init(dev, &tf);
803
804 /* FIXME: is this needed? */
805 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
806
807 /* XXX: why tf_read here? */
808 ap->ops->tf_read(ap, &tf);
809
810 /* fill these in, for the case where they are -not- overwritten */
811 sense_buf[0] = 0x70;
812 sense_buf[2] = tf.feature >> 4;
813
814 memset(cdb, 0, ATAPI_CDB_LEN);
815 cdb[0] = REQUEST_SENSE;
816 cdb[4] = SCSI_SENSE_BUFFERSIZE;
817
818 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
819 tf.command = ATA_CMD_PACKET;
820
821 /* is it pointless to prefer PIO for "safety reasons"? */
822 if (ap->flags & ATA_FLAG_PIO_DMA) {
823 tf.protocol = ATA_PROT_ATAPI_DMA;
824 tf.feature |= ATAPI_PKT_DMA;
825 } else {
826 tf.protocol = ATA_PROT_ATAPI;
827 tf.lbam = (8 * 1024) & 0xff;
828 tf.lbah = (8 * 1024) >> 8;
829 }
830
831 return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
832 sense_buf, SCSI_SENSE_BUFFERSIZE);
833}
834
835/**
836 * ata_eh_analyze_serror - analyze SError for a failed port
837 * @ap: ATA port to analyze SError for
838 *
839 * Analyze SError if available and further determine cause of
840 * failure.
841 *
842 * LOCKING:
843 * None.
844 */
845static void ata_eh_analyze_serror(struct ata_port *ap)
846{
847 struct ata_eh_context *ehc = &ap->eh_context;
848 u32 serror = ehc->i.serror;
849 unsigned int err_mask = 0, action = 0;
850
851 if (serror & SERR_PERSISTENT) {
852 err_mask |= AC_ERR_ATA_BUS;
853 action |= ATA_EH_HARDRESET;
854 }
855 if (serror &
856 (SERR_DATA_RECOVERED | SERR_COMM_RECOVERED | SERR_DATA)) {
857 err_mask |= AC_ERR_ATA_BUS;
858 action |= ATA_EH_SOFTRESET;
859 }
860 if (serror & SERR_PROTOCOL) {
861 err_mask |= AC_ERR_HSM;
862 action |= ATA_EH_SOFTRESET;
863 }
864 if (serror & SERR_INTERNAL) {
865 err_mask |= AC_ERR_SYSTEM;
866 action |= ATA_EH_SOFTRESET;
867 }
868 if (serror & (SERR_PHYRDY_CHG | SERR_DEV_XCHG)) {
869 err_mask |= AC_ERR_ATA_BUS;
870 action |= ATA_EH_HARDRESET;
871 }
872
873 ehc->i.err_mask |= err_mask;
874 ehc->i.action |= action;
875}
876
877/**
878 * ata_eh_analyze_ncq_error - analyze NCQ error
879 * @ap: ATA port to analyze NCQ error for
880 *
881 * Read log page 10h, determine the offending qc and acquire
882 * error status TF. For NCQ device errors, all an LLDD has to
883 * do is set AC_ERR_DEV in ehi->err_mask. This function takes
884 * care of the rest.
885 *
886 * LOCKING:
887 * Kernel thread context (may sleep).
888 */
889static void ata_eh_analyze_ncq_error(struct ata_port *ap)
890{
891 struct ata_eh_context *ehc = &ap->eh_context;
892 struct ata_device *dev = ap->device;
893 struct ata_queued_cmd *qc;
894 struct ata_taskfile tf;
895 int tag, rc;
896
897 /* if frozen, we can't do much */
898 if (ap->flags & ATA_FLAG_FROZEN)
899 return;
900
901 /* is it NCQ device error? */
902 if (!ap->sactive || !(ehc->i.err_mask & AC_ERR_DEV))
903 return;
904
905 /* has LLDD analyzed already? */
906 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
907 qc = __ata_qc_from_tag(ap, tag);
908
909 if (!(qc->flags & ATA_QCFLAG_FAILED))
910 continue;
911
912 if (qc->err_mask)
913 return;
914 }
915
916 /* okay, this error is ours */
917 rc = ata_eh_read_log_10h(dev, &tag, &tf);
918 if (rc) {
919 ata_port_printk(ap, KERN_ERR, "failed to read log page 10h "
920 "(errno=%d)\n", rc);
921 return;
922 }
923
924 if (!(ap->sactive & (1 << tag))) {
925 ata_port_printk(ap, KERN_ERR, "log page 10h reported "
926 "inactive tag %d\n", tag);
927 return;
928 }
929
930 /* we've got the perpetrator, condemn it */
931 qc = __ata_qc_from_tag(ap, tag);
932 memcpy(&qc->result_tf, &tf, sizeof(tf));
933 qc->err_mask |= AC_ERR_DEV;
934 ehc->i.err_mask &= ~AC_ERR_DEV;
935}
936
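/* Illustrative sketch (not part of the patch) of the LLDD side of the
 * contract described above.  foo_host_intr() and FOO_IRQ_DEV_ERR are
 * hypothetical; ata_port_abort() is assumed from the new EH core.  The
 * port must be left unfrozen or log page 10h cannot be read.
 */
static void foo_host_intr(struct ata_port *ap, u32 irq_stat)
{
	if (irq_stat & FOO_IRQ_DEV_ERR) {
		struct ata_eh_info *ehi = &ap->eh_info;

		/* for NCQ device errors, reporting AC_ERR_DEV is enough */
		ehi->err_mask |= AC_ERR_DEV;

		/* abort outstanding qcs without freezing the port */
		ata_port_abort(ap);
	}
}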
937/**
938 * ata_eh_analyze_tf - analyze taskfile of a failed qc
939 * @qc: qc to analyze
940 * @tf: Taskfile registers to analyze
941 *
942 * Analyze taskfile of @qc and further determine cause of
943 * failure. This function also requests ATAPI sense data if
 944 * available.
945 *
946 * LOCKING:
947 * Kernel thread context (may sleep).
948 *
949 * RETURNS:
950 * Determined recovery action
951 */
952static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
953 const struct ata_taskfile *tf)
954{
955 unsigned int tmp, action = 0;
956 u8 stat = tf->command, err = tf->feature;
957
958 if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) {
959 qc->err_mask |= AC_ERR_HSM;
960 return ATA_EH_SOFTRESET;
961 }
962
963 if (!(qc->err_mask & AC_ERR_DEV))
964 return 0;
965
966 switch (qc->dev->class) {
967 case ATA_DEV_ATA:
968 if (err & ATA_ICRC)
969 qc->err_mask |= AC_ERR_ATA_BUS;
970 if (err & ATA_UNC)
971 qc->err_mask |= AC_ERR_MEDIA;
972 if (err & ATA_IDNF)
973 qc->err_mask |= AC_ERR_INVALID;
974 break;
975
976 case ATA_DEV_ATAPI:
977 tmp = atapi_eh_request_sense(qc->dev,
978 qc->scsicmd->sense_buffer);
979 if (!tmp) {
980 /* ATA_QCFLAG_SENSE_VALID is used to tell
981 * atapi_qc_complete() that sense data is
982 * already valid.
983 *
984 * TODO: interpret sense data and set
985 * appropriate err_mask.
986 */
987 qc->flags |= ATA_QCFLAG_SENSE_VALID;
988 } else
989 qc->err_mask |= tmp;
990 }
991
992 if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS))
993 action |= ATA_EH_SOFTRESET;
994
995 return action;
996}
997
998static int ata_eh_categorize_ering_entry(struct ata_ering_entry *ent)
999{
1000 if (ent->err_mask & (AC_ERR_ATA_BUS | AC_ERR_TIMEOUT))
1001 return 1;
1002
1003 if (ent->is_io) {
1004 if (ent->err_mask & AC_ERR_HSM)
1005 return 1;
1006 if ((ent->err_mask &
1007 (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV)
1008 return 2;
1009 }
1010
1011 return 0;
1012}
1013
1014struct speed_down_needed_arg {
1015 u64 since;
1016 int nr_errors[3];
1017};
1018
1019static int speed_down_needed_cb(struct ata_ering_entry *ent, void *void_arg)
1020{
1021 struct speed_down_needed_arg *arg = void_arg;
1022
1023 if (ent->timestamp < arg->since)
1024 return -1;
1025
1026 arg->nr_errors[ata_eh_categorize_ering_entry(ent)]++;
1027 return 0;
1028}
1029
1030/**
1031 * ata_eh_speed_down_needed - Determine whether speed down is necessary
1032 * @dev: Device of interest
1033 *
1034 * This function examines error ring of @dev and determines
1035 * whether speed down is necessary. Speed down is necessary if
1036 * there have been more than 3 Cat-1 errors or 10 Cat-2
1037 * errors during the last 15 minutes.
1038 *
1039 * Cat-1 errors are ATA_BUS or TIMEOUT for any command, or an
1040 * HSM violation for a known supported command.
1041 *
1042 * Cat-2 errors are unclassified DEV errors for known supported
1043 * commands.
1044 *
1045 * LOCKING:
1046 * Inherited from caller.
1047 *
1048 * RETURNS:
1049 * 1 if speed down is necessary, 0 otherwise
1050 */
1051static int ata_eh_speed_down_needed(struct ata_device *dev)
1052{
1053 const u64 interval = 15LLU * 60 * HZ;
1054 static const int err_limits[3] = { -1, 3, 10 };
1055 struct speed_down_needed_arg arg;
1056 struct ata_ering_entry *ent;
1057 int err_cat;
1058 u64 j64;
1059
1060 ent = ata_ering_top(&dev->ering);
1061 if (!ent)
1062 return 0;
1063
1064 err_cat = ata_eh_categorize_ering_entry(ent);
1065 if (err_cat == 0)
1066 return 0;
1067
1068 memset(&arg, 0, sizeof(arg));
1069
1070 j64 = get_jiffies_64();
1071 if (j64 >= interval)
1072 arg.since = j64 - interval;
1073 else
1074 arg.since = 0;
1075
1076 ata_ering_map(&dev->ering, speed_down_needed_cb, &arg);
1077
1078 return arg.nr_errors[err_cat] > err_limits[err_cat];
1079}
1080
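/* Worked example: with err_limits[] = { -1, 3, 10 }, a device whose
 * newest ering entry is Cat-1 (say, a timeout) and whose ring holds
 * four Cat-1 entries newer than jiffies - 15 * 60 * HZ gives
 * arg.nr_errors[1] == 4 > 3, so the function returns 1; a single
 * fresh Cat-2 entry gives nr_errors[2] == 1 <= 10 and returns 0.
 */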
1081/**
1082 * ata_eh_speed_down - record error and speed down if necessary
1083 * @dev: Failed device
1084 * @is_io: Did the device fail during normal IO?
1085 * @err_mask: err_mask of the error
1086 *
1087 * Record error and examine error history to determine whether
1088 * adjusting transmission speed is necessary. It also sets
1089 * transmission limits appropriately if such adjustment is
1090 * necessary.
1091 *
1092 * LOCKING:
1093 * Kernel thread context (may sleep).
1094 *
1095 * RETURNS:
1096 * 0 on success, -errno otherwise
1097 */
1098static int ata_eh_speed_down(struct ata_device *dev, int is_io,
1099 unsigned int err_mask)
1100{
1101 if (!err_mask)
1102 return 0;
1103
1104 /* record error and determine whether speed down is necessary */
1105 ata_ering_record(&dev->ering, is_io, err_mask);
1106
1107 if (!ata_eh_speed_down_needed(dev))
1108 return 0;
1109
1110 /* speed down SATA link speed if possible */
1111 if (sata_down_spd_limit(dev->ap) == 0)
1112 return ATA_EH_HARDRESET;
1113
1114 /* lower transfer mode */
1115 if (ata_down_xfermask_limit(dev, 0) == 0)
1116 return ATA_EH_SOFTRESET;
1117
1118 ata_dev_printk(dev, KERN_ERR,
1119 "speed down requested but no transfer mode left\n");
1120 return 0;
1121}
1122
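/* Side note: the action returned above is OR'd into ehc->i.action by
 * ata_eh_autopsy() below -- ATA_EH_HARDRESET when the SATA link speed
 * was lowered, ATA_EH_SOFTRESET when only the transfer mode was
 * lowered, 0 when nothing was (or could be) changed.
 */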
1123/**
1124 * ata_eh_autopsy - analyze error and determine recovery action
1125 * @ap: ATA port to perform autopsy on
1126 *
1127 * Analyze why @ap failed and determine which recovery action is
1128 * needed. This function also sets more detailed AC_ERR_* values
1129 * and fills sense data for ATAPI CHECK CONDITION.
1130 *
1131 * LOCKING:
1132 * Kernel thread context (may sleep).
1133 */
1134static void ata_eh_autopsy(struct ata_port *ap)
1135{
1136 struct ata_eh_context *ehc = &ap->eh_context;
1137 unsigned int action = ehc->i.action;
1138 struct ata_device *failed_dev = NULL;
1139 unsigned int all_err_mask = 0;
1140 int tag, is_io = 0;
1141 u32 serror;
1142 int rc;
1143
1144 DPRINTK("ENTER\n");
1145
1146 /* obtain and analyze SError */
1147 rc = sata_scr_read(ap, SCR_ERROR, &serror);
1148 if (rc == 0) {
1149 ehc->i.serror |= serror;
1150 ata_eh_analyze_serror(ap);
1151 } else if (rc != -EOPNOTSUPP)
1152 action |= ATA_EH_HARDRESET;
1153
1154 /* analyze NCQ failure */
1155 ata_eh_analyze_ncq_error(ap);
1156
1157 /* any real error trumps AC_ERR_OTHER */
1158 if (ehc->i.err_mask & ~AC_ERR_OTHER)
1159 ehc->i.err_mask &= ~AC_ERR_OTHER;
1160
1161 all_err_mask |= ehc->i.err_mask;
1162
1163 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1164 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
1165
1166 if (!(qc->flags & ATA_QCFLAG_FAILED))
1167 continue;
1168
1169 /* inherit upper level err_mask */
1170 qc->err_mask |= ehc->i.err_mask;
1171
1172 if (qc->err_mask & AC_ERR_TIMEOUT)
1173 action |= ATA_EH_SOFTRESET;
1174
1175 /* analyze TF */
1176 action |= ata_eh_analyze_tf(qc, &qc->result_tf);
1177
1178 /* DEV errors are probably spurious in case of ATA_BUS error */
1179 if (qc->err_mask & AC_ERR_ATA_BUS)
1180 qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA |
1181 AC_ERR_INVALID);
1182
1183 /* any real error trumps unknown error */
1184 if (qc->err_mask & ~AC_ERR_OTHER)
1185 qc->err_mask &= ~AC_ERR_OTHER;
1186
1187 /* SENSE_VALID trumps dev/unknown error and revalidation */
1188 if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
1189 qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);
1190 action &= ~ATA_EH_REVALIDATE;
1191 }
1192
1193 /* accumulate error info */
1194 failed_dev = qc->dev;
1195 all_err_mask |= qc->err_mask;
1196 if (qc->flags & ATA_QCFLAG_IO)
1197 is_io = 1;
1198 }
1199
1200 /* speed down iff command was in progress */
1201 if (failed_dev)
1202 action |= ata_eh_speed_down(failed_dev, is_io, all_err_mask);
1203
1204 if (all_err_mask)
1205 action |= ATA_EH_REVALIDATE;
1206
1207 ehc->i.dev = failed_dev;
1208 ehc->i.action = action;
1209
1210 DPRINTK("EXIT\n");
1211}
1212
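/* Net effect: after autopsy, ehc->i.action holds the union of requests
 * from SError analysis, NCQ analysis, per-qc TF analysis and
 * speed-down, and ehc->i.dev points at the failed device (if any) for
 * ata_eh_report() and ata_eh_recover() below.
 */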
1213/**
1214 * ata_eh_report - report error handling to user
1215 * @ap: ATA port EH is going on
1216 *
1217 * Report EH to user.
1218 *
1219 * LOCKING:
1220 * None.
1221 */
1222static void ata_eh_report(struct ata_port *ap)
1223{
1224 struct ata_eh_context *ehc = &ap->eh_context;
1225 const char *frozen, *desc;
1226 int tag, nr_failed = 0;
1227
1228 desc = NULL;
1229 if (ehc->i.desc[0] != '\0')
1230 desc = ehc->i.desc;
1231
1232 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1233 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
1234
1235 if (!(qc->flags & ATA_QCFLAG_FAILED))
1236 continue;
1237 if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask)
1238 continue;
1239
1240 nr_failed++;
1241 }
1242
1243 if (!nr_failed && !ehc->i.err_mask)
1244 return;
1245
1246 frozen = "";
1247 if (ap->flags & ATA_FLAG_FROZEN)
1248 frozen = " frozen";
1249
1250 if (ehc->i.dev) {
1251 ata_dev_printk(ehc->i.dev, KERN_ERR, "exception Emask 0x%x "
1252 "SAct 0x%x SErr 0x%x action 0x%x%s\n",
1253 ehc->i.err_mask, ap->sactive, ehc->i.serror,
1254 ehc->i.action, frozen);
1255 if (desc)
1256 ata_dev_printk(ehc->i.dev, KERN_ERR, "(%s)\n", desc);
1257 } else {
1258 ata_port_printk(ap, KERN_ERR, "exception Emask 0x%x "
1259 "SAct 0x%x SErr 0x%x action 0x%x%s\n",
1260 ehc->i.err_mask, ap->sactive, ehc->i.serror,
1261 ehc->i.action, frozen);
1262 if (desc)
1263 ata_port_printk(ap, KERN_ERR, "(%s)\n", desc);
1264 }
1265
1266 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1267 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
1268
1269 if (!(qc->flags & ATA_QCFLAG_FAILED) || !qc->err_mask)
1270 continue;
1271
1272 ata_dev_printk(qc->dev, KERN_ERR, "tag %d cmd 0x%x "
1273 "Emask 0x%x stat 0x%x err 0x%x (%s)\n",
1274 qc->tag, qc->tf.command, qc->err_mask,
1275 qc->result_tf.command, qc->result_tf.feature,
1276 ata_err_string(qc->err_mask));
1277 }
1278}
1279
1280static int ata_eh_reset(struct ata_port *ap, ata_reset_fn_t softreset,
1281 ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
1282{
1283 struct ata_eh_context *ehc = &ap->eh_context;
1284 unsigned int classes[ATA_MAX_DEVICES];
1285 int tries = ATA_EH_RESET_TRIES;
1286 ata_reset_fn_t reset;
1287 int rc;
1288
1289 if (softreset && (!hardreset || (!sata_set_spd_needed(ap) &&
1290 !(ehc->i.action & ATA_EH_HARDRESET))))
1291 reset = softreset;
1292 else
1293 reset = hardreset;
1294
1295 retry:
1296 ata_port_printk(ap, KERN_INFO, "%s resetting port\n",
1297 reset == softreset ? "soft" : "hard");
1298
1299 /* reset */
1300 ata_eh_about_to_do(ap, ATA_EH_RESET_MASK);
1301 ehc->i.flags |= ATA_EHI_DID_RESET;
1302
1303 rc = ata_do_reset(ap, reset, classes);
1304
1305 if (rc && --tries) {
1306 ata_port_printk(ap, KERN_WARNING,
1307 "%sreset failed, retrying in 5 secs\n",
1308 reset == softreset ? "soft" : "hard");
1309 ssleep(5);
1310
1311 if (reset == hardreset)
1312 sata_down_spd_limit(ap);
1313 if (hardreset)
1314 reset = hardreset;
1315 goto retry;
1316 }
1317
1318 if (rc == 0) {
1319 if (postreset)
1320 postreset(ap, classes);
1321
1322 /* reset successful, schedule revalidation */
1323 ehc->i.dev = NULL;
1324 ehc->i.action &= ~ATA_EH_RESET_MASK;
1325 ehc->i.action |= ATA_EH_REVALIDATE;
1326 }
1327
1328 return rc;
1329}
1330
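/* Note the fallback in the retry loop above: a failed softreset is
 * retried as hardreset when one is available, and every failed
 * hardreset also lowers the SATA link speed limit before the next
 * attempt.
 */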
1331static int ata_eh_revalidate(struct ata_port *ap,
1332 struct ata_device **r_failed_dev)
1333{
1334 struct ata_eh_context *ehc = &ap->eh_context;
1335 struct ata_device *dev;
1336 int i, rc = 0;
1337
1338 DPRINTK("ENTER\n");
1339
1340 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1341 dev = &ap->device[i];
1342
1343 if (ehc->i.action & ATA_EH_REVALIDATE && ata_dev_enabled(dev) &&
1344 (!ehc->i.dev || ehc->i.dev == dev)) {
1345 if (ata_port_offline(ap)) {
1346 rc = -EIO;
1347 break;
1348 }
1349
1350 ata_eh_about_to_do(ap, ATA_EH_REVALIDATE);
1351 rc = ata_dev_revalidate(dev,
1352 ehc->i.flags & ATA_EHI_DID_RESET);
1353 if (rc)
1354 break;
1355
1356 ehc->i.action &= ~ATA_EH_REVALIDATE;
1357 }
1358 }
1359
1360 if (rc)
1361 *r_failed_dev = dev;
1362
1363 DPRINTK("EXIT\n");
1364 return rc;
1365}
1366
1367static int ata_port_nr_enabled(struct ata_port *ap)
1368{
1369 int i, cnt = 0;
1370
1371 for (i = 0; i < ATA_MAX_DEVICES; i++)
1372 if (ata_dev_enabled(&ap->device[i]))
1373 cnt++;
1374 return cnt;
1375}
1376
1377/**
1378 * ata_eh_recover - recover host port after error
1379 * @ap: host port to recover
1380 * @softreset: softreset method (can be NULL)
1381 * @hardreset: hardreset method (can be NULL)
1382 * @postreset: postreset method (can be NULL)
1383 *
1384 * This is the alpha and omega, eum and yang, heart and soul of
1385 * libata exception handling. On entry, actions required to
1386 * recover each device are recorded in eh_context. This
1387 * function executes all the operations with appropriate retries
1388 * and fallbacks to resurrect failed devices.
1389 *
1390 * LOCKING:
1391 * Kernel thread context (may sleep).
1392 *
1393 * RETURNS:
1394 * 0 on success, -errno on failure.
1395 */
1396static int ata_eh_recover(struct ata_port *ap, ata_reset_fn_t softreset,
1397 ata_reset_fn_t hardreset,
1398 ata_postreset_fn_t postreset)
1399{
1400 struct ata_eh_context *ehc = &ap->eh_context;
1401 struct ata_device *dev;
1402 int down_xfermask, i, rc;
1403
1404 DPRINTK("ENTER\n");
1405
1406 /* prep for recovery */
1407 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1408 dev = &ap->device[i];
1409
1410 ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
1411 }
1412
1413 retry:
1414 down_xfermask = 0;
1415 rc = 0;
1416
1417 /* skip EH if possible. */
1418 if (!ata_port_nr_enabled(ap) && !(ap->flags & ATA_FLAG_FROZEN))
1419 ehc->i.action = 0;
1420
1421 /* reset */
1422 if (ehc->i.action & ATA_EH_RESET_MASK) {
1423 ata_eh_freeze_port(ap);
1424
1425 rc = ata_eh_reset(ap, softreset, hardreset, postreset);
1426 if (rc) {
1427 ata_port_printk(ap, KERN_ERR,
1428 "reset failed, giving up\n");
1429 goto out;
1430 }
1431
1432 ata_eh_thaw_port(ap);
1433 }
1434
1435 /* revalidate existing devices */
1436 rc = ata_eh_revalidate(ap, &dev);
1437 if (rc)
1438 goto dev_fail;
1439
1440 /* configure transfer mode if the port has been reset */
1441 if (ehc->i.flags & ATA_EHI_DID_RESET) {
1442 rc = ata_set_mode(ap, &dev);
1443 if (rc) {
1444 down_xfermask = 1;
1445 goto dev_fail;
1446 }
1447 }
1448
1449 goto out;
1450
1451 dev_fail:
1452 switch (rc) {
1453 case -ENODEV:
1454 case -EINVAL:
1455 ehc->tries[dev->devno] = 0;
1456 break;
1457 case -EIO:
1458 sata_down_spd_limit(ap);
1459 default:
1460 ehc->tries[dev->devno]--;
1461 if (down_xfermask &&
1462 ata_down_xfermask_limit(dev, ehc->tries[dev->devno] == 1))
1463 ehc->tries[dev->devno] = 0;
1464 }
1465
1466 /* disable device if it has used up all its chances */
1467 if (ata_dev_enabled(dev) && !ehc->tries[dev->devno])
1468 ata_dev_disable(dev);
1469
1470 /* soft didn't work? be haaaaard */
1471 if (ehc->i.flags & ATA_EHI_DID_RESET)
1472 ehc->i.action |= ATA_EH_HARDRESET;
1473 else
1474 ehc->i.action |= ATA_EH_SOFTRESET;
1475
1476 if (ata_port_nr_enabled(ap)) {
1477 ata_port_printk(ap, KERN_WARNING, "failed to recover some "
1478 "devices, retrying in 5 secs\n");
1479 ssleep(5);
1480 } else {
1481 /* no device left, repeat fast */
1482 msleep(500);
1483 }
1484
1485 goto retry;
1486
1487 out:
1488 if (rc) {
1489 for (i = 0; i < ATA_MAX_DEVICES; i++)
1490 ata_dev_disable(&ap->device[i]);
1491 }
1492
1493 DPRINTK("EXIT, rc=%d\n", rc);
1494 return rc;
1495}
1496
1497/**
1498 * ata_eh_finish - finish up EH
1499 * @ap: host port to finish EH for
1500 *
1501 * Recovery is complete. Clean up EH states and retry or finish
1502 * failed qcs.
1503 *
1504 * LOCKING:
1505 * None.
1506 */
1507static void ata_eh_finish(struct ata_port *ap)
1508{
1509 int tag;
1510
1511 /* retry or finish qcs */
1512 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1513 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
1514
1515 if (!(qc->flags & ATA_QCFLAG_FAILED))
1516 continue;
1517
1518 if (qc->err_mask) {
1519 /* FIXME: Once EH migration is complete,
1520 * generate sense data in this function,
1521 * considering both err_mask and tf.
1522 */
1523 if (qc->err_mask & AC_ERR_INVALID)
1524 ata_eh_qc_complete(qc);
1525 else
1526 ata_eh_qc_retry(qc);
1527 } else {
1528 if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
1529 ata_eh_qc_complete(qc);
1530 } else {
1531 /* feed zero TF to sense generation */
1532 memset(&qc->result_tf, 0, sizeof(qc->result_tf));
1533 ata_eh_qc_retry(qc);
1534 }
1535 }
1536 }
1537}
1538
1539/**
1540 * ata_do_eh - do standard error handling
1541 * @ap: host port to handle error for
1542 * @softreset: softreset method (can be NULL)
1543 * @hardreset: hardreset method (can be NULL)
1544 * @postreset: postreset method (can be NULL)
1545 *
1546 * Perform standard error handling sequence.
1547 *
1548 * LOCKING:
1549 * Kernel thread context (may sleep).
1550 */
1551void ata_do_eh(struct ata_port *ap, ata_reset_fn_t softreset,
1552 ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
1553{
1554 ata_eh_autopsy(ap);
1555 ata_eh_report(ap);
1556 ata_eh_recover(ap, softreset, hardreset, postreset);
1557 ata_eh_finish(ap);
1558}
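/* Illustrative sketch (not part of the patch): a typical LLDD wires the
 * stock reset helpers straight into its ->error_handler hook.
 * foo_error_handler() is hypothetical; the three helpers named are the
 * standard libata ones.
 */
static void foo_error_handler(struct ata_port *ap)
{
	ata_do_eh(ap, ata_std_softreset, sata_std_hardreset,
		  ata_std_postreset);
}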
diff --git a/drivers/scsi/libata-scsi.c b/drivers/scsi/libata-scsi.c
index 9871f8272df0..996058af1bcd 100644
--- a/drivers/scsi/libata-scsi.c
+++ b/drivers/scsi/libata-scsi.c
@@ -41,6 +41,7 @@
41#include <scsi/scsi_eh.h> 41#include <scsi/scsi_eh.h>
42#include <scsi/scsi_device.h> 42#include <scsi/scsi_device.h>
43#include <scsi/scsi_request.h> 43#include <scsi/scsi_request.h>
44#include <scsi/scsi_tcq.h>
44#include <scsi/scsi_transport.h> 45#include <scsi/scsi_transport.h>
45#include <linux/libata.h> 46#include <linux/libata.h>
46#include <linux/hdreg.h> 47#include <linux/hdreg.h>
@@ -302,7 +303,6 @@ int ata_scsi_ioctl(struct scsi_device *scsidev, int cmd, void __user *arg)
302 303
303/** 304/**
304 * ata_scsi_qc_new - acquire new ata_queued_cmd reference 305 * ata_scsi_qc_new - acquire new ata_queued_cmd reference
305 * @ap: ATA port to which the new command is attached
306 * @dev: ATA device to which the new command is attached 306 * @dev: ATA device to which the new command is attached
307 * @cmd: SCSI command that originated this ATA command 307 * @cmd: SCSI command that originated this ATA command
308 * @done: SCSI command completion function 308 * @done: SCSI command completion function
@@ -321,14 +321,13 @@ int ata_scsi_ioctl(struct scsi_device *scsidev, int cmd, void __user *arg)
321 * RETURNS: 321 * RETURNS:
322 * Command allocated, or %NULL if none available. 322 * Command allocated, or %NULL if none available.
323 */ 323 */
324struct ata_queued_cmd *ata_scsi_qc_new(struct ata_port *ap, 324struct ata_queued_cmd *ata_scsi_qc_new(struct ata_device *dev,
325 struct ata_device *dev,
326 struct scsi_cmnd *cmd, 325 struct scsi_cmnd *cmd,
327 void (*done)(struct scsi_cmnd *)) 326 void (*done)(struct scsi_cmnd *))
328{ 327{
329 struct ata_queued_cmd *qc; 328 struct ata_queued_cmd *qc;
330 329
331 qc = ata_qc_new_init(ap, dev); 330 qc = ata_qc_new_init(dev);
332 if (qc) { 331 if (qc) {
333 qc->scsicmd = cmd; 332 qc->scsicmd = cmd;
334 qc->scsidone = done; 333 qc->scsidone = done;
@@ -398,7 +397,7 @@ int ata_scsi_device_resume(struct scsi_device *sdev)
398 struct ata_port *ap = ata_shost_to_port(sdev->host); 397 struct ata_port *ap = ata_shost_to_port(sdev->host);
399 struct ata_device *dev = &ap->device[sdev->id]; 398 struct ata_device *dev = &ap->device[sdev->id];
400 399
401 return ata_device_resume(ap, dev); 400 return ata_device_resume(dev);
402} 401}
403 402
404int ata_scsi_device_suspend(struct scsi_device *sdev, pm_message_t state) 403int ata_scsi_device_suspend(struct scsi_device *sdev, pm_message_t state)
@@ -406,7 +405,7 @@ int ata_scsi_device_suspend(struct scsi_device *sdev, pm_message_t state)
406 struct ata_port *ap = ata_shost_to_port(sdev->host); 405 struct ata_port *ap = ata_shost_to_port(sdev->host);
407 struct ata_device *dev = &ap->device[sdev->id]; 406 struct ata_device *dev = &ap->device[sdev->id];
408 407
409 return ata_device_suspend(ap, dev, state); 408 return ata_device_suspend(dev, state);
410} 409}
411 410
412/** 411/**
@@ -417,6 +416,7 @@ int ata_scsi_device_suspend(struct scsi_device *sdev, pm_message_t state)
417 * @sk: the sense key we'll fill out 416 * @sk: the sense key we'll fill out
418 * @asc: the additional sense code we'll fill out 417 * @asc: the additional sense code we'll fill out
419 * @ascq: the additional sense code qualifier we'll fill out 418 * @ascq: the additional sense code qualifier we'll fill out
419 * @verbose: be verbose
420 * 420 *
421 * Converts an ATA error into a SCSI error. Fill out pointers to 421 * Converts an ATA error into a SCSI error. Fill out pointers to
422 * SK, ASC, and ASCQ bytes for later use in fixed or descriptor 422 * SK, ASC, and ASCQ bytes for later use in fixed or descriptor
@@ -426,7 +426,7 @@ int ata_scsi_device_suspend(struct scsi_device *sdev, pm_message_t state)
426 * spin_lock_irqsave(host_set lock) 426 * spin_lock_irqsave(host_set lock)
427 */ 427 */
428void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk, u8 *asc, 428void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk, u8 *asc,
429 u8 *ascq) 429 u8 *ascq, int verbose)
430{ 430{
431 int i; 431 int i;
432 432
@@ -491,8 +491,9 @@ void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk, u8 *asc,
491 } 491 }
492 } 492 }
493 /* No immediate match */ 493 /* No immediate match */
494 printk(KERN_WARNING "ata%u: no sense translation for " 494 if (verbose)
495 "error 0x%02x\n", id, drv_err); 495 printk(KERN_WARNING "ata%u: no sense translation for "
496 "error 0x%02x\n", id, drv_err);
496 } 497 }
497 498
498 /* Fall back to interpreting status bits */ 499 /* Fall back to interpreting status bits */
@@ -505,8 +506,9 @@ void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk, u8 *asc,
505 } 506 }
506 } 507 }
507 /* No error? Undecoded? */ 508 /* No error? Undecoded? */
508 printk(KERN_WARNING "ata%u: no sense translation for status: 0x%02x\n", 509 if (verbose)
509 id, drv_stat); 510 printk(KERN_WARNING "ata%u: no sense translation for "
511 "status: 0x%02x\n", id, drv_stat);
510 512
511 /* We need a sensible error return here, which is tricky, and one 513 /* We need a sensible error return here, which is tricky, and one
512 that won't cause people to do things like return a disk wrongly */ 514 that won't cause people to do things like return a disk wrongly */
@@ -515,9 +517,10 @@ void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk, u8 *asc,
515 *ascq = 0x00; 517 *ascq = 0x00;
516 518
517 translate_done: 519 translate_done:
518 printk(KERN_ERR "ata%u: translated ATA stat/err 0x%02x/%02x to " 520 if (verbose)
519 "SCSI SK/ASC/ASCQ 0x%x/%02x/%02x\n", id, drv_stat, drv_err, 521 printk(KERN_ERR "ata%u: translated ATA stat/err 0x%02x/%02x "
520 *sk, *asc, *ascq); 522 "to SCSI SK/ASC/ASCQ 0x%x/%02x/%02x\n",
523 id, drv_stat, drv_err, *sk, *asc, *ascq);
521 return; 524 return;
522} 525}
523 526
@@ -537,9 +540,10 @@ void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk, u8 *asc,
537void ata_gen_ata_desc_sense(struct ata_queued_cmd *qc) 540void ata_gen_ata_desc_sense(struct ata_queued_cmd *qc)
538{ 541{
539 struct scsi_cmnd *cmd = qc->scsicmd; 542 struct scsi_cmnd *cmd = qc->scsicmd;
540 struct ata_taskfile *tf = &qc->tf; 543 struct ata_taskfile *tf = &qc->result_tf;
541 unsigned char *sb = cmd->sense_buffer; 544 unsigned char *sb = cmd->sense_buffer;
542 unsigned char *desc = sb + 8; 545 unsigned char *desc = sb + 8;
546 int verbose = qc->ap->ops->error_handler == NULL;
543 547
544 memset(sb, 0, SCSI_SENSE_BUFFERSIZE); 548 memset(sb, 0, SCSI_SENSE_BUFFERSIZE);
545 549
@@ -552,7 +556,7 @@ void ata_gen_ata_desc_sense(struct ata_queued_cmd *qc)
552 if (qc->err_mask || 556 if (qc->err_mask ||
553 tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) { 557 tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
554 ata_to_sense_error(qc->ap->id, tf->command, tf->feature, 558 ata_to_sense_error(qc->ap->id, tf->command, tf->feature,
555 &sb[1], &sb[2], &sb[3]); 559 &sb[1], &sb[2], &sb[3], verbose);
556 sb[1] &= 0x0f; 560 sb[1] &= 0x0f;
557 } 561 }
558 562
@@ -608,8 +612,9 @@ void ata_gen_ata_desc_sense(struct ata_queued_cmd *qc)
608void ata_gen_fixed_sense(struct ata_queued_cmd *qc) 612void ata_gen_fixed_sense(struct ata_queued_cmd *qc)
609{ 613{
610 struct scsi_cmnd *cmd = qc->scsicmd; 614 struct scsi_cmnd *cmd = qc->scsicmd;
611 struct ata_taskfile *tf = &qc->tf; 615 struct ata_taskfile *tf = &qc->result_tf;
612 unsigned char *sb = cmd->sense_buffer; 616 unsigned char *sb = cmd->sense_buffer;
617 int verbose = qc->ap->ops->error_handler == NULL;
613 618
614 memset(sb, 0, SCSI_SENSE_BUFFERSIZE); 619 memset(sb, 0, SCSI_SENSE_BUFFERSIZE);
615 620
@@ -622,7 +627,7 @@ void ata_gen_fixed_sense(struct ata_queued_cmd *qc)
622 if (qc->err_mask || 627 if (qc->err_mask ||
623 tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) { 628 tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
624 ata_to_sense_error(qc->ap->id, tf->command, tf->feature, 629 ata_to_sense_error(qc->ap->id, tf->command, tf->feature,
625 &sb[2], &sb[12], &sb[13]); 630 &sb[2], &sb[12], &sb[13], verbose);
626 sb[2] &= 0x0f; 631 sb[2] &= 0x0f;
627 } 632 }
628 633
@@ -680,6 +685,14 @@ static void ata_scsi_dev_config(struct scsi_device *sdev,
680 request_queue_t *q = sdev->request_queue; 685 request_queue_t *q = sdev->request_queue;
681 blk_queue_max_hw_segments(q, q->max_hw_segments - 1); 686 blk_queue_max_hw_segments(q, q->max_hw_segments - 1);
682 } 687 }
688
689 if (dev->flags & ATA_DFLAG_NCQ) {
690 int depth;
691
692 depth = min(sdev->host->can_queue, ata_id_queue_depth(dev->id));
693 depth = min(ATA_MAX_QUEUE - 1, depth);
694 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, depth);
695 }
683} 696}
684 697
685/** 698/**
@@ -714,6 +727,43 @@ int ata_scsi_slave_config(struct scsi_device *sdev)
714} 727}
715 728
716/** 729/**
730 * ata_scsi_change_queue_depth - SCSI callback for queue depth config
731 * @sdev: SCSI device to configure queue depth for
732 * @queue_depth: new queue depth
733 *
 734 * This is the libata standard hostt->change_queue_depth callback.
 735 * SCSI will call into this callback when the user tries to set the
 736 * queue depth via sysfs.
737 *
738 * LOCKING:
739 * SCSI layer (we don't care)
740 *
741 * RETURNS:
742 * Newly configured queue depth.
743 */
744int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth)
745{
746 struct ata_port *ap = ata_shost_to_port(sdev->host);
747 struct ata_device *dev;
748 int max_depth;
749
750 if (queue_depth < 1)
751 return sdev->queue_depth;
752
753 dev = ata_scsi_find_dev(ap, sdev);
754 if (!dev || !ata_dev_enabled(dev))
755 return sdev->queue_depth;
756
757 max_depth = min(sdev->host->can_queue, ata_id_queue_depth(dev->id));
758 max_depth = min(ATA_MAX_QUEUE - 1, max_depth);
759 if (queue_depth > max_depth)
760 queue_depth = max_depth;
761
762 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, queue_depth);
763 return queue_depth;
764}
765
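/* Illustrative sketch (not part of the patch): an NCQ-capable LLDD
 * exports the callback above through its scsi_host_template; the field
 * values shown are assumptions.
 */
static struct scsi_host_template foo_sht = {
	.module			= THIS_MODULE,
	.name			= "foo",
	.change_queue_depth	= ata_scsi_change_queue_depth,
	.can_queue		= ATA_MAX_QUEUE - 1,
	/* ... remaining fields as usual for a libata driver ... */
};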
766/**
717 * ata_scsi_start_stop_xlat - Translate SCSI START STOP UNIT command 767 * ata_scsi_start_stop_xlat - Translate SCSI START STOP UNIT command
718 * @qc: Storage for translated ATA taskfile 768 * @qc: Storage for translated ATA taskfile
719 * @scsicmd: SCSI command to translate 769 * @scsicmd: SCSI command to translate
@@ -748,7 +798,7 @@ static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc,
748 tf->nsect = 1; /* 1 sector, lba=0 */ 798 tf->nsect = 1; /* 1 sector, lba=0 */
749 799
750 if (qc->dev->flags & ATA_DFLAG_LBA) { 800 if (qc->dev->flags & ATA_DFLAG_LBA) {
751 qc->tf.flags |= ATA_TFLAG_LBA; 801 tf->flags |= ATA_TFLAG_LBA;
752 802
753 tf->lbah = 0x0; 803 tf->lbah = 0x0;
754 tf->lbam = 0x0; 804 tf->lbam = 0x0;
@@ -1099,7 +1149,36 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, const u8 *scsicm
1099 */ 1149 */
1100 goto nothing_to_do; 1150 goto nothing_to_do;
1101 1151
1102 if (dev->flags & ATA_DFLAG_LBA) { 1152 if ((dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ)) == ATA_DFLAG_NCQ) {
1153 /* yay, NCQ */
1154 if (!lba_48_ok(block, n_block))
1155 goto out_of_range;
1156
1157 tf->protocol = ATA_PROT_NCQ;
1158 tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
1159
1160 if (tf->flags & ATA_TFLAG_WRITE)
1161 tf->command = ATA_CMD_FPDMA_WRITE;
1162 else
1163 tf->command = ATA_CMD_FPDMA_READ;
1164
1165 qc->nsect = n_block;
1166
1167 tf->nsect = qc->tag << 3;
1168 tf->hob_feature = (n_block >> 8) & 0xff;
1169 tf->feature = n_block & 0xff;
1170
1171 tf->hob_lbah = (block >> 40) & 0xff;
1172 tf->hob_lbam = (block >> 32) & 0xff;
1173 tf->hob_lbal = (block >> 24) & 0xff;
1174 tf->lbah = (block >> 16) & 0xff;
1175 tf->lbam = (block >> 8) & 0xff;
1176 tf->lbal = block & 0xff;
1177
1178 tf->device = 1 << 6;
1179 if (tf->flags & ATA_TFLAG_FUA)
1180 tf->device |= 1 << 7;
1181 } else if (dev->flags & ATA_DFLAG_LBA) {
1103 tf->flags |= ATA_TFLAG_LBA; 1182 tf->flags |= ATA_TFLAG_LBA;
1104 1183
1105 if (lba_28_ok(block, n_block)) { 1184 if (lba_28_ok(block, n_block)) {
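/* Worked example for the NCQ branch above: a 16-sector FPDMA READ at
 * LBA 0x12345678 with tag 5 encodes nsect = 5 << 3 = 0x28 (tag in bits
 * 7:3), feature/hob_feature = 0x10/0x00 (sector count), lbal/lbam/lbah
 * = 0x78/0x56/0x34, hob_lbal = 0x12, hob_lbam = hob_lbah = 0x00, and
 * device = 1 << 6 (LBA mode).
 */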
@@ -1199,14 +1278,11 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
1199 */ 1278 */
1200 if (((cdb[0] == ATA_16) || (cdb[0] == ATA_12)) && 1279 if (((cdb[0] == ATA_16) || (cdb[0] == ATA_12)) &&
1201 ((cdb[2] & 0x20) || need_sense)) { 1280 ((cdb[2] & 0x20) || need_sense)) {
1202 qc->ap->ops->tf_read(qc->ap, &qc->tf);
1203 ata_gen_ata_desc_sense(qc); 1281 ata_gen_ata_desc_sense(qc);
1204 } else { 1282 } else {
1205 if (!need_sense) { 1283 if (!need_sense) {
1206 cmd->result = SAM_STAT_GOOD; 1284 cmd->result = SAM_STAT_GOOD;
1207 } else { 1285 } else {
1208 qc->ap->ops->tf_read(qc->ap, &qc->tf);
1209
1210 /* TODO: decide which descriptor format to use 1286 /* TODO: decide which descriptor format to use
1211 * for 48b LBA devices and call that here 1287 * for 48b LBA devices and call that here
1212 * instead of the fixed desc, which is only 1288 * instead of the fixed desc, which is only
@@ -1217,10 +1293,8 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
1217 } 1293 }
1218 } 1294 }
1219 1295
1220 if (need_sense) { 1296 if (need_sense && !qc->ap->ops->error_handler)
1221 /* The ata_gen_..._sense routines fill in tf */ 1297 ata_dump_status(qc->ap->id, &qc->result_tf);
1222 ata_dump_status(qc->ap->id, &qc->tf);
1223 }
1224 1298
1225 qc->scsidone(cmd); 1299 qc->scsidone(cmd);
1226 1300
@@ -1228,8 +1302,40 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
1228} 1302}
1229 1303
1230/** 1304/**
1305 * ata_scmd_need_defer - Check whether we need to defer scmd
1306 * @dev: ATA device to which the command is addressed
1307 * @is_io: Is the command IO (and thus possibly NCQ)?
1308 *
1309 * NCQ and non-NCQ commands cannot run together. As the upper layer
1310 * only knows the queue depth, we are responsible for maintaining
1311 * exclusion. This function checks whether a new command can be
1312 * issued to @dev.
1313 *
1314 * LOCKING:
1315 * spin_lock_irqsave(host_set lock)
1316 *
1317 * RETURNS:
1318 * 1 if deferring is needed, 0 otherwise.
1319 */
1320static int ata_scmd_need_defer(struct ata_device *dev, int is_io)
1321{
1322 struct ata_port *ap = dev->ap;
1323
1324 if (!(dev->flags & ATA_DFLAG_NCQ))
1325 return 0;
1326
1327 if (is_io) {
1328 if (!ata_tag_valid(ap->active_tag))
1329 return 0;
1330 } else {
1331 if (!ata_tag_valid(ap->active_tag) && !ap->sactive)
1332 return 0;
1333 }
1334 return 1;
1335}
1336
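/* In effect: once an NCQ command is in flight (ap->sactive != 0),
 * non-NCQ commands are deferred; once a non-NCQ command is in flight
 * (ap->active_tag valid), everything is deferred; devices without
 * ATA_DFLAG_NCQ are never deferred here.
 */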
1337/**
1231 * ata_scsi_translate - Translate then issue SCSI command to ATA device 1338 * ata_scsi_translate - Translate then issue SCSI command to ATA device
1232 * @ap: ATA port to which the command is addressed
1233 * @dev: ATA device to which the command is addressed 1339 * @dev: ATA device to which the command is addressed
1234 * @cmd: SCSI command to execute 1340 * @cmd: SCSI command to execute
1235 * @done: SCSI command completion function 1341 * @done: SCSI command completion function
@@ -1250,19 +1356,25 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
1250 * 1356 *
1251 * LOCKING: 1357 * LOCKING:
1252 * spin_lock_irqsave(host_set lock) 1358 * spin_lock_irqsave(host_set lock)
1359 *
1360 * RETURNS:
1361 * 0 on success, SCSI_ML_QUEUE_DEVICE_BUSY if the command
1362 * needs to be deferred.
1253 */ 1363 */
1254 1364static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd,
1255static void ata_scsi_translate(struct ata_port *ap, struct ata_device *dev,
1256 struct scsi_cmnd *cmd,
1257 void (*done)(struct scsi_cmnd *), 1365 void (*done)(struct scsi_cmnd *),
1258 ata_xlat_func_t xlat_func) 1366 ata_xlat_func_t xlat_func)
1259{ 1367{
1260 struct ata_queued_cmd *qc; 1368 struct ata_queued_cmd *qc;
1261 u8 *scsicmd = cmd->cmnd; 1369 u8 *scsicmd = cmd->cmnd;
1370 int is_io = xlat_func == ata_scsi_rw_xlat;
1262 1371
1263 VPRINTK("ENTER\n"); 1372 VPRINTK("ENTER\n");
1264 1373
1265 qc = ata_scsi_qc_new(ap, dev, cmd, done); 1374 if (unlikely(ata_scmd_need_defer(dev, is_io)))
1375 goto defer;
1376
1377 qc = ata_scsi_qc_new(dev, cmd, done);
1266 if (!qc) 1378 if (!qc)
1267 goto err_mem; 1379 goto err_mem;
1268 1380
@@ -1270,8 +1382,8 @@ static void ata_scsi_translate(struct ata_port *ap, struct ata_device *dev,
1270 if (cmd->sc_data_direction == DMA_FROM_DEVICE || 1382 if (cmd->sc_data_direction == DMA_FROM_DEVICE ||
1271 cmd->sc_data_direction == DMA_TO_DEVICE) { 1383 cmd->sc_data_direction == DMA_TO_DEVICE) {
1272 if (unlikely(cmd->request_bufflen < 1)) { 1384 if (unlikely(cmd->request_bufflen < 1)) {
1273 printk(KERN_WARNING "ata%u(%u): WARNING: zero len r/w req\n", 1385 ata_dev_printk(dev, KERN_WARNING,
1274 ap->id, dev->devno); 1386 "WARNING: zero len r/w req\n");
1275 goto err_did; 1387 goto err_did;
1276 } 1388 }
1277 1389
@@ -1293,13 +1405,13 @@ static void ata_scsi_translate(struct ata_port *ap, struct ata_device *dev,
1293 ata_qc_issue(qc); 1405 ata_qc_issue(qc);
1294 1406
1295 VPRINTK("EXIT\n"); 1407 VPRINTK("EXIT\n");
1296 return; 1408 return 0;
1297 1409
1298early_finish: 1410early_finish:
1299 ata_qc_free(qc); 1411 ata_qc_free(qc);
1300 done(cmd); 1412 done(cmd);
1301 DPRINTK("EXIT - early finish (good or error)\n"); 1413 DPRINTK("EXIT - early finish (good or error)\n");
1302 return; 1414 return 0;
1303 1415
1304err_did: 1416err_did:
1305 ata_qc_free(qc); 1417 ata_qc_free(qc);
@@ -1307,7 +1419,11 @@ err_mem:
1307 cmd->result = (DID_ERROR << 16); 1419 cmd->result = (DID_ERROR << 16);
1308 done(cmd); 1420 done(cmd);
1309 DPRINTK("EXIT - internal\n"); 1421 DPRINTK("EXIT - internal\n");
1310 return; 1422 return 0;
1423
1424defer:
1425 DPRINTK("EXIT - defer\n");
1426 return SCSI_MLQUEUE_DEVICE_BUSY;
1311} 1427}
1312 1428
1313/** 1429/**
@@ -2004,7 +2120,6 @@ static void atapi_sense_complete(struct ata_queued_cmd *qc)
2004 * a sense descriptors, since that's only 2120 * a sense descriptors, since that's only
2005 * correct for ATA, not ATAPI 2121 * correct for ATA, not ATAPI
2006 */ 2122 */
2007 qc->ap->ops->tf_read(qc->ap, &qc->tf);
2008 ata_gen_ata_desc_sense(qc); 2123 ata_gen_ata_desc_sense(qc);
2009 } 2124 }
2010 2125
@@ -2070,6 +2185,26 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc)
2070 2185
2071 VPRINTK("ENTER, err_mask 0x%X\n", err_mask); 2186 VPRINTK("ENTER, err_mask 0x%X\n", err_mask);
2072 2187
2188 /* handle completion from new EH */
2189 if (unlikely(qc->ap->ops->error_handler &&
2190 (err_mask || qc->flags & ATA_QCFLAG_SENSE_VALID))) {
2191
2192 if (!(qc->flags & ATA_QCFLAG_SENSE_VALID)) {
2193 /* FIXME: not quite right; we don't want the
2194 * translation of taskfile registers into a
2195 * sense descriptors, since that's only
2196 * correct for ATA, not ATAPI
2197 */
2198 ata_gen_ata_desc_sense(qc);
2199 }
2200
2201 qc->scsicmd->result = SAM_STAT_CHECK_CONDITION;
2202 qc->scsidone(cmd);
2203 ata_qc_free(qc);
2204 return;
2205 }
2206
2207 /* successful completion or old EH failure path */
2073 if (unlikely(err_mask & AC_ERR_DEV)) { 2208 if (unlikely(err_mask & AC_ERR_DEV)) {
2074 cmd->result = SAM_STAT_CHECK_CONDITION; 2209 cmd->result = SAM_STAT_CHECK_CONDITION;
2075 atapi_request_sense(qc); 2210 atapi_request_sense(qc);
@@ -2080,7 +2215,6 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc)
2080 * a sense descriptors, since that's only 2215 * a sense descriptors, since that's only
2081 * correct for ATA, not ATAPI 2216 * correct for ATA, not ATAPI
2082 */ 2217 */
2083 qc->ap->ops->tf_read(qc->ap, &qc->tf);
2084 ata_gen_ata_desc_sense(qc); 2218 ata_gen_ata_desc_sense(qc);
2085 } else { 2219 } else {
2086 u8 *scsicmd = cmd->cmnd; 2220 u8 *scsicmd = cmd->cmnd;
@@ -2211,8 +2345,9 @@ ata_scsi_find_dev(struct ata_port *ap, const struct scsi_device *scsidev)
2211 2345
2212 if (!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) { 2346 if (!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) {
2213 if (unlikely(dev->class == ATA_DEV_ATAPI)) { 2347 if (unlikely(dev->class == ATA_DEV_ATAPI)) {
2214 printk(KERN_WARNING "ata%u(%u): WARNING: ATAPI is %s, device ignored.\n", 2348 ata_dev_printk(dev, KERN_WARNING,
2215 ap->id, dev->devno, atapi_enabled ? "not supported with this driver" : "disabled"); 2349 "WARNING: ATAPI is %s, device ignored.\n",
2350 atapi_enabled ? "not supported with this driver" : "disabled");
2216 return NULL; 2351 return NULL;
2217 } 2352 }
2218 } 2353 }
@@ -2361,6 +2496,9 @@ ata_scsi_pass_thru(struct ata_queued_cmd *qc, const u8 *scsicmd)
2361 */ 2496 */
2362 qc->nsect = cmd->bufflen / ATA_SECT_SIZE; 2497 qc->nsect = cmd->bufflen / ATA_SECT_SIZE;
2363 2498
2499 /* request result TF */
2500 qc->flags |= ATA_QCFLAG_RESULT_TF;
2501
2364 return 0; 2502 return 0;
2365 2503
2366 invalid_fld: 2504 invalid_fld:
@@ -2437,19 +2575,24 @@ static inline void ata_scsi_dump_cdb(struct ata_port *ap,
2437#endif 2575#endif
2438} 2576}
2439 2577
2440static inline void __ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *), 2578static inline int __ata_scsi_queuecmd(struct scsi_cmnd *cmd,
2441 struct ata_port *ap, struct ata_device *dev) 2579 void (*done)(struct scsi_cmnd *),
2580 struct ata_device *dev)
2442{ 2581{
2582 int rc = 0;
2583
2443 if (dev->class == ATA_DEV_ATA) { 2584 if (dev->class == ATA_DEV_ATA) {
2444 ata_xlat_func_t xlat_func = ata_get_xlat_func(dev, 2585 ata_xlat_func_t xlat_func = ata_get_xlat_func(dev,
2445 cmd->cmnd[0]); 2586 cmd->cmnd[0]);
2446 2587
2447 if (xlat_func) 2588 if (xlat_func)
2448 ata_scsi_translate(ap, dev, cmd, done, xlat_func); 2589 rc = ata_scsi_translate(dev, cmd, done, xlat_func);
2449 else 2590 else
2450 ata_scsi_simulate(ap, dev, cmd, done); 2591 ata_scsi_simulate(dev, cmd, done);
2451 } else 2592 } else
2452 ata_scsi_translate(ap, dev, cmd, done, atapi_xlat); 2593 rc = ata_scsi_translate(dev, cmd, done, atapi_xlat);
2594
2595 return rc;
2453} 2596}
2454 2597
2455/** 2598/**
@@ -2468,15 +2611,16 @@ static inline void __ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struc
2468 * Releases scsi-layer-held lock, and obtains host_set lock. 2611 * Releases scsi-layer-held lock, and obtains host_set lock.
2469 * 2612 *
2470 * RETURNS: 2613 * RETURNS:
2471 * Zero. 2614 * Return value from __ata_scsi_queuecmd() if @cmd can be queued,
2615 * 0 otherwise.
2472 */ 2616 */
2473
2474int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) 2617int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
2475{ 2618{
2476 struct ata_port *ap; 2619 struct ata_port *ap;
2477 struct ata_device *dev; 2620 struct ata_device *dev;
2478 struct scsi_device *scsidev = cmd->device; 2621 struct scsi_device *scsidev = cmd->device;
2479 struct Scsi_Host *shost = scsidev->host; 2622 struct Scsi_Host *shost = scsidev->host;
2623 int rc = 0;
2480 2624
2481 ap = ata_shost_to_port(shost); 2625 ap = ata_shost_to_port(shost);
2482 2626
@@ -2487,7 +2631,7 @@ int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
2487 2631
2488 dev = ata_scsi_find_dev(ap, scsidev); 2632 dev = ata_scsi_find_dev(ap, scsidev);
2489 if (likely(dev)) 2633 if (likely(dev))
2490 __ata_scsi_queuecmd(cmd, done, ap, dev); 2634 rc = __ata_scsi_queuecmd(cmd, done, dev);
2491 else { 2635 else {
2492 cmd->result = (DID_BAD_TARGET << 16); 2636 cmd->result = (DID_BAD_TARGET << 16);
2493 done(cmd); 2637 done(cmd);
@@ -2495,12 +2639,11 @@ int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
2495 2639
2496 spin_unlock(&ap->host_set->lock); 2640 spin_unlock(&ap->host_set->lock);
2497 spin_lock(shost->host_lock); 2641 spin_lock(shost->host_lock);
2498 return 0; 2642 return rc;
2499} 2643}
2500 2644
2501/** 2645/**
2502 * ata_scsi_simulate - simulate SCSI command on ATA device 2646 * ata_scsi_simulate - simulate SCSI command on ATA device
2503 * @ap: port the device is connected to
2504 * @dev: the target device 2647 * @dev: the target device
2505 * @cmd: SCSI command being sent to device. 2648 * @cmd: SCSI command being sent to device.
2506 * @done: SCSI command completion function. 2649 * @done: SCSI command completion function.
@@ -2512,14 +2655,12 @@ int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
2512 * spin_lock_irqsave(host_set lock) 2655 * spin_lock_irqsave(host_set lock)
2513 */ 2656 */
2514 2657
2515void ata_scsi_simulate(struct ata_port *ap, struct ata_device *dev, 2658void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd,
2516 struct scsi_cmnd *cmd,
2517 void (*done)(struct scsi_cmnd *)) 2659 void (*done)(struct scsi_cmnd *))
2518{ 2660{
2519 struct ata_scsi_args args; 2661 struct ata_scsi_args args;
2520 const u8 *scsicmd = cmd->cmnd; 2662 const u8 *scsicmd = cmd->cmnd;
2521 2663
2522 args.ap = ap;
2523 args.dev = dev; 2664 args.dev = dev;
2524 args.id = dev->id; 2665 args.id = dev->id;
2525 args.cmd = cmd; 2666 args.cmd = cmd;
@@ -2605,3 +2746,26 @@ void ata_scsi_scan_host(struct ata_port *ap)
2605 } 2746 }
2606} 2747}
2607 2748
2749/**
2750 * ata_schedule_scsi_eh - schedule EH for SCSI host
2751 * @shost: SCSI host to invoke error handling on.
2752 *
2753 * Schedule SCSI EH without scmd. This is a hack.
2754 *
2755 * LOCKING:
2756 * spin_lock_irqsave(host_set lock)
2757 */
2758void ata_schedule_scsi_eh(struct Scsi_Host *shost)
2759{
2760 unsigned long flags;
2761
2762 spin_lock_irqsave(shost->host_lock, flags);
2763
2764 if (scsi_host_set_state(shost, SHOST_RECOVERY) == 0 ||
2765 scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY) == 0) {
2766 shost->host_eh_scheduled++;
2767 scsi_eh_wakeup(shost);
2768 }
2769
2770 spin_unlock_irqrestore(shost->host_lock, flags);
2771}
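/* Side note: callers must hold the host_set lock, per the LOCKING note
 * above.  Bumping shost->host_eh_scheduled lets the SCSI EH thread run
 * (and hence ata_scsi_error()) even though no scmd has failed, which is
 * what makes scmd-less EH possible.
 */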
diff --git a/drivers/scsi/libata.h b/drivers/scsi/libata.h
index 3f8b0a863781..b76ad7d7062a 100644
--- a/drivers/scsi/libata.h
+++ b/drivers/scsi/libata.h
@@ -32,7 +32,6 @@
32#define DRV_VERSION "1.30" /* must be exactly four chars */ 32#define DRV_VERSION "1.30" /* must be exactly four chars */
33 33
34struct ata_scsi_args { 34struct ata_scsi_args {
35 struct ata_port *ap;
36 struct ata_device *dev; 35 struct ata_device *dev;
37 u16 *id; 36 u16 *id;
38 struct scsi_cmnd *cmd; 37 struct scsi_cmnd *cmd;
@@ -43,23 +42,22 @@ struct ata_scsi_args {
43extern int atapi_enabled; 42extern int atapi_enabled;
44extern int atapi_dmadir; 43extern int atapi_dmadir;
45extern int libata_fua; 44extern int libata_fua;
46extern struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap, 45extern struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev);
47 struct ata_device *dev);
48extern int ata_rwcmd_protocol(struct ata_queued_cmd *qc); 46extern int ata_rwcmd_protocol(struct ata_queued_cmd *qc);
49extern void ata_dev_disable(struct ata_port *ap, struct ata_device *dev); 47extern void ata_dev_disable(struct ata_device *dev);
50extern void ata_port_flush_task(struct ata_port *ap); 48extern void ata_port_flush_task(struct ata_port *ap);
51extern unsigned ata_exec_internal(struct ata_port *ap, struct ata_device *dev, 49extern unsigned ata_exec_internal(struct ata_device *dev,
52 struct ata_taskfile *tf, const u8 *cdb, 50 struct ata_taskfile *tf, const u8 *cdb,
53 int dma_dir, void *buf, unsigned int buflen); 51 int dma_dir, void *buf, unsigned int buflen);
54extern int ata_down_sata_spd_limit(struct ata_port *ap); 52extern int sata_down_spd_limit(struct ata_port *ap);
55extern int ata_set_sata_spd_needed(struct ata_port *ap); 53extern int sata_set_spd_needed(struct ata_port *ap);
56extern int ata_down_xfermask_limit(struct ata_port *ap, struct ata_device *dev, 54extern int ata_down_xfermask_limit(struct ata_device *dev, int force_pio0);
57 int force_pio0);
58extern int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev); 55extern int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev);
59extern int ata_do_reset(struct ata_port *ap, ata_reset_fn_t reset, 56extern int ata_do_reset(struct ata_port *ap, ata_reset_fn_t reset,
60 ata_postreset_fn_t postreset, unsigned int *classes); 57 unsigned int *classes);
61extern void ata_qc_free(struct ata_queued_cmd *qc); 58extern void ata_qc_free(struct ata_queued_cmd *qc);
62extern void ata_qc_issue(struct ata_queued_cmd *qc); 59extern void ata_qc_issue(struct ata_queued_cmd *qc);
60extern void __ata_qc_complete(struct ata_queued_cmd *qc);
63extern int ata_check_atapi_dma(struct ata_queued_cmd *qc); 61extern int ata_check_atapi_dma(struct ata_queued_cmd *qc);
64extern void ata_dev_select(struct ata_port *ap, unsigned int device, 62extern void ata_dev_select(struct ata_port *ap, unsigned int device,
65 unsigned int wait, unsigned int can_sleep); 63 unsigned int wait, unsigned int can_sleep);
@@ -100,9 +98,11 @@ extern void ata_scsi_set_sense(struct scsi_cmnd *cmd,
100extern void ata_scsi_rbuf_fill(struct ata_scsi_args *args, 98extern void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
101 unsigned int (*actor) (struct ata_scsi_args *args, 99 unsigned int (*actor) (struct ata_scsi_args *args,
102 u8 *rbuf, unsigned int buflen)); 100 u8 *rbuf, unsigned int buflen));
101extern void ata_schedule_scsi_eh(struct Scsi_Host *shost);
103 102
104/* libata-eh.c */ 103/* libata-eh.c */
105extern enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd); 104extern enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd);
106extern void ata_scsi_error(struct Scsi_Host *host); 105extern void ata_scsi_error(struct Scsi_Host *host);
106extern void ata_qc_schedule_eh(struct ata_queued_cmd *qc);
107 107
108#endif /* __LIBATA_H__ */ 108#endif /* __LIBATA_H__ */
diff --git a/drivers/scsi/pdc_adma.c b/drivers/scsi/pdc_adma.c
index b9a3c566f833..a341fa8d3291 100644
--- a/drivers/scsi/pdc_adma.c
+++ b/drivers/scsi/pdc_adma.c
@@ -455,13 +455,13 @@ static inline unsigned int adma_intr_pkt(struct ata_host_set *host_set)
455 continue; 455 continue;
456 handled = 1; 456 handled = 1;
457 adma_enter_reg_mode(ap); 457 adma_enter_reg_mode(ap);
458 if (ap->flags & (ATA_FLAG_DISABLED | ATA_FLAG_NOINTR)) 458 if (ap->flags & ATA_FLAG_DISABLED)
459 continue; 459 continue;
460 pp = ap->private_data; 460 pp = ap->private_data;
461 if (!pp || pp->state != adma_state_pkt) 461 if (!pp || pp->state != adma_state_pkt)
462 continue; 462 continue;
463 qc = ata_qc_from_tag(ap, ap->active_tag); 463 qc = ata_qc_from_tag(ap, ap->active_tag);
464 if (qc && (!(qc->tf.ctl & ATA_NIEN))) { 464 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
465 if ((status & (aPERR | aPSD | aUIRQ))) 465 if ((status & (aPERR | aPSD | aUIRQ)))
466 qc->err_mask |= AC_ERR_OTHER; 466 qc->err_mask |= AC_ERR_OTHER;
467 else if (pp->pkt[0] != cDONE) 467 else if (pp->pkt[0] != cDONE)
@@ -480,13 +480,13 @@ static inline unsigned int adma_intr_mmio(struct ata_host_set *host_set)
480 for (port_no = 0; port_no < host_set->n_ports; ++port_no) { 480 for (port_no = 0; port_no < host_set->n_ports; ++port_no) {
481 struct ata_port *ap; 481 struct ata_port *ap;
482 ap = host_set->ports[port_no]; 482 ap = host_set->ports[port_no];
483 if (ap && (!(ap->flags & (ATA_FLAG_DISABLED | ATA_FLAG_NOINTR)))) { 483 if (ap && (!(ap->flags & ATA_FLAG_DISABLED))) {
484 struct ata_queued_cmd *qc; 484 struct ata_queued_cmd *qc;
485 struct adma_port_priv *pp = ap->private_data; 485 struct adma_port_priv *pp = ap->private_data;
486 if (!pp || pp->state != adma_state_mmio) 486 if (!pp || pp->state != adma_state_mmio)
487 continue; 487 continue;
488 qc = ata_qc_from_tag(ap, ap->active_tag); 488 qc = ata_qc_from_tag(ap, ap->active_tag);
489 if (qc && (!(qc->tf.ctl & ATA_NIEN))) { 489 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
490 490
491 /* check main status, clearing INTRQ */ 491 /* check main status, clearing INTRQ */
492 u8 status = ata_check_status(ap); 492 u8 status = ata_check_status(ap);
diff --git a/drivers/scsi/sata_mv.c b/drivers/scsi/sata_mv.c
index 181917ac0426..e6d141dd0385 100644
--- a/drivers/scsi/sata_mv.c
+++ b/drivers/scsi/sata_mv.c
@@ -87,7 +87,7 @@ enum {
87 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */ 87 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
88 MV_COMMON_FLAGS = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 88 MV_COMMON_FLAGS = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
89 ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO | 89 ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
90 ATA_FLAG_NO_ATAPI), 90 ATA_FLAG_PIO_POLLING),
91 MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE, 91 MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,
92 92
93 CRQB_FLAG_READ = (1 << 0), 93 CRQB_FLAG_READ = (1 << 0),
@@ -680,7 +680,7 @@ static void mv_stop_dma(struct ata_port *ap)
680 } 680 }
681 681
682 if (EDMA_EN & reg) { 682 if (EDMA_EN & reg) {
683 printk(KERN_ERR "ata%u: Unable to stop eDMA\n", ap->id); 683 ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
684 /* FIXME: Consider doing a reset here to recover */ 684 /* FIXME: Consider doing a reset here to recover */
685 } 685 }
686} 686}
@@ -1309,8 +1309,8 @@ static void mv_err_intr(struct ata_port *ap)
1309 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); 1309 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1310 1310
1311 if (EDMA_ERR_SERR & edma_err_cause) { 1311 if (EDMA_ERR_SERR & edma_err_cause) {
1312 serr = scr_read(ap, SCR_ERROR); 1312 sata_scr_read(ap, SCR_ERROR, &serr);
1313 scr_write_flush(ap, SCR_ERROR, serr); 1313 sata_scr_write_flush(ap, SCR_ERROR, serr);
1314 } 1314 }
1315 if (EDMA_ERR_SELF_DIS & edma_err_cause) { 1315 if (EDMA_ERR_SELF_DIS & edma_err_cause) {
1316 struct mv_port_priv *pp = ap->private_data; 1316 struct mv_port_priv *pp = ap->private_data;
@@ -1396,7 +1396,7 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
1396 } 1396 }
1397 } 1397 }
1398 1398
1399 if (ap->flags & (ATA_FLAG_DISABLED | ATA_FLAG_NOINTR)) 1399 if (ap && (ap->flags & ATA_FLAG_DISABLED))
1400 continue; 1400 continue;
1401 1401
1402 err_mask = ac_err_mask(ata_status); 1402 err_mask = ac_err_mask(ata_status);
@@ -1417,7 +1417,7 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
1417 VPRINTK("port %u IRQ found for qc, " 1417 VPRINTK("port %u IRQ found for qc, "
1418 "ata_status 0x%x\n", port,ata_status); 1418 "ata_status 0x%x\n", port,ata_status);
1419 /* mark qc status appropriately */ 1419 /* mark qc status appropriately */
1420 if (!(qc->tf.ctl & ATA_NIEN)) { 1420 if (!(qc->tf.flags & ATA_TFLAG_POLLING)) {
1421 qc->err_mask |= err_mask; 1421 qc->err_mask |= err_mask;
1422 ata_qc_complete(qc); 1422 ata_qc_complete(qc);
1423 } 1423 }
@@ -1934,15 +1934,16 @@ static void __mv_phy_reset(struct ata_port *ap, int can_sleep)
1934 1934
1935 /* Issue COMRESET via SControl */ 1935 /* Issue COMRESET via SControl */
1936comreset_retry: 1936comreset_retry:
1937 scr_write_flush(ap, SCR_CONTROL, 0x301); 1937 sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
1938 __msleep(1, can_sleep); 1938 __msleep(1, can_sleep);
1939 1939
1940 scr_write_flush(ap, SCR_CONTROL, 0x300); 1940 sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
1941 __msleep(20, can_sleep); 1941 __msleep(20, can_sleep);
1942 1942
1943 timeout = jiffies + msecs_to_jiffies(200); 1943 timeout = jiffies + msecs_to_jiffies(200);
1944 do { 1944 do {
1945 sstatus = scr_read(ap, SCR_STATUS) & 0x3; 1945 sata_scr_read(ap, SCR_STATUS, &sstatus);
1946 sstatus &= 0x3;
1946 if ((sstatus == 3) || (sstatus == 0)) 1947 if ((sstatus == 3) || (sstatus == 0))
1947 break; 1948 break;
1948 1949
@@ -1959,11 +1960,12 @@ comreset_retry:
1959 "SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS), 1960 "SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
1960 mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL)); 1961 mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));
1961 1962
1962 if (sata_dev_present(ap)) { 1963 if (ata_port_online(ap)) {
1963 ata_port_probe(ap); 1964 ata_port_probe(ap);
1964 } else { 1965 } else {
1965 printk(KERN_INFO "ata%u: no device found (phy stat %08x)\n", 1966 sata_scr_read(ap, SCR_STATUS, &sstatus);
1966 ap->id, scr_read(ap, SCR_STATUS)); 1967 ata_port_printk(ap, KERN_INFO,
1968 "no device found (phy stat %08x)\n", sstatus);
1967 ata_port_disable(ap); 1969 ata_port_disable(ap);
1968 return; 1970 return;
1969 } 1971 }
@@ -2021,7 +2023,7 @@ static void mv_eng_timeout(struct ata_port *ap)
2021{ 2023{
2022 struct ata_queued_cmd *qc; 2024 struct ata_queued_cmd *qc;
2023 2025
2024 printk(KERN_ERR "ata%u: Entering mv_eng_timeout\n",ap->id); 2026 ata_port_printk(ap, KERN_ERR, "Entering mv_eng_timeout\n");
2025 DPRINTK("All regs @ start of eng_timeout\n"); 2027 DPRINTK("All regs @ start of eng_timeout\n");
2026 mv_dump_all_regs(ap->host_set->mmio_base, ap->port_no, 2028 mv_dump_all_regs(ap->host_set->mmio_base, ap->port_no,
2027 to_pci_dev(ap->host_set->dev)); 2029 to_pci_dev(ap->host_set->dev));
diff --git a/drivers/scsi/sata_nv.c b/drivers/scsi/sata_nv.c
index 3a70875be8ba..70c51088d371 100644
--- a/drivers/scsi/sata_nv.c
+++ b/drivers/scsi/sata_nv.c
@@ -279,11 +279,11 @@ static irqreturn_t nv_interrupt (int irq, void *dev_instance,
279 279
280 ap = host_set->ports[i]; 280 ap = host_set->ports[i];
281 if (ap && 281 if (ap &&
282 !(ap->flags & (ATA_FLAG_DISABLED | ATA_FLAG_NOINTR))) { 282 !(ap->flags & ATA_FLAG_DISABLED)) {
283 struct ata_queued_cmd *qc; 283 struct ata_queued_cmd *qc;
284 284
285 qc = ata_qc_from_tag(ap, ap->active_tag); 285 qc = ata_qc_from_tag(ap, ap->active_tag);
286 if (qc && (!(qc->tf.ctl & ATA_NIEN))) 286 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
287 handled += ata_host_intr(ap, qc); 287 handled += ata_host_intr(ap, qc);
288 else 288 else
289 // No request pending? Clear interrupt status 289 // No request pending? Clear interrupt status
diff --git a/drivers/scsi/sata_promise.c b/drivers/scsi/sata_promise.c
index ddbc0c6dd9fe..bb000438cb6c 100644
--- a/drivers/scsi/sata_promise.c
+++ b/drivers/scsi/sata_promise.c
@@ -76,7 +76,8 @@ enum {
76 PDC_RESET = (1 << 11), /* HDMA reset */ 76 PDC_RESET = (1 << 11), /* HDMA reset */
77 77
78 PDC_COMMON_FLAGS = ATA_FLAG_NO_LEGACY | ATA_FLAG_SRST | 78 PDC_COMMON_FLAGS = ATA_FLAG_NO_LEGACY | ATA_FLAG_SRST |
79 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI, 79 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
80 ATA_FLAG_PIO_POLLING,
80}; 81};
81 82
82 83
@@ -435,7 +436,7 @@ static void pdc_eng_timeout(struct ata_port *ap)
435 switch (qc->tf.protocol) { 436 switch (qc->tf.protocol) {
436 case ATA_PROT_DMA: 437 case ATA_PROT_DMA:
437 case ATA_PROT_NODATA: 438 case ATA_PROT_NODATA:
438 printk(KERN_ERR "ata%u: command timeout\n", ap->id); 439 ata_port_printk(ap, KERN_ERR, "command timeout\n");
439 drv_stat = ata_wait_idle(ap); 440 drv_stat = ata_wait_idle(ap);
440 qc->err_mask |= __ac_err_mask(drv_stat); 441 qc->err_mask |= __ac_err_mask(drv_stat);
441 break; 442 break;
@@ -443,8 +444,9 @@ static void pdc_eng_timeout(struct ata_port *ap)
443 default: 444 default:
444 drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000); 445 drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
445 446
446 printk(KERN_ERR "ata%u: unknown timeout, cmd 0x%x stat 0x%x\n", 447 ata_port_printk(ap, KERN_ERR,
447 ap->id, qc->tf.command, drv_stat); 448 "unknown timeout, cmd 0x%x stat 0x%x\n",
449 qc->tf.command, drv_stat);
448 450
449 qc->err_mask |= ac_err_mask(drv_stat); 451 qc->err_mask |= ac_err_mask(drv_stat);
450 break; 452 break;
@@ -533,11 +535,11 @@ static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *r
533 ap = host_set->ports[i]; 535 ap = host_set->ports[i];
534 tmp = mask & (1 << (i + 1)); 536 tmp = mask & (1 << (i + 1));
535 if (tmp && ap && 537 if (tmp && ap &&
536 !(ap->flags & (ATA_FLAG_DISABLED | ATA_FLAG_NOINTR))) { 538 !(ap->flags & ATA_FLAG_DISABLED)) {
537 struct ata_queued_cmd *qc; 539 struct ata_queued_cmd *qc;
538 540
539 qc = ata_qc_from_tag(ap, ap->active_tag); 541 qc = ata_qc_from_tag(ap, ap->active_tag);
540 if (qc && (!(qc->tf.ctl & ATA_NIEN))) 542 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
541 handled += pdc_host_intr(ap, qc); 543 handled += pdc_host_intr(ap, qc);
542 } 544 }
543 } 545 }
diff --git a/drivers/scsi/sata_qstor.c b/drivers/scsi/sata_qstor.c
index 259c2dec4e21..54283e06070e 100644
--- a/drivers/scsi/sata_qstor.c
+++ b/drivers/scsi/sata_qstor.c
@@ -175,7 +175,7 @@ static const struct ata_port_info qs_port_info[] = {
175 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 175 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
176 ATA_FLAG_SATA_RESET | 176 ATA_FLAG_SATA_RESET |
177 //FIXME ATA_FLAG_SRST | 177 //FIXME ATA_FLAG_SRST |
178 ATA_FLAG_MMIO, 178 ATA_FLAG_MMIO | ATA_FLAG_PIO_POLLING,
179 .pio_mask = 0x10, /* pio4 */ 179 .pio_mask = 0x10, /* pio4 */
180 .udma_mask = 0x7f, /* udma0-6 */ 180 .udma_mask = 0x7f, /* udma0-6 */
181 .port_ops = &qs_ata_ops, 181 .port_ops = &qs_ata_ops,
@@ -394,14 +394,13 @@ static inline unsigned int qs_intr_pkt(struct ata_host_set *host_set)
394 DPRINTK("SFF=%08x%08x: sCHAN=%u sHST=%d sDST=%02x\n", 394 DPRINTK("SFF=%08x%08x: sCHAN=%u sHST=%d sDST=%02x\n",
395 sff1, sff0, port_no, sHST, sDST); 395 sff1, sff0, port_no, sHST, sDST);
396 handled = 1; 396 handled = 1;
397 if (ap && !(ap->flags & 397 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
398 (ATA_FLAG_DISABLED|ATA_FLAG_NOINTR))) {
399 struct ata_queued_cmd *qc; 398 struct ata_queued_cmd *qc;
400 struct qs_port_priv *pp = ap->private_data; 399 struct qs_port_priv *pp = ap->private_data;
401 if (!pp || pp->state != qs_state_pkt) 400 if (!pp || pp->state != qs_state_pkt)
402 continue; 401 continue;
403 qc = ata_qc_from_tag(ap, ap->active_tag); 402 qc = ata_qc_from_tag(ap, ap->active_tag);
404 if (qc && (!(qc->tf.ctl & ATA_NIEN))) { 403 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
405 switch (sHST) { 404 switch (sHST) {
406 case 0: /* successful CPB */ 405 case 0: /* successful CPB */
407 case 3: /* device error */ 406 case 3: /* device error */
@@ -428,13 +427,13 @@ static inline unsigned int qs_intr_mmio(struct ata_host_set *host_set)
428 struct ata_port *ap; 427 struct ata_port *ap;
429 ap = host_set->ports[port_no]; 428 ap = host_set->ports[port_no];
430 if (ap && 429 if (ap &&
431 !(ap->flags & (ATA_FLAG_DISABLED | ATA_FLAG_NOINTR))) { 430 !(ap->flags & ATA_FLAG_DISABLED)) {
432 struct ata_queued_cmd *qc; 431 struct ata_queued_cmd *qc;
433 struct qs_port_priv *pp = ap->private_data; 432 struct qs_port_priv *pp = ap->private_data;
434 if (!pp || pp->state != qs_state_mmio) 433 if (!pp || pp->state != qs_state_mmio)
435 continue; 434 continue;
436 qc = ata_qc_from_tag(ap, ap->active_tag); 435 qc = ata_qc_from_tag(ap, ap->active_tag);
437 if (qc && (!(qc->tf.ctl & ATA_NIEN))) { 436 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
438 437
439 /* check main status, clearing INTRQ */ 438 /* check main status, clearing INTRQ */
440 u8 status = ata_check_status(ap); 439 u8 status = ata_check_status(ap);
diff --git a/drivers/scsi/sata_sil.c b/drivers/scsi/sata_sil.c
index c9333577330e..aa63044eed2e 100644
--- a/drivers/scsi/sata_sil.c
+++ b/drivers/scsi/sata_sil.c
@@ -96,6 +96,8 @@ static void sil_dev_config(struct ata_port *ap, struct ata_device *dev);
96static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg); 96static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg);
97static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); 97static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
98static void sil_post_set_mode (struct ata_port *ap); 98static void sil_post_set_mode (struct ata_port *ap);
99static void sil_freeze(struct ata_port *ap);
100static void sil_thaw(struct ata_port *ap);
99 101
100 102
101static const struct pci_device_id sil_pci_tbl[] = { 103static const struct pci_device_id sil_pci_tbl[] = {
@@ -174,7 +176,10 @@ static const struct ata_port_operations sil_ops = {
174 .bmdma_status = ata_bmdma_status, 176 .bmdma_status = ata_bmdma_status,
175 .qc_prep = ata_qc_prep, 177 .qc_prep = ata_qc_prep,
176 .qc_issue = ata_qc_issue_prot, 178 .qc_issue = ata_qc_issue_prot,
177 .eng_timeout = ata_eng_timeout, 179 .freeze = sil_freeze,
180 .thaw = sil_thaw,
181 .error_handler = ata_bmdma_error_handler,
182 .post_internal_cmd = ata_bmdma_post_internal_cmd,
178 .irq_handler = ata_interrupt, 183 .irq_handler = ata_interrupt,
179 .irq_clear = ata_bmdma_irq_clear, 184 .irq_clear = ata_bmdma_irq_clear,
180 .scr_read = sil_scr_read, 185 .scr_read = sil_scr_read,
@@ -314,6 +319,33 @@ static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
314 writel(val, mmio); 319 writel(val, mmio);
315} 320}
316 321
322static void sil_freeze(struct ata_port *ap)
323{
324 void __iomem *mmio_base = ap->host_set->mmio_base;
325 u32 tmp;
326
327 /* plug IRQ */
328 tmp = readl(mmio_base + SIL_SYSCFG);
329 tmp |= SIL_MASK_IDE0_INT << ap->port_no;
330 writel(tmp, mmio_base + SIL_SYSCFG);
331 readl(mmio_base + SIL_SYSCFG); /* flush */
332}
333
334static void sil_thaw(struct ata_port *ap)
335{
336 void __iomem *mmio_base = ap->host_set->mmio_base;
337 u32 tmp;
338
339 /* clear IRQ */
340 ata_chk_status(ap);
341 ata_bmdma_irq_clear(ap);
342
343 /* turn on IRQ */
344 tmp = readl(mmio_base + SIL_SYSCFG);
345 tmp &= ~(SIL_MASK_IDE0_INT << ap->port_no);
346 writel(tmp, mmio_base + SIL_SYSCFG);
347}
348
317/** 349/**
318 * sil_dev_config - Apply device/host-specific errata fixups 350 * sil_dev_config - Apply device/host-specific errata fixups
319 * @ap: Port containing device to be examined 351 * @ap: Port containing device to be examined
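Note: the sil_freeze()/sil_thaw() pair added above implements the new EH port-quiescing hooks for this driver — freeze masks the port's interrupt source in SIL_SYSCFG, thaw acks anything pending and unmasks it. A minimal sketch of how the core is expected to drive these hooks (an illustration only, not the libata implementation):

static void eh_port_recovery_sketch(struct ata_port *ap)
{
        if (ap->ops->freeze)
                ap->ops->freeze(ap);            /* plug the port's IRQ source */

        ap->ops->error_handler(ap);             /* reset/revalidate as needed */

        if (ap->ops->thaw)
                ap->ops->thaw(ap);              /* ack stale status, unmask IRQ */
}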
@@ -360,16 +392,16 @@ static void sil_dev_config(struct ata_port *ap, struct ata_device *dev)
360 if (slow_down || 392 if (slow_down ||
361 ((ap->flags & SIL_FLAG_MOD15WRITE) && 393 ((ap->flags & SIL_FLAG_MOD15WRITE) &&
362 (quirks & SIL_QUIRK_MOD15WRITE))) { 394 (quirks & SIL_QUIRK_MOD15WRITE))) {
363 printk(KERN_INFO "ata%u(%u): applying Seagate errata fix (mod15write workaround)\n", 395 ata_dev_printk(dev, KERN_INFO, "applying Seagate errata fix "
364 ap->id, dev->devno); 396 "(mod15write workaround)\n");
365 dev->max_sectors = 15; 397 dev->max_sectors = 15;
366 return; 398 return;
367 } 399 }
368 400
369 /* limit to udma5 */ 401 /* limit to udma5 */
370 if (quirks & SIL_QUIRK_UDMA5MAX) { 402 if (quirks & SIL_QUIRK_UDMA5MAX) {
371 printk(KERN_INFO "ata%u(%u): applying Maxtor errata fix %s\n", 403 ata_dev_printk(dev, KERN_INFO,
372 ap->id, dev->devno, model_num); 404 "applying Maxtor errata fix %s\n", model_num);
373 dev->udma_mask &= ATA_UDMA5; 405 dev->udma_mask &= ATA_UDMA5;
374 return; 406 return;
375 } 407 }
@@ -384,7 +416,7 @@ static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
384 int rc; 416 int rc;
385 unsigned int i; 417 unsigned int i;
386 int pci_dev_busy = 0; 418 int pci_dev_busy = 0;
387 u32 tmp, irq_mask; 419 u32 tmp;
388 u8 cls; 420 u8 cls;
389 421
390 if (!printed_version++) 422 if (!printed_version++)
@@ -474,24 +506,11 @@ static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
474 } 506 }
475 507
476 if (ent->driver_data == sil_3114) { 508 if (ent->driver_data == sil_3114) {
477 irq_mask = SIL_MASK_4PORT;
478
479 /* flip the magic "make 4 ports work" bit */ 509 /* flip the magic "make 4 ports work" bit */
480 tmp = readl(mmio_base + sil_port[2].bmdma); 510 tmp = readl(mmio_base + sil_port[2].bmdma);
481 if ((tmp & SIL_INTR_STEERING) == 0) 511 if ((tmp & SIL_INTR_STEERING) == 0)
482 writel(tmp | SIL_INTR_STEERING, 512 writel(tmp | SIL_INTR_STEERING,
483 mmio_base + sil_port[2].bmdma); 513 mmio_base + sil_port[2].bmdma);
484
485 } else {
486 irq_mask = SIL_MASK_2PORT;
487 }
488
489 /* make sure IDE0/1/2/3 interrupts are not masked */
490 tmp = readl(mmio_base + SIL_SYSCFG);
491 if (tmp & irq_mask) {
492 tmp &= ~irq_mask;
493 writel(tmp, mmio_base + SIL_SYSCFG);
494 readl(mmio_base + SIL_SYSCFG); /* flush */
495 } 514 }
496 515
497 /* mask all SATA phy-related interrupts */ 516 /* mask all SATA phy-related interrupts */
diff --git a/drivers/scsi/sata_sil24.c b/drivers/scsi/sata_sil24.c
index e9fd869140c5..4c76f05d9b65 100644
--- a/drivers/scsi/sata_sil24.c
+++ b/drivers/scsi/sata_sil24.c
@@ -156,6 +156,9 @@ enum {
156 PORT_IRQ_HANDSHAKE = (1 << 10), /* handshake error threshold */ 156 PORT_IRQ_HANDSHAKE = (1 << 10), /* handshake error threshold */
157 PORT_IRQ_SDB_NOTIFY = (1 << 11), /* SDB notify received */ 157 PORT_IRQ_SDB_NOTIFY = (1 << 11), /* SDB notify received */
158 158
159 DEF_PORT_IRQ = PORT_IRQ_COMPLETE | PORT_IRQ_ERROR |
160 PORT_IRQ_DEV_XCHG | PORT_IRQ_UNK_FIS,
161
159 /* bits[27:16] are unmasked (raw) */ 162 /* bits[27:16] are unmasked (raw) */
160 PORT_IRQ_RAW_SHIFT = 16, 163 PORT_IRQ_RAW_SHIFT = 16,
161 PORT_IRQ_MASKED_MASK = 0x7ff, 164 PORT_IRQ_MASKED_MASK = 0x7ff,
@@ -213,6 +216,8 @@ enum {
213 SGE_DRD = (1 << 29), /* discard data read (/dev/null) 216 SGE_DRD = (1 << 29), /* discard data read (/dev/null)
214 data address ignored */ 217 data address ignored */
215 218
219 SIL24_MAX_CMDS = 31,
220
216 /* board id */ 221 /* board id */
217 BID_SIL3124 = 0, 222 BID_SIL3124 = 0,
218 BID_SIL3132 = 1, 223 BID_SIL3132 = 1,
@@ -220,7 +225,8 @@ enum {
220 225
221 /* host flags */ 226 /* host flags */
222 SIL24_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 227 SIL24_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
223 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA, 228 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
229 ATA_FLAG_NCQ,
224 SIL24_FLAG_PCIX_IRQ_WOC = (1 << 24), /* IRQ loss errata on PCI-X */ 230 SIL24_FLAG_PCIX_IRQ_WOC = (1 << 24), /* IRQ loss errata on PCI-X */
225 231
226 IRQ_STAT_4PORTS = 0xf, 232 IRQ_STAT_4PORTS = 0xf,
@@ -242,6 +248,58 @@ union sil24_cmd_block {
242 struct sil24_atapi_block atapi; 248 struct sil24_atapi_block atapi;
243}; 249};
244 250
251static struct sil24_cerr_info {
252 unsigned int err_mask, action;
253 const char *desc;
254} sil24_cerr_db[] = {
255 [0] = { AC_ERR_DEV, ATA_EH_REVALIDATE,
256 "device error" },
257 [PORT_CERR_DEV] = { AC_ERR_DEV, ATA_EH_REVALIDATE,
258 "device error via D2H FIS" },
259 [PORT_CERR_SDB] = { AC_ERR_DEV, ATA_EH_REVALIDATE,
260 "device error via SDB FIS" },
261 [PORT_CERR_DATA] = { AC_ERR_ATA_BUS, ATA_EH_SOFTRESET,
262 "error in data FIS" },
263 [PORT_CERR_SEND] = { AC_ERR_ATA_BUS, ATA_EH_SOFTRESET,
264 "failed to transmit command FIS" },
265 [PORT_CERR_INCONSISTENT] = { AC_ERR_HSM, ATA_EH_SOFTRESET,
266 "protocol mismatch" },
267 [PORT_CERR_DIRECTION] = { AC_ERR_HSM, ATA_EH_SOFTRESET,
268 "data directon mismatch" },
269 [PORT_CERR_UNDERRUN] = { AC_ERR_HSM, ATA_EH_SOFTRESET,
270 "ran out of SGEs while writing" },
271 [PORT_CERR_OVERRUN] = { AC_ERR_HSM, ATA_EH_SOFTRESET,
272 "ran out of SGEs while reading" },
273 [PORT_CERR_PKT_PROT] = { AC_ERR_HSM, ATA_EH_SOFTRESET,
274 "invalid data directon for ATAPI CDB" },
275 [PORT_CERR_SGT_BOUNDARY] = { AC_ERR_SYSTEM, ATA_EH_SOFTRESET,
276 "SGT no on qword boundary" },
277 [PORT_CERR_SGT_TGTABRT] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
278 "PCI target abort while fetching SGT" },
279 [PORT_CERR_SGT_MSTABRT] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
280 "PCI master abort while fetching SGT" },
281 [PORT_CERR_SGT_PCIPERR] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
282 "PCI parity error while fetching SGT" },
283 [PORT_CERR_CMD_BOUNDARY] = { AC_ERR_SYSTEM, ATA_EH_SOFTRESET,
284 "PRB not on qword boundary" },
285 [PORT_CERR_CMD_TGTABRT] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
286 "PCI target abort while fetching PRB" },
287 [PORT_CERR_CMD_MSTABRT] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
288 "PCI master abort while fetching PRB" },
289 [PORT_CERR_CMD_PCIPERR] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
290 "PCI parity error while fetching PRB" },
291 [PORT_CERR_XFR_UNDEF] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
292 "undefined error while transferring data" },
293 [PORT_CERR_XFR_TGTABRT] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
294 "PCI target abort while transferring data" },
295 [PORT_CERR_XFR_MSTABRT] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
296 "PCI master abort while transferring data" },
297 [PORT_CERR_XFR_PCIPERR] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
298 "PCI parity error while transferring data" },
299 [PORT_CERR_SENDSERVICE] = { AC_ERR_HSM, ATA_EH_SOFTRESET,
300 "FIS received while sending service FIS" },
301};
302
245/* 303/*
246 * ap->private_data 304 * ap->private_data
247 * 305 *
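Note: sil24_cerr_db[] above turns the controller's PORT_CMD_ERR code into an err_mask/EH-action pair plus a human-readable description. A hypothetical lookup helper showing how the table is meant to be consulted (the actual lookup happens inline in sil24_error_intr() further down in this patch):

static const char *sil24_decode_cerr_sketch(u32 cerr, unsigned int *err_mask,
                                            unsigned int *action)
{
        if (cerr < ARRAY_SIZE(sil24_cerr_db) && sil24_cerr_db[cerr].desc) {
                *err_mask = sil24_cerr_db[cerr].err_mask;
                *action = sil24_cerr_db[cerr].action;
                return sil24_cerr_db[cerr].desc;
        }
        /* unrecognized code: be pessimistic, ask for a softreset */
        *err_mask = AC_ERR_OTHER;
        *action = ATA_EH_SOFTRESET;
        return "unknown command error";
}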
@@ -269,8 +327,11 @@ static int sil24_probe_reset(struct ata_port *ap, unsigned int *classes);
269static void sil24_qc_prep(struct ata_queued_cmd *qc); 327static void sil24_qc_prep(struct ata_queued_cmd *qc);
270static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc); 328static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc);
271static void sil24_irq_clear(struct ata_port *ap); 329static void sil24_irq_clear(struct ata_port *ap);
272static void sil24_eng_timeout(struct ata_port *ap);
273static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *regs); 330static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
331static void sil24_freeze(struct ata_port *ap);
332static void sil24_thaw(struct ata_port *ap);
333static void sil24_error_handler(struct ata_port *ap);
334static void sil24_post_internal_cmd(struct ata_queued_cmd *qc);
274static int sil24_port_start(struct ata_port *ap); 335static int sil24_port_start(struct ata_port *ap);
275static void sil24_port_stop(struct ata_port *ap); 336static void sil24_port_stop(struct ata_port *ap);
276static void sil24_host_stop(struct ata_host_set *host_set); 337static void sil24_host_stop(struct ata_host_set *host_set);
@@ -297,7 +358,8 @@ static struct scsi_host_template sil24_sht = {
297 .name = DRV_NAME, 358 .name = DRV_NAME,
298 .ioctl = ata_scsi_ioctl, 359 .ioctl = ata_scsi_ioctl,
299 .queuecommand = ata_scsi_queuecmd, 360 .queuecommand = ata_scsi_queuecmd,
300 .can_queue = ATA_DEF_QUEUE, 361 .change_queue_depth = ata_scsi_change_queue_depth,
362 .can_queue = SIL24_MAX_CMDS,
301 .this_id = ATA_SHT_THIS_ID, 363 .this_id = ATA_SHT_THIS_ID,
302 .sg_tablesize = LIBATA_MAX_PRD, 364 .sg_tablesize = LIBATA_MAX_PRD,
303 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 365 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
@@ -325,14 +387,17 @@ static const struct ata_port_operations sil24_ops = {
325 .qc_prep = sil24_qc_prep, 387 .qc_prep = sil24_qc_prep,
326 .qc_issue = sil24_qc_issue, 388 .qc_issue = sil24_qc_issue,
327 389
328 .eng_timeout = sil24_eng_timeout,
329
330 .irq_handler = sil24_interrupt, 390 .irq_handler = sil24_interrupt,
331 .irq_clear = sil24_irq_clear, 391 .irq_clear = sil24_irq_clear,
332 392
333 .scr_read = sil24_scr_read, 393 .scr_read = sil24_scr_read,
334 .scr_write = sil24_scr_write, 394 .scr_write = sil24_scr_write,
335 395
396 .freeze = sil24_freeze,
397 .thaw = sil24_thaw,
398 .error_handler = sil24_error_handler,
399 .post_internal_cmd = sil24_post_internal_cmd,
400
336 .port_start = sil24_port_start, 401 .port_start = sil24_port_start,
337 .port_stop = sil24_port_stop, 402 .port_stop = sil24_port_stop,
338 .host_stop = sil24_host_stop, 403 .host_stop = sil24_host_stop,
@@ -376,6 +441,13 @@ static struct ata_port_info sil24_port_info[] = {
376 }, 441 },
377}; 442};
378 443
444static int sil24_tag(int tag)
445{
446 if (unlikely(ata_tag_internal(tag)))
447 return 0;
448 return tag;
449}
450
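Note: sil24_tag() folds libata's reserved internal tag back into the hardware's slot range. The controller exposes only SIL24_MAX_CMDS (31) slots, so the internal command (tag ATA_TAG_INTERNAL == 31, see the libata.h hunk below) is issued through slot 0 — safe because internal commands only run while the port is otherwise idle. ata_tag_internal() is presumably just the reserved-tag test, something like:

static inline int ata_tag_internal_sketch(unsigned int tag)
{
        return tag == ATA_TAG_INTERNAL;         /* ATA_MAX_QUEUE - 1 == 31 */
}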
379static void sil24_dev_config(struct ata_port *ap, struct ata_device *dev) 451static void sil24_dev_config(struct ata_port *ap, struct ata_device *dev)
380{ 452{
381 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr; 453 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
@@ -459,21 +531,17 @@ static int sil24_softreset(struct ata_port *ap, unsigned int *class)
459 struct sil24_port_priv *pp = ap->private_data; 531 struct sil24_port_priv *pp = ap->private_data;
460 struct sil24_prb *prb = &pp->cmd_block[0].ata.prb; 532 struct sil24_prb *prb = &pp->cmd_block[0].ata.prb;
461 dma_addr_t paddr = pp->cmd_block_dma; 533 dma_addr_t paddr = pp->cmd_block_dma;
462 u32 mask, irq_enable, irq_stat; 534 u32 mask, irq_stat;
463 const char *reason; 535 const char *reason;
464 536
465 DPRINTK("ENTER\n"); 537 DPRINTK("ENTER\n");
466 538
467 if (!sata_dev_present(ap)) { 539 if (ata_port_offline(ap)) {
468 DPRINTK("PHY reports no device\n"); 540 DPRINTK("PHY reports no device\n");
469 *class = ATA_DEV_NONE; 541 *class = ATA_DEV_NONE;
470 goto out; 542 goto out;
471 } 543 }
472 544
473 /* temporarily turn off IRQs during SRST */
474 irq_enable = readl(port + PORT_IRQ_ENABLE_SET);
475 writel(irq_enable, port + PORT_IRQ_ENABLE_CLR);
476
477 /* put the port into known state */ 545 /* put the port into known state */
478 if (sil24_init_port(ap)) { 546 if (sil24_init_port(ap)) {
479 reason ="port not ready"; 547 reason ="port not ready";
@@ -494,9 +562,6 @@ static int sil24_softreset(struct ata_port *ap, unsigned int *class)
494 writel(irq_stat, port + PORT_IRQ_STAT); /* clear IRQs */ 562 writel(irq_stat, port + PORT_IRQ_STAT); /* clear IRQs */
495 irq_stat >>= PORT_IRQ_RAW_SHIFT; 563 irq_stat >>= PORT_IRQ_RAW_SHIFT;
496 564
497 /* restore IRQs */
498 writel(irq_enable, port + PORT_IRQ_ENABLE_SET);
499
500 if (!(irq_stat & PORT_IRQ_COMPLETE)) { 565 if (!(irq_stat & PORT_IRQ_COMPLETE)) {
501 if (irq_stat & PORT_IRQ_ERROR) 566 if (irq_stat & PORT_IRQ_ERROR)
502 reason = "SRST command error"; 567 reason = "SRST command error";
@@ -516,7 +581,7 @@ static int sil24_softreset(struct ata_port *ap, unsigned int *class)
516 return 0; 581 return 0;
517 582
518 err: 583 err:
519 printk(KERN_ERR "ata%u: softreset failed (%s)\n", ap->id, reason); 584 ata_port_printk(ap, KERN_ERR, "softreset failed (%s)\n", reason);
520 return -EIO; 585 return -EIO;
521} 586}
522 587
@@ -528,10 +593,10 @@ static int sil24_hardreset(struct ata_port *ap, unsigned int *class)
528 u32 tmp; 593 u32 tmp;
529 594
530 /* sil24 does the right thing(tm) without any protection */ 595 /* sil24 does the right thing(tm) without any protection */
531 ata_set_sata_spd(ap); 596 sata_set_spd(ap);
532 597
533 tout_msec = 100; 598 tout_msec = 100;
534 if (sata_dev_present(ap)) 599 if (ata_port_online(ap))
535 tout_msec = 5000; 600 tout_msec = 5000;
536 601
537 writel(PORT_CS_DEV_RST, port + PORT_CTRL_STAT); 602 writel(PORT_CS_DEV_RST, port + PORT_CTRL_STAT);
@@ -544,7 +609,7 @@ static int sil24_hardreset(struct ata_port *ap, unsigned int *class)
544 msleep(100); 609 msleep(100);
545 610
546 if (tmp & PORT_CS_DEV_RST) { 611 if (tmp & PORT_CS_DEV_RST) {
547 if (!sata_dev_present(ap)) 612 if (ata_port_offline(ap))
548 return 0; 613 return 0;
549 reason = "link not ready"; 614 reason = "link not ready";
550 goto err; 615 goto err;
@@ -561,7 +626,7 @@ static int sil24_hardreset(struct ata_port *ap, unsigned int *class)
561 return 0; 626 return 0;
562 627
563 err: 628 err:
564 printk(KERN_ERR "ata%u: hardreset failed (%s)\n", ap->id, reason); 629 ata_port_printk(ap, KERN_ERR, "hardreset failed (%s)\n", reason);
565 return -EIO; 630 return -EIO;
566} 631}
567 632
@@ -595,14 +660,17 @@ static void sil24_qc_prep(struct ata_queued_cmd *qc)
595{ 660{
596 struct ata_port *ap = qc->ap; 661 struct ata_port *ap = qc->ap;
597 struct sil24_port_priv *pp = ap->private_data; 662 struct sil24_port_priv *pp = ap->private_data;
598 union sil24_cmd_block *cb = pp->cmd_block + qc->tag; 663 union sil24_cmd_block *cb;
599 struct sil24_prb *prb; 664 struct sil24_prb *prb;
600 struct sil24_sge *sge; 665 struct sil24_sge *sge;
601 u16 ctrl = 0; 666 u16 ctrl = 0;
602 667
668 cb = &pp->cmd_block[sil24_tag(qc->tag)];
669
603 switch (qc->tf.protocol) { 670 switch (qc->tf.protocol) {
604 case ATA_PROT_PIO: 671 case ATA_PROT_PIO:
605 case ATA_PROT_DMA: 672 case ATA_PROT_DMA:
673 case ATA_PROT_NCQ:
606 case ATA_PROT_NODATA: 674 case ATA_PROT_NODATA:
607 prb = &cb->ata.prb; 675 prb = &cb->ata.prb;
608 sge = cb->ata.sge; 676 sge = cb->ata.sge;
@@ -640,12 +708,17 @@ static void sil24_qc_prep(struct ata_queued_cmd *qc)
640static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc) 708static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc)
641{ 709{
642 struct ata_port *ap = qc->ap; 710 struct ata_port *ap = qc->ap;
643 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
644 struct sil24_port_priv *pp = ap->private_data; 711 struct sil24_port_priv *pp = ap->private_data;
645 dma_addr_t paddr = pp->cmd_block_dma + qc->tag * sizeof(*pp->cmd_block); 712 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
713 unsigned int tag = sil24_tag(qc->tag);
714 dma_addr_t paddr;
715 void __iomem *activate;
646 716
647 writel((u32)paddr, port + PORT_CMD_ACTIVATE); 717 paddr = pp->cmd_block_dma + tag * sizeof(*pp->cmd_block);
648 writel((u64)paddr >> 32, port + PORT_CMD_ACTIVATE + 4); 718 activate = port + PORT_CMD_ACTIVATE + tag * 8;
719
720 writel((u32)paddr, activate);
721 writel((u64)paddr >> 32, activate + 4);
649 722
650 return 0; 723 return 0;
651} 724}
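Note: the reworked sil24_qc_issue() indexes both the command block and the activation register by tag, which is what makes queued issue possible — slot N's PRB lives at cmd_block_dma + N * sizeof(*cmd_block) and is kicked off through the Nth 8-byte PORT_CMD_ACTIVATE pair. A self-contained sketch of the address math (names taken from the surrounding code):

static void sil24_activate_slot_sketch(void __iomem *port,
                                       dma_addr_t cmd_block_dma,
                                       unsigned int tag)
{
        dma_addr_t paddr = cmd_block_dma + tag * sizeof(union sil24_cmd_block);
        void __iomem *activate = port + PORT_CMD_ACTIVATE + tag * 8;

        /* 64-bit activation: low dword first, then high dword */
        writel((u32)paddr, activate);
        writel((u64)paddr >> 32, activate + 4);
}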
@@ -655,166 +728,141 @@ static void sil24_irq_clear(struct ata_port *ap)
655 /* unused */ 728 /* unused */
656} 729}
657 730
658static int __sil24_restart_controller(void __iomem *port) 731static void sil24_freeze(struct ata_port *ap)
659{ 732{
660 u32 tmp; 733 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
661 int cnt;
662
663 writel(PORT_CS_INIT, port + PORT_CTRL_STAT);
664
665 /* Max ~10ms */
666 for (cnt = 0; cnt < 10000; cnt++) {
667 tmp = readl(port + PORT_CTRL_STAT);
668 if (tmp & PORT_CS_RDY)
669 return 0;
670 udelay(1);
671 }
672 734
673 return -1; 735 /* Port-wide IRQ mask in HOST_CTRL doesn't really work, clear
736 * PORT_IRQ_ENABLE instead.
737 */
738 writel(0xffff, port + PORT_IRQ_ENABLE_CLR);
674} 739}
675 740
676static void sil24_restart_controller(struct ata_port *ap) 741static void sil24_thaw(struct ata_port *ap)
677{ 742{
678 if (__sil24_restart_controller((void __iomem *)ap->ioaddr.cmd_addr)) 743 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
679 printk(KERN_ERR DRV_NAME 744 u32 tmp;
680 " ata%u: failed to restart controller\n", ap->id); 745
746 /* clear IRQ */
747 tmp = readl(port + PORT_IRQ_STAT);
748 writel(tmp, port + PORT_IRQ_STAT);
749
750 /* turn IRQ back on */
751 writel(DEF_PORT_IRQ, port + PORT_IRQ_ENABLE_SET);
681} 752}
682 753
683static int __sil24_reset_controller(void __iomem *port) 754static void sil24_error_intr(struct ata_port *ap)
684{ 755{
685 int cnt; 756 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
686 u32 tmp; 757 struct ata_eh_info *ehi = &ap->eh_info;
758 int freeze = 0;
759 u32 irq_stat;
687 760
688 /* Reset controller state. Is this correct? */ 761 /* on error, we need to clear IRQ explicitly */
689 writel(PORT_CS_DEV_RST, port + PORT_CTRL_STAT); 762 irq_stat = readl(port + PORT_IRQ_STAT);
690 readl(port + PORT_CTRL_STAT); /* sync */ 763 writel(irq_stat, port + PORT_IRQ_STAT);
691 764
692 /* Max ~100ms */ 765 /* first, analyze and record host port events */
693 for (cnt = 0; cnt < 1000; cnt++) { 766 ata_ehi_clear_desc(ehi);
694 udelay(100);
695 tmp = readl(port + PORT_CTRL_STAT);
696 if (!(tmp & PORT_CS_DEV_RST))
697 break;
698 }
699 767
700 if (tmp & PORT_CS_DEV_RST) 768 ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);
701 return -1;
702 769
703 if (tmp & PORT_CS_RDY) 770 if (irq_stat & PORT_IRQ_DEV_XCHG) {
704 return 0; 771 ehi->err_mask |= AC_ERR_ATA_BUS;
772 /* sil24 doesn't recover very well from phy
773 * disconnection with a softreset. Force hardreset.
774 */
775 ehi->action |= ATA_EH_HARDRESET;
776 ata_ehi_push_desc(ehi, ", device_exchanged");
777 freeze = 1;
778 }
705 779
706 return __sil24_restart_controller(port); 780 if (irq_stat & PORT_IRQ_UNK_FIS) {
707} 781 ehi->err_mask |= AC_ERR_HSM;
782 ehi->action |= ATA_EH_SOFTRESET;
783 ata_ehi_push_desc(ehi, ", unknown FIS");
784 freeze = 1;
785 }
708 786
709static void sil24_reset_controller(struct ata_port *ap) 787 /* deal with command error */
710{ 788 if (irq_stat & PORT_IRQ_ERROR) {
711 printk(KERN_NOTICE DRV_NAME 789 struct sil24_cerr_info *ci = NULL;
712 " ata%u: resetting controller...\n", ap->id); 790 unsigned int err_mask = 0, action = 0;
713 if (__sil24_reset_controller((void __iomem *)ap->ioaddr.cmd_addr)) 791 struct ata_queued_cmd *qc;
714 printk(KERN_ERR DRV_NAME 792 u32 cerr;
715 " ata%u: failed to reset controller\n", ap->id); 793
716} 794 /* analyze CMD_ERR */
795 cerr = readl(port + PORT_CMD_ERR);
796 if (cerr < ARRAY_SIZE(sil24_cerr_db))
797 ci = &sil24_cerr_db[cerr];
798
799 if (ci && ci->desc) {
800 err_mask |= ci->err_mask;
801 action |= ci->action;
802 ata_ehi_push_desc(ehi, ", %s", ci->desc);
803 } else {
804 err_mask |= AC_ERR_OTHER;
805 action |= ATA_EH_SOFTRESET;
806 ata_ehi_push_desc(ehi, ", unknown command error %d",
807 cerr);
808 }
717 809
718static void sil24_eng_timeout(struct ata_port *ap) 810 /* record error info */
719{ 811 qc = ata_qc_from_tag(ap, ap->active_tag);
720 struct ata_queued_cmd *qc; 812 if (qc) {
813 sil24_update_tf(ap);
814 qc->err_mask |= err_mask;
815 } else
816 ehi->err_mask |= err_mask;
721 817
722 qc = ata_qc_from_tag(ap, ap->active_tag); 818 ehi->action |= action;
819 }
723 820
724 printk(KERN_ERR "ata%u: command timeout\n", ap->id); 821 /* freeze or abort */
725 qc->err_mask |= AC_ERR_TIMEOUT; 822 if (freeze)
726 ata_eh_qc_complete(qc); 823 ata_port_freeze(ap);
824 else
825 ata_port_abort(ap);
826}
727 827
728 sil24_reset_controller(ap); 828static void sil24_finish_qc(struct ata_queued_cmd *qc)
829{
830 if (qc->flags & ATA_QCFLAG_RESULT_TF)
831 sil24_update_tf(qc->ap);
729} 832}
730 833
731static void sil24_error_intr(struct ata_port *ap, u32 slot_stat) 834static inline void sil24_host_intr(struct ata_port *ap)
732{ 835{
733 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
734 struct sil24_port_priv *pp = ap->private_data;
735 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr; 836 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
736 u32 irq_stat, cmd_err, sstatus, serror; 837 u32 slot_stat, qc_active;
737 unsigned int err_mask; 838 int rc;
738 839
739 irq_stat = readl(port + PORT_IRQ_STAT); 840 slot_stat = readl(port + PORT_SLOT_STAT);
740 writel(irq_stat, port + PORT_IRQ_STAT); /* clear irq */
741 841
742 if (!(irq_stat & PORT_IRQ_ERROR)) { 842 if (unlikely(slot_stat & HOST_SSTAT_ATTN)) {
743 /* ignore non-completion, non-error irqs for now */ 843 sil24_error_intr(ap);
744 printk(KERN_WARNING DRV_NAME
745 "ata%u: non-error exception irq (irq_stat %x)\n",
746 ap->id, irq_stat);
747 return; 844 return;
748 } 845 }
749 846
750 cmd_err = readl(port + PORT_CMD_ERR); 847 if (ap->flags & SIL24_FLAG_PCIX_IRQ_WOC)
751 sstatus = readl(port + PORT_SSTATUS); 848 writel(PORT_IRQ_COMPLETE, port + PORT_IRQ_STAT);
752 serror = readl(port + PORT_SERROR);
753 if (serror)
754 writel(serror, port + PORT_SERROR);
755 849
756 /* 850 qc_active = slot_stat & ~HOST_SSTAT_ATTN;
757 * Don't log ATAPI device errors. They're supposed to happen 851 rc = ata_qc_complete_multiple(ap, qc_active, sil24_finish_qc);
758 * and any serious errors will be logged using sense data by 852 if (rc > 0)
759 * the SCSI layer. 853 return;
760 */ 854 if (rc < 0) {
761 if (ap->device[0].class != ATA_DEV_ATAPI || cmd_err > PORT_CERR_SDB) 855 struct ata_eh_info *ehi = &ap->eh_info;
762 printk("ata%u: error interrupt on port%d\n" 856 ehi->err_mask |= AC_ERR_HSM;
763 " stat=0x%x irq=0x%x cmd_err=%d sstatus=0x%x serror=0x%x\n", 857 ehi->action |= ATA_EH_SOFTRESET;
764 ap->id, ap->port_no, slot_stat, irq_stat, cmd_err, sstatus, serror); 858 ata_port_freeze(ap);
765 859 return;
766 if (cmd_err == PORT_CERR_DEV || cmd_err == PORT_CERR_SDB) {
767 /*
768 * Device is reporting error, tf registers are valid.
769 */
770 sil24_update_tf(ap);
771 err_mask = ac_err_mask(pp->tf.command);
772 sil24_restart_controller(ap);
773 } else {
774 /*
775 * Other errors. libata currently doesn't have any
776 * mechanism to report these errors. Just turn on
777 * ATA_ERR.
778 */
779 err_mask = AC_ERR_OTHER;
780 sil24_reset_controller(ap);
781 } 860 }
782 861
783 if (qc) { 862 if (ata_ratelimit())
784 qc->err_mask |= err_mask; 863 ata_port_printk(ap, KERN_INFO, "spurious interrupt "
785 ata_qc_complete(qc); 864 "(slot_stat 0x%x active_tag %d sactive 0x%x)\n",
786 } 865 slot_stat, ap->active_tag, ap->sactive);
787}
788
789static inline void sil24_host_intr(struct ata_port *ap)
790{
791 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
792 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
793 u32 slot_stat;
794
795 slot_stat = readl(port + PORT_SLOT_STAT);
796 if (!(slot_stat & HOST_SSTAT_ATTN)) {
797 struct sil24_port_priv *pp = ap->private_data;
798
799 if (ap->flags & SIL24_FLAG_PCIX_IRQ_WOC)
800 writel(PORT_IRQ_COMPLETE, port + PORT_IRQ_STAT);
801
802 /*
803 * !HOST_SSAT_ATTN guarantees successful completion,
804 * so reading back tf registers is unnecessary for
805 * most commands. TODO: read tf registers for
806 * commands which require these values on successful
807 * completion (EXECUTE DEVICE DIAGNOSTIC, CHECK POWER,
808 * DEVICE RESET and READ PORT MULTIPLIER (any more?).
809 */
810 sil24_update_tf(ap);
811
812 if (qc) {
813 qc->err_mask |= ac_err_mask(pp->tf.command);
814 ata_qc_complete(qc);
815 }
816 } else
817 sil24_error_intr(ap, slot_stat);
818} 866}
819 867
820static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *regs) 868static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
@@ -854,9 +902,34 @@ static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *
854 return IRQ_RETVAL(handled); 902 return IRQ_RETVAL(handled);
855} 903}
856 904
905static void sil24_error_handler(struct ata_port *ap)
906{
907 struct ata_eh_context *ehc = &ap->eh_context;
908
909 if (sil24_init_port(ap)) {
910 ata_eh_freeze_port(ap);
911 ehc->i.action |= ATA_EH_HARDRESET;
912 }
913
914 /* perform recovery */
915 ata_do_eh(ap, sil24_softreset, sil24_hardreset, ata_std_postreset);
916}
917
918static void sil24_post_internal_cmd(struct ata_queued_cmd *qc)
919{
920 struct ata_port *ap = qc->ap;
921
922 if (qc->flags & ATA_QCFLAG_FAILED)
923 qc->err_mask |= AC_ERR_OTHER;
924
925 /* make DMA engine forget about the failed command */
926 if (qc->err_mask)
927 sil24_init_port(ap);
928}
929
857static inline void sil24_cblk_free(struct sil24_port_priv *pp, struct device *dev) 930static inline void sil24_cblk_free(struct sil24_port_priv *pp, struct device *dev)
858{ 931{
859 const size_t cb_size = sizeof(*pp->cmd_block); 932 const size_t cb_size = sizeof(*pp->cmd_block) * SIL24_MAX_CMDS;
860 933
861 dma_free_coherent(dev, cb_size, pp->cmd_block, pp->cmd_block_dma); 934 dma_free_coherent(dev, cb_size, pp->cmd_block, pp->cmd_block_dma);
862} 935}
@@ -866,7 +939,7 @@ static int sil24_port_start(struct ata_port *ap)
866 struct device *dev = ap->host_set->dev; 939 struct device *dev = ap->host_set->dev;
867 struct sil24_port_priv *pp; 940 struct sil24_port_priv *pp;
868 union sil24_cmd_block *cb; 941 union sil24_cmd_block *cb;
869 size_t cb_size = sizeof(*cb); 942 size_t cb_size = sizeof(*cb) * SIL24_MAX_CMDS;
870 dma_addr_t cb_dma; 943 dma_addr_t cb_dma;
871 int rc = -ENOMEM; 944 int rc = -ENOMEM;
872 945
@@ -1066,15 +1139,6 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1066 /* Always use 64bit activation */ 1139 /* Always use 64bit activation */
1067 writel(PORT_CS_32BIT_ACTV, port + PORT_CTRL_CLR); 1140 writel(PORT_CS_32BIT_ACTV, port + PORT_CTRL_CLR);
1068 1141
1069 /* Configure interrupts */
1070 writel(0xffff, port + PORT_IRQ_ENABLE_CLR);
1071 writel(PORT_IRQ_COMPLETE | PORT_IRQ_ERROR |
1072 PORT_IRQ_SDB_NOTIFY, port + PORT_IRQ_ENABLE_SET);
1073
1074 /* Clear interrupts */
1075 writel(0x0fff0fff, port + PORT_IRQ_STAT);
1076 writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_CLR);
1077
1078 /* Clear port multiplier enable and resume bits */ 1142 /* Clear port multiplier enable and resume bits */
1079 writel(PORT_CS_PM_EN | PORT_CS_RESUME, port + PORT_CTRL_CLR); 1143 writel(PORT_CS_PM_EN | PORT_CS_RESUME, port + PORT_CTRL_CLR);
1080 } 1144 }
diff --git a/drivers/scsi/sata_sx4.c b/drivers/scsi/sata_sx4.c
index a669d0589889..70a695488291 100644
--- a/drivers/scsi/sata_sx4.c
+++ b/drivers/scsi/sata_sx4.c
@@ -218,7 +218,7 @@ static const struct ata_port_info pdc_port_info[] = {
218 .sht = &pdc_sata_sht, 218 .sht = &pdc_sata_sht,
219 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 219 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
220 ATA_FLAG_SRST | ATA_FLAG_MMIO | 220 ATA_FLAG_SRST | ATA_FLAG_MMIO |
221 ATA_FLAG_NO_ATAPI, 221 ATA_FLAG_PIO_POLLING,
222 .pio_mask = 0x1f, /* pio0-4 */ 222 .pio_mask = 0x1f, /* pio0-4 */
223 .mwdma_mask = 0x07, /* mwdma0-2 */ 223 .mwdma_mask = 0x07, /* mwdma0-2 */
224 .udma_mask = 0x7f, /* udma0-6 ; FIXME */ 224 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
@@ -833,11 +833,11 @@ static irqreturn_t pdc20621_interrupt (int irq, void *dev_instance, struct pt_re
833 tmp = mask & (1 << i); 833 tmp = mask & (1 << i);
834 VPRINTK("seq %u, port_no %u, ap %p, tmp %x\n", i, port_no, ap, tmp); 834 VPRINTK("seq %u, port_no %u, ap %p, tmp %x\n", i, port_no, ap, tmp);
835 if (tmp && ap && 835 if (tmp && ap &&
836 !(ap->flags & (ATA_FLAG_DISABLED | ATA_FLAG_NOINTR))) { 836 !(ap->flags & ATA_FLAG_DISABLED)) {
837 struct ata_queued_cmd *qc; 837 struct ata_queued_cmd *qc;
838 838
839 qc = ata_qc_from_tag(ap, ap->active_tag); 839 qc = ata_qc_from_tag(ap, ap->active_tag);
840 if (qc && (!(qc->tf.ctl & ATA_NIEN))) 840 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
841 handled += pdc20621_host_intr(ap, qc, (i > 4), 841 handled += pdc20621_host_intr(ap, qc, (i > 4),
842 mmio_base); 842 mmio_base);
843 } 843 }
@@ -868,15 +868,16 @@ static void pdc_eng_timeout(struct ata_port *ap)
868 switch (qc->tf.protocol) { 868 switch (qc->tf.protocol) {
869 case ATA_PROT_DMA: 869 case ATA_PROT_DMA:
870 case ATA_PROT_NODATA: 870 case ATA_PROT_NODATA:
871 printk(KERN_ERR "ata%u: command timeout\n", ap->id); 871 ata_port_printk(ap, KERN_ERR, "command timeout\n");
872 qc->err_mask |= __ac_err_mask(ata_wait_idle(ap)); 872 qc->err_mask |= __ac_err_mask(ata_wait_idle(ap));
873 break; 873 break;
874 874
875 default: 875 default:
876 drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000); 876 drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
877 877
878 printk(KERN_ERR "ata%u: unknown timeout, cmd 0x%x stat 0x%x\n", 878 ata_port_printk(ap, KERN_ERR,
879 ap->id, qc->tf.command, drv_stat); 879 "unknown timeout, cmd 0x%x stat 0x%x\n",
880 qc->tf.command, drv_stat);
880 881
881 qc->err_mask |= ac_err_mask(drv_stat); 882 qc->err_mask |= ac_err_mask(drv_stat);
882 break; 883 break;
diff --git a/drivers/scsi/sata_vsc.c b/drivers/scsi/sata_vsc.c
index 9646c3932129..0372be7ff1c9 100644
--- a/drivers/scsi/sata_vsc.c
+++ b/drivers/scsi/sata_vsc.c
@@ -221,14 +221,21 @@ static irqreturn_t vsc_sata_interrupt (int irq, void *dev_instance,
221 221
222 ap = host_set->ports[i]; 222 ap = host_set->ports[i];
223 223
224 if (ap && !(ap->flags & 224 if (is_vsc_sata_int_err(i, int_status)) {
225 (ATA_FLAG_DISABLED|ATA_FLAG_NOINTR))) { 225 u32 err_status;
226 printk(KERN_DEBUG "%s: ignoring interrupt(s)\n", __FUNCTION__);
227 err_status = ap ? vsc_sata_scr_read(ap, SCR_ERROR) : 0;
228 vsc_sata_scr_write(ap, SCR_ERROR, err_status);
229 handled++;
230 }
231
232 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
226 struct ata_queued_cmd *qc; 233 struct ata_queued_cmd *qc;
227 234
228 qc = ata_qc_from_tag(ap, ap->active_tag); 235 qc = ata_qc_from_tag(ap, ap->active_tag);
229 if (qc && (!(qc->tf.ctl & ATA_NIEN))) { 236 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
230 handled += ata_host_intr(ap, qc); 237 handled += ata_host_intr(ap, qc);
231 } else if (is_vsc_sata_int_err(i, int_status)) { 238 else if (is_vsc_sata_int_err(i, int_status)) {
232 /* 239 /*
233 * On some chips (i.e. Intel 31244), an error 240 * On some chips (i.e. Intel 31244), an error
234 * interrupt will sneak in at initialization 241 * interrupt will sneak in at initialization
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 73994e2ac2cb..dae4f08adde0 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -720,6 +720,24 @@ void scsi_init_cmd_from_req(struct scsi_cmnd *cmd, struct scsi_request *sreq)
720static DEFINE_PER_CPU(struct list_head, scsi_done_q); 720static DEFINE_PER_CPU(struct list_head, scsi_done_q);
721 721
722/** 722/**
723 * scsi_req_abort_cmd - Request command recovery for the specified command
724 * @cmd: pointer to the SCSI command of interest
725 *
726 * This function requests that SCSI Core start recovery for the
727 * command by deleting the timer and adding the command to the eh
728 * queue. It can be called by either LLDDs or SCSI Core. LLDDs that
729 * implement their own error recovery MAY ignore the timeout event if
730 * they generated scsi_req_abort_cmd.
731 */
732void scsi_req_abort_cmd(struct scsi_cmnd *cmd)
733{
734 if (!scsi_delete_timer(cmd))
735 return;
736 scsi_times_out(cmd);
737}
738EXPORT_SYMBOL(scsi_req_abort_cmd);
739
740/**
723 * scsi_done - Enqueue the finished SCSI command into the done queue. 741 * scsi_done - Enqueue the finished SCSI command into the done queue.
724 * @cmd: The SCSI Command for which a low-level device driver (LLDD) gives 742 * @cmd: The SCSI Command for which a low-level device driver (LLDD) gives
725 * ownership back to SCSI Core -- i.e. the LLDD has finished with it. 743 * ownership back to SCSI Core -- i.e. the LLDD has finished with it.
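Note: scsi_req_abort_cmd() is the SCSI-side entry point that lets an LLDD push a still-running command into error handling without waiting for its timer to fire. A sketch of the intended libata usage, assuming the queued command carries its scsi_cmnd in qc->scsicmd:

static void qc_schedule_eh_sketch(struct ata_queued_cmd *qc)
{
        qc->flags |= ATA_QCFLAG_FAILED;         /* owned by EH from here on */
        scsi_req_abort_cmd(qc->scsicmd);        /* delete timer, queue to EH */
}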
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 1c75646f9689..9ca71cbefce0 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -56,6 +56,7 @@ void scsi_eh_wakeup(struct Scsi_Host *shost)
56 printk("Waking error handler thread\n")); 56 printk("Waking error handler thread\n"));
57 } 57 }
58} 58}
59EXPORT_SYMBOL_GPL(scsi_eh_wakeup);
59 60
60/** 61/**
61 * scsi_eh_scmd_add - add scsi cmd to error handling. 62 * scsi_eh_scmd_add - add scsi cmd to error handling.
@@ -1517,7 +1518,7 @@ int scsi_error_handler(void *data)
1517 */ 1518 */
1518 set_current_state(TASK_INTERRUPTIBLE); 1519 set_current_state(TASK_INTERRUPTIBLE);
1519 while (!kthread_should_stop()) { 1520 while (!kthread_should_stop()) {
1520 if (shost->host_failed == 0 || 1521 if ((shost->host_failed == 0 && shost->host_eh_scheduled == 0) ||
1521 shost->host_failed != shost->host_busy) { 1522 shost->host_failed != shost->host_busy) {
1522 SCSI_LOG_ERROR_RECOVERY(1, 1523 SCSI_LOG_ERROR_RECOVERY(1,
1523 printk("Error handler scsi_eh_%d sleeping\n", 1524 printk("Error handler scsi_eh_%d sleeping\n",
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 764a8b375ead..18e34775b238 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -566,7 +566,7 @@ void scsi_device_unbusy(struct scsi_device *sdev)
566 spin_lock_irqsave(shost->host_lock, flags); 566 spin_lock_irqsave(shost->host_lock, flags);
567 shost->host_busy--; 567 shost->host_busy--;
568 if (unlikely(scsi_host_in_recovery(shost) && 568 if (unlikely(scsi_host_in_recovery(shost) &&
569 shost->host_failed)) 569 (shost->host_failed || shost->host_eh_scheduled)))
570 scsi_eh_wakeup(shost); 570 scsi_eh_wakeup(shost);
571 spin_unlock(shost->host_lock); 571 spin_unlock(shost->host_lock);
572 spin_lock(sdev->request_queue->queue_lock); 572 spin_lock(sdev->request_queue->queue_lock);
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 27c48274e8cb..0b39081113be 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -63,7 +63,6 @@ extern int scsi_delete_timer(struct scsi_cmnd *);
63extern void scsi_times_out(struct scsi_cmnd *cmd); 63extern void scsi_times_out(struct scsi_cmnd *cmd);
64extern int scsi_error_handler(void *host); 64extern int scsi_error_handler(void *host);
65extern int scsi_decide_disposition(struct scsi_cmnd *cmd); 65extern int scsi_decide_disposition(struct scsi_cmnd *cmd);
66extern void scsi_eh_wakeup(struct Scsi_Host *shost);
67extern int scsi_eh_scmd_add(struct scsi_cmnd *, int); 66extern int scsi_eh_scmd_add(struct scsi_cmnd *, int);
68 67
69/* scsi_lib.c */ 68/* scsi_lib.c */
diff --git a/include/linux/ata.h b/include/linux/ata.h
index 312a2c0c64e6..c494e1c0531e 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -97,6 +97,9 @@ enum {
97 ATA_DRQ = (1 << 3), /* data request i/o */ 97 ATA_DRQ = (1 << 3), /* data request i/o */
98 ATA_ERR = (1 << 0), /* have an error */ 98 ATA_ERR = (1 << 0), /* have an error */
99 ATA_SRST = (1 << 2), /* software reset */ 99 ATA_SRST = (1 << 2), /* software reset */
100 ATA_ICRC = (1 << 7), /* interface CRC error */
101 ATA_UNC = (1 << 6), /* uncorrectable media error */
102 ATA_IDNF = (1 << 4), /* ID not found */
100 ATA_ABORTED = (1 << 2), /* command aborted */ 103 ATA_ABORTED = (1 << 2), /* command aborted */
101 104
102 /* ATA command block registers */ 105 /* ATA command block registers */
@@ -130,6 +133,8 @@ enum {
130 ATA_CMD_WRITE = 0xCA, 133 ATA_CMD_WRITE = 0xCA,
131 ATA_CMD_WRITE_EXT = 0x35, 134 ATA_CMD_WRITE_EXT = 0x35,
132 ATA_CMD_WRITE_FUA_EXT = 0x3D, 135 ATA_CMD_WRITE_FUA_EXT = 0x3D,
136 ATA_CMD_FPDMA_READ = 0x60,
137 ATA_CMD_FPDMA_WRITE = 0x61,
133 ATA_CMD_PIO_READ = 0x20, 138 ATA_CMD_PIO_READ = 0x20,
134 ATA_CMD_PIO_READ_EXT = 0x24, 139 ATA_CMD_PIO_READ_EXT = 0x24,
135 ATA_CMD_PIO_WRITE = 0x30, 140 ATA_CMD_PIO_WRITE = 0x30,
@@ -148,6 +153,10 @@ enum {
148 ATA_CMD_INIT_DEV_PARAMS = 0x91, 153 ATA_CMD_INIT_DEV_PARAMS = 0x91,
149 ATA_CMD_READ_NATIVE_MAX = 0xF8, 154 ATA_CMD_READ_NATIVE_MAX = 0xF8,
150 ATA_CMD_READ_NATIVE_MAX_EXT = 0x27, 155 ATA_CMD_READ_NATIVE_MAX_EXT = 0x27,
156 ATA_CMD_READ_LOG_EXT = 0x2f,
157
158 /* READ_LOG_EXT pages */
159 ATA_LOG_SATA_NCQ = 0x10,
151 160
152 /* SETFEATURES stuff */ 161 /* SETFEATURES stuff */
153 SETFEATURES_XFER = 0x03, 162 SETFEATURES_XFER = 0x03,
@@ -192,6 +201,16 @@ enum {
192 SCR_ACTIVE = 3, 201 SCR_ACTIVE = 3,
193 SCR_NOTIFICATION = 4, 202 SCR_NOTIFICATION = 4,
194 203
204 /* SError bits */
205 SERR_DATA_RECOVERED = (1 << 0), /* recovered data error */
206 SERR_COMM_RECOVERED = (1 << 1), /* recovered comm failure */
207 SERR_DATA = (1 << 8), /* unrecovered data error */
208 SERR_PERSISTENT = (1 << 9), /* persistent data/comm error */
209 SERR_PROTOCOL = (1 << 10), /* protocol violation */
210 SERR_INTERNAL = (1 << 11), /* host internal error */
211 SERR_PHYRDY_CHG = (1 << 16), /* PHY RDY changed */
212 SERR_DEV_XCHG = (1 << 26), /* device exchanged */
213
195 /* struct ata_taskfile flags */ 214 /* struct ata_taskfile flags */
196 ATA_TFLAG_LBA48 = (1 << 0), /* enable 48-bit LBA and "HOB" */ 215 ATA_TFLAG_LBA48 = (1 << 0), /* enable 48-bit LBA and "HOB" */
197 ATA_TFLAG_ISADDR = (1 << 1), /* enable r/w to nsect/lba regs */ 216 ATA_TFLAG_ISADDR = (1 << 1), /* enable r/w to nsect/lba regs */
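Note: these SError bits feed the new per-port ata_eh_info (ehi->serror, defined below), letting EH pick a recovery action from link-level evidence. An illustrative triage — an assumption about policy, not what libata actually does:

static unsigned int serror_action_sketch(u32 serror)
{
        if (serror & (SERR_PHYRDY_CHG | SERR_DEV_XCHG))
                return ATA_EH_HARDRESET;        /* link dropped or device swapped */
        if (serror & (SERR_PROTOCOL | SERR_INTERNAL))
                return ATA_EH_SOFTRESET;
        return 0;                               /* recovered/none: no action */
}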
@@ -199,6 +218,7 @@ enum {
199 ATA_TFLAG_WRITE = (1 << 3), /* data dir: host->dev==1 (write) */ 218 ATA_TFLAG_WRITE = (1 << 3), /* data dir: host->dev==1 (write) */
200 ATA_TFLAG_LBA = (1 << 4), /* enable LBA */ 219 ATA_TFLAG_LBA = (1 << 4), /* enable LBA */
201 ATA_TFLAG_FUA = (1 << 5), /* enable FUA */ 220 ATA_TFLAG_FUA = (1 << 5), /* enable FUA */
221 ATA_TFLAG_POLLING = (1 << 6), /* set nIEN to 1 and use polling */
202}; 222};
203 223
204enum ata_tf_protocols { 224enum ata_tf_protocols {
@@ -207,6 +227,7 @@ enum ata_tf_protocols {
207 ATA_PROT_NODATA, /* no data */ 227 ATA_PROT_NODATA, /* no data */
208 ATA_PROT_PIO, /* PIO single sector */ 228 ATA_PROT_PIO, /* PIO single sector */
209 ATA_PROT_DMA, /* DMA */ 229 ATA_PROT_DMA, /* DMA */
230 ATA_PROT_NCQ, /* NCQ */
210 ATA_PROT_ATAPI, /* packet command, PIO data xfer*/ 231 ATA_PROT_ATAPI, /* packet command, PIO data xfer*/
211 ATA_PROT_ATAPI_NODATA, /* packet command, no data */ 232 ATA_PROT_ATAPI_NODATA, /* packet command, no data */
212 ATA_PROT_ATAPI_DMA, /* packet command with special DMA sauce */ 233 ATA_PROT_ATAPI_DMA, /* packet command with special DMA sauce */
@@ -262,6 +283,8 @@ struct ata_taskfile {
262#define ata_id_has_pm(id) ((id)[82] & (1 << 3)) 283#define ata_id_has_pm(id) ((id)[82] & (1 << 3))
263#define ata_id_has_lba(id) ((id)[49] & (1 << 9)) 284#define ata_id_has_lba(id) ((id)[49] & (1 << 9))
264#define ata_id_has_dma(id) ((id)[49] & (1 << 8)) 285#define ata_id_has_dma(id) ((id)[49] & (1 << 8))
286#define ata_id_has_ncq(id) ((id)[76] & (1 << 8))
287#define ata_id_queue_depth(id) (((id)[75] & 0x1f) + 1)
265#define ata_id_removeable(id) ((id)[0] & (1 << 7)) 288#define ata_id_removeable(id) ((id)[0] & (1 << 7))
266#define ata_id_has_dword_io(id) ((id)[50] & (1 << 0)) 289#define ata_id_has_dword_io(id) ((id)[50] & (1 << 0))
267#define ata_id_u32(id,n) \ 290#define ata_id_u32(id,n) \
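Note: ata_id_has_ncq()/ata_id_queue_depth() above expose IDENTIFY words 76 and 75 — bit 8 of word 76 advertises NCQ, and word 75 bits 4:0 encode (queue depth - 1). Example of deriving a usable depth, capped so tag 31 stays reserved for internal commands (a sketch, not part of this patch):

static int ncq_depth_sketch(const u16 *id)
{
        int depth;

        if (!ata_id_has_ncq(id))
                return 1;

        depth = ata_id_queue_depth(id);         /* ((id[75] & 0x1f) + 1) */
        if (depth > ATA_MAX_QUEUE - 1)          /* tag 31 reserved for EH */
                depth = ATA_MAX_QUEUE - 1;
        return depth;
}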
@@ -272,6 +295,8 @@ struct ata_taskfile {
272 ((u64) (id)[(n) + 1] << 16) | \ 295 ((u64) (id)[(n) + 1] << 16) | \
273 ((u64) (id)[(n) + 0]) ) 296 ((u64) (id)[(n) + 0]) )
274 297
298#define ata_id_cdb_intr(id) (((id)[0] & 0x60) == 0x20)
299
275static inline unsigned int ata_id_major_version(const u16 *id) 300static inline unsigned int ata_id_major_version(const u16 *id)
276{ 301{
277 unsigned int mver; 302 unsigned int mver;
@@ -311,6 +336,15 @@ static inline int is_atapi_taskfile(const struct ata_taskfile *tf)
311 (tf->protocol == ATA_PROT_ATAPI_DMA); 336 (tf->protocol == ATA_PROT_ATAPI_DMA);
312} 337}
313 338
339static inline int is_multi_taskfile(struct ata_taskfile *tf)
340{
341 return (tf->command == ATA_CMD_READ_MULTI) ||
342 (tf->command == ATA_CMD_WRITE_MULTI) ||
343 (tf->command == ATA_CMD_READ_MULTI_EXT) ||
344 (tf->command == ATA_CMD_WRITE_MULTI_EXT) ||
345 (tf->command == ATA_CMD_WRITE_MULTI_FUA_EXT);
346}
347
314static inline int ata_ok(u8 status) 348static inline int ata_ok(u8 status)
315{ 349{
316 return ((status & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | ATA_ERR)) 350 return ((status & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | ATA_ERR))
diff --git a/include/linux/libata.h b/include/linux/libata.h
index d35b1e3bb7e0..fcdd798bb086 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -108,7 +108,9 @@ enum {
108 LIBATA_MAX_PRD = ATA_MAX_PRD / 2, 108 LIBATA_MAX_PRD = ATA_MAX_PRD / 2,
109 ATA_MAX_PORTS = 8, 109 ATA_MAX_PORTS = 8,
110 ATA_DEF_QUEUE = 1, 110 ATA_DEF_QUEUE = 1,
111 ATA_MAX_QUEUE = 1, 111 /* tag ATA_MAX_QUEUE - 1 is reserved for internal commands */
112 ATA_MAX_QUEUE = 32,
113 ATA_TAG_INTERNAL = ATA_MAX_QUEUE - 1,
112 ATA_MAX_SECTORS = 200, /* FIXME */ 114 ATA_MAX_SECTORS = 200, /* FIXME */
113 ATA_MAX_BUS = 2, 115 ATA_MAX_BUS = 2,
114 ATA_DEF_BUSY_WAIT = 10000, 116 ATA_DEF_BUSY_WAIT = 10000,
@@ -122,6 +124,8 @@ enum {
122 /* struct ata_device stuff */ 124 /* struct ata_device stuff */
123 ATA_DFLAG_LBA = (1 << 0), /* device supports LBA */ 125 ATA_DFLAG_LBA = (1 << 0), /* device supports LBA */
124 ATA_DFLAG_LBA48 = (1 << 1), /* device supports LBA48 */ 126 ATA_DFLAG_LBA48 = (1 << 1), /* device supports LBA48 */
127 ATA_DFLAG_CDB_INTR = (1 << 2), /* device asserts INTRQ when ready for CDB */
128 ATA_DFLAG_NCQ = (1 << 3), /* device supports NCQ */
125 ATA_DFLAG_CFG_MASK = (1 << 8) - 1, 129 ATA_DFLAG_CFG_MASK = (1 << 8) - 1,
126 130
127 ATA_DFLAG_PIO = (1 << 8), /* device currently in PIO mode */ 131 ATA_DFLAG_PIO = (1 << 8), /* device currently in PIO mode */
@@ -145,14 +149,19 @@ enum {
145 ATA_FLAG_PIO_DMA = (1 << 7), /* PIO cmds via DMA */ 149 ATA_FLAG_PIO_DMA = (1 << 7), /* PIO cmds via DMA */
146 ATA_FLAG_PIO_LBA48 = (1 << 8), /* Host DMA engine is LBA28 only */ 150 ATA_FLAG_PIO_LBA48 = (1 << 8), /* Host DMA engine is LBA28 only */
147 ATA_FLAG_IRQ_MASK = (1 << 9), /* Mask IRQ in PIO xfers */ 151 ATA_FLAG_IRQ_MASK = (1 << 9), /* Mask IRQ in PIO xfers */
152 ATA_FLAG_PIO_POLLING = (1 << 10), /* use polling PIO if LLD
153 * doesn't handle PIO interrupts */
154 ATA_FLAG_NCQ = (1 << 11), /* host supports NCQ */
148 155
149 ATA_FLAG_NOINTR = (1 << 16), /* FIXME: Remove this once 156 ATA_FLAG_DEBUGMSG = (1 << 14),
150 * proper HSM is in place. */ 157 ATA_FLAG_FLUSH_PORT_TASK = (1 << 15), /* flush port task */
151 ATA_FLAG_DEBUGMSG = (1 << 17),
152 ATA_FLAG_FLUSH_PORT_TASK = (1 << 18), /* flush port task */
153 158
154 ATA_FLAG_DISABLED = (1 << 19), /* port is disabled, ignore it */ 159 ATA_FLAG_EH_PENDING = (1 << 16), /* EH pending */
155 ATA_FLAG_SUSPENDED = (1 << 20), /* port is suspended */ 160 ATA_FLAG_FROZEN = (1 << 17), /* port is frozen */
161 ATA_FLAG_RECOVERED = (1 << 18), /* recovery action performed */
162
163 ATA_FLAG_DISABLED = (1 << 22), /* port is disabled, ignore it */
164 ATA_FLAG_SUSPENDED = (1 << 23), /* port is suspended (power) */
156 165
157 /* bits 24:31 of ap->flags are reserved for LLDD specific flags */ 166 /* bits 24:31 of ap->flags are reserved for LLDD specific flags */
158 167
@@ -162,17 +171,18 @@ enum {
162 ATA_QCFLAG_SINGLE = (1 << 2), /* no s/g, just a single buffer */ 171 ATA_QCFLAG_SINGLE = (1 << 2), /* no s/g, just a single buffer */
163 ATA_QCFLAG_DMAMAP = ATA_QCFLAG_SG | ATA_QCFLAG_SINGLE, 172 ATA_QCFLAG_DMAMAP = ATA_QCFLAG_SG | ATA_QCFLAG_SINGLE,
164 ATA_QCFLAG_IO = (1 << 3), /* standard IO command */ 173 ATA_QCFLAG_IO = (1 << 3), /* standard IO command */
165 ATA_QCFLAG_EH_SCHEDULED = (1 << 4), /* EH scheduled */ 174 ATA_QCFLAG_RESULT_TF = (1 << 4), /* result TF requested */
175
176 ATA_QCFLAG_FAILED = (1 << 16), /* cmd failed and is owned by EH */
177 ATA_QCFLAG_SENSE_VALID = (1 << 17), /* sense data valid */
178 ATA_QCFLAG_EH_SCHEDULED = (1 << 18), /* EH scheduled (obsolete) */
166 179
167 /* host set flags */ 180 /* host set flags */
168 ATA_HOST_SIMPLEX = (1 << 0), /* Host is simplex, one DMA channel per host_set only */ 181 ATA_HOST_SIMPLEX = (1 << 0), /* Host is simplex, one DMA channel per host_set only */
169 182
170 /* various lengths of time */ 183 /* various lengths of time */
171 ATA_TMOUT_PIO = 30 * HZ,
172 ATA_TMOUT_BOOT = 30 * HZ, /* heuristic */ 184 ATA_TMOUT_BOOT = 30 * HZ, /* heuristic */
173 ATA_TMOUT_BOOT_QUICK = 7 * HZ, /* heuristic */ 185 ATA_TMOUT_BOOT_QUICK = 7 * HZ, /* heuristic */
174 ATA_TMOUT_CDB = 30 * HZ,
175 ATA_TMOUT_CDB_QUICK = 5 * HZ,
176 ATA_TMOUT_INTERNAL = 30 * HZ, 186 ATA_TMOUT_INTERNAL = 30 * HZ,
177 ATA_TMOUT_INTERNAL_QUICK = 5 * HZ, 187 ATA_TMOUT_INTERNAL_QUICK = 5 * HZ,
178 188
@@ -216,19 +226,39 @@ enum {
216 ATA_PORT_PRIMARY = (1 << 0), 226 ATA_PORT_PRIMARY = (1 << 0),
217 ATA_PORT_SECONDARY = (1 << 1), 227 ATA_PORT_SECONDARY = (1 << 1),
218 228
229 /* ering size */
230 ATA_ERING_SIZE = 32,
231
232 /* desc_len for ata_eh_info and context */
233 ATA_EH_DESC_LEN = 80,
234
235 /* reset / recovery action types */
236 ATA_EH_REVALIDATE = (1 << 0),
237 ATA_EH_SOFTRESET = (1 << 1),
238 ATA_EH_HARDRESET = (1 << 2),
239
240 ATA_EH_RESET_MASK = ATA_EH_SOFTRESET | ATA_EH_HARDRESET,
241
242 /* ata_eh_info->flags */
243 ATA_EHI_DID_RESET = (1 << 0), /* already reset this port */
244
245 /* max repeat if error condition is still set after ->error_handler */
246 ATA_EH_MAX_REPEAT = 5,
247
219 /* how hard are we gonna try to probe/recover devices */ 248 /* how hard are we gonna try to probe/recover devices */
220 ATA_PROBE_MAX_TRIES = 3, 249 ATA_PROBE_MAX_TRIES = 3,
250 ATA_EH_RESET_TRIES = 3,
251 ATA_EH_DEV_TRIES = 3,
221}; 252};
222 253
223enum hsm_task_states { 254enum hsm_task_states {
224 HSM_ST_UNKNOWN, 255 HSM_ST_UNKNOWN, /* state unknown */
225 HSM_ST_IDLE, 256 HSM_ST_IDLE, /* no command ongoing */
226 HSM_ST_POLL, 257 HSM_ST, /* (waiting for the device to) transfer data */
227 HSM_ST_TMOUT, 258 HSM_ST_LAST, /* (waiting for the device to) complete command */
228 HSM_ST, 259 HSM_ST_ERR, /* error */
229 HSM_ST_LAST, 260 HSM_ST_FIRST, /* (waiting for the device to)
230 HSM_ST_LAST_POLL, 261 write CDB or first data block */
231 HSM_ST_ERR,
232}; 262};
233 263
234enum ata_completion_errors { 264enum ata_completion_errors {
@@ -343,7 +373,7 @@ struct ata_queued_cmd {
343 struct scatterlist *__sg; 373 struct scatterlist *__sg;
344 374
345 unsigned int err_mask; 375 unsigned int err_mask;
346 376 struct ata_taskfile result_tf;
347 ata_qc_cb_t complete_fn; 377 ata_qc_cb_t complete_fn;
348 378
349 void *private_data; 379 void *private_data;
@@ -355,12 +385,24 @@ struct ata_host_stats {
355 unsigned long rw_reqbuf; 385 unsigned long rw_reqbuf;
356}; 386};
357 387
388struct ata_ering_entry {
389 int is_io;
390 unsigned int err_mask;
391 u64 timestamp;
392};
393
394struct ata_ering {
395 int cursor;
396 struct ata_ering_entry ring[ATA_ERING_SIZE];
397};
398
358struct ata_device { 399struct ata_device {
400 struct ata_port *ap;
359 u64 n_sectors; /* size of device, if ATA */ 401 u64 n_sectors; /* size of device, if ATA */
360 unsigned long flags; /* ATA_DFLAG_xxx */ 402 unsigned long flags; /* ATA_DFLAG_xxx */
361 unsigned int class; /* ATA_DEV_xxx */ 403 unsigned int class; /* ATA_DEV_xxx */
362 unsigned int devno; /* 0 or 1 */ 404 unsigned int devno; /* 0 or 1 */
363 u16 *id; /* IDENTIFY xxx DEVICE data */ 405 u16 id[ATA_ID_WORDS]; /* IDENTIFY xxx DEVICE data */
364 u8 pio_mode; 406 u8 pio_mode;
365 u8 dma_mode; 407 u8 dma_mode;
366 u8 xfer_mode; 408 u8 xfer_mode;
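Note: struct ata_ering above is a fixed-size circular error history kept per device; the cursor walks the 32-entry ring as failures are recorded. A minimal sketch of recording an entry, assuming a jiffies-based timestamp:

static void ering_record_sketch(struct ata_ering *ering, int is_io,
                                unsigned int err_mask)
{
        struct ata_ering_entry *ent;

        ering->cursor = (ering->cursor + 1) % ATA_ERING_SIZE;
        ent = &ering->ring[ering->cursor];

        ent->is_io = is_io;
        ent->err_mask = err_mask;
        ent->timestamp = get_jiffies_64();      /* assumption: 64-bit jiffies */
}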
@@ -380,6 +422,24 @@ struct ata_device {
380 u16 cylinders; /* Number of cylinders */ 422 u16 cylinders; /* Number of cylinders */
381 u16 heads; /* Number of heads */ 423 u16 heads; /* Number of heads */
382 u16 sectors; /* Number of sectors per track */ 424 u16 sectors; /* Number of sectors per track */
425
426 /* error history */
427 struct ata_ering ering;
428};
429
430struct ata_eh_info {
431 struct ata_device *dev; /* offending device */
432 u32 serror; /* SError from LLDD */
433 unsigned int err_mask; /* port-wide err_mask */
434 unsigned int action; /* ATA_EH_* action mask */
435 unsigned int flags; /* ATA_EHI_* flags */
436 char desc[ATA_EH_DESC_LEN];
437 int desc_len;
438};
439
440struct ata_eh_context {
441 struct ata_eh_info i;
442 int tries[ATA_MAX_DEVICES];
383}; 443};
384 444
385struct ata_port { 445struct ata_port {
@@ -406,11 +466,19 @@ struct ata_port {
406 unsigned int cbl; /* cable type; ATA_CBL_xxx */ 466 unsigned int cbl; /* cable type; ATA_CBL_xxx */
407 unsigned int sata_spd_limit; /* SATA PHY speed limit */ 467 unsigned int sata_spd_limit; /* SATA PHY speed limit */
408 468
469 /* record runtime error info, protected by host_set lock */
470 struct ata_eh_info eh_info;
471 /* EH context owned by EH */
472 struct ata_eh_context eh_context;
473
409 struct ata_device device[ATA_MAX_DEVICES]; 474 struct ata_device device[ATA_MAX_DEVICES];
410 475
411 struct ata_queued_cmd qcmd[ATA_MAX_QUEUE]; 476 struct ata_queued_cmd qcmd[ATA_MAX_QUEUE];
412 unsigned long qactive; 477 unsigned long qc_allocated;
478 unsigned int qc_active;
479
413 unsigned int active_tag; 480 unsigned int active_tag;
481 u32 sactive;
414 482
415 struct ata_host_stats stats; 483 struct ata_host_stats stats;
416 struct ata_host_set *host_set; 484 struct ata_host_set *host_set;
@@ -419,12 +487,13 @@ struct ata_port {
419 struct work_struct port_task; 487 struct work_struct port_task;
420 488
421 unsigned int hsm_task_state; 489 unsigned int hsm_task_state;
422 unsigned long pio_task_timeout;
423 490
424 u32 msg_enable; 491 u32 msg_enable;
425 struct list_head eh_done_q; 492 struct list_head eh_done_q;
426 493
427 void *private_data; 494 void *private_data;
495
496 u8 sector_buf[ATA_SECT_SIZE]; /* owned by EH */
428}; 497};
429 498
430struct ata_port_operations { 499struct ata_port_operations {
@@ -458,7 +527,15 @@ struct ata_port_operations {
458 void (*qc_prep) (struct ata_queued_cmd *qc); 527 void (*qc_prep) (struct ata_queued_cmd *qc);
459 unsigned int (*qc_issue) (struct ata_queued_cmd *qc); 528 unsigned int (*qc_issue) (struct ata_queued_cmd *qc);
460 529
461 void (*eng_timeout) (struct ata_port *ap); 530 /* Error handlers. ->error_handler overrides ->eng_timeout and
531 * indicates that new-style EH is in place.
532 */
533 void (*eng_timeout) (struct ata_port *ap); /* obsolete */
534
535 void (*freeze) (struct ata_port *ap);
536 void (*thaw) (struct ata_port *ap);
537 void (*error_handler) (struct ata_port *ap);
538 void (*post_internal_cmd) (struct ata_queued_cmd *qc);
462 539
463 irqreturn_t (*irq_handler)(int, void *, struct pt_regs *); 540 irqreturn_t (*irq_handler)(int, void *, struct pt_regs *);
464 void (*irq_clear) (struct ata_port *); 541 void (*irq_clear) (struct ata_port *);
@@ -504,7 +581,7 @@ extern void ata_port_probe(struct ata_port *);
504extern void __sata_phy_reset(struct ata_port *ap); 581extern void __sata_phy_reset(struct ata_port *ap);
505extern void sata_phy_reset(struct ata_port *ap); 582extern void sata_phy_reset(struct ata_port *ap);
506extern void ata_bus_reset(struct ata_port *ap); 583extern void ata_bus_reset(struct ata_port *ap);
507extern int ata_set_sata_spd(struct ata_port *ap); 584extern int sata_set_spd(struct ata_port *ap);
508extern int ata_drive_probe_reset(struct ata_port *ap, 585extern int ata_drive_probe_reset(struct ata_port *ap,
509 ata_probeinit_fn_t probeinit, 586 ata_probeinit_fn_t probeinit,
510 ata_reset_fn_t softreset, ata_reset_fn_t hardreset, 587 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
@@ -513,8 +590,7 @@ extern void ata_std_probeinit(struct ata_port *ap);
513extern int ata_std_softreset(struct ata_port *ap, unsigned int *classes); 590extern int ata_std_softreset(struct ata_port *ap, unsigned int *classes);
514extern int sata_std_hardreset(struct ata_port *ap, unsigned int *class); 591extern int sata_std_hardreset(struct ata_port *ap, unsigned int *class);
515extern void ata_std_postreset(struct ata_port *ap, unsigned int *classes); 592extern void ata_std_postreset(struct ata_port *ap, unsigned int *classes);
516extern int ata_dev_revalidate(struct ata_port *ap, struct ata_device *dev, 593extern int ata_dev_revalidate(struct ata_device *dev, int post_reset);
517 int post_reset);
518extern void ata_port_disable(struct ata_port *); 594extern void ata_port_disable(struct ata_port *);
519extern void ata_std_ports(struct ata_ioports *ioaddr); 595extern void ata_std_ports(struct ata_ioports *ioaddr);
520#ifdef CONFIG_PCI 596#ifdef CONFIG_PCI
@@ -530,14 +606,18 @@ extern void ata_host_set_remove(struct ata_host_set *host_set);
530extern int ata_scsi_detect(struct scsi_host_template *sht); 606extern int ata_scsi_detect(struct scsi_host_template *sht);
531extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg); 607extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
532extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)); 608extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
533extern void ata_eh_qc_complete(struct ata_queued_cmd *qc);
534extern void ata_eh_qc_retry(struct ata_queued_cmd *qc);
535extern int ata_scsi_release(struct Scsi_Host *host); 609extern int ata_scsi_release(struct Scsi_Host *host);
536extern unsigned int ata_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc); 610extern unsigned int ata_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc);
611extern int sata_scr_valid(struct ata_port *ap);
612extern int sata_scr_read(struct ata_port *ap, int reg, u32 *val);
613extern int sata_scr_write(struct ata_port *ap, int reg, u32 val);
614extern int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val);
615extern int ata_port_online(struct ata_port *ap);
616extern int ata_port_offline(struct ata_port *ap);
537extern int ata_scsi_device_resume(struct scsi_device *); 617extern int ata_scsi_device_resume(struct scsi_device *);
538extern int ata_scsi_device_suspend(struct scsi_device *, pm_message_t state); 618extern int ata_scsi_device_suspend(struct scsi_device *, pm_message_t state);
539extern int ata_device_resume(struct ata_port *, struct ata_device *); 619extern int ata_device_resume(struct ata_device *);
540extern int ata_device_suspend(struct ata_port *, struct ata_device *, pm_message_t state); 620extern int ata_device_suspend(struct ata_device *, pm_message_t state);
541extern int ata_ratelimit(void); 621extern int ata_ratelimit(void);
542extern unsigned int ata_busy_sleep(struct ata_port *ap, 622extern unsigned int ata_busy_sleep(struct ata_port *ap,
543 unsigned long timeout_pat, 623 unsigned long timeout_pat,
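The sata_scr_*() accessors above replace the unchecked scr_read()/scr_write() inlines removed at the bottom of this file: they return an error code instead of assuming every port has SCR registers, which is what lets the same EH code run on SCR-less hosts. Illustrative calling convention, assuming the usual 0-on-success return:

#include <linux/libata.h>

/* Illustrative only: report link status without assuming SCR access. */
static void example_report_link(struct ata_port *ap)
{
	u32 sstatus;

	if (sata_scr_read(ap, SCR_STATUS, &sstatus) == 0)
		ata_port_printk(ap, KERN_INFO, "SStatus 0x%x\n", sstatus);
	else
		ata_port_printk(ap, KERN_INFO, "no SCR access on this port\n");
}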
@@ -582,16 +662,26 @@ extern void ata_bmdma_start (struct ata_queued_cmd *qc);
582extern void ata_bmdma_stop(struct ata_queued_cmd *qc); 662extern void ata_bmdma_stop(struct ata_queued_cmd *qc);
583extern u8 ata_bmdma_status(struct ata_port *ap); 663extern u8 ata_bmdma_status(struct ata_port *ap);
584extern void ata_bmdma_irq_clear(struct ata_port *ap); 664extern void ata_bmdma_irq_clear(struct ata_port *ap);
585extern void __ata_qc_complete(struct ata_queued_cmd *qc); 665extern void ata_bmdma_freeze(struct ata_port *ap);
586extern void ata_scsi_simulate(struct ata_port *ap, struct ata_device *dev, 666extern void ata_bmdma_thaw(struct ata_port *ap);
587 struct scsi_cmnd *cmd, 667extern void ata_bmdma_drive_eh(struct ata_port *ap,
668 ata_reset_fn_t softreset,
669 ata_reset_fn_t hardreset,
670 ata_postreset_fn_t postreset);
671extern void ata_bmdma_error_handler(struct ata_port *ap);
672extern void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc);
673extern void ata_qc_complete(struct ata_queued_cmd *qc);
674extern int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
675 void (*finish_qc)(struct ata_queued_cmd *));
676extern void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd,
588 void (*done)(struct scsi_cmnd *)); 677 void (*done)(struct scsi_cmnd *));
589extern int ata_std_bios_param(struct scsi_device *sdev, 678extern int ata_std_bios_param(struct scsi_device *sdev,
590 struct block_device *bdev, 679 struct block_device *bdev,
591 sector_t capacity, int geom[]); 680 sector_t capacity, int geom[]);
592extern int ata_scsi_slave_config(struct scsi_device *sdev); 681extern int ata_scsi_slave_config(struct scsi_device *sdev);
593extern struct ata_device *ata_dev_pair(struct ata_port *ap, 682extern int ata_scsi_change_queue_depth(struct scsi_device *sdev,
594 struct ata_device *adev); 683 int queue_depth);
684extern struct ata_device *ata_dev_pair(struct ata_device *adev);
595 685
596/* 686/*
597 * Timing helpers 687 * Timing helpers
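ata_qc_complete_multiple() above is the NCQ completion primitive: the driver passes in the controller's current mask of still-active tags and libata completes every command whose bit has cleared since the last call. A sketch of the intended use from an interrupt handler; EXAMPLE_SLOT_STATUS is a made-up register, the real users are drivers like ahci.c and sata_sil24.c:

#include <linux/libata.h>
#include <asm/io.h>

#define EXAMPLE_SLOT_STATUS	0x00	/* hypothetical: 1 bit per active tag */

/* Illustrative NCQ completion from an interrupt handler. */
static void example_host_intr(struct ata_port *ap, void __iomem *port_mmio)
{
	u32 qc_active = readl(port_mmio + EXAMPLE_SLOT_STATUS);

	/* completes every qc set in ap->qc_active but clear in qc_active;
	 * the finish_qc callback is optional (NULL here)
	 */
	ata_qc_complete_multiple(ap, qc_active, NULL);
}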
@@ -641,10 +731,46 @@ extern unsigned long ata_pci_default_filter(const struct ata_port *, struct ata_
641 * EH 731 * EH
642 */ 732 */
643extern void ata_eng_timeout(struct ata_port *ap); 733extern void ata_eng_timeout(struct ata_port *ap);
734
735extern void ata_port_schedule_eh(struct ata_port *ap);
736extern int ata_port_abort(struct ata_port *ap);
737extern int ata_port_freeze(struct ata_port *ap);
738
739extern void ata_eh_freeze_port(struct ata_port *ap);
740extern void ata_eh_thaw_port(struct ata_port *ap);
741
644extern void ata_eh_qc_complete(struct ata_queued_cmd *qc); 742extern void ata_eh_qc_complete(struct ata_queued_cmd *qc);
645extern void ata_eh_qc_retry(struct ata_queued_cmd *qc); 743extern void ata_eh_qc_retry(struct ata_queued_cmd *qc);
646 744
745extern void ata_do_eh(struct ata_port *ap, ata_reset_fn_t softreset,
746 ata_reset_fn_t hardreset, ata_postreset_fn_t postreset);
747
748/*
749 * printk helpers
750 */
751#define ata_port_printk(ap, lv, fmt, args...) \
752 printk(lv"ata%u: "fmt, (ap)->id , ##args)
753
754#define ata_dev_printk(dev, lv, fmt, args...) \
755 printk(lv"ata%u.%02u: "fmt, (dev)->ap->id, (dev)->devno , ##args)
756
757/*
758 * ata_eh_info helpers
759 */
760#define ata_ehi_push_desc(ehi, fmt, args...) do { \
761 (ehi)->desc_len += scnprintf((ehi)->desc + (ehi)->desc_len, \
762 ATA_EH_DESC_LEN - (ehi)->desc_len, \
763 fmt , ##args); \
764} while (0)
765
766#define ata_ehi_clear_desc(ehi) do { \
767 (ehi)->desc[0] = '\0'; \
768 (ehi)->desc_len = 0; \
769} while (0)
647 770
771/*
772 * qc helpers
773 */
648static inline int 774static inline int
649ata_sg_is_last(struct scatterlist *sg, struct ata_queued_cmd *qc) 775ata_sg_is_last(struct scatterlist *sg, struct ata_queued_cmd *qc)
650{ 776{
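The EH entry points and the printk/ata_ehi helpers added above are meant to be used together: record a description of the failure, log it, then freeze the port so recovery runs with the hardware state held still. A sketch, assuming ap->eh_info is the ata_eh_info instance this patch adds to struct ata_port alongside eh_context:

#include <linux/libata.h>

/* Sketch: describe the failure, log it, freeze the port. */
static void example_fatal_irq(struct ata_port *ap, u32 irq_stat)
{
	struct ata_eh_info *ehi = &ap->eh_info;	/* assumed field name */

	ata_ehi_clear_desc(ehi);
	ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);

	ata_port_printk(ap, KERN_ERR, "fatal error, freezing port\n");
	ata_port_freeze(ap);	/* aborts active qcs, schedules EH frozen */
}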
@@ -687,6 +813,11 @@ static inline unsigned int ata_tag_valid(unsigned int tag)
687 return (tag < ATA_MAX_QUEUE) ? 1 : 0; 813 return (tag < ATA_MAX_QUEUE) ? 1 : 0;
688} 814}
689 815
816static inline unsigned int ata_tag_internal(unsigned int tag)
817{
818 return tag == ATA_MAX_QUEUE - 1;
819}
820
690static inline unsigned int ata_class_enabled(unsigned int class) 821static inline unsigned int ata_class_enabled(unsigned int class)
691{ 822{
692 return class == ATA_DEV_ATA || class == ATA_DEV_ATAPI; 823 return class == ATA_DEV_ATA || class == ATA_DEV_ATAPI;
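ata_tag_internal() above reflects that the highest tag is now reserved for commands libata issues internally (probing, EH) rather than a separate out-of-band command path. A trivial illustration:

#include <linux/libata.h>

/* Tags below the last one carry normal I/O; the last belongs to EH. */
static void example_dump_tag_roles(void)
{
	unsigned int tag;

	for (tag = 0; tag < ATA_MAX_QUEUE; tag++)
		printk(KERN_DEBUG "tag %u: %s\n", tag,
		       ata_tag_internal(tag) ? "internal" : "normal I/O");
}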
@@ -795,20 +926,35 @@ static inline void ata_qc_set_polling(struct ata_queued_cmd *qc)
795 qc->tf.ctl |= ATA_NIEN; 926 qc->tf.ctl |= ATA_NIEN;
796} 927}
797 928
798static inline struct ata_queued_cmd *ata_qc_from_tag (struct ata_port *ap, 929static inline struct ata_queued_cmd *__ata_qc_from_tag(struct ata_port *ap,
799 unsigned int tag) 930 unsigned int tag)
800{ 931{
801 if (likely(ata_tag_valid(tag))) 932 if (likely(ata_tag_valid(tag)))
802 return &ap->qcmd[tag]; 933 return &ap->qcmd[tag];
803 return NULL; 934 return NULL;
804} 935}
805 936
806static inline void ata_tf_init(struct ata_port *ap, struct ata_taskfile *tf, unsigned int device) 937static inline struct ata_queued_cmd *ata_qc_from_tag(struct ata_port *ap,
938 unsigned int tag)
939{
940 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
941
942 if (unlikely(!qc) || !ap->ops->error_handler)
943 return qc;
944
945 if ((qc->flags & (ATA_QCFLAG_ACTIVE |
946 ATA_QCFLAG_FAILED)) == ATA_QCFLAG_ACTIVE)
947 return qc;
948
949 return NULL;
950}
951
952static inline void ata_tf_init(struct ata_device *dev, struct ata_taskfile *tf)
807{ 953{
808 memset(tf, 0, sizeof(*tf)); 954 memset(tf, 0, sizeof(*tf));
809 955
810 tf->ctl = ap->ctl; 956 tf->ctl = dev->ap->ctl;
811 if (device == 0) 957 if (dev->devno == 0)
812 tf->device = ATA_DEVICE_OBS; 958 tf->device = ATA_DEVICE_OBS;
813 else 959 else
814 tf->device = ATA_DEVICE_OBS | ATA_DEV1; 960 tf->device = ATA_DEVICE_OBS | ATA_DEV1;
@@ -823,26 +969,11 @@ static inline void ata_qc_reinit(struct ata_queued_cmd *qc)
823 qc->nbytes = qc->curbytes = 0; 969 qc->nbytes = qc->curbytes = 0;
824 qc->err_mask = 0; 970 qc->err_mask = 0;
825 971
826 ata_tf_init(qc->ap, &qc->tf, qc->dev->devno); 972 ata_tf_init(qc->dev, &qc->tf);
827}
828 973
829/** 974 /* init result_tf such that it indicates normal completion */
830 * ata_qc_complete - Complete an active ATA command 975 qc->result_tf.command = ATA_DRDY;
831 * @qc: Command to complete 976 qc->result_tf.feature = 0;
832 * @err_mask: ATA Status register contents
833 *
834 * Indicate to the mid and upper layers that an ATA
835 * command has completed, with either an ok or not-ok status.
836 *
837 * LOCKING:
838 * spin_lock_irqsave(host_set lock)
839 */
840static inline void ata_qc_complete(struct ata_queued_cmd *qc)
841{
842 if (unlikely(qc->flags & ATA_QCFLAG_EH_SCHEDULED))
843 return;
844
845 __ata_qc_complete(qc);
846} 977}
847 978
848/** 979/**
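The split above gives interrupt handlers a filtered lookup: once a command is marked ATA_QCFLAG_FAILED it belongs to EH, so ata_qc_from_tag() returns NULL for it and only __ata_qc_from_tag() can still reach it. The result_tf preload in ata_qc_reinit() means a command that completes without the driver reading back a taskfile still reports a sane DRDY status. A sketch of the resulting pattern (illustrative only):

#include <linux/libata.h>

/* Illustrative non-NCQ completion path under new-style EH: a stale
 * interrupt for a failed qc sees NULL here and is ignored; EH would use
 * __ata_qc_from_tag() to reclaim the command instead.
 */
static void example_irq_complete(struct ata_port *ap)
{
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);

	if (qc)
		ata_qc_complete(qc);
}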
@@ -921,28 +1052,6 @@ static inline u8 ata_irq_ack(struct ata_port *ap, unsigned int chk_drq)
921 return status; 1052 return status;
922} 1053}
923 1054
924static inline u32 scr_read(struct ata_port *ap, unsigned int reg)
925{
926 return ap->ops->scr_read(ap, reg);
927}
928
929static inline void scr_write(struct ata_port *ap, unsigned int reg, u32 val)
930{
931 ap->ops->scr_write(ap, reg, val);
932}
933
934static inline void scr_write_flush(struct ata_port *ap, unsigned int reg,
935 u32 val)
936{
937 ap->ops->scr_write(ap, reg, val);
938 (void) ap->ops->scr_read(ap, reg);
939}
940
941static inline unsigned int sata_dev_present(struct ata_port *ap)
942{
943 return ((scr_read(ap, SCR_STATUS) & 0xf) == 0x3) ? 1 : 0;
944}
945
946static inline int ata_try_flush_cache(const struct ata_device *dev) 1055static inline int ata_try_flush_cache(const struct ata_device *dev)
947{ 1056{
948 return ata_id_wcache_enabled(dev->id) || 1057 return ata_id_wcache_enabled(dev->id) ||
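The scr_read()/scr_write()/scr_write_flush()/sata_dev_present() inlines removed above called ->scr_read/->scr_write unconditionally; their checked replacements, sata_scr_*() and ata_port_online()/ata_port_offline(), were declared earlier in this diff. A before/after sketch for driver conversions:

#include <linux/libata.h>

/* Illustrative replacement for the removed sata_dev_present():
 * ata_port_online() reaches SCR_STATUS through the checked
 * sata_scr_read() path, so it is also safe on SCR-less hosts.
 */
static int example_dev_present(struct ata_port *ap)
{
	return ata_port_online(ap);	/* was: sata_dev_present(ap) */
}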
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index 1ace1b9fe537..88c6c4da6c05 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -151,5 +151,6 @@ extern struct scsi_cmnd *scsi_get_command(struct scsi_device *, gfp_t);
151extern void scsi_put_command(struct scsi_cmnd *); 151extern void scsi_put_command(struct scsi_cmnd *);
152extern void scsi_io_completion(struct scsi_cmnd *, unsigned int, unsigned int); 152extern void scsi_io_completion(struct scsi_cmnd *, unsigned int, unsigned int);
153extern void scsi_finish_command(struct scsi_cmnd *cmd); 153extern void scsi_finish_command(struct scsi_cmnd *cmd);
154extern void scsi_req_abort_cmd(struct scsi_cmnd *cmd);
154 155
155#endif /* _SCSI_SCSI_CMND_H */ 156#endif /* _SCSI_SCSI_CMND_H */
diff --git a/include/scsi/scsi_eh.h b/include/scsi/scsi_eh.h
index d160880b2a87..212c983a6a18 100644
--- a/include/scsi/scsi_eh.h
+++ b/include/scsi/scsi_eh.h
@@ -35,6 +35,7 @@ static inline int scsi_sense_valid(struct scsi_sense_hdr *sshdr)
35} 35}
36 36
37 37
38extern void scsi_eh_wakeup(struct Scsi_Host *shost);
38extern void scsi_eh_finish_cmd(struct scsi_cmnd *scmd, 39extern void scsi_eh_finish_cmd(struct scsi_cmnd *scmd,
39 struct list_head *done_q); 40 struct list_head *done_q);
40extern void scsi_eh_flush_done_q(struct list_head *done_q); 41extern void scsi_eh_flush_done_q(struct list_head *done_q);
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index de6ce541a046..a42efd6e4be8 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -472,6 +472,7 @@ struct Scsi_Host {
472 */ 472 */
473 unsigned int host_busy; /* commands actually active on low-level */ 473 unsigned int host_busy; /* commands actually active on low-level */
474 unsigned int host_failed; /* commands that failed. */ 474 unsigned int host_failed; /* commands that failed. */
475 unsigned int host_eh_scheduled; /* EH scheduled without command */
475 476
476 unsigned short host_no; /* Used for IOCTL_GET_IDLUN, /proc/scsi et al. */ 477 unsigned short host_no; /* Used for IOCTL_GET_IDLUN, /proc/scsi et al. */
477 int resetting; /* if set, it means that last_reset is a valid value */ 478 int resetting; /* if set, it means that last_reset is a valid value */
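host_eh_scheduled closes the last gap on the SCSI side: previously SCSI EH could only be entered via a failed command, while new-style libata EH also needs to run for port-level events with no scmd attached. Roughly how the exported pieces fit together; this is a paraphrase of what ata_port_schedule_eh() in libata-eh.c does, not the literal code:

#include <scsi/scsi_host.h>
#include <scsi/scsi_eh.h>

/* Paraphrased sketch: mark that EH has work even though host_failed may
 * be zero, then wake the SCSI EH thread.  scsi_eh_wakeup() expects the
 * host lock to be held, as in scsi_error.c.
 */
static void example_schedule_eh(struct Scsi_Host *shost)
{
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	shost->host_eh_scheduled++;
	scsi_eh_wakeup(shost);
	spin_unlock_irqrestore(shost->host_lock, flags);
}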