aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--drivers/scsi/ahci.c256
-rw-r--r--drivers/scsi/ata_piix.c14
-rw-r--r--drivers/scsi/libata-bmdma.c144
-rw-r--r--drivers/scsi/libata-core.c915
-rw-r--r--drivers/scsi/libata-eh.c1154
-rw-r--r--drivers/scsi/libata-scsi.c136
-rw-r--r--drivers/scsi/libata.h20
-rw-r--r--drivers/scsi/sata_mv.c22
-rw-r--r--drivers/scsi/sata_promise.c7
-rw-r--r--drivers/scsi/sata_sil.c57
-rw-r--r--drivers/scsi/sata_sil24.c350
-rw-r--r--drivers/scsi/sata_sx4.c7
-rw-r--r--drivers/scsi/scsi.c18
-rw-r--r--drivers/scsi/scsi_error.c3
-rw-r--r--drivers/scsi/scsi_lib.c2
-rw-r--r--drivers/scsi/scsi_priv.h1
-rw-r--r--include/linux/ata.h13
-rw-r--r--include/linux/libata.h238
-rw-r--r--include/scsi/scsi_cmnd.h1
-rw-r--r--include/scsi/scsi_eh.h1
-rw-r--r--include/scsi/scsi_host.h1
21 files changed, 2608 insertions, 752 deletions
diff --git a/drivers/scsi/ahci.c b/drivers/scsi/ahci.c
index d23f00230a76..35487e30b0ff 100644
--- a/drivers/scsi/ahci.c
+++ b/drivers/scsi/ahci.c
@@ -71,6 +71,7 @@ enum {
71 AHCI_CMD_CLR_BUSY = (1 << 10), 71 AHCI_CMD_CLR_BUSY = (1 << 10),
72 72
73 RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */ 73 RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */
74 RX_FIS_UNK = 0x60, /* offset of Unknown FIS data */
74 75
75 board_ahci = 0, 76 board_ahci = 0,
76 board_ahci_vt8251 = 1, 77 board_ahci_vt8251 = 1,
@@ -128,15 +129,16 @@ enum {
128 PORT_IRQ_PIOS_FIS = (1 << 1), /* PIO Setup FIS rx'd */ 129 PORT_IRQ_PIOS_FIS = (1 << 1), /* PIO Setup FIS rx'd */
129 PORT_IRQ_D2H_REG_FIS = (1 << 0), /* D2H Register FIS rx'd */ 130 PORT_IRQ_D2H_REG_FIS = (1 << 0), /* D2H Register FIS rx'd */
130 131
131 PORT_IRQ_FATAL = PORT_IRQ_TF_ERR | 132 PORT_IRQ_FREEZE = PORT_IRQ_HBUS_ERR |
132 PORT_IRQ_HBUS_ERR | 133 PORT_IRQ_IF_ERR |
133 PORT_IRQ_HBUS_DATA_ERR | 134 PORT_IRQ_CONNECT |
134 PORT_IRQ_IF_ERR, 135 PORT_IRQ_UNK_FIS,
135 DEF_PORT_IRQ = PORT_IRQ_FATAL | PORT_IRQ_PHYRDY | 136 PORT_IRQ_ERROR = PORT_IRQ_FREEZE |
136 PORT_IRQ_CONNECT | PORT_IRQ_SG_DONE | 137 PORT_IRQ_TF_ERR |
137 PORT_IRQ_UNK_FIS | PORT_IRQ_SDB_FIS | 138 PORT_IRQ_HBUS_DATA_ERR,
138 PORT_IRQ_DMAS_FIS | PORT_IRQ_PIOS_FIS | 139 DEF_PORT_IRQ = PORT_IRQ_ERROR | PORT_IRQ_SG_DONE |
139 PORT_IRQ_D2H_REG_FIS, 140 PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS |
141 PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS,
140 142
141 /* PORT_CMD bits */ 143 /* PORT_CMD bits */
142 PORT_CMD_ATAPI = (1 << 24), /* Device is ATAPI */ 144 PORT_CMD_ATAPI = (1 << 24), /* Device is ATAPI */
@@ -197,13 +199,15 @@ static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
197static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *regs); 199static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *regs);
198static int ahci_probe_reset(struct ata_port *ap, unsigned int *classes); 200static int ahci_probe_reset(struct ata_port *ap, unsigned int *classes);
199static void ahci_irq_clear(struct ata_port *ap); 201static void ahci_irq_clear(struct ata_port *ap);
200static void ahci_eng_timeout(struct ata_port *ap);
201static int ahci_port_start(struct ata_port *ap); 202static int ahci_port_start(struct ata_port *ap);
202static void ahci_port_stop(struct ata_port *ap); 203static void ahci_port_stop(struct ata_port *ap);
203static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf); 204static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
204static void ahci_qc_prep(struct ata_queued_cmd *qc); 205static void ahci_qc_prep(struct ata_queued_cmd *qc);
205static u8 ahci_check_status(struct ata_port *ap); 206static u8 ahci_check_status(struct ata_port *ap);
206static inline int ahci_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc); 207static void ahci_freeze(struct ata_port *ap);
208static void ahci_thaw(struct ata_port *ap);
209static void ahci_error_handler(struct ata_port *ap);
210static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
207static void ahci_remove_one (struct pci_dev *pdev); 211static void ahci_remove_one (struct pci_dev *pdev);
208 212
209static struct scsi_host_template ahci_sht = { 213static struct scsi_host_template ahci_sht = {
@@ -237,14 +241,18 @@ static const struct ata_port_operations ahci_ops = {
237 .qc_prep = ahci_qc_prep, 241 .qc_prep = ahci_qc_prep,
238 .qc_issue = ahci_qc_issue, 242 .qc_issue = ahci_qc_issue,
239 243
240 .eng_timeout = ahci_eng_timeout,
241
242 .irq_handler = ahci_interrupt, 244 .irq_handler = ahci_interrupt,
243 .irq_clear = ahci_irq_clear, 245 .irq_clear = ahci_irq_clear,
244 246
245 .scr_read = ahci_scr_read, 247 .scr_read = ahci_scr_read,
246 .scr_write = ahci_scr_write, 248 .scr_write = ahci_scr_write,
247 249
250 .freeze = ahci_freeze,
251 .thaw = ahci_thaw,
252
253 .error_handler = ahci_error_handler,
254 .post_internal_cmd = ahci_post_internal_cmd,
255
248 .port_start = ahci_port_start, 256 .port_start = ahci_port_start,
249 .port_stop = ahci_port_stop, 257 .port_stop = ahci_port_stop,
250}; 258};
@@ -567,7 +575,7 @@ static int ahci_softreset(struct ata_port *ap, unsigned int *class)
567 575
568 DPRINTK("ENTER\n"); 576 DPRINTK("ENTER\n");
569 577
570 if (!sata_dev_present(ap)) { 578 if (ata_port_offline(ap)) {
571 DPRINTK("PHY reports no device\n"); 579 DPRINTK("PHY reports no device\n");
572 *class = ATA_DEV_NONE; 580 *class = ATA_DEV_NONE;
573 return 0; 581 return 0;
@@ -597,7 +605,7 @@ static int ahci_softreset(struct ata_port *ap, unsigned int *class)
597 /* restart engine */ 605 /* restart engine */
598 ahci_start_engine(ap); 606 ahci_start_engine(ap);
599 607
600 ata_tf_init(ap, &tf, 0); 608 ata_tf_init(ap->device, &tf);
601 fis = pp->cmd_tbl; 609 fis = pp->cmd_tbl;
602 610
603 /* issue the first D2H Register FIS */ 611 /* issue the first D2H Register FIS */
@@ -640,7 +648,7 @@ static int ahci_softreset(struct ata_port *ap, unsigned int *class)
640 msleep(150); 648 msleep(150);
641 649
642 *class = ATA_DEV_NONE; 650 *class = ATA_DEV_NONE;
643 if (sata_dev_present(ap)) { 651 if (ata_port_online(ap)) {
644 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) { 652 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
645 rc = -EIO; 653 rc = -EIO;
646 reason = "device not ready"; 654 reason = "device not ready";
@@ -655,8 +663,7 @@ static int ahci_softreset(struct ata_port *ap, unsigned int *class)
655 fail_restart: 663 fail_restart:
656 ahci_start_engine(ap); 664 ahci_start_engine(ap);
657 fail: 665 fail:
658 printk(KERN_ERR "ata%u: softreset failed (%s)\n", 666 ata_port_printk(ap, KERN_ERR, "softreset failed (%s)\n", reason);
659 ap->id, reason);
660 return rc; 667 return rc;
661} 668}
662 669
@@ -670,7 +677,7 @@ static int ahci_hardreset(struct ata_port *ap, unsigned int *class)
670 rc = sata_std_hardreset(ap, class); 677 rc = sata_std_hardreset(ap, class);
671 ahci_start_engine(ap); 678 ahci_start_engine(ap);
672 679
673 if (rc == 0) 680 if (rc == 0 && ata_port_online(ap))
674 *class = ahci_dev_classify(ap); 681 *class = ahci_dev_classify(ap);
675 if (*class == ATA_DEV_UNKNOWN) 682 if (*class == ATA_DEV_UNKNOWN)
676 *class = ATA_DEV_NONE; 683 *class = ATA_DEV_NONE;
@@ -790,109 +797,108 @@ static void ahci_qc_prep(struct ata_queued_cmd *qc)
790 ahci_fill_cmd_slot(pp, opts); 797 ahci_fill_cmd_slot(pp, opts);
791} 798}
792 799
793static void ahci_restart_port(struct ata_port *ap, u32 irq_stat) 800static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
794{ 801{
795 void __iomem *mmio = ap->host_set->mmio_base; 802 struct ahci_port_priv *pp = ap->private_data;
796 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no); 803 struct ata_eh_info *ehi = &ap->eh_info;
797 u32 tmp; 804 unsigned int err_mask = 0, action = 0;
805 struct ata_queued_cmd *qc;
806 u32 serror;
798 807
799 if ((ap->device[0].class != ATA_DEV_ATAPI) || 808 ata_ehi_clear_desc(ehi);
800 ((irq_stat & PORT_IRQ_TF_ERR) == 0))
801 printk(KERN_WARNING "ata%u: port reset, "
802 "p_is %x is %x pis %x cmd %x tf %x ss %x se %x\n",
803 ap->id,
804 irq_stat,
805 readl(mmio + HOST_IRQ_STAT),
806 readl(port_mmio + PORT_IRQ_STAT),
807 readl(port_mmio + PORT_CMD),
808 readl(port_mmio + PORT_TFDATA),
809 readl(port_mmio + PORT_SCR_STAT),
810 readl(port_mmio + PORT_SCR_ERR));
811
812 /* stop DMA */
813 ahci_stop_engine(ap);
814 809
815 /* clear SATA phy error, if any */ 810 /* AHCI needs SError cleared; otherwise, it might lock up */
816 tmp = readl(port_mmio + PORT_SCR_ERR); 811 serror = ahci_scr_read(ap, SCR_ERROR);
817 writel(tmp, port_mmio + PORT_SCR_ERR); 812 ahci_scr_write(ap, SCR_ERROR, serror);
818 813
819 /* if DRQ/BSY is set, device needs to be reset. 814 /* analyze @irq_stat */
820 * if so, issue COMRESET 815 ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);
821 */ 816
822 tmp = readl(port_mmio + PORT_TFDATA); 817 if (irq_stat & PORT_IRQ_TF_ERR)
823 if (tmp & (ATA_BUSY | ATA_DRQ)) { 818 err_mask |= AC_ERR_DEV;
824 writel(0x301, port_mmio + PORT_SCR_CTL); 819
825 readl(port_mmio + PORT_SCR_CTL); /* flush */ 820 if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) {
826 udelay(10); 821 err_mask |= AC_ERR_HOST_BUS;
827 writel(0x300, port_mmio + PORT_SCR_CTL); 822 action |= ATA_EH_SOFTRESET;
828 readl(port_mmio + PORT_SCR_CTL); /* flush */
829 } 823 }
830 824
831 /* re-start DMA */ 825 if (irq_stat & PORT_IRQ_IF_ERR) {
832 ahci_start_engine(ap); 826 err_mask |= AC_ERR_ATA_BUS;
833} 827 action |= ATA_EH_SOFTRESET;
828 ata_ehi_push_desc(ehi, ", interface fatal error");
829 }
834 830
835static void ahci_eng_timeout(struct ata_port *ap) 831 if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) {
836{ 832 err_mask |= AC_ERR_ATA_BUS;
837 struct ata_host_set *host_set = ap->host_set; 833 action |= ATA_EH_SOFTRESET;
838 void __iomem *mmio = host_set->mmio_base; 834 ata_ehi_push_desc(ehi, ", %s", irq_stat & PORT_IRQ_CONNECT ?
839 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no); 835 "connection status changed" : "PHY RDY changed");
840 struct ata_queued_cmd *qc; 836 }
841 unsigned long flags; 837
838 if (irq_stat & PORT_IRQ_UNK_FIS) {
839 u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK);
842 840
843 printk(KERN_WARNING "ata%u: handling error/timeout\n", ap->id); 841 err_mask |= AC_ERR_HSM;
842 action |= ATA_EH_SOFTRESET;
843 ata_ehi_push_desc(ehi, ", unknown FIS %08x %08x %08x %08x",
844 unk[0], unk[1], unk[2], unk[3]);
845 }
844 846
845 spin_lock_irqsave(&host_set->lock, flags); 847 /* okay, let's hand over to EH */
848 ehi->serror |= serror;
849 ehi->action |= action;
846 850
847 ahci_restart_port(ap, readl(port_mmio + PORT_IRQ_STAT));
848 qc = ata_qc_from_tag(ap, ap->active_tag); 851 qc = ata_qc_from_tag(ap, ap->active_tag);
849 qc->err_mask |= AC_ERR_TIMEOUT; 852 if (qc)
850 853 qc->err_mask |= err_mask;
851 spin_unlock_irqrestore(&host_set->lock, flags); 854 else
855 ehi->err_mask |= err_mask;
852 856
853 ata_eh_qc_complete(qc); 857 if (irq_stat & PORT_IRQ_FREEZE)
858 ata_port_freeze(ap);
859 else
860 ata_port_abort(ap);
854} 861}
855 862
856static inline int ahci_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc) 863static void ahci_host_intr(struct ata_port *ap)
857{ 864{
858 void __iomem *mmio = ap->host_set->mmio_base; 865 void __iomem *mmio = ap->host_set->mmio_base;
859 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no); 866 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
860 u32 status, serr, ci; 867 struct ata_queued_cmd *qc;
861 868 u32 status, ci;
862 serr = readl(port_mmio + PORT_SCR_ERR);
863 writel(serr, port_mmio + PORT_SCR_ERR);
864 869
865 status = readl(port_mmio + PORT_IRQ_STAT); 870 status = readl(port_mmio + PORT_IRQ_STAT);
866 writel(status, port_mmio + PORT_IRQ_STAT); 871 writel(status, port_mmio + PORT_IRQ_STAT);
867 872
868 ci = readl(port_mmio + PORT_CMD_ISSUE); 873 if (unlikely(status & PORT_IRQ_ERROR)) {
869 if (likely((ci & 0x1) == 0)) { 874 ahci_error_intr(ap, status);
870 if (qc) { 875 return;
871 WARN_ON(qc->err_mask); 876 }
877
878 if ((qc = ata_qc_from_tag(ap, ap->active_tag))) {
879 ci = readl(port_mmio + PORT_CMD_ISSUE);
880 if ((ci & 0x1) == 0) {
872 ata_qc_complete(qc); 881 ata_qc_complete(qc);
873 qc = NULL; 882 return;
874 } 883 }
875 } 884 }
876 885
877 if (status & PORT_IRQ_FATAL) { 886 /* hmmm... a spurious interupt */
878 unsigned int err_mask;
879 if (status & PORT_IRQ_TF_ERR)
880 err_mask = AC_ERR_DEV;
881 else if (status & PORT_IRQ_IF_ERR)
882 err_mask = AC_ERR_ATA_BUS;
883 else
884 err_mask = AC_ERR_HOST_BUS;
885 887
886 /* command processing has stopped due to error; restart */ 888 /* ignore interim PIO setup fis interrupts */
887 ahci_restart_port(ap, status); 889 if (ata_tag_valid(ap->active_tag)) {
890 struct ata_queued_cmd *qc =
891 ata_qc_from_tag(ap, ap->active_tag);
888 892
889 if (qc) { 893 if (qc && qc->tf.protocol == ATA_PROT_PIO &&
890 qc->err_mask |= err_mask; 894 (status & PORT_IRQ_PIOS_FIS))
891 ata_qc_complete(qc); 895 return;
892 }
893 } 896 }
894 897
895 return 1; 898 if (ata_ratelimit())
899 ata_port_printk(ap, KERN_INFO, "spurious interrupt "
900 "(irq_stat 0x%x active_tag %d)\n",
901 status, ap->active_tag);
896} 902}
897 903
898static void ahci_irq_clear(struct ata_port *ap) 904static void ahci_irq_clear(struct ata_port *ap)
@@ -929,14 +935,7 @@ static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *
929 935
930 ap = host_set->ports[i]; 936 ap = host_set->ports[i];
931 if (ap) { 937 if (ap) {
932 struct ata_queued_cmd *qc; 938 ahci_host_intr(ap);
933 qc = ata_qc_from_tag(ap, ap->active_tag);
934 if (!ahci_host_intr(ap, qc))
935 if (ata_ratelimit())
936 dev_printk(KERN_WARNING, host_set->dev,
937 "unhandled interrupt on port %u\n",
938 i);
939
940 VPRINTK("port %u\n", i); 939 VPRINTK("port %u\n", i);
941 } else { 940 } else {
942 VPRINTK("port %u (no irq)\n", i); 941 VPRINTK("port %u (no irq)\n", i);
@@ -953,7 +952,7 @@ static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *
953 handled = 1; 952 handled = 1;
954 } 953 }
955 954
956 spin_unlock(&host_set->lock); 955 spin_unlock(&host_set->lock);
957 956
958 VPRINTK("EXIT\n"); 957 VPRINTK("EXIT\n");
959 958
@@ -971,6 +970,56 @@ static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
971 return 0; 970 return 0;
972} 971}
973 972
973static void ahci_freeze(struct ata_port *ap)
974{
975 void __iomem *mmio = ap->host_set->mmio_base;
976 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
977
978 /* turn IRQ off */
979 writel(0, port_mmio + PORT_IRQ_MASK);
980}
981
982static void ahci_thaw(struct ata_port *ap)
983{
984 void __iomem *mmio = ap->host_set->mmio_base;
985 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
986 u32 tmp;
987
988 /* clear IRQ */
989 tmp = readl(port_mmio + PORT_IRQ_STAT);
990 writel(tmp, port_mmio + PORT_IRQ_STAT);
991 writel(1 << ap->id, mmio + HOST_IRQ_STAT);
992
993 /* turn IRQ back on */
994 writel(DEF_PORT_IRQ, port_mmio + PORT_IRQ_MASK);
995}
996
997static void ahci_error_handler(struct ata_port *ap)
998{
999 if (!(ap->flags & ATA_FLAG_FROZEN)) {
1000 /* restart engine */
1001 ahci_stop_engine(ap);
1002 ahci_start_engine(ap);
1003 }
1004
1005 /* perform recovery */
1006 ata_do_eh(ap, ahci_softreset, ahci_hardreset, ahci_postreset);
1007}
1008
1009static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
1010{
1011 struct ata_port *ap = qc->ap;
1012
1013 if (qc->flags & ATA_QCFLAG_FAILED)
1014 qc->err_mask |= AC_ERR_OTHER;
1015
1016 if (qc->err_mask) {
1017 /* make DMA engine forget about the failed command */
1018 ahci_stop_engine(ap);
1019 ahci_start_engine(ap);
1020 }
1021}
1022
974static void ahci_setup_port(struct ata_ioports *port, unsigned long base, 1023static void ahci_setup_port(struct ata_ioports *port, unsigned long base,
975 unsigned int port_idx) 1024 unsigned int port_idx)
976{ 1025{
@@ -1115,9 +1164,6 @@ static int ahci_host_init(struct ata_probe_ent *probe_ent)
1115 writel(tmp, port_mmio + PORT_IRQ_STAT); 1164 writel(tmp, port_mmio + PORT_IRQ_STAT);
1116 1165
1117 writel(1 << i, mmio + HOST_IRQ_STAT); 1166 writel(1 << i, mmio + HOST_IRQ_STAT);
1118
1119 /* set irq mask (enables interrupts) */
1120 writel(DEF_PORT_IRQ, port_mmio + PORT_IRQ_MASK);
1121 } 1167 }
1122 1168
1123 tmp = readl(mmio + HOST_CTL); 1169 tmp = readl(mmio + HOST_CTL);
diff --git a/drivers/scsi/ata_piix.c b/drivers/scsi/ata_piix.c
index 62dabf74188e..e3184a77a600 100644
--- a/drivers/scsi/ata_piix.c
+++ b/drivers/scsi/ata_piix.c
@@ -243,7 +243,10 @@ static const struct ata_port_operations piix_pata_ops = {
243 .qc_prep = ata_qc_prep, 243 .qc_prep = ata_qc_prep,
244 .qc_issue = ata_qc_issue_prot, 244 .qc_issue = ata_qc_issue_prot,
245 245
246 .eng_timeout = ata_eng_timeout, 246 .freeze = ata_bmdma_freeze,
247 .thaw = ata_bmdma_thaw,
248 .error_handler = ata_bmdma_error_handler,
249 .post_internal_cmd = ata_bmdma_post_internal_cmd,
247 250
248 .irq_handler = ata_interrupt, 251 .irq_handler = ata_interrupt,
249 .irq_clear = ata_bmdma_irq_clear, 252 .irq_clear = ata_bmdma_irq_clear,
@@ -271,7 +274,10 @@ static const struct ata_port_operations piix_sata_ops = {
271 .qc_prep = ata_qc_prep, 274 .qc_prep = ata_qc_prep,
272 .qc_issue = ata_qc_issue_prot, 275 .qc_issue = ata_qc_issue_prot,
273 276
274 .eng_timeout = ata_eng_timeout, 277 .freeze = ata_bmdma_freeze,
278 .thaw = ata_bmdma_thaw,
279 .error_handler = ata_bmdma_error_handler,
280 .post_internal_cmd = ata_bmdma_post_internal_cmd,
275 281
276 .irq_handler = ata_interrupt, 282 .irq_handler = ata_interrupt,
277 .irq_clear = ata_bmdma_irq_clear, 283 .irq_clear = ata_bmdma_irq_clear,
@@ -484,7 +490,7 @@ static int piix_pata_probe_reset(struct ata_port *ap, unsigned int *classes)
484 struct pci_dev *pdev = to_pci_dev(ap->host_set->dev); 490 struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
485 491
486 if (!pci_test_config_bits(pdev, &piix_enable_bits[ap->hard_port_no])) { 492 if (!pci_test_config_bits(pdev, &piix_enable_bits[ap->hard_port_no])) {
487 printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id); 493 ata_port_printk(ap, KERN_INFO, "port disabled. ignoring.\n");
488 return 0; 494 return 0;
489 } 495 }
490 496
@@ -565,7 +571,7 @@ static unsigned int piix_sata_probe (struct ata_port *ap)
565static int piix_sata_probe_reset(struct ata_port *ap, unsigned int *classes) 571static int piix_sata_probe_reset(struct ata_port *ap, unsigned int *classes)
566{ 572{
567 if (!piix_sata_probe(ap)) { 573 if (!piix_sata_probe(ap)) {
568 printk(KERN_INFO "ata%u: SATA port has no device.\n", ap->id); 574 ata_port_printk(ap, KERN_INFO, "SATA port has no device.\n");
569 return 0; 575 return 0;
570 } 576 }
571 577
diff --git a/drivers/scsi/libata-bmdma.c b/drivers/scsi/libata-bmdma.c
index 835dff0bafdc..49eff18a67e3 100644
--- a/drivers/scsi/libata-bmdma.c
+++ b/drivers/scsi/libata-bmdma.c
@@ -652,6 +652,150 @@ void ata_bmdma_stop(struct ata_queued_cmd *qc)
652 ata_altstatus(ap); /* dummy read */ 652 ata_altstatus(ap); /* dummy read */
653} 653}
654 654
655/**
656 * ata_bmdma_freeze - Freeze BMDMA controller port
657 * @ap: port to freeze
658 *
659 * Freeze BMDMA controller port.
660 *
661 * LOCKING:
662 * Inherited from caller.
663 */
664void ata_bmdma_freeze(struct ata_port *ap)
665{
666 struct ata_ioports *ioaddr = &ap->ioaddr;
667
668 ap->ctl |= ATA_NIEN;
669 ap->last_ctl = ap->ctl;
670
671 if (ap->flags & ATA_FLAG_MMIO)
672 writeb(ap->ctl, (void __iomem *)ioaddr->ctl_addr);
673 else
674 outb(ap->ctl, ioaddr->ctl_addr);
675}
676
677/**
678 * ata_bmdma_thaw - Thaw BMDMA controller port
679 * @ap: port to thaw
680 *
681 * Thaw BMDMA controller port.
682 *
683 * LOCKING:
684 * Inherited from caller.
685 */
686void ata_bmdma_thaw(struct ata_port *ap)
687{
688 /* clear & re-enable interrupts */
689 ata_chk_status(ap);
690 ap->ops->irq_clear(ap);
691 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
692 ata_irq_on(ap);
693}
694
695/**
696 * ata_bmdma_drive_eh - Perform EH with given methods for BMDMA controller
697 * @ap: port to handle error for
698 * @softreset: softreset method (can be NULL)
699 * @hardreset: hardreset method (can be NULL)
700 * @postreset: postreset method (can be NULL)
701 *
702 * Handle error for ATA BMDMA controller. It can handle both
703 * PATA and SATA controllers. Many controllers should be able to
704 * use this EH as-is or with some added handling before and
705 * after.
706 *
707 * This function is intended to be used for constructing
708 * ->error_handler callback by low level drivers.
709 *
710 * LOCKING:
711 * Kernel thread context (may sleep)
712 */
713void ata_bmdma_drive_eh(struct ata_port *ap, ata_reset_fn_t softreset,
714 ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
715{
716 struct ata_host_set *host_set = ap->host_set;
717 struct ata_eh_context *ehc = &ap->eh_context;
718 struct ata_queued_cmd *qc;
719 unsigned long flags;
720 int thaw = 0;
721
722 qc = __ata_qc_from_tag(ap, ap->active_tag);
723 if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
724 qc = NULL;
725
726 /* reset PIO HSM and stop DMA engine */
727 spin_lock_irqsave(&host_set->lock, flags);
728
729 ap->flags &= ~ATA_FLAG_NOINTR;
730 ap->hsm_task_state = HSM_ST_IDLE;
731
732 if (qc && (qc->tf.protocol == ATA_PROT_DMA ||
733 qc->tf.protocol == ATA_PROT_ATAPI_DMA)) {
734 u8 host_stat;
735
736 host_stat = ata_bmdma_status(ap);
737
738 ata_ehi_push_desc(&ehc->i, "BMDMA stat 0x%x", host_stat);
739
740 /* BMDMA controllers indicate host bus error by
741 * setting DMA_ERR bit and timing out. As it wasn't
742 * really a timeout event, adjust error mask and
743 * cancel frozen state.
744 */
745 if (qc->err_mask == AC_ERR_TIMEOUT && host_stat & ATA_DMA_ERR) {
746 qc->err_mask = AC_ERR_HOST_BUS;
747 thaw = 1;
748 }
749
750 ap->ops->bmdma_stop(qc);
751 }
752
753 ata_altstatus(ap);
754 ata_chk_status(ap);
755 ap->ops->irq_clear(ap);
756
757 spin_unlock_irqrestore(&host_set->lock, flags);
758
759 if (thaw)
760 ata_eh_thaw_port(ap);
761
762 /* PIO and DMA engines have been stopped, perform recovery */
763 ata_do_eh(ap, softreset, hardreset, postreset);
764}
765
766/**
767 * ata_bmdma_error_handler - Stock error handler for BMDMA controller
768 * @ap: port to handle error for
769 *
770 * Stock error handler for BMDMA controller.
771 *
772 * LOCKING:
773 * Kernel thread context (may sleep)
774 */
775void ata_bmdma_error_handler(struct ata_port *ap)
776{
777 ata_reset_fn_t hardreset;
778
779 hardreset = NULL;
780 if (sata_scr_valid(ap))
781 hardreset = sata_std_hardreset;
782
783 ata_bmdma_drive_eh(ap, ata_std_softreset, hardreset, ata_std_postreset);
784}
785
786/**
787 * ata_bmdma_post_internal_cmd - Stock post_internal_cmd for
788 * BMDMA controller
789 * @qc: internal command to clean up
790 *
791 * LOCKING:
792 * Kernel thread context (may sleep)
793 */
794void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc)
795{
796 ata_bmdma_stop(qc);
797}
798
655#ifdef CONFIG_PCI 799#ifdef CONFIG_PCI
656static struct ata_probe_ent * 800static struct ata_probe_ent *
657ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port) 801ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index 5d38a6cc5736..c859b96b891a 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -61,13 +61,10 @@
61 61
62#include "libata.h" 62#include "libata.h"
63 63
64static unsigned int ata_dev_init_params(struct ata_port *ap, 64static unsigned int ata_dev_init_params(struct ata_device *dev,
65 struct ata_device *dev, 65 u16 heads, u16 sectors);
66 u16 heads, 66static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
67 u16 sectors); 67static void ata_dev_xfermask(struct ata_device *dev);
68static unsigned int ata_dev_set_xfermode(struct ata_port *ap,
69 struct ata_device *dev);
70static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev);
71 68
72static unsigned int ata_unique_id = 1; 69static unsigned int ata_unique_id = 1;
73static struct workqueue_struct *ata_wq; 70static struct workqueue_struct *ata_wq;
@@ -412,11 +409,10 @@ static const char *sata_spd_string(unsigned int spd)
412 return spd_str[spd - 1]; 409 return spd_str[spd - 1];
413} 410}
414 411
415void ata_dev_disable(struct ata_port *ap, struct ata_device *dev) 412void ata_dev_disable(struct ata_device *dev)
416{ 413{
417 if (ata_dev_enabled(dev)) { 414 if (ata_dev_enabled(dev)) {
418 printk(KERN_WARNING "ata%u: dev %u disabled\n", 415 ata_dev_printk(dev, KERN_WARNING, "disabled\n");
419 ap->id, dev->devno);
420 dev->class++; 416 dev->class++;
421 } 417 }
422} 418}
@@ -955,13 +951,11 @@ void ata_qc_complete_internal(struct ata_queued_cmd *qc)
955{ 951{
956 struct completion *waiting = qc->private_data; 952 struct completion *waiting = qc->private_data;
957 953
958 qc->ap->ops->tf_read(qc->ap, &qc->tf);
959 complete(waiting); 954 complete(waiting);
960} 955}
961 956
962/** 957/**
963 * ata_exec_internal - execute libata internal command 958 * ata_exec_internal - execute libata internal command
964 * @ap: Port to which the command is sent
965 * @dev: Device to which the command is sent 959 * @dev: Device to which the command is sent
966 * @tf: Taskfile registers for the command and the result 960 * @tf: Taskfile registers for the command and the result
967 * @cdb: CDB for packet command 961 * @cdb: CDB for packet command
@@ -979,24 +973,57 @@ void ata_qc_complete_internal(struct ata_queued_cmd *qc)
979 * None. Should be called with kernel context, might sleep. 973 * None. Should be called with kernel context, might sleep.
980 */ 974 */
981 975
982unsigned ata_exec_internal(struct ata_port *ap, struct ata_device *dev, 976unsigned ata_exec_internal(struct ata_device *dev,
983 struct ata_taskfile *tf, const u8 *cdb, 977 struct ata_taskfile *tf, const u8 *cdb,
984 int dma_dir, void *buf, unsigned int buflen) 978 int dma_dir, void *buf, unsigned int buflen)
985{ 979{
980 struct ata_port *ap = dev->ap;
986 u8 command = tf->command; 981 u8 command = tf->command;
987 struct ata_queued_cmd *qc; 982 struct ata_queued_cmd *qc;
983 unsigned int tag, preempted_tag;
988 DECLARE_COMPLETION(wait); 984 DECLARE_COMPLETION(wait);
989 unsigned long flags; 985 unsigned long flags;
990 unsigned int err_mask; 986 unsigned int err_mask;
987 int rc;
991 988
992 spin_lock_irqsave(&ap->host_set->lock, flags); 989 spin_lock_irqsave(&ap->host_set->lock, flags);
993 990
994 qc = ata_qc_new_init(ap, dev); 991 /* no internal command while frozen */
995 BUG_ON(qc == NULL); 992 if (ap->flags & ATA_FLAG_FROZEN) {
993 spin_unlock_irqrestore(&ap->host_set->lock, flags);
994 return AC_ERR_SYSTEM;
995 }
996
997 /* initialize internal qc */
998
999 /* XXX: Tag 0 is used for drivers with legacy EH as some
1000 * drivers choke if any other tag is given. This breaks
1001 * ata_tag_internal() test for those drivers. Don't use new
1002 * EH stuff without converting to it.
1003 */
1004 if (ap->ops->error_handler)
1005 tag = ATA_TAG_INTERNAL;
1006 else
1007 tag = 0;
1008
1009 if (test_and_set_bit(tag, &ap->qactive))
1010 BUG();
1011 qc = __ata_qc_from_tag(ap, tag);
1012
1013 qc->tag = tag;
1014 qc->scsicmd = NULL;
1015 qc->ap = ap;
1016 qc->dev = dev;
1017 ata_qc_reinit(qc);
996 1018
1019 preempted_tag = ap->active_tag;
1020 ap->active_tag = ATA_TAG_POISON;
1021
1022 /* prepare & issue qc */
997 qc->tf = *tf; 1023 qc->tf = *tf;
998 if (cdb) 1024 if (cdb)
999 memcpy(qc->cdb, cdb, ATAPI_CDB_LEN); 1025 memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1026 qc->flags |= ATA_QCFLAG_RESULT_TF;
1000 qc->dma_dir = dma_dir; 1027 qc->dma_dir = dma_dir;
1001 if (dma_dir != DMA_NONE) { 1028 if (dma_dir != DMA_NONE) {
1002 ata_sg_init_one(qc, buf, buflen); 1029 ata_sg_init_one(qc, buf, buflen);
@@ -1010,31 +1037,51 @@ unsigned ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
1010 1037
1011 spin_unlock_irqrestore(&ap->host_set->lock, flags); 1038 spin_unlock_irqrestore(&ap->host_set->lock, flags);
1012 1039
1013 if (!wait_for_completion_timeout(&wait, ATA_TMOUT_INTERNAL)) { 1040 rc = wait_for_completion_timeout(&wait, ATA_TMOUT_INTERNAL);
1014 ata_port_flush_task(ap); 1041
1042 ata_port_flush_task(ap);
1015 1043
1044 if (!rc) {
1016 spin_lock_irqsave(&ap->host_set->lock, flags); 1045 spin_lock_irqsave(&ap->host_set->lock, flags);
1017 1046
1018 /* We're racing with irq here. If we lose, the 1047 /* We're racing with irq here. If we lose, the
1019 * following test prevents us from completing the qc 1048 * following test prevents us from completing the qc
1020 * again. If completion irq occurs after here but 1049 * twice. If we win, the port is frozen and will be
1021 * before the caller cleans up, it will result in a 1050 * cleaned up by ->post_internal_cmd().
1022 * spurious interrupt. We can live with that.
1023 */ 1051 */
1024 if (qc->flags & ATA_QCFLAG_ACTIVE) { 1052 if (qc->flags & ATA_QCFLAG_ACTIVE) {
1025 qc->err_mask = AC_ERR_TIMEOUT; 1053 qc->err_mask |= AC_ERR_TIMEOUT;
1026 ata_qc_complete(qc); 1054
1027 printk(KERN_WARNING "ata%u: qc timeout (cmd 0x%x)\n", 1055 if (ap->ops->error_handler)
1028 ap->id, command); 1056 ata_port_freeze(ap);
1057 else
1058 ata_qc_complete(qc);
1059
1060 ata_dev_printk(dev, KERN_WARNING,
1061 "qc timeout (cmd 0x%x)\n", command);
1029 } 1062 }
1030 1063
1031 spin_unlock_irqrestore(&ap->host_set->lock, flags); 1064 spin_unlock_irqrestore(&ap->host_set->lock, flags);
1032 } 1065 }
1033 1066
1034 *tf = qc->tf; 1067 /* do post_internal_cmd */
1068 if (ap->ops->post_internal_cmd)
1069 ap->ops->post_internal_cmd(qc);
1070
1071 if (qc->flags & ATA_QCFLAG_FAILED && !qc->err_mask) {
1072 ata_dev_printk(dev, KERN_WARNING, "zero err_mask for failed "
1073 "internal command, assuming AC_ERR_OTHER\n");
1074 qc->err_mask |= AC_ERR_OTHER;
1075 }
1076
1077 /* finish up */
1078 spin_lock_irqsave(&ap->host_set->lock, flags);
1079
1080 *tf = qc->result_tf;
1035 err_mask = qc->err_mask; 1081 err_mask = qc->err_mask;
1036 1082
1037 ata_qc_free(qc); 1083 ata_qc_free(qc);
1084 ap->active_tag = preempted_tag;
1038 1085
1039 /* XXX - Some LLDDs (sata_mv) disable port on command failure. 1086 /* XXX - Some LLDDs (sata_mv) disable port on command failure.
1040 * Until those drivers are fixed, we detect the condition 1087 * Until those drivers are fixed, we detect the condition
@@ -1052,6 +1099,8 @@ unsigned ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
1052 ata_port_probe(ap); 1099 ata_port_probe(ap);
1053 } 1100 }
1054 1101
1102 spin_unlock_irqrestore(&ap->host_set->lock, flags);
1103
1055 return err_mask; 1104 return err_mask;
1056} 1105}
1057 1106
@@ -1090,11 +1139,10 @@ unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1090 1139
1091/** 1140/**
1092 * ata_dev_read_id - Read ID data from the specified device 1141 * ata_dev_read_id - Read ID data from the specified device
1093 * @ap: port on which target device resides
1094 * @dev: target device 1142 * @dev: target device
1095 * @p_class: pointer to class of the target device (may be changed) 1143 * @p_class: pointer to class of the target device (may be changed)
1096 * @post_reset: is this read ID post-reset? 1144 * @post_reset: is this read ID post-reset?
1097 * @p_id: read IDENTIFY page (newly allocated) 1145 * @id: buffer to read IDENTIFY data into
1098 * 1146 *
1099 * Read ID data from the specified device. ATA_CMD_ID_ATA is 1147 * Read ID data from the specified device. ATA_CMD_ID_ATA is
1100 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI 1148 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
@@ -1107,13 +1155,13 @@ unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1107 * RETURNS: 1155 * RETURNS:
1108 * 0 on success, -errno otherwise. 1156 * 0 on success, -errno otherwise.
1109 */ 1157 */
1110static int ata_dev_read_id(struct ata_port *ap, struct ata_device *dev, 1158static int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1111 unsigned int *p_class, int post_reset, u16 **p_id) 1159 int post_reset, u16 *id)
1112{ 1160{
1161 struct ata_port *ap = dev->ap;
1113 unsigned int class = *p_class; 1162 unsigned int class = *p_class;
1114 struct ata_taskfile tf; 1163 struct ata_taskfile tf;
1115 unsigned int err_mask = 0; 1164 unsigned int err_mask = 0;
1116 u16 *id;
1117 const char *reason; 1165 const char *reason;
1118 int rc; 1166 int rc;
1119 1167
@@ -1121,15 +1169,8 @@ static int ata_dev_read_id(struct ata_port *ap, struct ata_device *dev,
1121 1169
1122 ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */ 1170 ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
1123 1171
1124 id = kmalloc(sizeof(id[0]) * ATA_ID_WORDS, GFP_KERNEL);
1125 if (id == NULL) {
1126 rc = -ENOMEM;
1127 reason = "out of memory";
1128 goto err_out;
1129 }
1130
1131 retry: 1172 retry:
1132 ata_tf_init(ap, &tf, dev->devno); 1173 ata_tf_init(dev, &tf);
1133 1174
1134 switch (class) { 1175 switch (class) {
1135 case ATA_DEV_ATA: 1176 case ATA_DEV_ATA:
@@ -1146,7 +1187,7 @@ static int ata_dev_read_id(struct ata_port *ap, struct ata_device *dev,
1146 1187
1147 tf.protocol = ATA_PROT_PIO; 1188 tf.protocol = ATA_PROT_PIO;
1148 1189
1149 err_mask = ata_exec_internal(ap, dev, &tf, NULL, DMA_FROM_DEVICE, 1190 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
1150 id, sizeof(id[0]) * ATA_ID_WORDS); 1191 id, sizeof(id[0]) * ATA_ID_WORDS);
1151 if (err_mask) { 1192 if (err_mask) {
1152 rc = -EIO; 1193 rc = -EIO;
@@ -1173,7 +1214,7 @@ static int ata_dev_read_id(struct ata_port *ap, struct ata_device *dev,
1173 * Some drives were very specific about that exact sequence. 1214 * Some drives were very specific about that exact sequence.
1174 */ 1215 */
1175 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) { 1216 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
1176 err_mask = ata_dev_init_params(ap, dev, id[3], id[6]); 1217 err_mask = ata_dev_init_params(dev, id[3], id[6]);
1177 if (err_mask) { 1218 if (err_mask) {
1178 rc = -EIO; 1219 rc = -EIO;
1179 reason = "INIT_DEV_PARAMS failed"; 1220 reason = "INIT_DEV_PARAMS failed";
@@ -1189,25 +1230,22 @@ static int ata_dev_read_id(struct ata_port *ap, struct ata_device *dev,
1189 } 1230 }
1190 1231
1191 *p_class = class; 1232 *p_class = class;
1192 *p_id = id; 1233
1193 return 0; 1234 return 0;
1194 1235
1195 err_out: 1236 err_out:
1196 printk(KERN_WARNING "ata%u: dev %u failed to IDENTIFY (%s)\n", 1237 ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
1197 ap->id, dev->devno, reason); 1238 "(%s, err_mask=0x%x)\n", reason, err_mask);
1198 kfree(id);
1199 return rc; 1239 return rc;
1200} 1240}
1201 1241
1202static inline u8 ata_dev_knobble(const struct ata_port *ap, 1242static inline u8 ata_dev_knobble(struct ata_device *dev)
1203 struct ata_device *dev)
1204{ 1243{
1205 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id))); 1244 return ((dev->ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
1206} 1245}
1207 1246
1208/** 1247/**
1209 * ata_dev_configure - Configure the specified ATA/ATAPI device 1248 * ata_dev_configure - Configure the specified ATA/ATAPI device
1210 * @ap: Port on which target device resides
1211 * @dev: Target device to configure 1249 * @dev: Target device to configure
1212 * @print_info: Enable device info printout 1250 * @print_info: Enable device info printout
1213 * 1251 *
@@ -1220,9 +1258,9 @@ static inline u8 ata_dev_knobble(const struct ata_port *ap,
1220 * RETURNS: 1258 * RETURNS:
1221 * 0 on success, -errno otherwise 1259 * 0 on success, -errno otherwise
1222 */ 1260 */
1223static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev, 1261static int ata_dev_configure(struct ata_device *dev, int print_info)
1224 int print_info)
1225{ 1262{
1263 struct ata_port *ap = dev->ap;
1226 const u16 *id = dev->id; 1264 const u16 *id = dev->id;
1227 unsigned int xfer_mask; 1265 unsigned int xfer_mask;
1228 int i, rc; 1266 int i, rc;
@@ -1237,10 +1275,10 @@ static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
1237 1275
1238 /* print device capabilities */ 1276 /* print device capabilities */
1239 if (print_info) 1277 if (print_info)
1240 printk(KERN_DEBUG "ata%u: dev %u cfg 49:%04x 82:%04x 83:%04x " 1278 ata_dev_printk(dev, KERN_DEBUG, "cfg 49:%04x 82:%04x 83:%04x "
1241 "84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n", 1279 "84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n",
1242 ap->id, dev->devno, id[49], id[82], id[83], 1280 id[49], id[82], id[83], id[84],
1243 id[84], id[85], id[86], id[87], id[88]); 1281 id[85], id[86], id[87], id[88]);
1244 1282
1245 /* initialize to-be-configured parameters */ 1283 /* initialize to-be-configured parameters */
1246 dev->flags &= ~ATA_DFLAG_CFG_MASK; 1284 dev->flags &= ~ATA_DFLAG_CFG_MASK;
@@ -1276,13 +1314,12 @@ static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
1276 1314
1277 /* print device info to dmesg */ 1315 /* print device info to dmesg */
1278 if (print_info) 1316 if (print_info)
1279 printk(KERN_INFO "ata%u: dev %u ATA-%d, " 1317 ata_dev_printk(dev, KERN_INFO, "ATA-%d, "
1280 "max %s, %Lu sectors: %s\n", 1318 "max %s, %Lu sectors: %s\n",
1281 ap->id, dev->devno, 1319 ata_id_major_version(id),
1282 ata_id_major_version(id), 1320 ata_mode_string(xfer_mask),
1283 ata_mode_string(xfer_mask), 1321 (unsigned long long)dev->n_sectors,
1284 (unsigned long long)dev->n_sectors, 1322 lba_desc);
1285 lba_desc);
1286 } else { 1323 } else {
1287 /* CHS */ 1324 /* CHS */
1288 1325
@@ -1300,13 +1337,12 @@ static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
1300 1337
1301 /* print device info to dmesg */ 1338 /* print device info to dmesg */
1302 if (print_info) 1339 if (print_info)
1303 printk(KERN_INFO "ata%u: dev %u ATA-%d, " 1340 ata_dev_printk(dev, KERN_INFO, "ATA-%d, "
1304 "max %s, %Lu sectors: CHS %u/%u/%u\n", 1341 "max %s, %Lu sectors: CHS %u/%u/%u\n",
1305 ap->id, dev->devno, 1342 ata_id_major_version(id),
1306 ata_id_major_version(id), 1343 ata_mode_string(xfer_mask),
1307 ata_mode_string(xfer_mask), 1344 (unsigned long long)dev->n_sectors,
1308 (unsigned long long)dev->n_sectors, 1345 dev->cylinders, dev->heads, dev->sectors);
1309 dev->cylinders, dev->heads, dev->sectors);
1310 } 1346 }
1311 1347
1312 if (dev->id[59] & 0x100) { 1348 if (dev->id[59] & 0x100) {
@@ -1324,7 +1360,8 @@ static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
1324 1360
1325 rc = atapi_cdb_len(id); 1361 rc = atapi_cdb_len(id);
1326 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) { 1362 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
1327 printk(KERN_WARNING "ata%u: unsupported CDB len\n", ap->id); 1363 ata_dev_printk(dev, KERN_WARNING,
1364 "unsupported CDB len\n");
1328 rc = -EINVAL; 1365 rc = -EINVAL;
1329 goto err_out_nosup; 1366 goto err_out_nosup;
1330 } 1367 }
@@ -1337,9 +1374,9 @@ static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
1337 1374
1338 /* print device info to dmesg */ 1375 /* print device info to dmesg */
1339 if (print_info) 1376 if (print_info)
1340 printk(KERN_INFO "ata%u: dev %u ATAPI, max %s%s\n", 1377 ata_dev_printk(dev, KERN_INFO, "ATAPI, max %s%s\n",
1341 ap->id, dev->devno, ata_mode_string(xfer_mask), 1378 ata_mode_string(xfer_mask),
1342 cdb_intr_string); 1379 cdb_intr_string);
1343 } 1380 }
1344 1381
1345 ap->host->max_cmd_len = 0; 1382 ap->host->max_cmd_len = 0;
@@ -1349,10 +1386,10 @@ static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
1349 ap->device[i].cdb_len); 1386 ap->device[i].cdb_len);
1350 1387
1351 /* limit bridge transfers to udma5, 200 sectors */ 1388 /* limit bridge transfers to udma5, 200 sectors */
1352 if (ata_dev_knobble(ap, dev)) { 1389 if (ata_dev_knobble(dev)) {
1353 if (print_info) 1390 if (print_info)
1354 printk(KERN_INFO "ata%u(%u): applying bridge limits\n", 1391 ata_dev_printk(dev, KERN_INFO,
1355 ap->id, dev->devno); 1392 "applying bridge limits\n");
1356 dev->udma_mask &= ATA_UDMA5; 1393 dev->udma_mask &= ATA_UDMA5;
1357 dev->max_sectors = ATA_MAX_SECTORS; 1394 dev->max_sectors = ATA_MAX_SECTORS;
1358 } 1395 }
@@ -1405,15 +1442,18 @@ static int ata_bus_probe(struct ata_port *ap)
1405 if (ap->ops->probe_reset) { 1442 if (ap->ops->probe_reset) {
1406 rc = ap->ops->probe_reset(ap, classes); 1443 rc = ap->ops->probe_reset(ap, classes);
1407 if (rc) { 1444 if (rc) {
1408 printk("ata%u: reset failed (errno=%d)\n", ap->id, rc); 1445 ata_port_printk(ap, KERN_ERR,
1446 "reset failed (errno=%d)\n", rc);
1409 return rc; 1447 return rc;
1410 } 1448 }
1411 } else { 1449 } else {
1412 ap->ops->phy_reset(ap); 1450 ap->ops->phy_reset(ap);
1413 1451
1414 if (!(ap->flags & ATA_FLAG_DISABLED)) 1452 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1415 for (i = 0; i < ATA_MAX_DEVICES; i++) 1453 if (!(ap->flags & ATA_FLAG_DISABLED))
1416 classes[i] = ap->device[i].class; 1454 classes[i] = ap->device[i].class;
1455 ap->device[i].class = ATA_DEV_UNKNOWN;
1456 }
1417 1457
1418 ata_port_probe(ap); 1458 ata_port_probe(ap);
1419 } 1459 }
@@ -1432,32 +1472,17 @@ static int ata_bus_probe(struct ata_port *ap)
1432 if (!ata_dev_enabled(dev)) 1472 if (!ata_dev_enabled(dev))
1433 continue; 1473 continue;
1434 1474
1435 kfree(dev->id); 1475 rc = ata_dev_read_id(dev, &dev->class, 1, dev->id);
1436 dev->id = NULL;
1437 rc = ata_dev_read_id(ap, dev, &dev->class, 1, &dev->id);
1438 if (rc) 1476 if (rc)
1439 goto fail; 1477 goto fail;
1440 1478
1441 rc = ata_dev_configure(ap, dev, 1); 1479 rc = ata_dev_configure(dev, 1);
1442 if (rc) 1480 if (rc)
1443 goto fail; 1481 goto fail;
1444 } 1482 }
1445 1483
1446 /* configure transfer mode */ 1484 /* configure transfer mode */
1447 if (ap->ops->set_mode) { 1485 rc = ata_set_mode(ap, &dev);
1448 /* FIXME: make ->set_mode handle no device case and
1449 * return error code and failing device on failure as
1450 * ata_set_mode() does.
1451 */
1452 for (i = 0; i < ATA_MAX_DEVICES; i++)
1453 if (ata_dev_enabled(&ap->device[i])) {
1454 ap->ops->set_mode(ap);
1455 break;
1456 }
1457 rc = 0;
1458 } else
1459 rc = ata_set_mode(ap, &dev);
1460
1461 if (rc) { 1486 if (rc) {
1462 down_xfermask = 1; 1487 down_xfermask = 1;
1463 goto fail; 1488 goto fail;
@@ -1479,18 +1504,18 @@ static int ata_bus_probe(struct ata_port *ap)
1479 tries[dev->devno] = 0; 1504 tries[dev->devno] = 0;
1480 break; 1505 break;
1481 case -EIO: 1506 case -EIO:
1482 ata_down_sata_spd_limit(ap); 1507 sata_down_spd_limit(ap);
1483 /* fall through */ 1508 /* fall through */
1484 default: 1509 default:
1485 tries[dev->devno]--; 1510 tries[dev->devno]--;
1486 if (down_xfermask && 1511 if (down_xfermask &&
1487 ata_down_xfermask_limit(ap, dev, tries[dev->devno] == 1)) 1512 ata_down_xfermask_limit(dev, tries[dev->devno] == 1))
1488 tries[dev->devno] = 0; 1513 tries[dev->devno] = 0;
1489 } 1514 }
1490 1515
1491 if (!tries[dev->devno]) { 1516 if (!tries[dev->devno]) {
1492 ata_down_xfermask_limit(ap, dev, 1); 1517 ata_down_xfermask_limit(dev, 1);
1493 ata_dev_disable(ap, dev); 1518 ata_dev_disable(dev);
1494 } 1519 }
1495 1520
1496 goto retry; 1521 goto retry;
@@ -1525,21 +1550,19 @@ static void sata_print_link_status(struct ata_port *ap)
1525{ 1550{
1526 u32 sstatus, scontrol, tmp; 1551 u32 sstatus, scontrol, tmp;
1527 1552
1528 if (!ap->ops->scr_read) 1553 if (sata_scr_read(ap, SCR_STATUS, &sstatus))
1529 return; 1554 return;
1555 sata_scr_read(ap, SCR_CONTROL, &scontrol);
1530 1556
1531 sstatus = scr_read(ap, SCR_STATUS); 1557 if (ata_port_online(ap)) {
1532 scontrol = scr_read(ap, SCR_CONTROL);
1533
1534 if (sata_dev_present(ap)) {
1535 tmp = (sstatus >> 4) & 0xf; 1558 tmp = (sstatus >> 4) & 0xf;
1536 printk(KERN_INFO 1559 ata_port_printk(ap, KERN_INFO,
1537 "ata%u: SATA link up %s (SStatus %X SControl %X)\n", 1560 "SATA link up %s (SStatus %X SControl %X)\n",
1538 ap->id, sata_spd_string(tmp), sstatus, scontrol); 1561 sata_spd_string(tmp), sstatus, scontrol);
1539 } else { 1562 } else {
1540 printk(KERN_INFO 1563 ata_port_printk(ap, KERN_INFO,
1541 "ata%u: SATA link down (SStatus %X SControl %X)\n", 1564 "SATA link down (SStatus %X SControl %X)\n",
1542 ap->id, sstatus, scontrol); 1565 sstatus, scontrol);
1543 } 1566 }
1544} 1567}
1545 1568
@@ -1562,17 +1585,18 @@ void __sata_phy_reset(struct ata_port *ap)
1562 1585
1563 if (ap->flags & ATA_FLAG_SATA_RESET) { 1586 if (ap->flags & ATA_FLAG_SATA_RESET) {
1564 /* issue phy wake/reset */ 1587 /* issue phy wake/reset */
1565 scr_write_flush(ap, SCR_CONTROL, 0x301); 1588 sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
1566 /* Couldn't find anything in SATA I/II specs, but 1589 /* Couldn't find anything in SATA I/II specs, but
1567 * AHCI-1.1 10.4.2 says at least 1 ms. */ 1590 * AHCI-1.1 10.4.2 says at least 1 ms. */
1568 mdelay(1); 1591 mdelay(1);
1569 } 1592 }
1570 scr_write_flush(ap, SCR_CONTROL, 0x300); /* phy wake/clear reset */ 1593 /* phy wake/clear reset */
1594 sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
1571 1595
1572 /* wait for phy to become ready, if necessary */ 1596 /* wait for phy to become ready, if necessary */
1573 do { 1597 do {
1574 msleep(200); 1598 msleep(200);
1575 sstatus = scr_read(ap, SCR_STATUS); 1599 sata_scr_read(ap, SCR_STATUS, &sstatus);
1576 if ((sstatus & 0xf) != 1) 1600 if ((sstatus & 0xf) != 1)
1577 break; 1601 break;
1578 } while (time_before(jiffies, timeout)); 1602 } while (time_before(jiffies, timeout));
@@ -1581,7 +1605,7 @@ void __sata_phy_reset(struct ata_port *ap)
1581 sata_print_link_status(ap); 1605 sata_print_link_status(ap);
1582 1606
1583 /* TODO: phy layer with polling, timeouts, etc. */ 1607 /* TODO: phy layer with polling, timeouts, etc. */
1584 if (sata_dev_present(ap)) 1608 if (!ata_port_offline(ap))
1585 ata_port_probe(ap); 1609 ata_port_probe(ap);
1586 else 1610 else
1587 ata_port_disable(ap); 1611 ata_port_disable(ap);
@@ -1618,15 +1642,15 @@ void sata_phy_reset(struct ata_port *ap)
1618 1642
1619/** 1643/**
1620 * ata_dev_pair - return other device on cable 1644 * ata_dev_pair - return other device on cable
1621 * @ap: port
1622 * @adev: device 1645 * @adev: device
1623 * 1646 *
1624 * Obtain the other device on the same cable, or if none is 1647 * Obtain the other device on the same cable, or if none is
1625 * present NULL is returned 1648 * present NULL is returned
1626 */ 1649 */
1627 1650
1628struct ata_device *ata_dev_pair(struct ata_port *ap, struct ata_device *adev) 1651struct ata_device *ata_dev_pair(struct ata_device *adev)
1629{ 1652{
1653 struct ata_port *ap = adev->ap;
1630 struct ata_device *pair = &ap->device[1 - adev->devno]; 1654 struct ata_device *pair = &ap->device[1 - adev->devno];
1631 if (!ata_dev_enabled(pair)) 1655 if (!ata_dev_enabled(pair))
1632 return NULL; 1656 return NULL;
@@ -1654,12 +1678,12 @@ void ata_port_disable(struct ata_port *ap)
1654} 1678}
1655 1679
1656/** 1680/**
1657 * ata_down_sata_spd_limit - adjust SATA spd limit downward 1681 * sata_down_spd_limit - adjust SATA spd limit downward
1658 * @ap: Port to adjust SATA spd limit for 1682 * @ap: Port to adjust SATA spd limit for
1659 * 1683 *
1660 * Adjust SATA spd limit of @ap downward. Note that this 1684 * Adjust SATA spd limit of @ap downward. Note that this
1661 * function only adjusts the limit. The change must be applied 1685 * function only adjusts the limit. The change must be applied
1662 * using ata_set_sata_spd(). 1686 * using sata_set_spd().
1663 * 1687 *
1664 * LOCKING: 1688 * LOCKING:
1665 * Inherited from caller. 1689 * Inherited from caller.
@@ -1667,13 +1691,14 @@ void ata_port_disable(struct ata_port *ap)
1667 * RETURNS: 1691 * RETURNS:
1668 * 0 on success, negative errno on failure 1692 * 0 on success, negative errno on failure
1669 */ 1693 */
1670int ata_down_sata_spd_limit(struct ata_port *ap) 1694int sata_down_spd_limit(struct ata_port *ap)
1671{ 1695{
1672 u32 spd, mask; 1696 u32 sstatus, spd, mask;
1673 int highbit; 1697 int rc, highbit;
1674 1698
1675 if (ap->cbl != ATA_CBL_SATA || !ap->ops->scr_read) 1699 rc = sata_scr_read(ap, SCR_STATUS, &sstatus);
1676 return -EOPNOTSUPP; 1700 if (rc)
1701 return rc;
1677 1702
1678 mask = ap->sata_spd_limit; 1703 mask = ap->sata_spd_limit;
1679 if (mask <= 1) 1704 if (mask <= 1)
@@ -1681,7 +1706,7 @@ int ata_down_sata_spd_limit(struct ata_port *ap)
1681 highbit = fls(mask) - 1; 1706 highbit = fls(mask) - 1;
1682 mask &= ~(1 << highbit); 1707 mask &= ~(1 << highbit);
1683 1708
1684 spd = (scr_read(ap, SCR_STATUS) >> 4) & 0xf; 1709 spd = (sstatus >> 4) & 0xf;
1685 if (spd <= 1) 1710 if (spd <= 1)
1686 return -EINVAL; 1711 return -EINVAL;
1687 spd--; 1712 spd--;
@@ -1691,13 +1716,13 @@ int ata_down_sata_spd_limit(struct ata_port *ap)
1691 1716
1692 ap->sata_spd_limit = mask; 1717 ap->sata_spd_limit = mask;
1693 1718
1694 printk(KERN_WARNING "ata%u: limiting SATA link speed to %s\n", 1719 ata_port_printk(ap, KERN_WARNING, "limiting SATA link speed to %s\n",
1695 ap->id, sata_spd_string(fls(mask))); 1720 sata_spd_string(fls(mask)));
1696 1721
1697 return 0; 1722 return 0;
1698} 1723}
1699 1724
1700static int __ata_set_sata_spd_needed(struct ata_port *ap, u32 *scontrol) 1725static int __sata_set_spd_needed(struct ata_port *ap, u32 *scontrol)
1701{ 1726{
1702 u32 spd, limit; 1727 u32 spd, limit;
1703 1728
@@ -1713,7 +1738,7 @@ static int __ata_set_sata_spd_needed(struct ata_port *ap, u32 *scontrol)
1713} 1738}
1714 1739
1715/** 1740/**
1716 * ata_set_sata_spd_needed - is SATA spd configuration needed 1741 * sata_set_spd_needed - is SATA spd configuration needed
1717 * @ap: Port in question 1742 * @ap: Port in question
1718 * 1743 *
1719 * Test whether the spd limit in SControl matches 1744 * Test whether the spd limit in SControl matches
@@ -1727,20 +1752,18 @@ static int __ata_set_sata_spd_needed(struct ata_port *ap, u32 *scontrol)
1727 * RETURNS: 1752 * RETURNS:
1728 * 1 if SATA spd configuration is needed, 0 otherwise. 1753 * 1 if SATA spd configuration is needed, 0 otherwise.
1729 */ 1754 */
1730int ata_set_sata_spd_needed(struct ata_port *ap) 1755int sata_set_spd_needed(struct ata_port *ap)
1731{ 1756{
1732 u32 scontrol; 1757 u32 scontrol;
1733 1758
1734 if (ap->cbl != ATA_CBL_SATA || !ap->ops->scr_read) 1759 if (sata_scr_read(ap, SCR_CONTROL, &scontrol))
1735 return 0; 1760 return 0;
1736 1761
1737 scontrol = scr_read(ap, SCR_CONTROL); 1762 return __sata_set_spd_needed(ap, &scontrol);
1738
1739 return __ata_set_sata_spd_needed(ap, &scontrol);
1740} 1763}
1741 1764
1742/** 1765/**
1743 * ata_set_sata_spd - set SATA spd according to spd limit 1766 * sata_set_spd - set SATA spd according to spd limit
1744 * @ap: Port to set SATA spd for 1767 * @ap: Port to set SATA spd for
1745 * 1768 *
1746 * Set SATA spd of @ap according to sata_spd_limit. 1769 * Set SATA spd of @ap according to sata_spd_limit.
@@ -1750,20 +1773,22 @@ int ata_set_sata_spd_needed(struct ata_port *ap)
1750 * 1773 *
1751 * RETURNS: 1774 * RETURNS:
1752 * 0 if spd doesn't need to be changed, 1 if spd has been 1775 * 0 if spd doesn't need to be changed, 1 if spd has been
1753 * changed. -EOPNOTSUPP if SCR registers are inaccessible. 1776 * changed. Negative errno if SCR registers are inaccessible.
1754 */ 1777 */
1755int ata_set_sata_spd(struct ata_port *ap) 1778int sata_set_spd(struct ata_port *ap)
1756{ 1779{
1757 u32 scontrol; 1780 u32 scontrol;
1781 int rc;
1758 1782
1759 if (ap->cbl != ATA_CBL_SATA || !ap->ops->scr_read) 1783 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
1760 return -EOPNOTSUPP; 1784 return rc;
1761 1785
1762 scontrol = scr_read(ap, SCR_CONTROL); 1786 if (!__sata_set_spd_needed(ap, &scontrol))
1763 if (!__ata_set_sata_spd_needed(ap, &scontrol))
1764 return 0; 1787 return 0;
1765 1788
1766 scr_write(ap, SCR_CONTROL, scontrol); 1789 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
1790 return rc;
1791
1767 return 1; 1792 return 1;
1768} 1793}
1769 1794
@@ -1917,7 +1942,6 @@ int ata_timing_compute(struct ata_device *adev, unsigned short speed,
1917 1942
1918/** 1943/**
1919 * ata_down_xfermask_limit - adjust dev xfer masks downward 1944 * ata_down_xfermask_limit - adjust dev xfer masks downward
1920 * @ap: Port associated with device @dev
1921 * @dev: Device to adjust xfer masks 1945 * @dev: Device to adjust xfer masks
1922 * @force_pio0: Force PIO0 1946 * @force_pio0: Force PIO0
1923 * 1947 *
@@ -1931,8 +1955,7 @@ int ata_timing_compute(struct ata_device *adev, unsigned short speed,
1931 * RETURNS: 1955 * RETURNS:
1932 * 0 on success, negative errno on failure 1956 * 0 on success, negative errno on failure
1933 */ 1957 */
1934int ata_down_xfermask_limit(struct ata_port *ap, struct ata_device *dev, 1958int ata_down_xfermask_limit(struct ata_device *dev, int force_pio0)
1935 int force_pio0)
1936{ 1959{
1937 unsigned long xfer_mask; 1960 unsigned long xfer_mask;
1938 int highbit; 1961 int highbit;
@@ -1956,8 +1979,8 @@ int ata_down_xfermask_limit(struct ata_port *ap, struct ata_device *dev,
1956 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask, 1979 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
1957 &dev->udma_mask); 1980 &dev->udma_mask);
1958 1981
1959 printk(KERN_WARNING "ata%u: dev %u limiting speed to %s\n", 1982 ata_dev_printk(dev, KERN_WARNING, "limiting speed to %s\n",
1960 ap->id, dev->devno, ata_mode_string(xfer_mask)); 1983 ata_mode_string(xfer_mask));
1961 1984
1962 return 0; 1985 return 0;
1963 1986
@@ -1965,7 +1988,7 @@ int ata_down_xfermask_limit(struct ata_port *ap, struct ata_device *dev,
1965 return -EINVAL; 1988 return -EINVAL;
1966} 1989}
1967 1990
1968static int ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev) 1991static int ata_dev_set_mode(struct ata_device *dev)
1969{ 1992{
1970 unsigned int err_mask; 1993 unsigned int err_mask;
1971 int rc; 1994 int rc;
@@ -1974,24 +1997,22 @@ static int ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev)
1974 if (dev->xfer_shift == ATA_SHIFT_PIO) 1997 if (dev->xfer_shift == ATA_SHIFT_PIO)
1975 dev->flags |= ATA_DFLAG_PIO; 1998 dev->flags |= ATA_DFLAG_PIO;
1976 1999
1977 err_mask = ata_dev_set_xfermode(ap, dev); 2000 err_mask = ata_dev_set_xfermode(dev);
1978 if (err_mask) { 2001 if (err_mask) {
1979 printk(KERN_ERR 2002 ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
1980 "ata%u: failed to set xfermode (err_mask=0x%x)\n", 2003 "(err_mask=0x%x)\n", err_mask);
1981 ap->id, err_mask);
1982 return -EIO; 2004 return -EIO;
1983 } 2005 }
1984 2006
1985 rc = ata_dev_revalidate(ap, dev, 0); 2007 rc = ata_dev_revalidate(dev, 0);
1986 if (rc) 2008 if (rc)
1987 return rc; 2009 return rc;
1988 2010
1989 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n", 2011 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
1990 dev->xfer_shift, (int)dev->xfer_mode); 2012 dev->xfer_shift, (int)dev->xfer_mode);
1991 2013
1992 printk(KERN_INFO "ata%u: dev %u configured for %s\n", 2014 ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
1993 ap->id, dev->devno, 2015 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
1994 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
1995 return 0; 2016 return 0;
1996} 2017}
1997 2018
@@ -2015,6 +2036,20 @@ int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
2015 struct ata_device *dev; 2036 struct ata_device *dev;
2016 int i, rc = 0, used_dma = 0, found = 0; 2037 int i, rc = 0, used_dma = 0, found = 0;
2017 2038
2039 /* has private set_mode? */
2040 if (ap->ops->set_mode) {
2041 /* FIXME: make ->set_mode handle no device case and
2042 * return error code and failing device on failure.
2043 */
2044 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2045 if (ata_dev_enabled(&ap->device[i])) {
2046 ap->ops->set_mode(ap);
2047 break;
2048 }
2049 }
2050 return 0;
2051 }
2052
2018 /* step 1: calculate xfer_mask */ 2053 /* step 1: calculate xfer_mask */
2019 for (i = 0; i < ATA_MAX_DEVICES; i++) { 2054 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2020 unsigned int pio_mask, dma_mask; 2055 unsigned int pio_mask, dma_mask;
@@ -2024,7 +2059,7 @@ int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
2024 if (!ata_dev_enabled(dev)) 2059 if (!ata_dev_enabled(dev))
2025 continue; 2060 continue;
2026 2061
2027 ata_dev_xfermask(ap, dev); 2062 ata_dev_xfermask(dev);
2028 2063
2029 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0); 2064 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
2030 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask); 2065 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
@@ -2045,8 +2080,7 @@ int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
2045 continue; 2080 continue;
2046 2081
2047 if (!dev->pio_mode) { 2082 if (!dev->pio_mode) {
2048 printk(KERN_WARNING "ata%u: dev %u no PIO support\n", 2083 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
2049 ap->id, dev->devno);
2050 rc = -EINVAL; 2084 rc = -EINVAL;
2051 goto out; 2085 goto out;
2052 } 2086 }
@@ -2077,7 +2111,7 @@ int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
2077 if (!ata_dev_enabled(dev)) 2111 if (!ata_dev_enabled(dev))
2078 continue; 2112 continue;
2079 2113
2080 rc = ata_dev_set_mode(ap, dev); 2114 rc = ata_dev_set_mode(dev);
2081 if (rc) 2115 if (rc)
2082 goto out; 2116 goto out;
2083 } 2117 }
@@ -2145,8 +2179,8 @@ unsigned int ata_busy_sleep (struct ata_port *ap,
2145 } 2179 }
2146 2180
2147 if (status & ATA_BUSY) 2181 if (status & ATA_BUSY)
2148 printk(KERN_WARNING "ata%u is slow to respond, " 2182 ata_port_printk(ap, KERN_WARNING,
2149 "please be patient\n", ap->id); 2183 "port is slow to respond, please be patient\n");
2150 2184
2151 timeout = timer_start + tmout; 2185 timeout = timer_start + tmout;
2152 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) { 2186 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
@@ -2155,8 +2189,8 @@ unsigned int ata_busy_sleep (struct ata_port *ap,
2155 } 2189 }
2156 2190
2157 if (status & ATA_BUSY) { 2191 if (status & ATA_BUSY) {
2158 printk(KERN_ERR "ata%u failed to respond (%lu secs)\n", 2192 ata_port_printk(ap, KERN_ERR, "port failed to respond "
2159 ap->id, tmout / HZ); 2193 "(%lu secs)\n", tmout / HZ);
2160 return 1; 2194 return 1;
2161 } 2195 }
2162 2196
@@ -2249,7 +2283,7 @@ static unsigned int ata_bus_softreset(struct ata_port *ap,
2249 * pulldown resistor. 2283 * pulldown resistor.
2250 */ 2284 */
2251 if (ata_check_status(ap) == 0xFF) { 2285 if (ata_check_status(ap) == 0xFF) {
2252 printk(KERN_ERR "ata%u: SRST failed (status 0xFF)\n", ap->id); 2286 ata_port_printk(ap, KERN_ERR, "SRST failed (status 0xFF)\n");
2253 return AC_ERR_OTHER; 2287 return AC_ERR_OTHER;
2254 } 2288 }
2255 2289
@@ -2343,7 +2377,7 @@ void ata_bus_reset(struct ata_port *ap)
2343 return; 2377 return;
2344 2378
2345err_out: 2379err_out:
2346 printk(KERN_ERR "ata%u: disabling port\n", ap->id); 2380 ata_port_printk(ap, KERN_ERR, "disabling port\n");
2347 ap->ops->port_disable(ap); 2381 ap->ops->port_disable(ap);
2348 2382
2349 DPRINTK("EXIT\n"); 2383 DPRINTK("EXIT\n");
@@ -2353,20 +2387,26 @@ static int sata_phy_resume(struct ata_port *ap)
2353{ 2387{
2354 unsigned long timeout = jiffies + (HZ * 5); 2388 unsigned long timeout = jiffies + (HZ * 5);
2355 u32 scontrol, sstatus; 2389 u32 scontrol, sstatus;
2390 int rc;
2391
2392 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2393 return rc;
2356 2394
2357 scontrol = scr_read(ap, SCR_CONTROL);
2358 scontrol = (scontrol & 0x0f0) | 0x300; 2395 scontrol = (scontrol & 0x0f0) | 0x300;
2359 scr_write_flush(ap, SCR_CONTROL, scontrol); 2396
2397 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2398 return rc;
2360 2399
2361 /* Wait for phy to become ready, if necessary. */ 2400 /* Wait for phy to become ready, if necessary. */
2362 do { 2401 do {
2363 msleep(200); 2402 msleep(200);
2364 sstatus = scr_read(ap, SCR_STATUS); 2403 if ((rc = sata_scr_read(ap, SCR_STATUS, &sstatus)))
2404 return rc;
2365 if ((sstatus & 0xf) != 1) 2405 if ((sstatus & 0xf) != 1)
2366 return 0; 2406 return 0;
2367 } while (time_before(jiffies, timeout)); 2407 } while (time_before(jiffies, timeout));
2368 2408
2369 return -1; 2409 return -EBUSY;
2370} 2410}
2371 2411
2372/** 2412/**
@@ -2384,22 +2424,20 @@ static int sata_phy_resume(struct ata_port *ap)
2384 */ 2424 */
2385void ata_std_probeinit(struct ata_port *ap) 2425void ata_std_probeinit(struct ata_port *ap)
2386{ 2426{
2387 if ((ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read) { 2427 u32 scontrol;
2388 u32 spd;
2389
2390 /* set cable type and resume link */
2391 ap->cbl = ATA_CBL_SATA;
2392 sata_phy_resume(ap);
2393 2428
2394 /* init sata_spd_limit to the current value */ 2429 /* resume link */
2395 spd = (scr_read(ap, SCR_CONTROL) & 0xf0) >> 4; 2430 sata_phy_resume(ap);
2396 if (spd)
2397 ap->sata_spd_limit &= (1 << spd) - 1;
2398 2431
2399 /* wait for device */ 2432 /* init sata_spd_limit to the current value */
2400 if (sata_dev_present(ap)) 2433 if (sata_scr_read(ap, SCR_CONTROL, &scontrol) == 0) {
2401 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT); 2434 int spd = (scontrol >> 4) & 0xf;
2435 ap->sata_spd_limit &= (1 << spd) - 1;
2402 } 2436 }
2437
2438 /* wait for device */
2439 if (ata_port_online(ap))
2440 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2403} 2441}
2404 2442
2405/** 2443/**
@@ -2424,7 +2462,7 @@ int ata_std_softreset(struct ata_port *ap, unsigned int *classes)
2424 2462
2425 DPRINTK("ENTER\n"); 2463 DPRINTK("ENTER\n");
2426 2464
2427 if (ap->ops->scr_read && !sata_dev_present(ap)) { 2465 if (ata_port_offline(ap)) {
2428 classes[0] = ATA_DEV_NONE; 2466 classes[0] = ATA_DEV_NONE;
2429 goto out; 2467 goto out;
2430 } 2468 }
@@ -2442,8 +2480,8 @@ int ata_std_softreset(struct ata_port *ap, unsigned int *classes)
2442 DPRINTK("about to softreset, devmask=%x\n", devmask); 2480 DPRINTK("about to softreset, devmask=%x\n", devmask);
2443 err_mask = ata_bus_softreset(ap, devmask); 2481 err_mask = ata_bus_softreset(ap, devmask);
2444 if (err_mask) { 2482 if (err_mask) {
2445 printk(KERN_ERR "ata%u: SRST failed (err_mask=0x%x)\n", 2483 ata_port_printk(ap, KERN_ERR, "SRST failed (err_mask=0x%x)\n",
2446 ap->id, err_mask); 2484 err_mask);
2447 return -EIO; 2485 return -EIO;
2448 } 2486 }
2449 2487
@@ -2475,26 +2513,35 @@ int ata_std_softreset(struct ata_port *ap, unsigned int *classes)
2475int sata_std_hardreset(struct ata_port *ap, unsigned int *class) 2513int sata_std_hardreset(struct ata_port *ap, unsigned int *class)
2476{ 2514{
2477 u32 scontrol; 2515 u32 scontrol;
2516 int rc;
2478 2517
2479 DPRINTK("ENTER\n"); 2518 DPRINTK("ENTER\n");
2480 2519
2481 if (ata_set_sata_spd_needed(ap)) { 2520 if (sata_set_spd_needed(ap)) {
2482 /* SATA spec says nothing about how to reconfigure 2521 /* SATA spec says nothing about how to reconfigure
2483 * spd. To be on the safe side, turn off phy during 2522 * spd. To be on the safe side, turn off phy during
2484 * reconfiguration. This works for at least ICH7 AHCI 2523 * reconfiguration. This works for at least ICH7 AHCI
2485 * and Sil3124. 2524 * and Sil3124.
2486 */ 2525 */
2487 scontrol = scr_read(ap, SCR_CONTROL); 2526 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2527 return rc;
2528
2488 scontrol = (scontrol & 0x0f0) | 0x302; 2529 scontrol = (scontrol & 0x0f0) | 0x302;
2489 scr_write_flush(ap, SCR_CONTROL, scontrol);
2490 2530
2491 ata_set_sata_spd(ap); 2531 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2532 return rc;
2533
2534 sata_set_spd(ap);
2492 } 2535 }
2493 2536
2494 /* issue phy wake/reset */ 2537 /* issue phy wake/reset */
2495 scontrol = scr_read(ap, SCR_CONTROL); 2538 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2539 return rc;
2540
2496 scontrol = (scontrol & 0x0f0) | 0x301; 2541 scontrol = (scontrol & 0x0f0) | 0x301;
2497 scr_write_flush(ap, SCR_CONTROL, scontrol); 2542
2543 if ((rc = sata_scr_write_flush(ap, SCR_CONTROL, scontrol)))
2544 return rc;
2498 2545
2499 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1 2546 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
2500 * 10.4.2 says at least 1 ms. 2547 * 10.4.2 says at least 1 ms.
@@ -2505,15 +2552,15 @@ int sata_std_hardreset(struct ata_port *ap, unsigned int *class)
2505 sata_phy_resume(ap); 2552 sata_phy_resume(ap);
2506 2553
2507 /* TODO: phy layer with polling, timeouts, etc. */ 2554 /* TODO: phy layer with polling, timeouts, etc. */
2508 if (!sata_dev_present(ap)) { 2555 if (ata_port_offline(ap)) {
2509 *class = ATA_DEV_NONE; 2556 *class = ATA_DEV_NONE;
2510 DPRINTK("EXIT, link offline\n"); 2557 DPRINTK("EXIT, link offline\n");
2511 return 0; 2558 return 0;
2512 } 2559 }
2513 2560
2514 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) { 2561 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
2515 printk(KERN_ERR 2562 ata_port_printk(ap, KERN_ERR,
2516 "ata%u: COMRESET failed (device not ready)\n", ap->id); 2563 "COMRESET failed (device not ready)\n");
2517 return -EIO; 2564 return -EIO;
2518 } 2565 }
2519 2566
@@ -2542,15 +2589,23 @@ int sata_std_hardreset(struct ata_port *ap, unsigned int *class)
2542 */ 2589 */
2543void ata_std_postreset(struct ata_port *ap, unsigned int *classes) 2590void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
2544{ 2591{
2592 u32 serror;
2593
2545 DPRINTK("ENTER\n"); 2594 DPRINTK("ENTER\n");
2546 2595
2547 /* print link status */ 2596 /* print link status */
2548 if (ap->cbl == ATA_CBL_SATA) 2597 sata_print_link_status(ap);
2549 sata_print_link_status(ap); 2598
2599 /* clear SError */
2600 if (sata_scr_read(ap, SCR_ERROR, &serror) == 0)
2601 sata_scr_write(ap, SCR_ERROR, serror);
2550 2602
2551 /* re-enable interrupts */ 2603 /* re-enable interrupts */
2552 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */ 2604 if (!ap->ops->error_handler) {
2553 ata_irq_on(ap); 2605 /* FIXME: hack. create a hook instead */
2606 if (ap->ioaddr.ctl_addr)
2607 ata_irq_on(ap);
2608 }
2554 2609
2555 /* is double-select really necessary? */ 2610 /* is double-select really necessary? */
2556 if (classes[0] != ATA_DEV_NONE) 2611 if (classes[0] != ATA_DEV_NONE)
@@ -2593,7 +2648,7 @@ int ata_std_probe_reset(struct ata_port *ap, unsigned int *classes)
2593 ata_reset_fn_t hardreset; 2648 ata_reset_fn_t hardreset;
2594 2649
2595 hardreset = NULL; 2650 hardreset = NULL;
2596 if (ap->cbl == ATA_CBL_SATA && ap->ops->scr_read) 2651 if (sata_scr_valid(ap))
2597 hardreset = sata_std_hardreset; 2652 hardreset = sata_std_hardreset;
2598 2653
2599 return ata_drive_probe_reset(ap, ata_std_probeinit, 2654 return ata_drive_probe_reset(ap, ata_std_probeinit,
@@ -2602,7 +2657,7 @@ int ata_std_probe_reset(struct ata_port *ap, unsigned int *classes)
2602} 2657}
2603 2658
2604int ata_do_reset(struct ata_port *ap, ata_reset_fn_t reset, 2659int ata_do_reset(struct ata_port *ap, ata_reset_fn_t reset,
2605 ata_postreset_fn_t postreset, unsigned int *classes) 2660 unsigned int *classes)
2606{ 2661{
2607 int i, rc; 2662 int i, rc;
2608 2663
@@ -2626,9 +2681,6 @@ int ata_do_reset(struct ata_port *ap, ata_reset_fn_t reset,
2626 if (classes[i] == ATA_DEV_UNKNOWN) 2681 if (classes[i] == ATA_DEV_UNKNOWN)
2627 classes[i] = ATA_DEV_NONE; 2682 classes[i] = ATA_DEV_NONE;
2628 2683
2629 if (postreset)
2630 postreset(ap, classes);
2631
2632 return 0; 2684 return 0;
2633} 2685}
2634 2686
@@ -2668,15 +2720,17 @@ int ata_drive_probe_reset(struct ata_port *ap, ata_probeinit_fn_t probeinit,
2668{ 2720{
2669 int rc = -EINVAL; 2721 int rc = -EINVAL;
2670 2722
2723 ata_eh_freeze_port(ap);
2724
2671 if (probeinit) 2725 if (probeinit)
2672 probeinit(ap); 2726 probeinit(ap);
2673 2727
2674 if (softreset && !ata_set_sata_spd_needed(ap)) { 2728 if (softreset && !sata_set_spd_needed(ap)) {
2675 rc = ata_do_reset(ap, softreset, postreset, classes); 2729 rc = ata_do_reset(ap, softreset, classes);
2676 if (rc == 0 && classes[0] != ATA_DEV_UNKNOWN) 2730 if (rc == 0 && classes[0] != ATA_DEV_UNKNOWN)
2677 goto done; 2731 goto done;
2678 printk(KERN_INFO "ata%u: softreset failed, will try " 2732 ata_port_printk(ap, KERN_INFO, "softreset failed, "
2679 "hardreset in 5 secs\n", ap->id); 2733 "will try hardreset in 5 secs\n");
2680 ssleep(5); 2734 ssleep(5);
2681 } 2735 }
2682 2736
@@ -2684,39 +2738,45 @@ int ata_drive_probe_reset(struct ata_port *ap, ata_probeinit_fn_t probeinit,
2684 goto done; 2738 goto done;
2685 2739
2686 while (1) { 2740 while (1) {
2687 rc = ata_do_reset(ap, hardreset, postreset, classes); 2741 rc = ata_do_reset(ap, hardreset, classes);
2688 if (rc == 0) { 2742 if (rc == 0) {
2689 if (classes[0] != ATA_DEV_UNKNOWN) 2743 if (classes[0] != ATA_DEV_UNKNOWN)
2690 goto done; 2744 goto done;
2691 break; 2745 break;
2692 } 2746 }
2693 2747
2694 if (ata_down_sata_spd_limit(ap)) 2748 if (sata_down_spd_limit(ap))
2695 goto done; 2749 goto done;
2696 2750
2697 printk(KERN_INFO "ata%u: hardreset failed, will retry " 2751 ata_port_printk(ap, KERN_INFO, "hardreset failed, "
2698 "in 5 secs\n", ap->id); 2752 "will retry in 5 secs\n");
2699 ssleep(5); 2753 ssleep(5);
2700 } 2754 }
2701 2755
2702 if (softreset) { 2756 if (softreset) {
2703 printk(KERN_INFO "ata%u: hardreset succeeded without " 2757 ata_port_printk(ap, KERN_INFO,
2704 "classification, will retry softreset in 5 secs\n", 2758 "hardreset succeeded without classification, "
2705 ap->id); 2759 "will retry softreset in 5 secs\n");
2706 ssleep(5); 2760 ssleep(5);
2707 2761
2708 rc = ata_do_reset(ap, softreset, postreset, classes); 2762 rc = ata_do_reset(ap, softreset, classes);
2709 } 2763 }
2710 2764
2711 done: 2765 done:
2712 if (rc == 0 && classes[0] == ATA_DEV_UNKNOWN) 2766 if (rc == 0) {
2713 rc = -ENODEV; 2767 if (postreset)
2768 postreset(ap, classes);
2769
2770 ata_eh_thaw_port(ap);
2771
2772 if (classes[0] == ATA_DEV_UNKNOWN)
2773 rc = -ENODEV;
2774 }
2714 return rc; 2775 return rc;
2715} 2776}
2716 2777
2717/** 2778/**
2718 * ata_dev_same_device - Determine whether new ID matches configured device 2779 * ata_dev_same_device - Determine whether new ID matches configured device
2719 * @ap: port on which the device to compare against resides
2720 * @dev: device to compare against 2780 * @dev: device to compare against
2721 * @new_class: class of the new device 2781 * @new_class: class of the new device
2722 * @new_id: IDENTIFY page of the new device 2782 * @new_id: IDENTIFY page of the new device
@@ -2731,17 +2791,16 @@ int ata_drive_probe_reset(struct ata_port *ap, ata_probeinit_fn_t probeinit,
2731 * RETURNS: 2791 * RETURNS:
2732 * 1 if @dev matches @new_class and @new_id, 0 otherwise. 2792 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
2733 */ 2793 */
2734static int ata_dev_same_device(struct ata_port *ap, struct ata_device *dev, 2794static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
2735 unsigned int new_class, const u16 *new_id) 2795 const u16 *new_id)
2736{ 2796{
2737 const u16 *old_id = dev->id; 2797 const u16 *old_id = dev->id;
2738 unsigned char model[2][41], serial[2][21]; 2798 unsigned char model[2][41], serial[2][21];
2739 u64 new_n_sectors; 2799 u64 new_n_sectors;
2740 2800
2741 if (dev->class != new_class) { 2801 if (dev->class != new_class) {
2742 printk(KERN_INFO 2802 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
2743 "ata%u: dev %u class mismatch %d != %d\n", 2803 dev->class, new_class);
2744 ap->id, dev->devno, dev->class, new_class);
2745 return 0; 2804 return 0;
2746 } 2805 }
2747 2806
@@ -2752,24 +2811,22 @@ static int ata_dev_same_device(struct ata_port *ap, struct ata_device *dev,
2752 new_n_sectors = ata_id_n_sectors(new_id); 2811 new_n_sectors = ata_id_n_sectors(new_id);
2753 2812
2754 if (strcmp(model[0], model[1])) { 2813 if (strcmp(model[0], model[1])) {
2755 printk(KERN_INFO 2814 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
2756 "ata%u: dev %u model number mismatch '%s' != '%s'\n", 2815 "'%s' != '%s'\n", model[0], model[1]);
2757 ap->id, dev->devno, model[0], model[1]);
2758 return 0; 2816 return 0;
2759 } 2817 }
2760 2818
2761 if (strcmp(serial[0], serial[1])) { 2819 if (strcmp(serial[0], serial[1])) {
2762 printk(KERN_INFO 2820 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
2763 "ata%u: dev %u serial number mismatch '%s' != '%s'\n", 2821 "'%s' != '%s'\n", serial[0], serial[1]);
2764 ap->id, dev->devno, serial[0], serial[1]);
2765 return 0; 2822 return 0;
2766 } 2823 }
2767 2824
2768 if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) { 2825 if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) {
2769 printk(KERN_INFO 2826 ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
2770 "ata%u: dev %u n_sectors mismatch %llu != %llu\n", 2827 "%llu != %llu\n",
2771 ap->id, dev->devno, (unsigned long long)dev->n_sectors, 2828 (unsigned long long)dev->n_sectors,
2772 (unsigned long long)new_n_sectors); 2829 (unsigned long long)new_n_sectors);
2773 return 0; 2830 return 0;
2774 } 2831 }
2775 2832
@@ -2778,7 +2835,6 @@ static int ata_dev_same_device(struct ata_port *ap, struct ata_device *dev,
2778 2835
2779/** 2836/**
2780 * ata_dev_revalidate - Revalidate ATA device 2837 * ata_dev_revalidate - Revalidate ATA device
2781 * @ap: port on which the device to revalidate resides
2782 * @dev: device to revalidate 2838 * @dev: device to revalidate
2783 * @post_reset: is this revalidation after reset? 2839 * @post_reset: is this revalidation after reset?
2784 * 2840 *
@@ -2791,11 +2847,10 @@ static int ata_dev_same_device(struct ata_port *ap, struct ata_device *dev,
2791 * RETURNS: 2847 * RETURNS:
2792 * 0 on success, negative errno otherwise 2848 * 0 on success, negative errno otherwise
2793 */ 2849 */
2794int ata_dev_revalidate(struct ata_port *ap, struct ata_device *dev, 2850int ata_dev_revalidate(struct ata_device *dev, int post_reset)
2795 int post_reset)
2796{ 2851{
2797 unsigned int class = dev->class; 2852 unsigned int class = dev->class;
2798 u16 *id = NULL; 2853 u16 *id = (void *)dev->ap->sector_buf;
2799 int rc; 2854 int rc;
2800 2855
2801 if (!ata_dev_enabled(dev)) { 2856 if (!ata_dev_enabled(dev)) {
@@ -2803,29 +2858,26 @@ int ata_dev_revalidate(struct ata_port *ap, struct ata_device *dev,
2803 goto fail; 2858 goto fail;
2804 } 2859 }
2805 2860
2806 /* allocate & read ID data */ 2861 /* read ID data */
2807 rc = ata_dev_read_id(ap, dev, &class, post_reset, &id); 2862 rc = ata_dev_read_id(dev, &class, post_reset, id);
2808 if (rc) 2863 if (rc)
2809 goto fail; 2864 goto fail;
2810 2865
2811 /* is the device still there? */ 2866 /* is the device still there? */
2812 if (!ata_dev_same_device(ap, dev, class, id)) { 2867 if (!ata_dev_same_device(dev, class, id)) {
2813 rc = -ENODEV; 2868 rc = -ENODEV;
2814 goto fail; 2869 goto fail;
2815 } 2870 }
2816 2871
2817 kfree(dev->id); 2872 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
2818 dev->id = id;
2819 2873
2820 /* configure device according to the new ID */ 2874 /* configure device according to the new ID */
2821 rc = ata_dev_configure(ap, dev, 0); 2875 rc = ata_dev_configure(dev, 0);
2822 if (rc == 0) 2876 if (rc == 0)
2823 return 0; 2877 return 0;
2824 2878
2825 fail: 2879 fail:
2826 printk(KERN_ERR "ata%u: dev %u revalidation failed (errno=%d)\n", 2880 ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
2827 ap->id, dev->devno, rc);
2828 kfree(id);
2829 return rc; 2881 return rc;
2830} 2882}
2831 2883
@@ -2901,7 +2953,6 @@ static int ata_dma_blacklisted(const struct ata_device *dev)
2901 2953
2902/** 2954/**
2903 * ata_dev_xfermask - Compute supported xfermask of the given device 2955 * ata_dev_xfermask - Compute supported xfermask of the given device
2904 * @ap: Port on which the device to compute xfermask for resides
2905 * @dev: Device to compute xfermask for 2956 * @dev: Device to compute xfermask for
2906 * 2957 *
2907 * Compute supported xfermask of @dev and store it in 2958 * Compute supported xfermask of @dev and store it in
@@ -2916,8 +2967,9 @@ static int ata_dma_blacklisted(const struct ata_device *dev)
2916 * LOCKING: 2967 * LOCKING:
2917 * None. 2968 * None.
2918 */ 2969 */
2919static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev) 2970static void ata_dev_xfermask(struct ata_device *dev)
2920{ 2971{
2972 struct ata_port *ap = dev->ap;
2921 struct ata_host_set *hs = ap->host_set; 2973 struct ata_host_set *hs = ap->host_set;
2922 unsigned long xfer_mask; 2974 unsigned long xfer_mask;
2923 int i; 2975 int i;
@@ -2953,8 +3005,8 @@ static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev)
2953 } 3005 }
2954 3006
2955 if (ata_dma_blacklisted(dev)) 3007 if (ata_dma_blacklisted(dev))
2956 printk(KERN_WARNING "ata%u: dev %u is on DMA blacklist, " 3008 ata_dev_printk(dev, KERN_WARNING,
2957 "disabling DMA\n", ap->id, dev->devno); 3009 "device is on DMA blacklist, disabling DMA\n");
2958 3010
2959 if (hs->flags & ATA_HOST_SIMPLEX) { 3011 if (hs->flags & ATA_HOST_SIMPLEX) {
2960 if (hs->simplex_claimed) 3012 if (hs->simplex_claimed)
@@ -2970,7 +3022,6 @@ static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev)
2970 3022
2971/** 3023/**
2972 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command 3024 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
2973 * @ap: Port associated with device @dev
2974 * @dev: Device to which command will be sent 3025 * @dev: Device to which command will be sent
2975 * 3026 *
2976 * Issue SET FEATURES - XFER MODE command to device @dev 3027 * Issue SET FEATURES - XFER MODE command to device @dev
@@ -2983,8 +3034,7 @@ static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev)
2983 * 0 on success, AC_ERR_* mask otherwise. 3034 * 0 on success, AC_ERR_* mask otherwise.
2984 */ 3035 */
2985 3036
2986static unsigned int ata_dev_set_xfermode(struct ata_port *ap, 3037static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
2987 struct ata_device *dev)
2988{ 3038{
2989 struct ata_taskfile tf; 3039 struct ata_taskfile tf;
2990 unsigned int err_mask; 3040 unsigned int err_mask;
@@ -2992,14 +3042,14 @@ static unsigned int ata_dev_set_xfermode(struct ata_port *ap,
2992 /* set up set-features taskfile */ 3042 /* set up set-features taskfile */
2993 DPRINTK("set features - xfer mode\n"); 3043 DPRINTK("set features - xfer mode\n");
2994 3044
2995 ata_tf_init(ap, &tf, dev->devno); 3045 ata_tf_init(dev, &tf);
2996 tf.command = ATA_CMD_SET_FEATURES; 3046 tf.command = ATA_CMD_SET_FEATURES;
2997 tf.feature = SETFEATURES_XFER; 3047 tf.feature = SETFEATURES_XFER;
2998 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 3048 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2999 tf.protocol = ATA_PROT_NODATA; 3049 tf.protocol = ATA_PROT_NODATA;
3000 tf.nsect = dev->xfer_mode; 3050 tf.nsect = dev->xfer_mode;
3001 3051
3002 err_mask = ata_exec_internal(ap, dev, &tf, NULL, DMA_NONE, NULL, 0); 3052 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
3003 3053
3004 DPRINTK("EXIT, err_mask=%x\n", err_mask); 3054 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3005 return err_mask; 3055 return err_mask;
@@ -3007,8 +3057,9 @@ static unsigned int ata_dev_set_xfermode(struct ata_port *ap,
3007 3057
3008/** 3058/**
3009 * ata_dev_init_params - Issue INIT DEV PARAMS command 3059 * ata_dev_init_params - Issue INIT DEV PARAMS command
3010 * @ap: Port associated with device @dev
3011 * @dev: Device to which command will be sent 3060 * @dev: Device to which command will be sent
3061 * @heads: Number of heads
3062 * @sectors: Number of sectors
3012 * 3063 *
3013 * LOCKING: 3064 * LOCKING:
3014 * Kernel thread context (may sleep) 3065 * Kernel thread context (may sleep)
@@ -3016,11 +3067,8 @@ static unsigned int ata_dev_set_xfermode(struct ata_port *ap,
3016 * RETURNS: 3067 * RETURNS:
3017 * 0 on success, AC_ERR_* mask otherwise. 3068 * 0 on success, AC_ERR_* mask otherwise.
3018 */ 3069 */
3019 3070static unsigned int ata_dev_init_params(struct ata_device *dev,
3020static unsigned int ata_dev_init_params(struct ata_port *ap, 3071 u16 heads, u16 sectors)
3021 struct ata_device *dev,
3022 u16 heads,
3023 u16 sectors)
3024{ 3072{
3025 struct ata_taskfile tf; 3073 struct ata_taskfile tf;
3026 unsigned int err_mask; 3074 unsigned int err_mask;
@@ -3032,14 +3080,14 @@ static unsigned int ata_dev_init_params(struct ata_port *ap,
3032 /* set up init dev params taskfile */ 3080 /* set up init dev params taskfile */
3033 DPRINTK("init dev params \n"); 3081 DPRINTK("init dev params \n");
3034 3082
3035 ata_tf_init(ap, &tf, dev->devno); 3083 ata_tf_init(dev, &tf);
3036 tf.command = ATA_CMD_INIT_DEV_PARAMS; 3084 tf.command = ATA_CMD_INIT_DEV_PARAMS;
3037 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 3085 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3038 tf.protocol = ATA_PROT_NODATA; 3086 tf.protocol = ATA_PROT_NODATA;
3039 tf.nsect = sectors; 3087 tf.nsect = sectors;
3040 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */ 3088 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
3041 3089
3042 err_mask = ata_exec_internal(ap, dev, &tf, NULL, DMA_NONE, NULL, 0); 3090 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
3043 3091
3044 DPRINTK("EXIT, err_mask=%x\n", err_mask); 3092 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3045 return err_mask; 3093 return err_mask;
@@ -3421,15 +3469,29 @@ skip_map:
3421 * LOCKING: 3469 * LOCKING:
3422 * None. (grabs host lock) 3470 * None. (grabs host lock)
3423 */ 3471 */
3424
3425void ata_poll_qc_complete(struct ata_queued_cmd *qc) 3472void ata_poll_qc_complete(struct ata_queued_cmd *qc)
3426{ 3473{
3427 struct ata_port *ap = qc->ap; 3474 struct ata_port *ap = qc->ap;
3428 unsigned long flags; 3475 unsigned long flags;
3429 3476
3430 spin_lock_irqsave(&ap->host_set->lock, flags); 3477 spin_lock_irqsave(&ap->host_set->lock, flags);
3431 ata_irq_on(ap); 3478
3432 ata_qc_complete(qc); 3479 if (ap->ops->error_handler) {
3480 /* EH might have kicked in while host_set lock is released */
3481 qc = ata_qc_from_tag(ap, qc->tag);
3482 if (qc) {
3483 if (!(qc->err_mask & AC_ERR_HSM)) {
3484 ata_irq_on(ap);
3485 ata_qc_complete(qc);
3486 } else
3487 ata_port_freeze(ap);
3488 }
3489 } else {
3490 /* old EH */
3491 ata_irq_on(ap);
3492 ata_qc_complete(qc);
3493 }
3494
3433 spin_unlock_irqrestore(&ap->host_set->lock, flags); 3495 spin_unlock_irqrestore(&ap->host_set->lock, flags);
3434} 3496}
3435 3497
@@ -3728,8 +3790,8 @@ next_sg:
3728 unsigned int i; 3790 unsigned int i;
3729 3791
3730 if (words) /* warning if bytes > 1 */ 3792 if (words) /* warning if bytes > 1 */
3731 printk(KERN_WARNING "ata%u: %u bytes trailing data\n", 3793 ata_dev_printk(qc->dev, KERN_WARNING,
3732 ap->id, bytes); 3794 "%u bytes trailing data\n", bytes);
3733 3795
3734 for (i = 0; i < words; i++) 3796 for (i = 0; i < words; i++)
3735 ata_data_xfer(ap, (unsigned char*)pad_buf, 2, do_write); 3797 ata_data_xfer(ap, (unsigned char*)pad_buf, 2, do_write);
@@ -3823,8 +3885,7 @@ static void atapi_pio_bytes(struct ata_queued_cmd *qc)
3823 return; 3885 return;
3824 3886
3825err_out: 3887err_out:
3826 printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n", 3888 ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
3827 ap->id, dev->devno);
3828 qc->err_mask |= AC_ERR_HSM; 3889 qc->err_mask |= AC_ERR_HSM;
3829 ap->hsm_task_state = HSM_ST_ERR; 3890 ap->hsm_task_state = HSM_ST_ERR;
3830} 3891}
@@ -4134,9 +4195,14 @@ static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4134 struct ata_queued_cmd *qc = NULL; 4195 struct ata_queued_cmd *qc = NULL;
4135 unsigned int i; 4196 unsigned int i;
4136 4197
4137 for (i = 0; i < ATA_MAX_QUEUE; i++) 4198 /* no command while frozen */
4199 if (unlikely(ap->flags & ATA_FLAG_FROZEN))
4200 return NULL;
4201
4202 /* the last tag is reserved for internal command. */
4203 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
4138 if (!test_and_set_bit(i, &ap->qactive)) { 4204 if (!test_and_set_bit(i, &ap->qactive)) {
4139 qc = ata_qc_from_tag(ap, i); 4205 qc = __ata_qc_from_tag(ap, i);
4140 break; 4206 break;
4141 } 4207 }
4142 4208
@@ -4148,16 +4214,15 @@ static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4148 4214
4149/** 4215/**
4150 * ata_qc_new_init - Request an available ATA command, and initialize it 4216 * ata_qc_new_init - Request an available ATA command, and initialize it
4151 * @ap: Port associated with device @dev
4152 * @dev: Device from whom we request an available command structure 4217 * @dev: Device from whom we request an available command structure
4153 * 4218 *
4154 * LOCKING: 4219 * LOCKING:
4155 * None. 4220 * None.
4156 */ 4221 */
4157 4222
4158struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap, 4223struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
4159 struct ata_device *dev)
4160{ 4224{
4225 struct ata_port *ap = dev->ap;
4161 struct ata_queued_cmd *qc; 4226 struct ata_queued_cmd *qc;
4162 4227
4163 qc = ata_qc_new(ap); 4228 qc = ata_qc_new(ap);
@@ -4192,8 +4257,6 @@ void ata_qc_free(struct ata_queued_cmd *qc)
4192 qc->flags = 0; 4257 qc->flags = 0;
4193 tag = qc->tag; 4258 tag = qc->tag;
4194 if (likely(ata_tag_valid(tag))) { 4259 if (likely(ata_tag_valid(tag))) {
4195 if (tag == ap->active_tag)
4196 ap->active_tag = ATA_TAG_POISON;
4197 qc->tag = ATA_TAG_POISON; 4260 qc->tag = ATA_TAG_POISON;
4198 clear_bit(tag, &ap->qactive); 4261 clear_bit(tag, &ap->qactive);
4199 } 4262 }
@@ -4207,6 +4270,9 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
4207 if (likely(qc->flags & ATA_QCFLAG_DMAMAP)) 4270 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4208 ata_sg_clean(qc); 4271 ata_sg_clean(qc);
4209 4272
4273 /* command should be marked inactive atomically with qc completion */
4274 qc->ap->active_tag = ATA_TAG_POISON;
4275
4210 /* atapi: mark qc as inactive to prevent the interrupt handler 4276 /* atapi: mark qc as inactive to prevent the interrupt handler
4211 * from completing the command twice later, before the error handler 4277 * from completing the command twice later, before the error handler
4212 * is called. (when rc != 0 and atapi request sense is needed) 4278 * is called. (when rc != 0 and atapi request sense is needed)
@@ -4217,6 +4283,66 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
4217 qc->complete_fn(qc); 4283 qc->complete_fn(qc);
4218} 4284}
4219 4285
4286/**
4287 * ata_qc_complete - Complete an active ATA command
4288 * @qc: Command to complete
4289 * @err_mask: ATA Status register contents
4290 *
4291 * Indicate to the mid and upper layers that an ATA
4292 * command has completed, with either an ok or not-ok status.
4293 *
4294 * LOCKING:
4295 * spin_lock_irqsave(host_set lock)
4296 */
4297void ata_qc_complete(struct ata_queued_cmd *qc)
4298{
4299 struct ata_port *ap = qc->ap;
4300
4301 /* XXX: New EH and old EH use different mechanisms to
4302 * synchronize EH with regular execution path.
4303 *
4304 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
4305 * Normal execution path is responsible for not accessing a
4306 * failed qc. libata core enforces the rule by returning NULL
4307 * from ata_qc_from_tag() for failed qcs.
4308 *
4309 * Old EH depends on ata_qc_complete() nullifying completion
4310 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
4311 * not synchronize with interrupt handler. Only PIO task is
4312 * taken care of.
4313 */
4314 if (ap->ops->error_handler) {
4315 WARN_ON(ap->flags & ATA_FLAG_FROZEN);
4316
4317 if (unlikely(qc->err_mask))
4318 qc->flags |= ATA_QCFLAG_FAILED;
4319
4320 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
4321 if (!ata_tag_internal(qc->tag)) {
4322 /* always fill result TF for failed qc */
4323 ap->ops->tf_read(ap, &qc->result_tf);
4324 ata_qc_schedule_eh(qc);
4325 return;
4326 }
4327 }
4328
4329 /* read result TF if requested */
4330 if (qc->flags & ATA_QCFLAG_RESULT_TF)
4331 ap->ops->tf_read(ap, &qc->result_tf);
4332
4333 __ata_qc_complete(qc);
4334 } else {
4335 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
4336 return;
4337
4338 /* read result TF if failed or requested */
4339 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
4340 ap->ops->tf_read(ap, &qc->result_tf);
4341
4342 __ata_qc_complete(qc);
4343 }
4344}
4345
4220static inline int ata_should_dma_map(struct ata_queued_cmd *qc) 4346static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
4221{ 4347{
4222 struct ata_port *ap = qc->ap; 4348 struct ata_port *ap = qc->ap;
@@ -4503,7 +4629,7 @@ idle_irq:
4503#ifdef ATA_IRQ_TRAP 4629#ifdef ATA_IRQ_TRAP
4504 if ((ap->stats.idle_irq % 1000) == 0) { 4630 if ((ap->stats.idle_irq % 1000) == 0) {
4505 ata_irq_ack(ap, 0); /* debug trap */ 4631 ata_irq_ack(ap, 0); /* debug trap */
4506 printk(KERN_WARNING "ata%d: irq trap\n", ap->id); 4632 ata_port_printk(ap, KERN_WARNING, "irq trap\n");
4507 return 1; 4633 return 1;
4508 } 4634 }
4509#endif 4635#endif
@@ -4556,32 +4682,168 @@ irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
4556 return IRQ_RETVAL(handled); 4682 return IRQ_RETVAL(handled);
4557} 4683}
4558 4684
4685/**
4686 * sata_scr_valid - test whether SCRs are accessible
4687 * @ap: ATA port to test SCR accessibility for
4688 *
4689 * Test whether SCRs are accessible for @ap.
4690 *
4691 * LOCKING:
4692 * None.
4693 *
4694 * RETURNS:
4695 * 1 if SCRs are accessible, 0 otherwise.
4696 */
4697int sata_scr_valid(struct ata_port *ap)
4698{
4699 return ap->cbl == ATA_CBL_SATA && ap->ops->scr_read;
4700}
4701
4702/**
4703 * sata_scr_read - read SCR register of the specified port
4704 * @ap: ATA port to read SCR for
4705 * @reg: SCR to read
4706 * @val: Place to store read value
4707 *
4708 * Read SCR register @reg of @ap into *@val. This function is
4709 * guaranteed to succeed if the cable type of the port is SATA
4710 * and the port implements ->scr_read.
4711 *
4712 * LOCKING:
4713 * None.
4714 *
4715 * RETURNS:
4716 * 0 on success, negative errno on failure.
4717 */
4718int sata_scr_read(struct ata_port *ap, int reg, u32 *val)
4719{
4720 if (sata_scr_valid(ap)) {
4721 *val = ap->ops->scr_read(ap, reg);
4722 return 0;
4723 }
4724 return -EOPNOTSUPP;
4725}
4726
4727/**
4728 * sata_scr_write - write SCR register of the specified port
4729 * @ap: ATA port to write SCR for
4730 * @reg: SCR to write
4731 * @val: value to write
4732 *
4733 * Write @val to SCR register @reg of @ap. This function is
4734 * guaranteed to succeed if the cable type of the port is SATA
4735 * and the port implements ->scr_read.
4736 *
4737 * LOCKING:
4738 * None.
4739 *
4740 * RETURNS:
4741 * 0 on success, negative errno on failure.
4742 */
4743int sata_scr_write(struct ata_port *ap, int reg, u32 val)
4744{
4745 if (sata_scr_valid(ap)) {
4746 ap->ops->scr_write(ap, reg, val);
4747 return 0;
4748 }
4749 return -EOPNOTSUPP;
4750}
4751
4752/**
4753 * sata_scr_write_flush - write SCR register of the specified port and flush
4754 * @ap: ATA port to write SCR for
4755 * @reg: SCR to write
4756 * @val: value to write
4757 *
4758 * This function is identical to sata_scr_write() except that this
4759 * function performs flush after writing to the register.
4760 *
4761 * LOCKING:
4762 * None.
4763 *
4764 * RETURNS:
4765 * 0 on success, negative errno on failure.
4766 */
4767int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val)
4768{
4769 if (sata_scr_valid(ap)) {
4770 ap->ops->scr_write(ap, reg, val);
4771 ap->ops->scr_read(ap, reg);
4772 return 0;
4773 }
4774 return -EOPNOTSUPP;
4775}
4776
4777/**
4778 * ata_port_online - test whether the given port is online
4779 * @ap: ATA port to test
4780 *
4781 * Test whether @ap is online. Note that this function returns 0
4782 * if online status of @ap cannot be obtained, so
4783 * ata_port_online(ap) != !ata_port_offline(ap).
4784 *
4785 * LOCKING:
4786 * None.
4787 *
4788 * RETURNS:
4789 * 1 if the port online status is available and online.
4790 */
4791int ata_port_online(struct ata_port *ap)
4792{
4793 u32 sstatus;
4794
4795 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) == 0x3)
4796 return 1;
4797 return 0;
4798}
4799
4800/**
4801 * ata_port_offline - test whether the given port is offline
4802 * @ap: ATA port to test
4803 *
4804 * Test whether @ap is offline. Note that this function returns
4805 * 0 if offline status of @ap cannot be obtained, so
4806 * ata_port_online(ap) != !ata_port_offline(ap).
4807 *
4808 * LOCKING:
4809 * None.
4810 *
4811 * RETURNS:
4812 * 1 if the port offline status is available and offline.
4813 */
4814int ata_port_offline(struct ata_port *ap)
4815{
4816 u32 sstatus;
4817
4818 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) != 0x3)
4819 return 1;
4820 return 0;
4821}
4559 4822
4560/* 4823/*
4561 * Execute a 'simple' command, that only consists of the opcode 'cmd' itself, 4824 * Execute a 'simple' command, that only consists of the opcode 'cmd' itself,
4562 * without filling any other registers 4825 * without filling any other registers
4563 */ 4826 */
4564static int ata_do_simple_cmd(struct ata_port *ap, struct ata_device *dev, 4827static int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
4565 u8 cmd)
4566{ 4828{
4567 struct ata_taskfile tf; 4829 struct ata_taskfile tf;
4568 int err; 4830 int err;
4569 4831
4570 ata_tf_init(ap, &tf, dev->devno); 4832 ata_tf_init(dev, &tf);
4571 4833
4572 tf.command = cmd; 4834 tf.command = cmd;
4573 tf.flags |= ATA_TFLAG_DEVICE; 4835 tf.flags |= ATA_TFLAG_DEVICE;
4574 tf.protocol = ATA_PROT_NODATA; 4836 tf.protocol = ATA_PROT_NODATA;
4575 4837
4576 err = ata_exec_internal(ap, dev, &tf, NULL, DMA_NONE, NULL, 0); 4838 err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
4577 if (err) 4839 if (err)
4578 printk(KERN_ERR "%s: ata command failed: %d\n", 4840 ata_dev_printk(dev, KERN_ERR, "%s: ata command failed: %d\n",
4579 __FUNCTION__, err); 4841 __FUNCTION__, err);
4580 4842
4581 return err; 4843 return err;
4582} 4844}
4583 4845
4584static int ata_flush_cache(struct ata_port *ap, struct ata_device *dev) 4846static int ata_flush_cache(struct ata_device *dev)
4585{ 4847{
4586 u8 cmd; 4848 u8 cmd;
4587 4849
@@ -4593,22 +4855,21 @@ static int ata_flush_cache(struct ata_port *ap, struct ata_device *dev)
4593 else 4855 else
4594 cmd = ATA_CMD_FLUSH; 4856 cmd = ATA_CMD_FLUSH;
4595 4857
4596 return ata_do_simple_cmd(ap, dev, cmd); 4858 return ata_do_simple_cmd(dev, cmd);
4597} 4859}
4598 4860
4599static int ata_standby_drive(struct ata_port *ap, struct ata_device *dev) 4861static int ata_standby_drive(struct ata_device *dev)
4600{ 4862{
4601 return ata_do_simple_cmd(ap, dev, ATA_CMD_STANDBYNOW1); 4863 return ata_do_simple_cmd(dev, ATA_CMD_STANDBYNOW1);
4602} 4864}
4603 4865
4604static int ata_start_drive(struct ata_port *ap, struct ata_device *dev) 4866static int ata_start_drive(struct ata_device *dev)
4605{ 4867{
4606 return ata_do_simple_cmd(ap, dev, ATA_CMD_IDLEIMMEDIATE); 4868 return ata_do_simple_cmd(dev, ATA_CMD_IDLEIMMEDIATE);
4607} 4869}
4608 4870
4609/** 4871/**
4610 * ata_device_resume - wakeup a previously suspended devices 4872 * ata_device_resume - wakeup a previously suspended devices
4611 * @ap: port the device is connected to
4612 * @dev: the device to resume 4873 * @dev: the device to resume
4613 * 4874 *
4614 * Kick the drive back into action, by sending it an idle immediate 4875 * Kick the drive back into action, by sending it an idle immediate
@@ -4616,39 +4877,42 @@ static int ata_start_drive(struct ata_port *ap, struct ata_device *dev)
4616 * and host. 4877 * and host.
4617 * 4878 *
4618 */ 4879 */
4619int ata_device_resume(struct ata_port *ap, struct ata_device *dev) 4880int ata_device_resume(struct ata_device *dev)
4620{ 4881{
4882 struct ata_port *ap = dev->ap;
4883
4621 if (ap->flags & ATA_FLAG_SUSPENDED) { 4884 if (ap->flags & ATA_FLAG_SUSPENDED) {
4622 struct ata_device *failed_dev; 4885 struct ata_device *failed_dev;
4623 ap->flags &= ~ATA_FLAG_SUSPENDED; 4886 ap->flags &= ~ATA_FLAG_SUSPENDED;
4624 while (ata_set_mode(ap, &failed_dev)) 4887 while (ata_set_mode(ap, &failed_dev))
4625 ata_dev_disable(ap, failed_dev); 4888 ata_dev_disable(failed_dev);
4626 } 4889 }
4627 if (!ata_dev_enabled(dev)) 4890 if (!ata_dev_enabled(dev))
4628 return 0; 4891 return 0;
4629 if (dev->class == ATA_DEV_ATA) 4892 if (dev->class == ATA_DEV_ATA)
4630 ata_start_drive(ap, dev); 4893 ata_start_drive(dev);
4631 4894
4632 return 0; 4895 return 0;
4633} 4896}
4634 4897
4635/** 4898/**
4636 * ata_device_suspend - prepare a device for suspend 4899 * ata_device_suspend - prepare a device for suspend
4637 * @ap: port the device is connected to
4638 * @dev: the device to suspend 4900 * @dev: the device to suspend
4639 * 4901 *
4640 * Flush the cache on the drive, if appropriate, then issue a 4902 * Flush the cache on the drive, if appropriate, then issue a
4641 * standbynow command. 4903 * standbynow command.
4642 */ 4904 */
4643int ata_device_suspend(struct ata_port *ap, struct ata_device *dev, pm_message_t state) 4905int ata_device_suspend(struct ata_device *dev, pm_message_t state)
4644{ 4906{
4907 struct ata_port *ap = dev->ap;
4908
4645 if (!ata_dev_enabled(dev)) 4909 if (!ata_dev_enabled(dev))
4646 return 0; 4910 return 0;
4647 if (dev->class == ATA_DEV_ATA) 4911 if (dev->class == ATA_DEV_ATA)
4648 ata_flush_cache(ap, dev); 4912 ata_flush_cache(dev);
4649 4913
4650 if (state.event != PM_EVENT_FREEZE) 4914 if (state.event != PM_EVENT_FREEZE)
4651 ata_standby_drive(ap, dev); 4915 ata_standby_drive(dev);
4652 ap->flags |= ATA_FLAG_SUSPENDED; 4916 ap->flags |= ATA_FLAG_SUSPENDED;
4653 return 0; 4917 return 0;
4654} 4918}
@@ -4776,7 +5040,6 @@ static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
4776 ap->udma_mask = ent->udma_mask; 5040 ap->udma_mask = ent->udma_mask;
4777 ap->flags |= ent->host_flags; 5041 ap->flags |= ent->host_flags;
4778 ap->ops = ent->port_ops; 5042 ap->ops = ent->port_ops;
4779 ap->cbl = ATA_CBL_NONE;
4780 ap->sata_spd_limit = UINT_MAX; 5043 ap->sata_spd_limit = UINT_MAX;
4781 ap->active_tag = ATA_TAG_POISON; 5044 ap->active_tag = ATA_TAG_POISON;
4782 ap->last_ctl = 0xFF; 5045 ap->last_ctl = 0xFF;
@@ -4784,8 +5047,14 @@ static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
4784 INIT_WORK(&ap->port_task, NULL, NULL); 5047 INIT_WORK(&ap->port_task, NULL, NULL);
4785 INIT_LIST_HEAD(&ap->eh_done_q); 5048 INIT_LIST_HEAD(&ap->eh_done_q);
4786 5049
5050 /* set cable type */
5051 ap->cbl = ATA_CBL_NONE;
5052 if (ap->flags & ATA_FLAG_SATA)
5053 ap->cbl = ATA_CBL_SATA;
5054
4787 for (i = 0; i < ATA_MAX_DEVICES; i++) { 5055 for (i = 0; i < ATA_MAX_DEVICES; i++) {
4788 struct ata_device *dev = &ap->device[i]; 5056 struct ata_device *dev = &ap->device[i];
5057 dev->ap = ap;
4789 dev->devno = i; 5058 dev->devno = i;
4790 dev->pio_mask = UINT_MAX; 5059 dev->pio_mask = UINT_MAX;
4791 dev->mwdma_mask = UINT_MAX; 5060 dev->mwdma_mask = UINT_MAX;
@@ -4909,18 +5178,18 @@ int ata_device_add(const struct ata_probe_ent *ent)
4909 (ap->pio_mask << ATA_SHIFT_PIO); 5178 (ap->pio_mask << ATA_SHIFT_PIO);
4910 5179
4911 /* print per-port info to dmesg */ 5180 /* print per-port info to dmesg */
4912 printk(KERN_INFO "ata%u: %cATA max %s cmd 0x%lX ctl 0x%lX " 5181 ata_port_printk(ap, KERN_INFO, "%cATA max %s cmd 0x%lX "
4913 "bmdma 0x%lX irq %lu\n", 5182 "ctl 0x%lX bmdma 0x%lX irq %lu\n",
4914 ap->id, 5183 ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
4915 ap->flags & ATA_FLAG_SATA ? 'S' : 'P', 5184 ata_mode_string(xfer_mode_mask),
4916 ata_mode_string(xfer_mode_mask), 5185 ap->ioaddr.cmd_addr,
4917 ap->ioaddr.cmd_addr, 5186 ap->ioaddr.ctl_addr,
4918 ap->ioaddr.ctl_addr, 5187 ap->ioaddr.bmdma_addr,
4919 ap->ioaddr.bmdma_addr, 5188 ent->irq);
4920 ent->irq);
4921 5189
4922 ata_chk_status(ap); 5190 ata_chk_status(ap);
4923 host_set->ops->irq_clear(ap); 5191 host_set->ops->irq_clear(ap);
5192 ata_eh_freeze_port(ap); /* freeze port before requesting IRQ */
4924 count++; 5193 count++;
4925 } 5194 }
4926 5195
@@ -4955,8 +5224,7 @@ int ata_device_add(const struct ata_probe_ent *ent)
4955 5224
4956 rc = scsi_add_host(ap->host, dev); 5225 rc = scsi_add_host(ap->host, dev);
4957 if (rc) { 5226 if (rc) {
4958 printk(KERN_ERR "ata%u: scsi_add_host failed\n", 5227 ata_port_printk(ap, KERN_ERR, "scsi_add_host failed\n");
4959 ap->id);
4960 /* FIXME: do something useful here */ 5228 /* FIXME: do something useful here */
4961 /* FIXME: handle unconditional calls to 5229 /* FIXME: handle unconditional calls to
4962 * scsi_scan_host and ata_host_remove, below, 5230 * scsi_scan_host and ata_host_remove, below,
@@ -5052,14 +5320,11 @@ void ata_host_set_remove(struct ata_host_set *host_set)
5052int ata_scsi_release(struct Scsi_Host *host) 5320int ata_scsi_release(struct Scsi_Host *host)
5053{ 5321{
5054 struct ata_port *ap = ata_shost_to_port(host); 5322 struct ata_port *ap = ata_shost_to_port(host);
5055 int i;
5056 5323
5057 DPRINTK("ENTER\n"); 5324 DPRINTK("ENTER\n");
5058 5325
5059 ap->ops->port_disable(ap); 5326 ap->ops->port_disable(ap);
5060 ata_host_remove(ap, 0); 5327 ata_host_remove(ap, 0);
5061 for (i = 0; i < ATA_MAX_DEVICES; i++)
5062 kfree(ap->device[i].id);
5063 5328
5064 DPRINTK("EXIT\n"); 5329 DPRINTK("EXIT\n");
5065 return 1; 5330 return 1;
@@ -5277,7 +5542,7 @@ EXPORT_SYMBOL_GPL(ata_device_add);
5277EXPORT_SYMBOL_GPL(ata_host_set_remove); 5542EXPORT_SYMBOL_GPL(ata_host_set_remove);
5278EXPORT_SYMBOL_GPL(ata_sg_init); 5543EXPORT_SYMBOL_GPL(ata_sg_init);
5279EXPORT_SYMBOL_GPL(ata_sg_init_one); 5544EXPORT_SYMBOL_GPL(ata_sg_init_one);
5280EXPORT_SYMBOL_GPL(__ata_qc_complete); 5545EXPORT_SYMBOL_GPL(ata_qc_complete);
5281EXPORT_SYMBOL_GPL(ata_qc_issue_prot); 5546EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
5282EXPORT_SYMBOL_GPL(ata_tf_load); 5547EXPORT_SYMBOL_GPL(ata_tf_load);
5283EXPORT_SYMBOL_GPL(ata_tf_read); 5548EXPORT_SYMBOL_GPL(ata_tf_read);
@@ -5299,8 +5564,13 @@ EXPORT_SYMBOL_GPL(ata_bmdma_start);
5299EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear); 5564EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
5300EXPORT_SYMBOL_GPL(ata_bmdma_status); 5565EXPORT_SYMBOL_GPL(ata_bmdma_status);
5301EXPORT_SYMBOL_GPL(ata_bmdma_stop); 5566EXPORT_SYMBOL_GPL(ata_bmdma_stop);
5567EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
5568EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
5569EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
5570EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
5571EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
5302EXPORT_SYMBOL_GPL(ata_port_probe); 5572EXPORT_SYMBOL_GPL(ata_port_probe);
5303EXPORT_SYMBOL_GPL(ata_set_sata_spd); 5573EXPORT_SYMBOL_GPL(sata_set_spd);
5304EXPORT_SYMBOL_GPL(sata_phy_reset); 5574EXPORT_SYMBOL_GPL(sata_phy_reset);
5305EXPORT_SYMBOL_GPL(__sata_phy_reset); 5575EXPORT_SYMBOL_GPL(__sata_phy_reset);
5306EXPORT_SYMBOL_GPL(ata_bus_reset); 5576EXPORT_SYMBOL_GPL(ata_bus_reset);
@@ -5323,6 +5593,12 @@ EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
5323EXPORT_SYMBOL_GPL(ata_scsi_slave_config); 5593EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
5324EXPORT_SYMBOL_GPL(ata_scsi_release); 5594EXPORT_SYMBOL_GPL(ata_scsi_release);
5325EXPORT_SYMBOL_GPL(ata_host_intr); 5595EXPORT_SYMBOL_GPL(ata_host_intr);
5596EXPORT_SYMBOL_GPL(sata_scr_valid);
5597EXPORT_SYMBOL_GPL(sata_scr_read);
5598EXPORT_SYMBOL_GPL(sata_scr_write);
5599EXPORT_SYMBOL_GPL(sata_scr_write_flush);
5600EXPORT_SYMBOL_GPL(ata_port_online);
5601EXPORT_SYMBOL_GPL(ata_port_offline);
5326EXPORT_SYMBOL_GPL(ata_id_string); 5602EXPORT_SYMBOL_GPL(ata_id_string);
5327EXPORT_SYMBOL_GPL(ata_id_c_string); 5603EXPORT_SYMBOL_GPL(ata_id_c_string);
5328EXPORT_SYMBOL_GPL(ata_scsi_simulate); 5604EXPORT_SYMBOL_GPL(ata_scsi_simulate);
@@ -5348,7 +5624,12 @@ EXPORT_SYMBOL_GPL(ata_device_resume);
5348EXPORT_SYMBOL_GPL(ata_scsi_device_suspend); 5624EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
5349EXPORT_SYMBOL_GPL(ata_scsi_device_resume); 5625EXPORT_SYMBOL_GPL(ata_scsi_device_resume);
5350 5626
5351EXPORT_SYMBOL_GPL(ata_scsi_error);
5352EXPORT_SYMBOL_GPL(ata_eng_timeout); 5627EXPORT_SYMBOL_GPL(ata_eng_timeout);
5628EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
5629EXPORT_SYMBOL_GPL(ata_port_abort);
5630EXPORT_SYMBOL_GPL(ata_port_freeze);
5631EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
5632EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
5353EXPORT_SYMBOL_GPL(ata_eh_qc_complete); 5633EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
5354EXPORT_SYMBOL_GPL(ata_eh_qc_retry); 5634EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
5635EXPORT_SYMBOL_GPL(ata_do_eh);
diff --git a/drivers/scsi/libata-eh.c b/drivers/scsi/libata-eh.c
index 16db62211716..e401f353f848 100644
--- a/drivers/scsi/libata-eh.c
+++ b/drivers/scsi/libata-eh.c
@@ -44,6 +44,53 @@
44 44
45#include "libata.h" 45#include "libata.h"
46 46
47static void __ata_port_freeze(struct ata_port *ap);
48
49static void ata_ering_record(struct ata_ering *ering, int is_io,
50 unsigned int err_mask)
51{
52 struct ata_ering_entry *ent;
53
54 WARN_ON(!err_mask);
55
56 ering->cursor++;
57 ering->cursor %= ATA_ERING_SIZE;
58
59 ent = &ering->ring[ering->cursor];
60 ent->is_io = is_io;
61 ent->err_mask = err_mask;
62 ent->timestamp = get_jiffies_64();
63}
64
65static struct ata_ering_entry * ata_ering_top(struct ata_ering *ering)
66{
67 struct ata_ering_entry *ent = &ering->ring[ering->cursor];
68 if (!ent->err_mask)
69 return NULL;
70 return ent;
71}
72
73static int ata_ering_map(struct ata_ering *ering,
74 int (*map_fn)(struct ata_ering_entry *, void *),
75 void *arg)
76{
77 int idx, rc = 0;
78 struct ata_ering_entry *ent;
79
80 idx = ering->cursor;
81 do {
82 ent = &ering->ring[idx];
83 if (!ent->err_mask)
84 break;
85 rc = map_fn(ent, arg);
86 if (rc)
87 break;
88 idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE;
89 } while (idx != ering->cursor);
90
91 return rc;
92}
93
47/** 94/**
48 * ata_scsi_timed_out - SCSI layer time out callback 95 * ata_scsi_timed_out - SCSI layer time out callback
49 * @cmd: timed out SCSI command 96 * @cmd: timed out SCSI command
@@ -55,6 +102,8 @@
55 * from finishing it by setting EH_SCHEDULED and return 102 * from finishing it by setting EH_SCHEDULED and return
56 * EH_NOT_HANDLED. 103 * EH_NOT_HANDLED.
57 * 104 *
105 * TODO: kill this function once old EH is gone.
106 *
58 * LOCKING: 107 * LOCKING:
59 * Called from timer context 108 * Called from timer context
60 * 109 *
@@ -67,10 +116,16 @@ enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
67 struct ata_port *ap = ata_shost_to_port(host); 116 struct ata_port *ap = ata_shost_to_port(host);
68 unsigned long flags; 117 unsigned long flags;
69 struct ata_queued_cmd *qc; 118 struct ata_queued_cmd *qc;
70 enum scsi_eh_timer_return ret = EH_HANDLED; 119 enum scsi_eh_timer_return ret;
71 120
72 DPRINTK("ENTER\n"); 121 DPRINTK("ENTER\n");
73 122
123 if (ap->ops->error_handler) {
124 ret = EH_NOT_HANDLED;
125 goto out;
126 }
127
128 ret = EH_HANDLED;
74 spin_lock_irqsave(&ap->host_set->lock, flags); 129 spin_lock_irqsave(&ap->host_set->lock, flags);
75 qc = ata_qc_from_tag(ap, ap->active_tag); 130 qc = ata_qc_from_tag(ap, ap->active_tag);
76 if (qc) { 131 if (qc) {
@@ -81,6 +136,7 @@ enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
81 } 136 }
82 spin_unlock_irqrestore(&ap->host_set->lock, flags); 137 spin_unlock_irqrestore(&ap->host_set->lock, flags);
83 138
139 out:
84 DPRINTK("EXIT, ret=%d\n", ret); 140 DPRINTK("EXIT, ret=%d\n", ret);
85 return ret; 141 return ret;
86} 142}
@@ -100,21 +156,141 @@ enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
100void ata_scsi_error(struct Scsi_Host *host) 156void ata_scsi_error(struct Scsi_Host *host)
101{ 157{
102 struct ata_port *ap = ata_shost_to_port(host); 158 struct ata_port *ap = ata_shost_to_port(host);
159 spinlock_t *hs_lock = &ap->host_set->lock;
160 int i, repeat_cnt = ATA_EH_MAX_REPEAT;
161 unsigned long flags;
103 162
104 DPRINTK("ENTER\n"); 163 DPRINTK("ENTER\n");
105 164
106 /* synchronize with IRQ handler and port task */ 165 /* synchronize with port task */
107 spin_unlock_wait(&ap->host_set->lock);
108 ata_port_flush_task(ap); 166 ata_port_flush_task(ap);
109 167
110 WARN_ON(ata_qc_from_tag(ap, ap->active_tag) == NULL); 168 /* synchronize with host_set lock and sort out timeouts */
169
170 /* For new EH, all qcs are finished in one of three ways -
171 * normal completion, error completion, and SCSI timeout.
172 * Both cmpletions can race against SCSI timeout. When normal
173 * completion wins, the qc never reaches EH. When error
174 * completion wins, the qc has ATA_QCFLAG_FAILED set.
175 *
176 * When SCSI timeout wins, things are a bit more complex.
177 * Normal or error completion can occur after the timeout but
178 * before this point. In such cases, both types of
179 * completions are honored. A scmd is determined to have
180 * timed out iff its associated qc is active and not failed.
181 */
182 if (ap->ops->error_handler) {
183 struct scsi_cmnd *scmd, *tmp;
184 int nr_timedout = 0;
185
186 spin_lock_irqsave(hs_lock, flags);
187
188 list_for_each_entry_safe(scmd, tmp, &host->eh_cmd_q, eh_entry) {
189 struct ata_queued_cmd *qc;
190
191 for (i = 0; i < ATA_MAX_QUEUE; i++) {
192 qc = __ata_qc_from_tag(ap, i);
193 if (qc->flags & ATA_QCFLAG_ACTIVE &&
194 qc->scsicmd == scmd)
195 break;
196 }
197
198 if (i < ATA_MAX_QUEUE) {
199 /* the scmd has an associated qc */
200 if (!(qc->flags & ATA_QCFLAG_FAILED)) {
201 /* which hasn't failed yet, timeout */
202 qc->err_mask |= AC_ERR_TIMEOUT;
203 qc->flags |= ATA_QCFLAG_FAILED;
204 nr_timedout++;
205 }
206 } else {
207 /* Normal completion occurred after
208 * SCSI timeout but before this point.
209 * Successfully complete it.
210 */
211 scmd->retries = scmd->allowed;
212 scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
213 }
214 }
111 215
112 ap->ops->eng_timeout(ap); 216 /* If we have timed out qcs. They belong to EH from
217 * this point but the state of the controller is
218 * unknown. Freeze the port to make sure the IRQ
219 * handler doesn't diddle with those qcs. This must
220 * be done atomically w.r.t. setting QCFLAG_FAILED.
221 */
222 if (nr_timedout)
223 __ata_port_freeze(ap);
113 224
225 spin_unlock_irqrestore(hs_lock, flags);
226 } else
227 spin_unlock_wait(hs_lock);
228
229 repeat:
230 /* invoke error handler */
231 if (ap->ops->error_handler) {
232 /* fetch & clear EH info */
233 spin_lock_irqsave(hs_lock, flags);
234
235 memset(&ap->eh_context, 0, sizeof(ap->eh_context));
236 ap->eh_context.i = ap->eh_info;
237 memset(&ap->eh_info, 0, sizeof(ap->eh_info));
238
239 ap->flags &= ~ATA_FLAG_EH_PENDING;
240
241 spin_unlock_irqrestore(hs_lock, flags);
242
243 /* invoke EH */
244 ap->ops->error_handler(ap);
245
246 /* Exception might have happend after ->error_handler
247 * recovered the port but before this point. Repeat
248 * EH in such case.
249 */
250 spin_lock_irqsave(hs_lock, flags);
251
252 if (ap->flags & ATA_FLAG_EH_PENDING) {
253 if (--repeat_cnt) {
254 ata_port_printk(ap, KERN_INFO,
255 "EH pending after completion, "
256 "repeating EH (cnt=%d)\n", repeat_cnt);
257 spin_unlock_irqrestore(hs_lock, flags);
258 goto repeat;
259 }
260 ata_port_printk(ap, KERN_ERR, "EH pending after %d "
261 "tries, giving up\n", ATA_EH_MAX_REPEAT);
262 }
263
264 /* this run is complete, make sure EH info is clear */
265 memset(&ap->eh_info, 0, sizeof(ap->eh_info));
266
267 /* Clear host_eh_scheduled while holding hs_lock such
268 * that if exception occurs after this point but
269 * before EH completion, SCSI midlayer will
270 * re-initiate EH.
271 */
272 host->host_eh_scheduled = 0;
273
274 spin_unlock_irqrestore(hs_lock, flags);
275 } else {
276 WARN_ON(ata_qc_from_tag(ap, ap->active_tag) == NULL);
277 ap->ops->eng_timeout(ap);
278 }
279
280 /* finish or retry handled scmd's and clean up */
114 WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q)); 281 WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q));
115 282
116 scsi_eh_flush_done_q(&ap->eh_done_q); 283 scsi_eh_flush_done_q(&ap->eh_done_q);
117 284
285 /* clean up */
286 spin_lock_irqsave(hs_lock, flags);
287
288 if (ap->flags & ATA_FLAG_RECOVERED)
289 ata_port_printk(ap, KERN_INFO, "EH complete\n");
290 ap->flags &= ~ATA_FLAG_RECOVERED;
291
292 spin_unlock_irqrestore(hs_lock, flags);
293
118 DPRINTK("EXIT\n"); 294 DPRINTK("EXIT\n");
119} 295}
120 296
@@ -133,6 +309,8 @@ void ata_scsi_error(struct Scsi_Host *host)
133 * an interrupt was not delivered to the driver, even though the 309 * an interrupt was not delivered to the driver, even though the
134 * transaction completed successfully. 310 * transaction completed successfully.
135 * 311 *
312 * TODO: kill this function once old EH is gone.
313 *
136 * LOCKING: 314 * LOCKING:
137 * Inherited from SCSI layer (none, can sleep) 315 * Inherited from SCSI layer (none, can sleep)
138 */ 316 */
@@ -167,8 +345,9 @@ static void ata_qc_timeout(struct ata_queued_cmd *qc)
167 /* ack bmdma irq events */ 345 /* ack bmdma irq events */
168 ap->ops->irq_clear(ap); 346 ap->ops->irq_clear(ap);
169 347
170 printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x host_stat 0x%x\n", 348 ata_dev_printk(qc->dev, KERN_ERR, "command 0x%x timeout, "
171 ap->id, qc->tf.command, drv_stat, host_stat); 349 "stat 0x%x host_stat 0x%x\n",
350 qc->tf.command, drv_stat, host_stat);
172 351
173 /* complete taskfile transaction */ 352 /* complete taskfile transaction */
174 qc->err_mask |= AC_ERR_TIMEOUT; 353 qc->err_mask |= AC_ERR_TIMEOUT;
@@ -197,6 +376,8 @@ static void ata_qc_timeout(struct ata_queued_cmd *qc)
197 * an interrupt was not delivered to the driver, even though the 376 * an interrupt was not delivered to the driver, even though the
198 * transaction completed successfully. 377 * transaction completed successfully.
199 * 378 *
379 * TODO: kill this function once old EH is gone.
380 *
200 * LOCKING: 381 * LOCKING:
201 * Inherited from SCSI layer (none, can sleep) 382 * Inherited from SCSI layer (none, can sleep)
202 */ 383 */
@@ -209,6 +390,190 @@ void ata_eng_timeout(struct ata_port *ap)
209 DPRINTK("EXIT\n"); 390 DPRINTK("EXIT\n");
210} 391}
211 392
393/**
394 * ata_qc_schedule_eh - schedule qc for error handling
395 * @qc: command to schedule error handling for
396 *
397 * Schedule error handling for @qc. EH will kick in as soon as
398 * other commands are drained.
399 *
400 * LOCKING:
401 * spin_lock_irqsave(host_set lock)
402 */
403void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
404{
405 struct ata_port *ap = qc->ap;
406
407 WARN_ON(!ap->ops->error_handler);
408
409 qc->flags |= ATA_QCFLAG_FAILED;
410 qc->ap->flags |= ATA_FLAG_EH_PENDING;
411
412 /* The following will fail if timeout has already expired.
413 * ata_scsi_error() takes care of such scmds on EH entry.
414 * Note that ATA_QCFLAG_FAILED is unconditionally set after
415 * this function completes.
416 */
417 scsi_req_abort_cmd(qc->scsicmd);
418}
419
420/**
421 * ata_port_schedule_eh - schedule error handling without a qc
422 * @ap: ATA port to schedule EH for
423 *
424 * Schedule error handling for @ap. EH will kick in as soon as
425 * all commands are drained.
426 *
427 * LOCKING:
428 * spin_lock_irqsave(host_set lock)
429 */
430void ata_port_schedule_eh(struct ata_port *ap)
431{
432 WARN_ON(!ap->ops->error_handler);
433
434 ap->flags |= ATA_FLAG_EH_PENDING;
435 ata_schedule_scsi_eh(ap->host);
436
437 DPRINTK("port EH scheduled\n");
438}
439
440/**
441 * ata_port_abort - abort all qc's on the port
442 * @ap: ATA port to abort qc's for
443 *
444 * Abort all active qc's of @ap and schedule EH.
445 *
446 * LOCKING:
447 * spin_lock_irqsave(host_set lock)
448 *
449 * RETURNS:
450 * Number of aborted qc's.
451 */
452int ata_port_abort(struct ata_port *ap)
453{
454 int tag, nr_aborted = 0;
455
456 WARN_ON(!ap->ops->error_handler);
457
458 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
459 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
460
461 if (qc) {
462 qc->flags |= ATA_QCFLAG_FAILED;
463 ata_qc_complete(qc);
464 nr_aborted++;
465 }
466 }
467
468 if (!nr_aborted)
469 ata_port_schedule_eh(ap);
470
471 return nr_aborted;
472}
473
474/**
475 * __ata_port_freeze - freeze port
476 * @ap: ATA port to freeze
477 *
478 * This function is called when HSM violation or some other
479 * condition disrupts normal operation of the port. Frozen port
480 * is not allowed to perform any operation until the port is
481 * thawed, which usually follows a successful reset.
482 *
483 * ap->ops->freeze() callback can be used for freezing the port
484 * hardware-wise (e.g. mask interrupt and stop DMA engine). If a
485 * port cannot be frozen hardware-wise, the interrupt handler
486 * must ack and clear interrupts unconditionally while the port
487 * is frozen.
488 *
489 * LOCKING:
490 * spin_lock_irqsave(host_set lock)
491 */
492static void __ata_port_freeze(struct ata_port *ap)
493{
494 WARN_ON(!ap->ops->error_handler);
495
496 if (ap->ops->freeze)
497 ap->ops->freeze(ap);
498
499 ap->flags |= ATA_FLAG_FROZEN;
500
501 DPRINTK("ata%u port frozen\n", ap->id);
502}
503
504/**
505 * ata_port_freeze - abort & freeze port
506 * @ap: ATA port to freeze
507 *
508 * Abort and freeze @ap.
509 *
510 * LOCKING:
511 * spin_lock_irqsave(host_set lock)
512 *
513 * RETURNS:
514 * Number of aborted commands.
515 */
516int ata_port_freeze(struct ata_port *ap)
517{
518 int nr_aborted;
519
520 WARN_ON(!ap->ops->error_handler);
521
522 nr_aborted = ata_port_abort(ap);
523 __ata_port_freeze(ap);
524
525 return nr_aborted;
526}
527
528/**
529 * ata_eh_freeze_port - EH helper to freeze port
530 * @ap: ATA port to freeze
531 *
532 * Freeze @ap.
533 *
534 * LOCKING:
535 * None.
536 */
537void ata_eh_freeze_port(struct ata_port *ap)
538{
539 unsigned long flags;
540
541 if (!ap->ops->error_handler)
542 return;
543
544 spin_lock_irqsave(&ap->host_set->lock, flags);
545 __ata_port_freeze(ap);
546 spin_unlock_irqrestore(&ap->host_set->lock, flags);
547}
548
549/**
550 * ata_port_thaw_port - EH helper to thaw port
551 * @ap: ATA port to thaw
552 *
553 * Thaw frozen port @ap.
554 *
555 * LOCKING:
556 * None.
557 */
558void ata_eh_thaw_port(struct ata_port *ap)
559{
560 unsigned long flags;
561
562 if (!ap->ops->error_handler)
563 return;
564
565 spin_lock_irqsave(&ap->host_set->lock, flags);
566
567 ap->flags &= ~ATA_FLAG_FROZEN;
568
569 if (ap->ops->thaw)
570 ap->ops->thaw(ap);
571
572 spin_unlock_irqrestore(&ap->host_set->lock, flags);
573
574 DPRINTK("ata%u port thawed\n", ap->id);
575}
576
212static void ata_eh_scsidone(struct scsi_cmnd *scmd) 577static void ata_eh_scsidone(struct scsi_cmnd *scmd)
213{ 578{
214 /* nada */ 579 /* nada */
@@ -261,3 +626,778 @@ void ata_eh_qc_retry(struct ata_queued_cmd *qc)
261 scmd->retries--; 626 scmd->retries--;
262 __ata_eh_qc_complete(qc); 627 __ata_eh_qc_complete(qc);
263} 628}
629
630/**
631 * ata_eh_about_to_do - about to perform eh_action
632 * @ap: target ATA port
633 * @action: action about to be performed
634 *
635 * Called just before performing EH actions to clear related bits
636 * in @ap->eh_info such that eh actions are not unnecessarily
637 * repeated.
638 *
639 * LOCKING:
640 * None.
641 */
642static void ata_eh_about_to_do(struct ata_port *ap, unsigned int action)
643{
644 unsigned long flags;
645
646 spin_lock_irqsave(&ap->host_set->lock, flags);
647 ap->eh_info.action &= ~action;
648 ap->flags |= ATA_FLAG_RECOVERED;
649 spin_unlock_irqrestore(&ap->host_set->lock, flags);
650}
651
652/**
653 * ata_err_string - convert err_mask to descriptive string
654 * @err_mask: error mask to convert to string
655 *
656 * Convert @err_mask to descriptive string. Errors are
657 * prioritized according to severity and only the most severe
658 * error is reported.
659 *
660 * LOCKING:
661 * None.
662 *
663 * RETURNS:
664 * Descriptive string for @err_mask
665 */
666static const char * ata_err_string(unsigned int err_mask)
667{
668 if (err_mask & AC_ERR_HOST_BUS)
669 return "host bus error";
670 if (err_mask & AC_ERR_ATA_BUS)
671 return "ATA bus error";
672 if (err_mask & AC_ERR_TIMEOUT)
673 return "timeout";
674 if (err_mask & AC_ERR_HSM)
675 return "HSM violation";
676 if (err_mask & AC_ERR_SYSTEM)
677 return "internal error";
678 if (err_mask & AC_ERR_MEDIA)
679 return "media error";
680 if (err_mask & AC_ERR_INVALID)
681 return "invalid argument";
682 if (err_mask & AC_ERR_DEV)
683 return "device error";
684 return "unknown error";
685}
686
687/**
688 * atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
689 * @dev: device to perform REQUEST_SENSE to
690 * @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
691 *
692 * Perform ATAPI REQUEST_SENSE after the device reported CHECK
693 * SENSE. This function is EH helper.
694 *
695 * LOCKING:
696 * Kernel thread context (may sleep).
697 *
698 * RETURNS:
699 * 0 on success, AC_ERR_* mask on failure
700 */
701static unsigned int atapi_eh_request_sense(struct ata_device *dev,
702 unsigned char *sense_buf)
703{
704 struct ata_port *ap = dev->ap;
705 struct ata_taskfile tf;
706 u8 cdb[ATAPI_CDB_LEN];
707
708 DPRINTK("ATAPI request sense\n");
709
710 ata_tf_init(dev, &tf);
711
712 /* FIXME: is this needed? */
713 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
714
715 /* XXX: why tf_read here? */
716 ap->ops->tf_read(ap, &tf);
717
718 /* fill these in, for the case where they are -not- overwritten */
719 sense_buf[0] = 0x70;
720 sense_buf[2] = tf.feature >> 4;
721
722 memset(cdb, 0, ATAPI_CDB_LEN);
723 cdb[0] = REQUEST_SENSE;
724 cdb[4] = SCSI_SENSE_BUFFERSIZE;
725
726 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
727 tf.command = ATA_CMD_PACKET;
728
729 /* is it pointless to prefer PIO for "safety reasons"? */
730 if (ap->flags & ATA_FLAG_PIO_DMA) {
731 tf.protocol = ATA_PROT_ATAPI_DMA;
732 tf.feature |= ATAPI_PKT_DMA;
733 } else {
734 tf.protocol = ATA_PROT_ATAPI;
735 tf.lbam = (8 * 1024) & 0xff;
736 tf.lbah = (8 * 1024) >> 8;
737 }
738
739 return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
740 sense_buf, SCSI_SENSE_BUFFERSIZE);
741}
742
743/**
744 * ata_eh_analyze_serror - analyze SError for a failed port
745 * @ap: ATA port to analyze SError for
746 *
747 * Analyze SError if available and further determine cause of
748 * failure.
749 *
750 * LOCKING:
751 * None.
752 */
753static void ata_eh_analyze_serror(struct ata_port *ap)
754{
755 struct ata_eh_context *ehc = &ap->eh_context;
756 u32 serror = ehc->i.serror;
757 unsigned int err_mask = 0, action = 0;
758
759 if (serror & SERR_PERSISTENT) {
760 err_mask |= AC_ERR_ATA_BUS;
761 action |= ATA_EH_HARDRESET;
762 }
763 if (serror &
764 (SERR_DATA_RECOVERED | SERR_COMM_RECOVERED | SERR_DATA)) {
765 err_mask |= AC_ERR_ATA_BUS;
766 action |= ATA_EH_SOFTRESET;
767 }
768 if (serror & SERR_PROTOCOL) {
769 err_mask |= AC_ERR_HSM;
770 action |= ATA_EH_SOFTRESET;
771 }
772 if (serror & SERR_INTERNAL) {
773 err_mask |= AC_ERR_SYSTEM;
774 action |= ATA_EH_SOFTRESET;
775 }
776 if (serror & (SERR_PHYRDY_CHG | SERR_DEV_XCHG)) {
777 err_mask |= AC_ERR_ATA_BUS;
778 action |= ATA_EH_HARDRESET;
779 }
780
781 ehc->i.err_mask |= err_mask;
782 ehc->i.action |= action;
783}
784
785/**
786 * ata_eh_analyze_tf - analyze taskfile of a failed qc
787 * @qc: qc to analyze
788 * @tf: Taskfile registers to analyze
789 *
790 * Analyze taskfile of @qc and further determine cause of
791 * failure. This function also requests ATAPI sense data if
792 * avaliable.
793 *
794 * LOCKING:
795 * Kernel thread context (may sleep).
796 *
797 * RETURNS:
798 * Determined recovery action
799 */
800static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
801 const struct ata_taskfile *tf)
802{
803 unsigned int tmp, action = 0;
804 u8 stat = tf->command, err = tf->feature;
805
806 if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) {
807 qc->err_mask |= AC_ERR_HSM;
808 return ATA_EH_SOFTRESET;
809 }
810
811 if (!(qc->err_mask & AC_ERR_DEV))
812 return 0;
813
814 switch (qc->dev->class) {
815 case ATA_DEV_ATA:
816 if (err & ATA_ICRC)
817 qc->err_mask |= AC_ERR_ATA_BUS;
818 if (err & ATA_UNC)
819 qc->err_mask |= AC_ERR_MEDIA;
820 if (err & ATA_IDNF)
821 qc->err_mask |= AC_ERR_INVALID;
822 break;
823
824 case ATA_DEV_ATAPI:
825 tmp = atapi_eh_request_sense(qc->dev,
826 qc->scsicmd->sense_buffer);
827 if (!tmp) {
828 /* ATA_QCFLAG_SENSE_VALID is used to tell
829 * atapi_qc_complete() that sense data is
830 * already valid.
831 *
832 * TODO: interpret sense data and set
833 * appropriate err_mask.
834 */
835 qc->flags |= ATA_QCFLAG_SENSE_VALID;
836 } else
837 qc->err_mask |= tmp;
838 }
839
840 if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS))
841 action |= ATA_EH_SOFTRESET;
842
843 return action;
844}
845
846static int ata_eh_categorize_ering_entry(struct ata_ering_entry *ent)
847{
848 if (ent->err_mask & (AC_ERR_ATA_BUS | AC_ERR_TIMEOUT))
849 return 1;
850
851 if (ent->is_io) {
852 if (ent->err_mask & AC_ERR_HSM)
853 return 1;
854 if ((ent->err_mask &
855 (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV)
856 return 2;
857 }
858
859 return 0;
860}
861
862struct speed_down_needed_arg {
863 u64 since;
864 int nr_errors[3];
865};
866
867static int speed_down_needed_cb(struct ata_ering_entry *ent, void *void_arg)
868{
869 struct speed_down_needed_arg *arg = void_arg;
870
871 if (ent->timestamp < arg->since)
872 return -1;
873
874 arg->nr_errors[ata_eh_categorize_ering_entry(ent)]++;
875 return 0;
876}
877
878/**
879 * ata_eh_speed_down_needed - Determine wheter speed down is necessary
880 * @dev: Device of interest
881 *
882 * This function examines error ring of @dev and determines
883 * whether speed down is necessary. Speed down is necessary if
884 * there have been more than 3 of Cat-1 errors or 10 of Cat-2
885 * errors during last 15 minutes.
886 *
887 * Cat-1 errors are ATA_BUS, TIMEOUT for any command and HSM
888 * violation for known supported commands.
889 *
890 * Cat-2 errors are unclassified DEV error for known supported
891 * command.
892 *
893 * LOCKING:
894 * Inherited from caller.
895 *
896 * RETURNS:
897 * 1 if speed down is necessary, 0 otherwise
898 */
899static int ata_eh_speed_down_needed(struct ata_device *dev)
900{
901 const u64 interval = 15LLU * 60 * HZ;
902 static const int err_limits[3] = { -1, 3, 10 };
903 struct speed_down_needed_arg arg;
904 struct ata_ering_entry *ent;
905 int err_cat;
906 u64 j64;
907
908 ent = ata_ering_top(&dev->ering);
909 if (!ent)
910 return 0;
911
912 err_cat = ata_eh_categorize_ering_entry(ent);
913 if (err_cat == 0)
914 return 0;
915
916 memset(&arg, 0, sizeof(arg));
917
918 j64 = get_jiffies_64();
919 if (j64 >= interval)
920 arg.since = j64 - interval;
921 else
922 arg.since = 0;
923
924 ata_ering_map(&dev->ering, speed_down_needed_cb, &arg);
925
926 return arg.nr_errors[err_cat] > err_limits[err_cat];
927}
928
929/**
930 * ata_eh_speed_down - record error and speed down if necessary
931 * @dev: Failed device
932 * @is_io: Did the device fail during normal IO?
933 * @err_mask: err_mask of the error
934 *
935 * Record error and examine error history to determine whether
936 * adjusting transmission speed is necessary. It also sets
937 * transmission limits appropriately if such adjustment is
938 * necessary.
939 *
940 * LOCKING:
941 * Kernel thread context (may sleep).
942 *
943 * RETURNS:
944 * 0 on success, -errno otherwise
945 */
946static int ata_eh_speed_down(struct ata_device *dev, int is_io,
947 unsigned int err_mask)
948{
949 if (!err_mask)
950 return 0;
951
952 /* record error and determine whether speed down is necessary */
953 ata_ering_record(&dev->ering, is_io, err_mask);
954
955 if (!ata_eh_speed_down_needed(dev))
956 return 0;
957
958 /* speed down SATA link speed if possible */
959 if (sata_down_spd_limit(dev->ap) == 0)
960 return ATA_EH_HARDRESET;
961
962 /* lower transfer mode */
963 if (ata_down_xfermask_limit(dev, 0) == 0)
964 return ATA_EH_SOFTRESET;
965
966 ata_dev_printk(dev, KERN_ERR,
967 "speed down requested but no transfer mode left\n");
968 return 0;
969}
970
/**
 *	ata_eh_autopsy - analyze error and determine recovery action
 *	@ap: ATA port to perform autopsy on
 *
 *	Analyze why @ap failed and determine which recovery action is
 *	needed.  This function also sets more detailed AC_ERR_* values
 *	and fills sense data for ATAPI CHECK SENSE.
 *
 *	On exit, ehc->i.dev points at the last failed device (or NULL)
 *	and ehc->i.action carries the accumulated recovery actions.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_eh_autopsy(struct ata_port *ap)
{
	struct ata_eh_context *ehc = &ap->eh_context;
	unsigned int action = ehc->i.action;
	struct ata_device *failed_dev = NULL;
	unsigned int all_err_mask = 0;
	int tag, is_io = 0;
	u32 serror;
	int rc;

	DPRINTK("ENTER\n");

	/* obtain and analyze SError */
	rc = sata_scr_read(ap, SCR_ERROR, &serror);
	if (rc == 0) {
		ehc->i.serror |= serror;
		ata_eh_analyze_serror(ap);
	} else if (rc != -EOPNOTSUPP)
		/* SCR read failed although the port claims SCR support;
		 * the link is in an unknown state - force hardreset.
		 */
		action |= ATA_EH_HARDRESET;

	/* any real error trumps AC_ERR_OTHER */
	if (ehc->i.err_mask & ~AC_ERR_OTHER)
		ehc->i.err_mask &= ~AC_ERR_OTHER;

	all_err_mask |= ehc->i.err_mask;

	/* examine every failed qc on this port */
	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);

		if (!(qc->flags & ATA_QCFLAG_FAILED))
			continue;

		/* inherit upper level err_mask */
		qc->err_mask |= ehc->i.err_mask;

		/* a timed out command needs at least a softreset to get
		 * the device back to a known state
		 */
		if (qc->err_mask & AC_ERR_TIMEOUT)
			action |= ATA_EH_SOFTRESET;

		/* analyze TF */
		action |= ata_eh_analyze_tf(qc, &qc->result_tf);

		/* DEV errors are probably spurious in case of ATA_BUS error */
		if (qc->err_mask & AC_ERR_ATA_BUS)
			qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA |
					  AC_ERR_INVALID);

		/* any real error trumps unknown error */
		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;

		/* SENSE_VALID trumps dev/unknown error and revalidation */
		if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
			qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);
			action &= ~ATA_EH_REVALIDATE;
		}

		/* accumulate error info */
		failed_dev = qc->dev;
		all_err_mask |= qc->err_mask;
		if (qc->flags & ATA_QCFLAG_IO)
			is_io = 1;
	}

	/* speed down iff command was in progress */
	if (failed_dev)
		action |= ata_eh_speed_down(failed_dev, is_io, all_err_mask);

	/* any accumulated error forces revalidation */
	if (all_err_mask)
		action |= ATA_EH_REVALIDATE;

	ehc->i.dev = failed_dev;
	ehc->i.action = action;

	DPRINTK("EXIT\n");
}
1057
1058/**
1059 * ata_eh_report - report error handling to user
1060 * @ap: ATA port EH is going on
1061 *
1062 * Report EH to user.
1063 *
1064 * LOCKING:
1065 * None.
1066 */
1067static void ata_eh_report(struct ata_port *ap)
1068{
1069 struct ata_eh_context *ehc = &ap->eh_context;
1070 const char *frozen, *desc;
1071 int tag, nr_failed = 0;
1072
1073 desc = NULL;
1074 if (ehc->i.desc[0] != '\0')
1075 desc = ehc->i.desc;
1076
1077 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1078 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
1079
1080 if (!(qc->flags & ATA_QCFLAG_FAILED))
1081 continue;
1082 if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask)
1083 continue;
1084
1085 nr_failed++;
1086 }
1087
1088 if (!nr_failed && !ehc->i.err_mask)
1089 return;
1090
1091 frozen = "";
1092 if (ap->flags & ATA_FLAG_FROZEN)
1093 frozen = " frozen";
1094
1095 if (ehc->i.dev) {
1096 ata_dev_printk(ehc->i.dev, KERN_ERR,
1097 "exception Emask 0x%x SErr 0x%x action 0x%x%s\n",
1098 ehc->i.err_mask, ehc->i.serror, ehc->i.action,
1099 frozen);
1100 if (desc)
1101 ata_dev_printk(ehc->i.dev, KERN_ERR, "(%s)\n", desc);
1102 } else {
1103 ata_port_printk(ap, KERN_ERR,
1104 "exception Emask 0x%x SErr 0x%x action 0x%x%s\n",
1105 ehc->i.err_mask, ehc->i.serror, ehc->i.action,
1106 frozen);
1107 if (desc)
1108 ata_port_printk(ap, KERN_ERR, "(%s)\n", desc);
1109 }
1110
1111 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1112 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
1113
1114 if (!(qc->flags & ATA_QCFLAG_FAILED) || !qc->err_mask)
1115 continue;
1116
1117 ata_dev_printk(qc->dev, KERN_ERR, "tag %d cmd 0x%x "
1118 "Emask 0x%x stat 0x%x err 0x%x (%s)\n",
1119 qc->tag, qc->tf.command, qc->err_mask,
1120 qc->result_tf.command, qc->result_tf.feature,
1121 ata_err_string(qc->err_mask));
1122 }
1123}
1124
/**
 *	ata_eh_reset - reset the port
 *	@ap: ATA port to reset
 *	@softreset: softreset method (can be NULL)
 *	@hardreset: hardreset method (can be NULL)
 *	@postreset: postreset method (can be NULL)
 *
 *	Choose between soft and hard reset, perform the reset with up
 *	to ATA_EH_RESET_TRIES retries (sleeping 5s between attempts)
 *	and, on success, run @postreset and schedule revalidation.
 *	Softreset is preferred unless a hardreset is explicitly
 *	requested or a link speed change is pending; after a failed
 *	attempt we always fall back to hardreset if one is available.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
static int ata_eh_reset(struct ata_port *ap, ata_reset_fn_t softreset,
			ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
{
	struct ata_eh_context *ehc = &ap->eh_context;
	unsigned int classes[ATA_MAX_DEVICES];
	int tries = ATA_EH_RESET_TRIES;
	ata_reset_fn_t reset;
	int rc;

	/* use softreset only when available, no hardreset was asked
	 * for, and no link speed renegotiation is needed
	 */
	if (softreset && (!hardreset || (!sata_set_spd_needed(ap) &&
					 !(ehc->i.action & ATA_EH_HARDRESET))))
		reset = softreset;
	else
		reset = hardreset;

 retry:
	ata_port_printk(ap, KERN_INFO, "%s resetting port\n",
			reset == softreset ? "soft" : "hard");

	/* reset */
	ata_eh_about_to_do(ap, ATA_EH_RESET_MASK);
	ehc->i.flags |= ATA_EHI_DID_RESET;

	rc = ata_do_reset(ap, reset, classes);

	if (rc && --tries) {
		ata_port_printk(ap, KERN_WARNING,
				"%sreset failed, retrying in 5 secs\n",
				reset == softreset ? "soft" : "hard");
		ssleep(5);

		/* a failed hardreset may indicate the link can't cope
		 * with the current speed - lower it before retrying
		 */
		if (reset == hardreset)
			sata_down_spd_limit(ap);
		/* escalate to hardreset for the retry if we have one */
		if (hardreset)
			reset = hardreset;
		goto retry;
	}

	if (rc == 0) {
		if (postreset)
			postreset(ap, classes);

		/* reset successful, schedule revalidation */
		ehc->i.dev = NULL;
		ehc->i.action &= ~ATA_EH_RESET_MASK;
		ehc->i.action |= ATA_EH_REVALIDATE;
	}

	return rc;
}
1175
/**
 *	ata_eh_revalidate - revalidate devices after reset/error
 *	@ap: ATA port to revalidate devices on
 *	@r_failed_dev: out parameter, set to the failing device on error
 *
 *	Revalidate every enabled device which is marked for
 *	revalidation (all of them if ehc->i.dev is NULL, otherwise
 *	only that device).  Clears ATA_EH_REVALIDATE from the pending
 *	action on success.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise (with *@r_failed_dev set).
 */
static int ata_eh_revalidate(struct ata_port *ap,
			     struct ata_device **r_failed_dev)
{
	struct ata_eh_context *ehc = &ap->eh_context;
	struct ata_device *dev;
	int i, rc = 0;

	DPRINTK("ENTER\n");

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];

		if (ehc->i.action & ATA_EH_REVALIDATE && ata_dev_enabled(dev) &&
		    (!ehc->i.dev || ehc->i.dev == dev)) {
			/* no point revalidating against a dead link */
			if (ata_port_offline(ap)) {
				rc = -EIO;
				break;
			}

			ata_eh_about_to_do(ap, ATA_EH_REVALIDATE);
			rc = ata_dev_revalidate(dev,
					ehc->i.flags & ATA_EHI_DID_RESET);
			if (rc)
				break;

			ehc->i.action &= ~ATA_EH_REVALIDATE;
		}
	}

	/* on break above, dev points at the device that failed */
	if (rc)
		*r_failed_dev = dev;

	DPRINTK("EXIT\n");
	return rc;
}
1211
1212static int ata_port_nr_enabled(struct ata_port *ap)
1213{
1214 int i, cnt = 0;
1215
1216 for (i = 0; i < ATA_MAX_DEVICES; i++)
1217 if (ata_dev_enabled(&ap->device[i]))
1218 cnt++;
1219 return cnt;
1220}
1221
/**
 *	ata_eh_recover - recover host port after error
 *	@ap: host port to recover
 *	@softreset: softreset method (can be NULL)
 *	@hardreset: hardreset method (can be NULL)
 *	@postreset: postreset method (can be NULL)
 *
 *	This is the alpha and omega, eum and yang, heart and soul of
 *	libata exception handling.  On entry, actions required to
 *	recover each device are recorded in eh_context.  This
 *	function executes all the operations with appropriate retries
 *	and fallbacks to resurrect failed devices.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
static int ata_eh_recover(struct ata_port *ap, ata_reset_fn_t softreset,
			  ata_reset_fn_t hardreset,
			  ata_postreset_fn_t postreset)
{
	struct ata_eh_context *ehc = &ap->eh_context;
	struct ata_device *dev;
	int down_xfermask, i, rc;

	DPRINTK("ENTER\n");

	/* prep for recovery: give every device its full try budget */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];

		ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
	}

 retry:
	down_xfermask = 0;
	rc = 0;

	/* skip EH if possible. */
	if (!ata_port_nr_enabled(ap) && !(ap->flags & ATA_FLAG_FROZEN))
		ehc->i.action = 0;

	/* reset */
	if (ehc->i.action & ATA_EH_RESET_MASK) {
		/* keep interrupts off while resetting */
		ata_eh_freeze_port(ap);

		rc = ata_eh_reset(ap, softreset, hardreset, postreset);
		if (rc) {
			ata_port_printk(ap, KERN_ERR,
					"reset failed, giving up\n");
			goto out;
		}

		ata_eh_thaw_port(ap);
	}

	/* revalidate existing devices */
	rc = ata_eh_revalidate(ap, &dev);
	if (rc)
		goto dev_fail;

	/* configure transfer mode if the port has been reset */
	if (ehc->i.flags & ATA_EHI_DID_RESET) {
		rc = ata_set_mode(ap, &dev);
		if (rc) {
			/* mode setting failure warrants lowering the
			 * transfer mode before the next attempt
			 */
			down_xfermask = 1;
			goto dev_fail;
		}
	}

	goto out;

	/* @dev points at the device that caused the failure, @rc is
	 * the reason; decide how to penalize it before retrying
	 */
 dev_fail:
	switch (rc) {
	case -ENODEV:
	case -EINVAL:
		/* unrecoverable for this device - use up all tries */
		ehc->tries[dev->devno] = 0;
		break;
	case -EIO:
		sata_down_spd_limit(ap);
		/* fall through - -EIO also consumes a try */
	default:
		ehc->tries[dev->devno]--;
		if (down_xfermask &&
		    ata_down_xfermask_limit(dev, ehc->tries[dev->devno] == 1))
			ehc->tries[dev->devno] = 0;
	}

	/* disable device if it has used up all its chances */
	if (ata_dev_enabled(dev) && !ehc->tries[dev->devno])
		ata_dev_disable(dev);

	/* soft didn't work?  be haaaaard */
	if (ehc->i.flags & ATA_EHI_DID_RESET)
		ehc->i.action |= ATA_EH_HARDRESET;
	else
		ehc->i.action |= ATA_EH_SOFTRESET;

	if (ata_port_nr_enabled(ap)) {
		ata_port_printk(ap, KERN_WARNING, "failed to recover some "
				"devices, retrying in 5 secs\n");
		ssleep(5);
	} else {
		/* no device left, repeat fast */
		msleep(500);
	}

	goto retry;

 out:
	/* recovery failed completely - take every device offline */
	if (rc) {
		for (i = 0; i < ATA_MAX_DEVICES; i++)
			ata_dev_disable(&ap->device[i]);
	}

	DPRINTK("EXIT, rc=%d\n", rc);
	return rc;
}
1341
1342/**
1343 * ata_eh_finish - finish up EH
1344 * @ap: host port to finish EH for
1345 *
1346 * Recovery is complete. Clean up EH states and retry or finish
1347 * failed qcs.
1348 *
1349 * LOCKING:
1350 * None.
1351 */
1352static void ata_eh_finish(struct ata_port *ap)
1353{
1354 int tag;
1355
1356 /* retry or finish qcs */
1357 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1358 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
1359
1360 if (!(qc->flags & ATA_QCFLAG_FAILED))
1361 continue;
1362
1363 if (qc->err_mask) {
1364 /* FIXME: Once EH migration is complete,
1365 * generate sense data in this function,
1366 * considering both err_mask and tf.
1367 */
1368 if (qc->err_mask & AC_ERR_INVALID)
1369 ata_eh_qc_complete(qc);
1370 else
1371 ata_eh_qc_retry(qc);
1372 } else {
1373 if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
1374 ata_eh_qc_complete(qc);
1375 } else {
1376 /* feed zero TF to sense generation */
1377 memset(&qc->result_tf, 0, sizeof(qc->result_tf));
1378 ata_eh_qc_retry(qc);
1379 }
1380 }
1381 }
1382}
1383
/**
 *	ata_do_eh - do standard error handling
 *	@ap: host port to handle error for
 *	@softreset: softreset method (can be NULL)
 *	@hardreset: hardreset method (can be NULL)
 *	@postreset: postreset method (can be NULL)
 *
 *	Perform standard error handling sequence: analyze what went
 *	wrong, report it to the user, attempt recovery with the given
 *	reset methods, then complete or retry the failed commands.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_do_eh(struct ata_port *ap, ata_reset_fn_t softreset,
	       ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
{
	ata_eh_autopsy(ap);
	ata_eh_report(ap);
	ata_eh_recover(ap, softreset, hardreset, postreset);
	ata_eh_finish(ap);
}
diff --git a/drivers/scsi/libata-scsi.c b/drivers/scsi/libata-scsi.c
index 9871f8272df0..e61cc357ae4a 100644
--- a/drivers/scsi/libata-scsi.c
+++ b/drivers/scsi/libata-scsi.c
@@ -302,7 +302,6 @@ int ata_scsi_ioctl(struct scsi_device *scsidev, int cmd, void __user *arg)
302 302
303/** 303/**
304 * ata_scsi_qc_new - acquire new ata_queued_cmd reference 304 * ata_scsi_qc_new - acquire new ata_queued_cmd reference
305 * @ap: ATA port to which the new command is attached
306 * @dev: ATA device to which the new command is attached 305 * @dev: ATA device to which the new command is attached
307 * @cmd: SCSI command that originated this ATA command 306 * @cmd: SCSI command that originated this ATA command
308 * @done: SCSI command completion function 307 * @done: SCSI command completion function
@@ -321,14 +320,13 @@ int ata_scsi_ioctl(struct scsi_device *scsidev, int cmd, void __user *arg)
321 * RETURNS: 320 * RETURNS:
322 * Command allocated, or %NULL if none available. 321 * Command allocated, or %NULL if none available.
323 */ 322 */
324struct ata_queued_cmd *ata_scsi_qc_new(struct ata_port *ap, 323struct ata_queued_cmd *ata_scsi_qc_new(struct ata_device *dev,
325 struct ata_device *dev,
326 struct scsi_cmnd *cmd, 324 struct scsi_cmnd *cmd,
327 void (*done)(struct scsi_cmnd *)) 325 void (*done)(struct scsi_cmnd *))
328{ 326{
329 struct ata_queued_cmd *qc; 327 struct ata_queued_cmd *qc;
330 328
331 qc = ata_qc_new_init(ap, dev); 329 qc = ata_qc_new_init(dev);
332 if (qc) { 330 if (qc) {
333 qc->scsicmd = cmd; 331 qc->scsicmd = cmd;
334 qc->scsidone = done; 332 qc->scsidone = done;
@@ -398,7 +396,7 @@ int ata_scsi_device_resume(struct scsi_device *sdev)
398 struct ata_port *ap = ata_shost_to_port(sdev->host); 396 struct ata_port *ap = ata_shost_to_port(sdev->host);
399 struct ata_device *dev = &ap->device[sdev->id]; 397 struct ata_device *dev = &ap->device[sdev->id];
400 398
401 return ata_device_resume(ap, dev); 399 return ata_device_resume(dev);
402} 400}
403 401
404int ata_scsi_device_suspend(struct scsi_device *sdev, pm_message_t state) 402int ata_scsi_device_suspend(struct scsi_device *sdev, pm_message_t state)
@@ -406,7 +404,7 @@ int ata_scsi_device_suspend(struct scsi_device *sdev, pm_message_t state)
406 struct ata_port *ap = ata_shost_to_port(sdev->host); 404 struct ata_port *ap = ata_shost_to_port(sdev->host);
407 struct ata_device *dev = &ap->device[sdev->id]; 405 struct ata_device *dev = &ap->device[sdev->id];
408 406
409 return ata_device_suspend(ap, dev, state); 407 return ata_device_suspend(dev, state);
410} 408}
411 409
412/** 410/**
@@ -417,6 +415,7 @@ int ata_scsi_device_suspend(struct scsi_device *sdev, pm_message_t state)
417 * @sk: the sense key we'll fill out 415 * @sk: the sense key we'll fill out
418 * @asc: the additional sense code we'll fill out 416 * @asc: the additional sense code we'll fill out
419 * @ascq: the additional sense code qualifier we'll fill out 417 * @ascq: the additional sense code qualifier we'll fill out
418 * @verbose: be verbose
420 * 419 *
421 * Converts an ATA error into a SCSI error. Fill out pointers to 420 * Converts an ATA error into a SCSI error. Fill out pointers to
422 * SK, ASC, and ASCQ bytes for later use in fixed or descriptor 421 * SK, ASC, and ASCQ bytes for later use in fixed or descriptor
@@ -426,7 +425,7 @@ int ata_scsi_device_suspend(struct scsi_device *sdev, pm_message_t state)
426 * spin_lock_irqsave(host_set lock) 425 * spin_lock_irqsave(host_set lock)
427 */ 426 */
428void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk, u8 *asc, 427void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk, u8 *asc,
429 u8 *ascq) 428 u8 *ascq, int verbose)
430{ 429{
431 int i; 430 int i;
432 431
@@ -491,8 +490,9 @@ void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk, u8 *asc,
491 } 490 }
492 } 491 }
493 /* No immediate match */ 492 /* No immediate match */
494 printk(KERN_WARNING "ata%u: no sense translation for " 493 if (verbose)
495 "error 0x%02x\n", id, drv_err); 494 printk(KERN_WARNING "ata%u: no sense translation for "
495 "error 0x%02x\n", id, drv_err);
496 } 496 }
497 497
498 /* Fall back to interpreting status bits */ 498 /* Fall back to interpreting status bits */
@@ -505,8 +505,9 @@ void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk, u8 *asc,
505 } 505 }
506 } 506 }
507 /* No error? Undecoded? */ 507 /* No error? Undecoded? */
508 printk(KERN_WARNING "ata%u: no sense translation for status: 0x%02x\n", 508 if (verbose)
509 id, drv_stat); 509 printk(KERN_WARNING "ata%u: no sense translation for "
510 "status: 0x%02x\n", id, drv_stat);
510 511
511 /* We need a sensible error return here, which is tricky, and one 512 /* We need a sensible error return here, which is tricky, and one
512 that won't cause people to do things like return a disk wrongly */ 513 that won't cause people to do things like return a disk wrongly */
@@ -515,9 +516,10 @@ void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk, u8 *asc,
515 *ascq = 0x00; 516 *ascq = 0x00;
516 517
517 translate_done: 518 translate_done:
518 printk(KERN_ERR "ata%u: translated ATA stat/err 0x%02x/%02x to " 519 if (verbose)
519 "SCSI SK/ASC/ASCQ 0x%x/%02x/%02x\n", id, drv_stat, drv_err, 520 printk(KERN_ERR "ata%u: translated ATA stat/err 0x%02x/%02x "
520 *sk, *asc, *ascq); 521 "to SCSI SK/ASC/ASCQ 0x%x/%02x/%02x\n",
522 id, drv_stat, drv_err, *sk, *asc, *ascq);
521 return; 523 return;
522} 524}
523 525
@@ -537,9 +539,10 @@ void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk, u8 *asc,
537void ata_gen_ata_desc_sense(struct ata_queued_cmd *qc) 539void ata_gen_ata_desc_sense(struct ata_queued_cmd *qc)
538{ 540{
539 struct scsi_cmnd *cmd = qc->scsicmd; 541 struct scsi_cmnd *cmd = qc->scsicmd;
540 struct ata_taskfile *tf = &qc->tf; 542 struct ata_taskfile *tf = &qc->result_tf;
541 unsigned char *sb = cmd->sense_buffer; 543 unsigned char *sb = cmd->sense_buffer;
542 unsigned char *desc = sb + 8; 544 unsigned char *desc = sb + 8;
545 int verbose = qc->ap->ops->error_handler == NULL;
543 546
544 memset(sb, 0, SCSI_SENSE_BUFFERSIZE); 547 memset(sb, 0, SCSI_SENSE_BUFFERSIZE);
545 548
@@ -552,7 +555,7 @@ void ata_gen_ata_desc_sense(struct ata_queued_cmd *qc)
552 if (qc->err_mask || 555 if (qc->err_mask ||
553 tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) { 556 tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
554 ata_to_sense_error(qc->ap->id, tf->command, tf->feature, 557 ata_to_sense_error(qc->ap->id, tf->command, tf->feature,
555 &sb[1], &sb[2], &sb[3]); 558 &sb[1], &sb[2], &sb[3], verbose);
556 sb[1] &= 0x0f; 559 sb[1] &= 0x0f;
557 } 560 }
558 561
@@ -608,8 +611,9 @@ void ata_gen_ata_desc_sense(struct ata_queued_cmd *qc)
608void ata_gen_fixed_sense(struct ata_queued_cmd *qc) 611void ata_gen_fixed_sense(struct ata_queued_cmd *qc)
609{ 612{
610 struct scsi_cmnd *cmd = qc->scsicmd; 613 struct scsi_cmnd *cmd = qc->scsicmd;
611 struct ata_taskfile *tf = &qc->tf; 614 struct ata_taskfile *tf = &qc->result_tf;
612 unsigned char *sb = cmd->sense_buffer; 615 unsigned char *sb = cmd->sense_buffer;
616 int verbose = qc->ap->ops->error_handler == NULL;
613 617
614 memset(sb, 0, SCSI_SENSE_BUFFERSIZE); 618 memset(sb, 0, SCSI_SENSE_BUFFERSIZE);
615 619
@@ -622,7 +626,7 @@ void ata_gen_fixed_sense(struct ata_queued_cmd *qc)
622 if (qc->err_mask || 626 if (qc->err_mask ||
623 tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) { 627 tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
624 ata_to_sense_error(qc->ap->id, tf->command, tf->feature, 628 ata_to_sense_error(qc->ap->id, tf->command, tf->feature,
625 &sb[2], &sb[12], &sb[13]); 629 &sb[2], &sb[12], &sb[13], verbose);
626 sb[2] &= 0x0f; 630 sb[2] &= 0x0f;
627 } 631 }
628 632
@@ -748,7 +752,7 @@ static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc,
748 tf->nsect = 1; /* 1 sector, lba=0 */ 752 tf->nsect = 1; /* 1 sector, lba=0 */
749 753
750 if (qc->dev->flags & ATA_DFLAG_LBA) { 754 if (qc->dev->flags & ATA_DFLAG_LBA) {
751 qc->tf.flags |= ATA_TFLAG_LBA; 755 tf->flags |= ATA_TFLAG_LBA;
752 756
753 tf->lbah = 0x0; 757 tf->lbah = 0x0;
754 tf->lbam = 0x0; 758 tf->lbam = 0x0;
@@ -1199,14 +1203,11 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
1199 */ 1203 */
1200 if (((cdb[0] == ATA_16) || (cdb[0] == ATA_12)) && 1204 if (((cdb[0] == ATA_16) || (cdb[0] == ATA_12)) &&
1201 ((cdb[2] & 0x20) || need_sense)) { 1205 ((cdb[2] & 0x20) || need_sense)) {
1202 qc->ap->ops->tf_read(qc->ap, &qc->tf);
1203 ata_gen_ata_desc_sense(qc); 1206 ata_gen_ata_desc_sense(qc);
1204 } else { 1207 } else {
1205 if (!need_sense) { 1208 if (!need_sense) {
1206 cmd->result = SAM_STAT_GOOD; 1209 cmd->result = SAM_STAT_GOOD;
1207 } else { 1210 } else {
1208 qc->ap->ops->tf_read(qc->ap, &qc->tf);
1209
1210 /* TODO: decide which descriptor format to use 1211 /* TODO: decide which descriptor format to use
1211 * for 48b LBA devices and call that here 1212 * for 48b LBA devices and call that here
1212 * instead of the fixed desc, which is only 1213 * instead of the fixed desc, which is only
@@ -1217,10 +1218,8 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
1217 } 1218 }
1218 } 1219 }
1219 1220
1220 if (need_sense) { 1221 if (need_sense && !qc->ap->ops->error_handler)
1221 /* The ata_gen_..._sense routines fill in tf */ 1222 ata_dump_status(qc->ap->id, &qc->result_tf);
1222 ata_dump_status(qc->ap->id, &qc->tf);
1223 }
1224 1223
1225 qc->scsidone(cmd); 1224 qc->scsidone(cmd);
1226 1225
@@ -1229,7 +1228,6 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
1229 1228
1230/** 1229/**
1231 * ata_scsi_translate - Translate then issue SCSI command to ATA device 1230 * ata_scsi_translate - Translate then issue SCSI command to ATA device
1232 * @ap: ATA port to which the command is addressed
1233 * @dev: ATA device to which the command is addressed 1231 * @dev: ATA device to which the command is addressed
1234 * @cmd: SCSI command to execute 1232 * @cmd: SCSI command to execute
1235 * @done: SCSI command completion function 1233 * @done: SCSI command completion function
@@ -1252,17 +1250,16 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
1252 * spin_lock_irqsave(host_set lock) 1250 * spin_lock_irqsave(host_set lock)
1253 */ 1251 */
1254 1252
1255static void ata_scsi_translate(struct ata_port *ap, struct ata_device *dev, 1253static void ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd,
1256 struct scsi_cmnd *cmd, 1254 void (*done)(struct scsi_cmnd *),
1257 void (*done)(struct scsi_cmnd *), 1255 ata_xlat_func_t xlat_func)
1258 ata_xlat_func_t xlat_func)
1259{ 1256{
1260 struct ata_queued_cmd *qc; 1257 struct ata_queued_cmd *qc;
1261 u8 *scsicmd = cmd->cmnd; 1258 u8 *scsicmd = cmd->cmnd;
1262 1259
1263 VPRINTK("ENTER\n"); 1260 VPRINTK("ENTER\n");
1264 1261
1265 qc = ata_scsi_qc_new(ap, dev, cmd, done); 1262 qc = ata_scsi_qc_new(dev, cmd, done);
1266 if (!qc) 1263 if (!qc)
1267 goto err_mem; 1264 goto err_mem;
1268 1265
@@ -1270,8 +1267,8 @@ static void ata_scsi_translate(struct ata_port *ap, struct ata_device *dev,
1270 if (cmd->sc_data_direction == DMA_FROM_DEVICE || 1267 if (cmd->sc_data_direction == DMA_FROM_DEVICE ||
1271 cmd->sc_data_direction == DMA_TO_DEVICE) { 1268 cmd->sc_data_direction == DMA_TO_DEVICE) {
1272 if (unlikely(cmd->request_bufflen < 1)) { 1269 if (unlikely(cmd->request_bufflen < 1)) {
1273 printk(KERN_WARNING "ata%u(%u): WARNING: zero len r/w req\n", 1270 ata_dev_printk(dev, KERN_WARNING,
1274 ap->id, dev->devno); 1271 "WARNING: zero len r/w req\n");
1275 goto err_did; 1272 goto err_did;
1276 } 1273 }
1277 1274
@@ -2004,7 +2001,6 @@ static void atapi_sense_complete(struct ata_queued_cmd *qc)
2004 * a sense descriptors, since that's only 2001 * a sense descriptors, since that's only
2005 * correct for ATA, not ATAPI 2002 * correct for ATA, not ATAPI
2006 */ 2003 */
2007 qc->ap->ops->tf_read(qc->ap, &qc->tf);
2008 ata_gen_ata_desc_sense(qc); 2004 ata_gen_ata_desc_sense(qc);
2009 } 2005 }
2010 2006
@@ -2070,6 +2066,26 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc)
2070 2066
2071 VPRINTK("ENTER, err_mask 0x%X\n", err_mask); 2067 VPRINTK("ENTER, err_mask 0x%X\n", err_mask);
2072 2068
2069 /* handle completion from new EH */
2070 if (unlikely(qc->ap->ops->error_handler &&
2071 (err_mask || qc->flags & ATA_QCFLAG_SENSE_VALID))) {
2072
2073 if (!(qc->flags & ATA_QCFLAG_SENSE_VALID)) {
2074 /* FIXME: not quite right; we don't want the
2075 * translation of taskfile registers into a
2076 * sense descriptors, since that's only
2077 * correct for ATA, not ATAPI
2078 */
2079 ata_gen_ata_desc_sense(qc);
2080 }
2081
2082 qc->scsicmd->result = SAM_STAT_CHECK_CONDITION;
2083 qc->scsidone(cmd);
2084 ata_qc_free(qc);
2085 return;
2086 }
2087
2088 /* successful completion or old EH failure path */
2073 if (unlikely(err_mask & AC_ERR_DEV)) { 2089 if (unlikely(err_mask & AC_ERR_DEV)) {
2074 cmd->result = SAM_STAT_CHECK_CONDITION; 2090 cmd->result = SAM_STAT_CHECK_CONDITION;
2075 atapi_request_sense(qc); 2091 atapi_request_sense(qc);
@@ -2080,7 +2096,6 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc)
2080 * a sense descriptors, since that's only 2096 * a sense descriptors, since that's only
2081 * correct for ATA, not ATAPI 2097 * correct for ATA, not ATAPI
2082 */ 2098 */
2083 qc->ap->ops->tf_read(qc->ap, &qc->tf);
2084 ata_gen_ata_desc_sense(qc); 2099 ata_gen_ata_desc_sense(qc);
2085 } else { 2100 } else {
2086 u8 *scsicmd = cmd->cmnd; 2101 u8 *scsicmd = cmd->cmnd;
@@ -2211,8 +2226,9 @@ ata_scsi_find_dev(struct ata_port *ap, const struct scsi_device *scsidev)
2211 2226
2212 if (!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) { 2227 if (!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) {
2213 if (unlikely(dev->class == ATA_DEV_ATAPI)) { 2228 if (unlikely(dev->class == ATA_DEV_ATAPI)) {
2214 printk(KERN_WARNING "ata%u(%u): WARNING: ATAPI is %s, device ignored.\n", 2229 ata_dev_printk(dev, KERN_WARNING,
2215 ap->id, dev->devno, atapi_enabled ? "not supported with this driver" : "disabled"); 2230 "WARNING: ATAPI is %s, device ignored.\n",
2231 atapi_enabled ? "not supported with this driver" : "disabled");
2216 return NULL; 2232 return NULL;
2217 } 2233 }
2218 } 2234 }
@@ -2361,6 +2377,9 @@ ata_scsi_pass_thru(struct ata_queued_cmd *qc, const u8 *scsicmd)
2361 */ 2377 */
2362 qc->nsect = cmd->bufflen / ATA_SECT_SIZE; 2378 qc->nsect = cmd->bufflen / ATA_SECT_SIZE;
2363 2379
2380 /* request result TF */
2381 qc->flags |= ATA_QCFLAG_RESULT_TF;
2382
2364 return 0; 2383 return 0;
2365 2384
2366 invalid_fld: 2385 invalid_fld:
@@ -2437,19 +2456,20 @@ static inline void ata_scsi_dump_cdb(struct ata_port *ap,
2437#endif 2456#endif
2438} 2457}
2439 2458
2440static inline void __ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *), 2459static inline void __ata_scsi_queuecmd(struct scsi_cmnd *cmd,
2441 struct ata_port *ap, struct ata_device *dev) 2460 void (*done)(struct scsi_cmnd *),
2461 struct ata_device *dev)
2442{ 2462{
2443 if (dev->class == ATA_DEV_ATA) { 2463 if (dev->class == ATA_DEV_ATA) {
2444 ata_xlat_func_t xlat_func = ata_get_xlat_func(dev, 2464 ata_xlat_func_t xlat_func = ata_get_xlat_func(dev,
2445 cmd->cmnd[0]); 2465 cmd->cmnd[0]);
2446 2466
2447 if (xlat_func) 2467 if (xlat_func)
2448 ata_scsi_translate(ap, dev, cmd, done, xlat_func); 2468 ata_scsi_translate(dev, cmd, done, xlat_func);
2449 else 2469 else
2450 ata_scsi_simulate(ap, dev, cmd, done); 2470 ata_scsi_simulate(dev, cmd, done);
2451 } else 2471 } else
2452 ata_scsi_translate(ap, dev, cmd, done, atapi_xlat); 2472 ata_scsi_translate(dev, cmd, done, atapi_xlat);
2453} 2473}
2454 2474
2455/** 2475/**
@@ -2487,7 +2507,7 @@ int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
2487 2507
2488 dev = ata_scsi_find_dev(ap, scsidev); 2508 dev = ata_scsi_find_dev(ap, scsidev);
2489 if (likely(dev)) 2509 if (likely(dev))
2490 __ata_scsi_queuecmd(cmd, done, ap, dev); 2510 __ata_scsi_queuecmd(cmd, done, dev);
2491 else { 2511 else {
2492 cmd->result = (DID_BAD_TARGET << 16); 2512 cmd->result = (DID_BAD_TARGET << 16);
2493 done(cmd); 2513 done(cmd);
@@ -2500,7 +2520,6 @@ int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
2500 2520
2501/** 2521/**
2502 * ata_scsi_simulate - simulate SCSI command on ATA device 2522 * ata_scsi_simulate - simulate SCSI command on ATA device
2503 * @ap: port the device is connected to
2504 * @dev: the target device 2523 * @dev: the target device
2505 * @cmd: SCSI command being sent to device. 2524 * @cmd: SCSI command being sent to device.
2506 * @done: SCSI command completion function. 2525 * @done: SCSI command completion function.
@@ -2512,14 +2531,12 @@ int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
2512 * spin_lock_irqsave(host_set lock) 2531 * spin_lock_irqsave(host_set lock)
2513 */ 2532 */
2514 2533
2515void ata_scsi_simulate(struct ata_port *ap, struct ata_device *dev, 2534void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd,
2516 struct scsi_cmnd *cmd,
2517 void (*done)(struct scsi_cmnd *)) 2535 void (*done)(struct scsi_cmnd *))
2518{ 2536{
2519 struct ata_scsi_args args; 2537 struct ata_scsi_args args;
2520 const u8 *scsicmd = cmd->cmnd; 2538 const u8 *scsicmd = cmd->cmnd;
2521 2539
2522 args.ap = ap;
2523 args.dev = dev; 2540 args.dev = dev;
2524 args.id = dev->id; 2541 args.id = dev->id;
2525 args.cmd = cmd; 2542 args.cmd = cmd;
@@ -2605,3 +2622,26 @@ void ata_scsi_scan_host(struct ata_port *ap)
2605 } 2622 }
2606} 2623}
2607 2624
2625/**
2626 * ata_schedule_scsi_eh - schedule EH for SCSI host
2627 * @shost: SCSI host to invoke error handling on.
2628 *
2629 * Schedule SCSI EH without scmd. This is a hack.
2630 *
2631 * LOCKING:
2632 * spin_lock_irqsave(host_set lock)
2633 **/
2634void ata_schedule_scsi_eh(struct Scsi_Host *shost)
2635{
2636 unsigned long flags;
2637
2638 spin_lock_irqsave(shost->host_lock, flags);
2639
2640 if (scsi_host_set_state(shost, SHOST_RECOVERY) == 0 ||
2641 scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY) == 0) {
2642 shost->host_eh_scheduled++;
2643 scsi_eh_wakeup(shost);
2644 }
2645
2646 spin_unlock_irqrestore(shost->host_lock, flags);
2647}
diff --git a/drivers/scsi/libata.h b/drivers/scsi/libata.h
index 3f8b0a863781..b76ad7d7062a 100644
--- a/drivers/scsi/libata.h
+++ b/drivers/scsi/libata.h
@@ -32,7 +32,6 @@
32#define DRV_VERSION "1.30" /* must be exactly four chars */ 32#define DRV_VERSION "1.30" /* must be exactly four chars */
33 33
34struct ata_scsi_args { 34struct ata_scsi_args {
35 struct ata_port *ap;
36 struct ata_device *dev; 35 struct ata_device *dev;
37 u16 *id; 36 u16 *id;
38 struct scsi_cmnd *cmd; 37 struct scsi_cmnd *cmd;
@@ -43,23 +42,22 @@ struct ata_scsi_args {
43extern int atapi_enabled; 42extern int atapi_enabled;
44extern int atapi_dmadir; 43extern int atapi_dmadir;
45extern int libata_fua; 44extern int libata_fua;
46extern struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap, 45extern struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev);
47 struct ata_device *dev);
48extern int ata_rwcmd_protocol(struct ata_queued_cmd *qc); 46extern int ata_rwcmd_protocol(struct ata_queued_cmd *qc);
49extern void ata_dev_disable(struct ata_port *ap, struct ata_device *dev); 47extern void ata_dev_disable(struct ata_device *dev);
50extern void ata_port_flush_task(struct ata_port *ap); 48extern void ata_port_flush_task(struct ata_port *ap);
51extern unsigned ata_exec_internal(struct ata_port *ap, struct ata_device *dev, 49extern unsigned ata_exec_internal(struct ata_device *dev,
52 struct ata_taskfile *tf, const u8 *cdb, 50 struct ata_taskfile *tf, const u8 *cdb,
53 int dma_dir, void *buf, unsigned int buflen); 51 int dma_dir, void *buf, unsigned int buflen);
54extern int ata_down_sata_spd_limit(struct ata_port *ap); 52extern int sata_down_spd_limit(struct ata_port *ap);
55extern int ata_set_sata_spd_needed(struct ata_port *ap); 53extern int sata_set_spd_needed(struct ata_port *ap);
56extern int ata_down_xfermask_limit(struct ata_port *ap, struct ata_device *dev, 54extern int ata_down_xfermask_limit(struct ata_device *dev, int force_pio0);
57 int force_pio0);
58extern int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev); 55extern int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev);
59extern int ata_do_reset(struct ata_port *ap, ata_reset_fn_t reset, 56extern int ata_do_reset(struct ata_port *ap, ata_reset_fn_t reset,
60 ata_postreset_fn_t postreset, unsigned int *classes); 57 unsigned int *classes);
61extern void ata_qc_free(struct ata_queued_cmd *qc); 58extern void ata_qc_free(struct ata_queued_cmd *qc);
62extern void ata_qc_issue(struct ata_queued_cmd *qc); 59extern void ata_qc_issue(struct ata_queued_cmd *qc);
60extern void __ata_qc_complete(struct ata_queued_cmd *qc);
63extern int ata_check_atapi_dma(struct ata_queued_cmd *qc); 61extern int ata_check_atapi_dma(struct ata_queued_cmd *qc);
64extern void ata_dev_select(struct ata_port *ap, unsigned int device, 62extern void ata_dev_select(struct ata_port *ap, unsigned int device,
65 unsigned int wait, unsigned int can_sleep); 63 unsigned int wait, unsigned int can_sleep);
@@ -100,9 +98,11 @@ extern void ata_scsi_set_sense(struct scsi_cmnd *cmd,
100extern void ata_scsi_rbuf_fill(struct ata_scsi_args *args, 98extern void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
101 unsigned int (*actor) (struct ata_scsi_args *args, 99 unsigned int (*actor) (struct ata_scsi_args *args,
102 u8 *rbuf, unsigned int buflen)); 100 u8 *rbuf, unsigned int buflen));
101extern void ata_schedule_scsi_eh(struct Scsi_Host *shost);
103 102
104/* libata-eh.c */ 103/* libata-eh.c */
105extern enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd); 104extern enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd);
106extern void ata_scsi_error(struct Scsi_Host *host); 105extern void ata_scsi_error(struct Scsi_Host *host);
106extern void ata_qc_schedule_eh(struct ata_queued_cmd *qc);
107 107
108#endif /* __LIBATA_H__ */ 108#endif /* __LIBATA_H__ */
diff --git a/drivers/scsi/sata_mv.c b/drivers/scsi/sata_mv.c
index 670ef16a8437..e6d141dd0385 100644
--- a/drivers/scsi/sata_mv.c
+++ b/drivers/scsi/sata_mv.c
@@ -680,7 +680,7 @@ static void mv_stop_dma(struct ata_port *ap)
680 } 680 }
681 681
682 if (EDMA_EN & reg) { 682 if (EDMA_EN & reg) {
683 printk(KERN_ERR "ata%u: Unable to stop eDMA\n", ap->id); 683 ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
684 /* FIXME: Consider doing a reset here to recover */ 684 /* FIXME: Consider doing a reset here to recover */
685 } 685 }
686} 686}
@@ -1309,8 +1309,8 @@ static void mv_err_intr(struct ata_port *ap)
1309 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); 1309 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1310 1310
1311 if (EDMA_ERR_SERR & edma_err_cause) { 1311 if (EDMA_ERR_SERR & edma_err_cause) {
1312 serr = scr_read(ap, SCR_ERROR); 1312 sata_scr_read(ap, SCR_ERROR, &serr);
1313 scr_write_flush(ap, SCR_ERROR, serr); 1313 sata_scr_write_flush(ap, SCR_ERROR, serr);
1314 } 1314 }
1315 if (EDMA_ERR_SELF_DIS & edma_err_cause) { 1315 if (EDMA_ERR_SELF_DIS & edma_err_cause) {
1316 struct mv_port_priv *pp = ap->private_data; 1316 struct mv_port_priv *pp = ap->private_data;
@@ -1934,15 +1934,16 @@ static void __mv_phy_reset(struct ata_port *ap, int can_sleep)
1934 1934
1935 /* Issue COMRESET via SControl */ 1935 /* Issue COMRESET via SControl */
1936comreset_retry: 1936comreset_retry:
1937 scr_write_flush(ap, SCR_CONTROL, 0x301); 1937 sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
1938 __msleep(1, can_sleep); 1938 __msleep(1, can_sleep);
1939 1939
1940 scr_write_flush(ap, SCR_CONTROL, 0x300); 1940 sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
1941 __msleep(20, can_sleep); 1941 __msleep(20, can_sleep);
1942 1942
1943 timeout = jiffies + msecs_to_jiffies(200); 1943 timeout = jiffies + msecs_to_jiffies(200);
1944 do { 1944 do {
1945 sstatus = scr_read(ap, SCR_STATUS) & 0x3; 1945 sata_scr_read(ap, SCR_STATUS, &sstatus);
1946 sstatus &= 0x3;
1946 if ((sstatus == 3) || (sstatus == 0)) 1947 if ((sstatus == 3) || (sstatus == 0))
1947 break; 1948 break;
1948 1949
@@ -1959,11 +1960,12 @@ comreset_retry:
1959 "SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS), 1960 "SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
1960 mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL)); 1961 mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));
1961 1962
1962 if (sata_dev_present(ap)) { 1963 if (ata_port_online(ap)) {
1963 ata_port_probe(ap); 1964 ata_port_probe(ap);
1964 } else { 1965 } else {
1965 printk(KERN_INFO "ata%u: no device found (phy stat %08x)\n", 1966 sata_scr_read(ap, SCR_STATUS, &sstatus);
1966 ap->id, scr_read(ap, SCR_STATUS)); 1967 ata_port_printk(ap, KERN_INFO,
1968 "no device found (phy stat %08x)\n", sstatus);
1967 ata_port_disable(ap); 1969 ata_port_disable(ap);
1968 return; 1970 return;
1969 } 1971 }
@@ -2021,7 +2023,7 @@ static void mv_eng_timeout(struct ata_port *ap)
2021{ 2023{
2022 struct ata_queued_cmd *qc; 2024 struct ata_queued_cmd *qc;
2023 2025
2024 printk(KERN_ERR "ata%u: Entering mv_eng_timeout\n",ap->id); 2026 ata_port_printk(ap, KERN_ERR, "Entering mv_eng_timeout\n");
2025 DPRINTK("All regs @ start of eng_timeout\n"); 2027 DPRINTK("All regs @ start of eng_timeout\n");
2026 mv_dump_all_regs(ap->host_set->mmio_base, ap->port_no, 2028 mv_dump_all_regs(ap->host_set->mmio_base, ap->port_no,
2027 to_pci_dev(ap->host_set->dev)); 2029 to_pci_dev(ap->host_set->dev));
diff --git a/drivers/scsi/sata_promise.c b/drivers/scsi/sata_promise.c
index aaf896a0c63a..bb000438cb6c 100644
--- a/drivers/scsi/sata_promise.c
+++ b/drivers/scsi/sata_promise.c
@@ -436,7 +436,7 @@ static void pdc_eng_timeout(struct ata_port *ap)
436 switch (qc->tf.protocol) { 436 switch (qc->tf.protocol) {
437 case ATA_PROT_DMA: 437 case ATA_PROT_DMA:
438 case ATA_PROT_NODATA: 438 case ATA_PROT_NODATA:
439 printk(KERN_ERR "ata%u: command timeout\n", ap->id); 439 ata_port_printk(ap, KERN_ERR, "command timeout\n");
440 drv_stat = ata_wait_idle(ap); 440 drv_stat = ata_wait_idle(ap);
441 qc->err_mask |= __ac_err_mask(drv_stat); 441 qc->err_mask |= __ac_err_mask(drv_stat);
442 break; 442 break;
@@ -444,8 +444,9 @@ static void pdc_eng_timeout(struct ata_port *ap)
444 default: 444 default:
445 drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000); 445 drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
446 446
447 printk(KERN_ERR "ata%u: unknown timeout, cmd 0x%x stat 0x%x\n", 447 ata_port_printk(ap, KERN_ERR,
448 ap->id, qc->tf.command, drv_stat); 448 "unknown timeout, cmd 0x%x stat 0x%x\n",
449 qc->tf.command, drv_stat);
449 450
450 qc->err_mask |= ac_err_mask(drv_stat); 451 qc->err_mask |= ac_err_mask(drv_stat);
451 break; 452 break;
diff --git a/drivers/scsi/sata_sil.c b/drivers/scsi/sata_sil.c
index c9333577330e..aa63044eed2e 100644
--- a/drivers/scsi/sata_sil.c
+++ b/drivers/scsi/sata_sil.c
@@ -96,6 +96,8 @@ static void sil_dev_config(struct ata_port *ap, struct ata_device *dev);
96static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg); 96static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg);
97static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); 97static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
98static void sil_post_set_mode (struct ata_port *ap); 98static void sil_post_set_mode (struct ata_port *ap);
99static void sil_freeze(struct ata_port *ap);
100static void sil_thaw(struct ata_port *ap);
99 101
100 102
101static const struct pci_device_id sil_pci_tbl[] = { 103static const struct pci_device_id sil_pci_tbl[] = {
@@ -174,7 +176,10 @@ static const struct ata_port_operations sil_ops = {
174 .bmdma_status = ata_bmdma_status, 176 .bmdma_status = ata_bmdma_status,
175 .qc_prep = ata_qc_prep, 177 .qc_prep = ata_qc_prep,
176 .qc_issue = ata_qc_issue_prot, 178 .qc_issue = ata_qc_issue_prot,
177 .eng_timeout = ata_eng_timeout, 179 .freeze = sil_freeze,
180 .thaw = sil_thaw,
181 .error_handler = ata_bmdma_error_handler,
182 .post_internal_cmd = ata_bmdma_post_internal_cmd,
178 .irq_handler = ata_interrupt, 183 .irq_handler = ata_interrupt,
179 .irq_clear = ata_bmdma_irq_clear, 184 .irq_clear = ata_bmdma_irq_clear,
180 .scr_read = sil_scr_read, 185 .scr_read = sil_scr_read,
@@ -314,6 +319,33 @@ static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
314 writel(val, mmio); 319 writel(val, mmio);
315} 320}
316 321
322static void sil_freeze(struct ata_port *ap)
323{
324 void __iomem *mmio_base = ap->host_set->mmio_base;
325 u32 tmp;
326
327 /* plug IRQ */
328 tmp = readl(mmio_base + SIL_SYSCFG);
329 tmp |= SIL_MASK_IDE0_INT << ap->port_no;
330 writel(tmp, mmio_base + SIL_SYSCFG);
331 readl(mmio_base + SIL_SYSCFG); /* flush */
332}
333
334static void sil_thaw(struct ata_port *ap)
335{
336 void __iomem *mmio_base = ap->host_set->mmio_base;
337 u32 tmp;
338
339 /* clear IRQ */
340 ata_chk_status(ap);
341 ata_bmdma_irq_clear(ap);
342
343 /* turn on IRQ */
344 tmp = readl(mmio_base + SIL_SYSCFG);
345 tmp &= ~(SIL_MASK_IDE0_INT << ap->port_no);
346 writel(tmp, mmio_base + SIL_SYSCFG);
347}
348
317/** 349/**
318 * sil_dev_config - Apply device/host-specific errata fixups 350 * sil_dev_config - Apply device/host-specific errata fixups
319 * @ap: Port containing device to be examined 351 * @ap: Port containing device to be examined
@@ -360,16 +392,16 @@ static void sil_dev_config(struct ata_port *ap, struct ata_device *dev)
360 if (slow_down || 392 if (slow_down ||
361 ((ap->flags & SIL_FLAG_MOD15WRITE) && 393 ((ap->flags & SIL_FLAG_MOD15WRITE) &&
362 (quirks & SIL_QUIRK_MOD15WRITE))) { 394 (quirks & SIL_QUIRK_MOD15WRITE))) {
363 printk(KERN_INFO "ata%u(%u): applying Seagate errata fix (mod15write workaround)\n", 395 ata_dev_printk(dev, KERN_INFO, "applying Seagate errata fix "
364 ap->id, dev->devno); 396 "(mod15write workaround)\n");
365 dev->max_sectors = 15; 397 dev->max_sectors = 15;
366 return; 398 return;
367 } 399 }
368 400
369 /* limit to udma5 */ 401 /* limit to udma5 */
370 if (quirks & SIL_QUIRK_UDMA5MAX) { 402 if (quirks & SIL_QUIRK_UDMA5MAX) {
371 printk(KERN_INFO "ata%u(%u): applying Maxtor errata fix %s\n", 403 ata_dev_printk(dev, KERN_INFO,
372 ap->id, dev->devno, model_num); 404 "applying Maxtor errata fix %s\n", model_num);
373 dev->udma_mask &= ATA_UDMA5; 405 dev->udma_mask &= ATA_UDMA5;
374 return; 406 return;
375 } 407 }
@@ -384,7 +416,7 @@ static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
384 int rc; 416 int rc;
385 unsigned int i; 417 unsigned int i;
386 int pci_dev_busy = 0; 418 int pci_dev_busy = 0;
387 u32 tmp, irq_mask; 419 u32 tmp;
388 u8 cls; 420 u8 cls;
389 421
390 if (!printed_version++) 422 if (!printed_version++)
@@ -474,24 +506,11 @@ static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
474 } 506 }
475 507
476 if (ent->driver_data == sil_3114) { 508 if (ent->driver_data == sil_3114) {
477 irq_mask = SIL_MASK_4PORT;
478
479 /* flip the magic "make 4 ports work" bit */ 509 /* flip the magic "make 4 ports work" bit */
480 tmp = readl(mmio_base + sil_port[2].bmdma); 510 tmp = readl(mmio_base + sil_port[2].bmdma);
481 if ((tmp & SIL_INTR_STEERING) == 0) 511 if ((tmp & SIL_INTR_STEERING) == 0)
482 writel(tmp | SIL_INTR_STEERING, 512 writel(tmp | SIL_INTR_STEERING,
483 mmio_base + sil_port[2].bmdma); 513 mmio_base + sil_port[2].bmdma);
484
485 } else {
486 irq_mask = SIL_MASK_2PORT;
487 }
488
489 /* make sure IDE0/1/2/3 interrupts are not masked */
490 tmp = readl(mmio_base + SIL_SYSCFG);
491 if (tmp & irq_mask) {
492 tmp &= ~irq_mask;
493 writel(tmp, mmio_base + SIL_SYSCFG);
494 readl(mmio_base + SIL_SYSCFG); /* flush */
495 } 514 }
496 515
497 /* mask all SATA phy-related interrupts */ 516 /* mask all SATA phy-related interrupts */
diff --git a/drivers/scsi/sata_sil24.c b/drivers/scsi/sata_sil24.c
index e9fd869140c5..6e7728cfaf6b 100644
--- a/drivers/scsi/sata_sil24.c
+++ b/drivers/scsi/sata_sil24.c
@@ -156,6 +156,9 @@ enum {
156 PORT_IRQ_HANDSHAKE = (1 << 10), /* handshake error threshold */ 156 PORT_IRQ_HANDSHAKE = (1 << 10), /* handshake error threshold */
157 PORT_IRQ_SDB_NOTIFY = (1 << 11), /* SDB notify received */ 157 PORT_IRQ_SDB_NOTIFY = (1 << 11), /* SDB notify received */
158 158
159 DEF_PORT_IRQ = PORT_IRQ_COMPLETE | PORT_IRQ_ERROR |
160 PORT_IRQ_DEV_XCHG | PORT_IRQ_UNK_FIS,
161
159 /* bits[27:16] are unmasked (raw) */ 162 /* bits[27:16] are unmasked (raw) */
160 PORT_IRQ_RAW_SHIFT = 16, 163 PORT_IRQ_RAW_SHIFT = 16,
161 PORT_IRQ_MASKED_MASK = 0x7ff, 164 PORT_IRQ_MASKED_MASK = 0x7ff,
@@ -242,6 +245,58 @@ union sil24_cmd_block {
242 struct sil24_atapi_block atapi; 245 struct sil24_atapi_block atapi;
243}; 246};
244 247
248static struct sil24_cerr_info {
249 unsigned int err_mask, action;
250 const char *desc;
251} sil24_cerr_db[] = {
252 [0] = { AC_ERR_DEV, ATA_EH_REVALIDATE,
253 "device error" },
254 [PORT_CERR_DEV] = { AC_ERR_DEV, ATA_EH_REVALIDATE,
255 "device error via D2H FIS" },
256 [PORT_CERR_SDB] = { AC_ERR_DEV, ATA_EH_REVALIDATE,
257 "device error via SDB FIS" },
258 [PORT_CERR_DATA] = { AC_ERR_ATA_BUS, ATA_EH_SOFTRESET,
259 "error in data FIS" },
260 [PORT_CERR_SEND] = { AC_ERR_ATA_BUS, ATA_EH_SOFTRESET,
261 "failed to transmit command FIS" },
262 [PORT_CERR_INCONSISTENT] = { AC_ERR_HSM, ATA_EH_SOFTRESET,
263 "protocol mismatch" },
264 [PORT_CERR_DIRECTION] = { AC_ERR_HSM, ATA_EH_SOFTRESET,
265 "data directon mismatch" },
266 [PORT_CERR_UNDERRUN] = { AC_ERR_HSM, ATA_EH_SOFTRESET,
267 "ran out of SGEs while writing" },
268 [PORT_CERR_OVERRUN] = { AC_ERR_HSM, ATA_EH_SOFTRESET,
269 "ran out of SGEs while reading" },
270 [PORT_CERR_PKT_PROT] = { AC_ERR_HSM, ATA_EH_SOFTRESET,
271 "invalid data directon for ATAPI CDB" },
272 [PORT_CERR_SGT_BOUNDARY] = { AC_ERR_SYSTEM, ATA_EH_SOFTRESET,
273 "SGT no on qword boundary" },
274 [PORT_CERR_SGT_TGTABRT] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
275 "PCI target abort while fetching SGT" },
276 [PORT_CERR_SGT_MSTABRT] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
277 "PCI master abort while fetching SGT" },
278 [PORT_CERR_SGT_PCIPERR] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
279 "PCI parity error while fetching SGT" },
280 [PORT_CERR_CMD_BOUNDARY] = { AC_ERR_SYSTEM, ATA_EH_SOFTRESET,
281 "PRB not on qword boundary" },
282 [PORT_CERR_CMD_TGTABRT] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
283 "PCI target abort while fetching PRB" },
284 [PORT_CERR_CMD_MSTABRT] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
285 "PCI master abort while fetching PRB" },
286 [PORT_CERR_CMD_PCIPERR] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
287 "PCI parity error while fetching PRB" },
288 [PORT_CERR_XFR_UNDEF] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
289 "undefined error while transferring data" },
290 [PORT_CERR_XFR_TGTABRT] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
291 "PCI target abort while transferring data" },
292 [PORT_CERR_XFR_MSTABRT] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
293 "PCI master abort while transferring data" },
294 [PORT_CERR_XFR_PCIPERR] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
295 "PCI parity error while transferring data" },
296 [PORT_CERR_SENDSERVICE] = { AC_ERR_HSM, ATA_EH_SOFTRESET,
297 "FIS received while sending service FIS" },
298};
299
245/* 300/*
246 * ap->private_data 301 * ap->private_data
247 * 302 *
@@ -269,8 +324,11 @@ static int sil24_probe_reset(struct ata_port *ap, unsigned int *classes);
269static void sil24_qc_prep(struct ata_queued_cmd *qc); 324static void sil24_qc_prep(struct ata_queued_cmd *qc);
270static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc); 325static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc);
271static void sil24_irq_clear(struct ata_port *ap); 326static void sil24_irq_clear(struct ata_port *ap);
272static void sil24_eng_timeout(struct ata_port *ap);
273static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *regs); 327static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
328static void sil24_freeze(struct ata_port *ap);
329static void sil24_thaw(struct ata_port *ap);
330static void sil24_error_handler(struct ata_port *ap);
331static void sil24_post_internal_cmd(struct ata_queued_cmd *qc);
274static int sil24_port_start(struct ata_port *ap); 332static int sil24_port_start(struct ata_port *ap);
275static void sil24_port_stop(struct ata_port *ap); 333static void sil24_port_stop(struct ata_port *ap);
276static void sil24_host_stop(struct ata_host_set *host_set); 334static void sil24_host_stop(struct ata_host_set *host_set);
@@ -325,14 +383,17 @@ static const struct ata_port_operations sil24_ops = {
325 .qc_prep = sil24_qc_prep, 383 .qc_prep = sil24_qc_prep,
326 .qc_issue = sil24_qc_issue, 384 .qc_issue = sil24_qc_issue,
327 385
328 .eng_timeout = sil24_eng_timeout,
329
330 .irq_handler = sil24_interrupt, 386 .irq_handler = sil24_interrupt,
331 .irq_clear = sil24_irq_clear, 387 .irq_clear = sil24_irq_clear,
332 388
333 .scr_read = sil24_scr_read, 389 .scr_read = sil24_scr_read,
334 .scr_write = sil24_scr_write, 390 .scr_write = sil24_scr_write,
335 391
392 .freeze = sil24_freeze,
393 .thaw = sil24_thaw,
394 .error_handler = sil24_error_handler,
395 .post_internal_cmd = sil24_post_internal_cmd,
396
336 .port_start = sil24_port_start, 397 .port_start = sil24_port_start,
337 .port_stop = sil24_port_stop, 398 .port_stop = sil24_port_stop,
338 .host_stop = sil24_host_stop, 399 .host_stop = sil24_host_stop,
@@ -459,21 +520,17 @@ static int sil24_softreset(struct ata_port *ap, unsigned int *class)
459 struct sil24_port_priv *pp = ap->private_data; 520 struct sil24_port_priv *pp = ap->private_data;
460 struct sil24_prb *prb = &pp->cmd_block[0].ata.prb; 521 struct sil24_prb *prb = &pp->cmd_block[0].ata.prb;
461 dma_addr_t paddr = pp->cmd_block_dma; 522 dma_addr_t paddr = pp->cmd_block_dma;
462 u32 mask, irq_enable, irq_stat; 523 u32 mask, irq_stat;
463 const char *reason; 524 const char *reason;
464 525
465 DPRINTK("ENTER\n"); 526 DPRINTK("ENTER\n");
466 527
467 if (!sata_dev_present(ap)) { 528 if (ata_port_offline(ap)) {
468 DPRINTK("PHY reports no device\n"); 529 DPRINTK("PHY reports no device\n");
469 *class = ATA_DEV_NONE; 530 *class = ATA_DEV_NONE;
470 goto out; 531 goto out;
471 } 532 }
472 533
473 /* temporarily turn off IRQs during SRST */
474 irq_enable = readl(port + PORT_IRQ_ENABLE_SET);
475 writel(irq_enable, port + PORT_IRQ_ENABLE_CLR);
476
477 /* put the port into known state */ 534 /* put the port into known state */
478 if (sil24_init_port(ap)) { 535 if (sil24_init_port(ap)) {
479 reason ="port not ready"; 536 reason ="port not ready";
@@ -494,9 +551,6 @@ static int sil24_softreset(struct ata_port *ap, unsigned int *class)
494 writel(irq_stat, port + PORT_IRQ_STAT); /* clear IRQs */ 551 writel(irq_stat, port + PORT_IRQ_STAT); /* clear IRQs */
495 irq_stat >>= PORT_IRQ_RAW_SHIFT; 552 irq_stat >>= PORT_IRQ_RAW_SHIFT;
496 553
497 /* restore IRQs */
498 writel(irq_enable, port + PORT_IRQ_ENABLE_SET);
499
500 if (!(irq_stat & PORT_IRQ_COMPLETE)) { 554 if (!(irq_stat & PORT_IRQ_COMPLETE)) {
501 if (irq_stat & PORT_IRQ_ERROR) 555 if (irq_stat & PORT_IRQ_ERROR)
502 reason = "SRST command error"; 556 reason = "SRST command error";
@@ -516,7 +570,7 @@ static int sil24_softreset(struct ata_port *ap, unsigned int *class)
516 return 0; 570 return 0;
517 571
518 err: 572 err:
519 printk(KERN_ERR "ata%u: softreset failed (%s)\n", ap->id, reason); 573 ata_port_printk(ap, KERN_ERR, "softreset failed (%s)\n", reason);
520 return -EIO; 574 return -EIO;
521} 575}
522 576
@@ -528,10 +582,10 @@ static int sil24_hardreset(struct ata_port *ap, unsigned int *class)
528 u32 tmp; 582 u32 tmp;
529 583
530 /* sil24 does the right thing(tm) without any protection */ 584 /* sil24 does the right thing(tm) without any protection */
531 ata_set_sata_spd(ap); 585 sata_set_spd(ap);
532 586
533 tout_msec = 100; 587 tout_msec = 100;
534 if (sata_dev_present(ap)) 588 if (ata_port_online(ap))
535 tout_msec = 5000; 589 tout_msec = 5000;
536 590
537 writel(PORT_CS_DEV_RST, port + PORT_CTRL_STAT); 591 writel(PORT_CS_DEV_RST, port + PORT_CTRL_STAT);
@@ -544,7 +598,7 @@ static int sil24_hardreset(struct ata_port *ap, unsigned int *class)
544 msleep(100); 598 msleep(100);
545 599
546 if (tmp & PORT_CS_DEV_RST) { 600 if (tmp & PORT_CS_DEV_RST) {
547 if (!sata_dev_present(ap)) 601 if (ata_port_offline(ap))
548 return 0; 602 return 0;
549 reason = "link not ready"; 603 reason = "link not ready";
550 goto err; 604 goto err;
@@ -561,7 +615,7 @@ static int sil24_hardreset(struct ata_port *ap, unsigned int *class)
561 return 0; 615 return 0;
562 616
563 err: 617 err:
564 printk(KERN_ERR "ata%u: hardreset failed (%s)\n", ap->id, reason); 618 ata_port_printk(ap, KERN_ERR, "hardreset failed (%s)\n", reason);
565 return -EIO; 619 return -EIO;
566} 620}
567 621
@@ -655,166 +709,134 @@ static void sil24_irq_clear(struct ata_port *ap)
655 /* unused */ 709 /* unused */
656} 710}
657 711
658static int __sil24_restart_controller(void __iomem *port) 712static void sil24_freeze(struct ata_port *ap)
659{ 713{
660 u32 tmp; 714 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
661 int cnt;
662
663 writel(PORT_CS_INIT, port + PORT_CTRL_STAT);
664
665 /* Max ~10ms */
666 for (cnt = 0; cnt < 10000; cnt++) {
667 tmp = readl(port + PORT_CTRL_STAT);
668 if (tmp & PORT_CS_RDY)
669 return 0;
670 udelay(1);
671 }
672
673 return -1;
674}
675 715
676static void sil24_restart_controller(struct ata_port *ap) 716 /* Port-wide IRQ mask in HOST_CTRL doesn't really work, clear
677{ 717 * PORT_IRQ_ENABLE instead.
678 if (__sil24_restart_controller((void __iomem *)ap->ioaddr.cmd_addr)) 718 */
679 printk(KERN_ERR DRV_NAME 719 writel(0xffff, port + PORT_IRQ_ENABLE_CLR);
680 " ata%u: failed to restart controller\n", ap->id);
681} 720}
682 721
683static int __sil24_reset_controller(void __iomem *port) 722static void sil24_thaw(struct ata_port *ap)
684{ 723{
685 int cnt; 724 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
686 u32 tmp; 725 u32 tmp;
687 726
688 /* Reset controller state. Is this correct? */ 727 /* clear IRQ */
689 writel(PORT_CS_DEV_RST, port + PORT_CTRL_STAT); 728 tmp = readl(port + PORT_IRQ_STAT);
690 readl(port + PORT_CTRL_STAT); /* sync */ 729 writel(tmp, port + PORT_IRQ_STAT);
691 730
692 /* Max ~100ms */ 731 /* turn IRQ back on */
693 for (cnt = 0; cnt < 1000; cnt++) { 732 writel(DEF_PORT_IRQ, port + PORT_IRQ_ENABLE_SET);
694 udelay(100);
695 tmp = readl(port + PORT_CTRL_STAT);
696 if (!(tmp & PORT_CS_DEV_RST))
697 break;
698 }
699
700 if (tmp & PORT_CS_DEV_RST)
701 return -1;
702
703 if (tmp & PORT_CS_RDY)
704 return 0;
705
706 return __sil24_restart_controller(port);
707}
708
709static void sil24_reset_controller(struct ata_port *ap)
710{
711 printk(KERN_NOTICE DRV_NAME
712 " ata%u: resetting controller...\n", ap->id);
713 if (__sil24_reset_controller((void __iomem *)ap->ioaddr.cmd_addr))
714 printk(KERN_ERR DRV_NAME
715 " ata%u: failed to reset controller\n", ap->id);
716}
717
718static void sil24_eng_timeout(struct ata_port *ap)
719{
720 struct ata_queued_cmd *qc;
721
722 qc = ata_qc_from_tag(ap, ap->active_tag);
723
724 printk(KERN_ERR "ata%u: command timeout\n", ap->id);
725 qc->err_mask |= AC_ERR_TIMEOUT;
726 ata_eh_qc_complete(qc);
727
728 sil24_reset_controller(ap);
729} 733}
730 734
731static void sil24_error_intr(struct ata_port *ap, u32 slot_stat) 735static void sil24_error_intr(struct ata_port *ap)
732{ 736{
733 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
734 struct sil24_port_priv *pp = ap->private_data;
735 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr; 737 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
736 u32 irq_stat, cmd_err, sstatus, serror; 738 struct ata_eh_info *ehi = &ap->eh_info;
737 unsigned int err_mask; 739 int freeze = 0;
740 u32 irq_stat;
738 741
742 /* on error, we need to clear IRQ explicitly */
739 irq_stat = readl(port + PORT_IRQ_STAT); 743 irq_stat = readl(port + PORT_IRQ_STAT);
740 writel(irq_stat, port + PORT_IRQ_STAT); /* clear irq */ 744 writel(irq_stat, port + PORT_IRQ_STAT);
741 745
742 if (!(irq_stat & PORT_IRQ_ERROR)) { 746 /* first, analyze and record host port events */
743 /* ignore non-completion, non-error irqs for now */ 747 ata_ehi_clear_desc(ehi);
744 printk(KERN_WARNING DRV_NAME
745 "ata%u: non-error exception irq (irq_stat %x)\n",
746 ap->id, irq_stat);
747 return;
748 }
749 748
750 cmd_err = readl(port + PORT_CMD_ERR); 749 ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);
751 sstatus = readl(port + PORT_SSTATUS);
752 serror = readl(port + PORT_SERROR);
753 if (serror)
754 writel(serror, port + PORT_SERROR);
755 750
756 /* 751 if (irq_stat & PORT_IRQ_DEV_XCHG) {
757 * Don't log ATAPI device errors. They're supposed to happen 752 ehi->err_mask |= AC_ERR_ATA_BUS;
758 * and any serious errors will be logged using sense data by 753 /* sil24 doesn't recover very well from phy
759 * the SCSI layer. 754 * disconnection with a softreset. Force hardreset.
760 */
761 if (ap->device[0].class != ATA_DEV_ATAPI || cmd_err > PORT_CERR_SDB)
762 printk("ata%u: error interrupt on port%d\n"
763 " stat=0x%x irq=0x%x cmd_err=%d sstatus=0x%x serror=0x%x\n",
764 ap->id, ap->port_no, slot_stat, irq_stat, cmd_err, sstatus, serror);
765
766 if (cmd_err == PORT_CERR_DEV || cmd_err == PORT_CERR_SDB) {
767 /*
768 * Device is reporting error, tf registers are valid.
769 */
770 sil24_update_tf(ap);
771 err_mask = ac_err_mask(pp->tf.command);
772 sil24_restart_controller(ap);
773 } else {
774 /*
775 * Other errors. libata currently doesn't have any
776 * mechanism to report these errors. Just turn on
777 * ATA_ERR.
778 */ 755 */
779 err_mask = AC_ERR_OTHER; 756 ehi->action |= ATA_EH_HARDRESET;
780 sil24_reset_controller(ap); 757 ata_ehi_push_desc(ehi, ", device_exchanged");
758 freeze = 1;
781 } 759 }
782 760
783 if (qc) { 761 if (irq_stat & PORT_IRQ_UNK_FIS) {
784 qc->err_mask |= err_mask; 762 ehi->err_mask |= AC_ERR_HSM;
785 ata_qc_complete(qc); 763 ehi->action |= ATA_EH_SOFTRESET;
764 ata_ehi_push_desc(ehi , ", unknown FIS");
765 freeze = 1;
786 } 766 }
767
768 /* deal with command error */
769 if (irq_stat & PORT_IRQ_ERROR) {
770 struct sil24_cerr_info *ci = NULL;
771 unsigned int err_mask = 0, action = 0;
772 struct ata_queued_cmd *qc;
773 u32 cerr;
774
775 /* analyze CMD_ERR */
776 cerr = readl(port + PORT_CMD_ERR);
777 if (cerr < ARRAY_SIZE(sil24_cerr_db))
778 ci = &sil24_cerr_db[cerr];
779
780 if (ci && ci->desc) {
781 err_mask |= ci->err_mask;
782 action |= ci->action;
783 ata_ehi_push_desc(ehi, ", %s", ci->desc);
784 } else {
785 err_mask |= AC_ERR_OTHER;
786 action |= ATA_EH_SOFTRESET;
787 ata_ehi_push_desc(ehi, ", unknown command error %d",
788 cerr);
789 }
790
791 /* record error info */
792 qc = ata_qc_from_tag(ap, ap->active_tag);
793 if (qc) {
794 int tag = qc->tag;
795 if (unlikely(ata_tag_internal(tag)))
796 tag = 0;
797 sil24_update_tf(ap);
798 qc->err_mask |= err_mask;
799 } else
800 ehi->err_mask |= err_mask;
801
802 ehi->action |= action;
803 }
804
805 /* freeze or abort */
806 if (freeze)
807 ata_port_freeze(ap);
808 else
809 ata_port_abort(ap);
787} 810}
788 811
789static inline void sil24_host_intr(struct ata_port *ap) 812static inline void sil24_host_intr(struct ata_port *ap)
790{ 813{
791 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
792 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr; 814 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
815 struct ata_queued_cmd *qc;
793 u32 slot_stat; 816 u32 slot_stat;
794 817
795 slot_stat = readl(port + PORT_SLOT_STAT); 818 slot_stat = readl(port + PORT_SLOT_STAT);
796 if (!(slot_stat & HOST_SSTAT_ATTN)) {
797 struct sil24_port_priv *pp = ap->private_data;
798
799 if (ap->flags & SIL24_FLAG_PCIX_IRQ_WOC)
800 writel(PORT_IRQ_COMPLETE, port + PORT_IRQ_STAT);
801
802 /*
803 * !HOST_SSAT_ATTN guarantees successful completion,
804 * so reading back tf registers is unnecessary for
805 * most commands. TODO: read tf registers for
806 * commands which require these values on successful
807 * completion (EXECUTE DEVICE DIAGNOSTIC, CHECK POWER,
808 * DEVICE RESET and READ PORT MULTIPLIER (any more?).
809 */
810 sil24_update_tf(ap);
811 819
812 if (qc) { 820 if (unlikely(slot_stat & HOST_SSTAT_ATTN)) {
813 qc->err_mask |= ac_err_mask(pp->tf.command); 821 sil24_error_intr(ap);
814 ata_qc_complete(qc); 822 return;
815 } 823 }
816 } else 824
817 sil24_error_intr(ap, slot_stat); 825 if (ap->flags & SIL24_FLAG_PCIX_IRQ_WOC)
826 writel(PORT_IRQ_COMPLETE, port + PORT_IRQ_STAT);
827
828 qc = ata_qc_from_tag(ap, ap->active_tag);
829 if (qc) {
830 if (qc->flags & ATA_QCFLAG_RESULT_TF)
831 sil24_update_tf(ap);
832 ata_qc_complete(qc);
833 return;
834 }
835
836 if (ata_ratelimit())
837 ata_port_printk(ap, KERN_INFO, "spurious interrupt "
838 "(slot_stat 0x%x active_tag %d)\n",
839 slot_stat, ap->active_tag);
818} 840}
819 841
820static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *regs) 842static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
@@ -854,6 +876,31 @@ static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *
854 return IRQ_RETVAL(handled); 876 return IRQ_RETVAL(handled);
855} 877}
856 878
879static void sil24_error_handler(struct ata_port *ap)
880{
881 struct ata_eh_context *ehc = &ap->eh_context;
882
883 if (sil24_init_port(ap)) {
884 ata_eh_freeze_port(ap);
885 ehc->i.action |= ATA_EH_HARDRESET;
886 }
887
888 /* perform recovery */
889 ata_do_eh(ap, sil24_softreset, sil24_hardreset, ata_std_postreset);
890}
891
892static void sil24_post_internal_cmd(struct ata_queued_cmd *qc)
893{
894 struct ata_port *ap = qc->ap;
895
896 if (qc->flags & ATA_QCFLAG_FAILED)
897 qc->err_mask |= AC_ERR_OTHER;
898
899 /* make DMA engine forget about the failed command */
900 if (qc->err_mask)
901 sil24_init_port(ap);
902}
903
857static inline void sil24_cblk_free(struct sil24_port_priv *pp, struct device *dev) 904static inline void sil24_cblk_free(struct sil24_port_priv *pp, struct device *dev)
858{ 905{
859 const size_t cb_size = sizeof(*pp->cmd_block); 906 const size_t cb_size = sizeof(*pp->cmd_block);
@@ -1066,15 +1113,6 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1066 /* Always use 64bit activation */ 1113 /* Always use 64bit activation */
1067 writel(PORT_CS_32BIT_ACTV, port + PORT_CTRL_CLR); 1114 writel(PORT_CS_32BIT_ACTV, port + PORT_CTRL_CLR);
1068 1115
1069 /* Configure interrupts */
1070 writel(0xffff, port + PORT_IRQ_ENABLE_CLR);
1071 writel(PORT_IRQ_COMPLETE | PORT_IRQ_ERROR |
1072 PORT_IRQ_SDB_NOTIFY, port + PORT_IRQ_ENABLE_SET);
1073
1074 /* Clear interrupts */
1075 writel(0x0fff0fff, port + PORT_IRQ_STAT);
1076 writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_CLR);
1077
1078 /* Clear port multiplier enable and resume bits */ 1116 /* Clear port multiplier enable and resume bits */
1079 writel(PORT_CS_PM_EN | PORT_CS_RESUME, port + PORT_CTRL_CLR); 1117 writel(PORT_CS_PM_EN | PORT_CS_RESUME, port + PORT_CTRL_CLR);
1080 } 1118 }
diff --git a/drivers/scsi/sata_sx4.c b/drivers/scsi/sata_sx4.c
index 4c07ba1f6e62..70a695488291 100644
--- a/drivers/scsi/sata_sx4.c
+++ b/drivers/scsi/sata_sx4.c
@@ -868,15 +868,16 @@ static void pdc_eng_timeout(struct ata_port *ap)
868 switch (qc->tf.protocol) { 868 switch (qc->tf.protocol) {
869 case ATA_PROT_DMA: 869 case ATA_PROT_DMA:
870 case ATA_PROT_NODATA: 870 case ATA_PROT_NODATA:
871 printk(KERN_ERR "ata%u: command timeout\n", ap->id); 871 ata_port_printk(ap, KERN_ERR, "command timeout\n");
872 qc->err_mask |= __ac_err_mask(ata_wait_idle(ap)); 872 qc->err_mask |= __ac_err_mask(ata_wait_idle(ap));
873 break; 873 break;
874 874
875 default: 875 default:
876 drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000); 876 drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
877 877
878 printk(KERN_ERR "ata%u: unknown timeout, cmd 0x%x stat 0x%x\n", 878 ata_port_printk(ap, KERN_ERR,
879 ap->id, qc->tf.command, drv_stat); 879 "unknown timeout, cmd 0x%x stat 0x%x\n",
880 qc->tf.command, drv_stat);
880 881
881 qc->err_mask |= ac_err_mask(drv_stat); 882 qc->err_mask |= ac_err_mask(drv_stat);
882 break; 883 break;
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 73994e2ac2cb..dae4f08adde0 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -720,6 +720,24 @@ void scsi_init_cmd_from_req(struct scsi_cmnd *cmd, struct scsi_request *sreq)
720static DEFINE_PER_CPU(struct list_head, scsi_done_q); 720static DEFINE_PER_CPU(struct list_head, scsi_done_q);
721 721
722/** 722/**
723 * scsi_req_abort_cmd -- Request command recovery for the specified command
724 * cmd: pointer to the SCSI command of interest
725 *
726 * This function requests that SCSI Core start recovery for the
727 * command by deleting the timer and adding the command to the eh
728 * queue. It can be called by either LLDDs or SCSI Core. LLDDs who
729 * implement their own error recovery MAY ignore the timeout event if
730 * they generated scsi_req_abort_cmd.
731 */
732void scsi_req_abort_cmd(struct scsi_cmnd *cmd)
733{
734 if (!scsi_delete_timer(cmd))
735 return;
736 scsi_times_out(cmd);
737}
738EXPORT_SYMBOL(scsi_req_abort_cmd);
739
740/**
723 * scsi_done - Enqueue the finished SCSI command into the done queue. 741 * scsi_done - Enqueue the finished SCSI command into the done queue.
724 * @cmd: The SCSI Command for which a low-level device driver (LLDD) gives 742 * @cmd: The SCSI Command for which a low-level device driver (LLDD) gives
725 * ownership back to SCSI Core -- i.e. the LLDD has finished with it. 743 * ownership back to SCSI Core -- i.e. the LLDD has finished with it.
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 1c75646f9689..9ca71cbefce0 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -56,6 +56,7 @@ void scsi_eh_wakeup(struct Scsi_Host *shost)
56 printk("Waking error handler thread\n")); 56 printk("Waking error handler thread\n"));
57 } 57 }
58} 58}
59EXPORT_SYMBOL_GPL(scsi_eh_wakeup);
59 60
60/** 61/**
61 * scsi_eh_scmd_add - add scsi cmd to error handling. 62 * scsi_eh_scmd_add - add scsi cmd to error handling.
@@ -1517,7 +1518,7 @@ int scsi_error_handler(void *data)
1517 */ 1518 */
1518 set_current_state(TASK_INTERRUPTIBLE); 1519 set_current_state(TASK_INTERRUPTIBLE);
1519 while (!kthread_should_stop()) { 1520 while (!kthread_should_stop()) {
1520 if (shost->host_failed == 0 || 1521 if ((shost->host_failed == 0 && shost->host_eh_scheduled == 0) ||
1521 shost->host_failed != shost->host_busy) { 1522 shost->host_failed != shost->host_busy) {
1522 SCSI_LOG_ERROR_RECOVERY(1, 1523 SCSI_LOG_ERROR_RECOVERY(1,
1523 printk("Error handler scsi_eh_%d sleeping\n", 1524 printk("Error handler scsi_eh_%d sleeping\n",
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 7b0f9a3810d2..c55d195b6f4f 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -566,7 +566,7 @@ void scsi_device_unbusy(struct scsi_device *sdev)
566 spin_lock_irqsave(shost->host_lock, flags); 566 spin_lock_irqsave(shost->host_lock, flags);
567 shost->host_busy--; 567 shost->host_busy--;
568 if (unlikely(scsi_host_in_recovery(shost) && 568 if (unlikely(scsi_host_in_recovery(shost) &&
569 shost->host_failed)) 569 (shost->host_failed || shost->host_eh_scheduled)))
570 scsi_eh_wakeup(shost); 570 scsi_eh_wakeup(shost);
571 spin_unlock(shost->host_lock); 571 spin_unlock(shost->host_lock);
572 spin_lock(sdev->request_queue->queue_lock); 572 spin_lock(sdev->request_queue->queue_lock);
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 27c48274e8cb..0b39081113be 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -63,7 +63,6 @@ extern int scsi_delete_timer(struct scsi_cmnd *);
63extern void scsi_times_out(struct scsi_cmnd *cmd); 63extern void scsi_times_out(struct scsi_cmnd *cmd);
64extern int scsi_error_handler(void *host); 64extern int scsi_error_handler(void *host);
65extern int scsi_decide_disposition(struct scsi_cmnd *cmd); 65extern int scsi_decide_disposition(struct scsi_cmnd *cmd);
66extern void scsi_eh_wakeup(struct Scsi_Host *shost);
67extern int scsi_eh_scmd_add(struct scsi_cmnd *, int); 66extern int scsi_eh_scmd_add(struct scsi_cmnd *, int);
68 67
69/* scsi_lib.c */ 68/* scsi_lib.c */
diff --git a/include/linux/ata.h b/include/linux/ata.h
index 206d859083ea..1cbeb434af9a 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -97,6 +97,9 @@ enum {
97 ATA_DRQ = (1 << 3), /* data request i/o */ 97 ATA_DRQ = (1 << 3), /* data request i/o */
98 ATA_ERR = (1 << 0), /* have an error */ 98 ATA_ERR = (1 << 0), /* have an error */
99 ATA_SRST = (1 << 2), /* software reset */ 99 ATA_SRST = (1 << 2), /* software reset */
100 ATA_ICRC = (1 << 7), /* interface CRC error */
101 ATA_UNC = (1 << 6), /* uncorrectable media error */
102 ATA_IDNF = (1 << 4), /* ID not found */
100 ATA_ABORTED = (1 << 2), /* command aborted */ 103 ATA_ABORTED = (1 << 2), /* command aborted */
101 104
102 /* ATA command block registers */ 105 /* ATA command block registers */
@@ -192,6 +195,16 @@ enum {
192 SCR_ACTIVE = 3, 195 SCR_ACTIVE = 3,
193 SCR_NOTIFICATION = 4, 196 SCR_NOTIFICATION = 4,
194 197
198 /* SError bits */
199 SERR_DATA_RECOVERED = (1 << 0), /* recovered data error */
200 SERR_COMM_RECOVERED = (1 << 1), /* recovered comm failure */
201 SERR_DATA = (1 << 8), /* unrecovered data error */
202 SERR_PERSISTENT = (1 << 9), /* persistent data/comm error */
203 SERR_PROTOCOL = (1 << 10), /* protocol violation */
204 SERR_INTERNAL = (1 << 11), /* host internal error */
205 SERR_PHYRDY_CHG = (1 << 16), /* PHY RDY changed */
206 SERR_DEV_XCHG = (1 << 26), /* device exchanged */
207
195 /* struct ata_taskfile flags */ 208 /* struct ata_taskfile flags */
196 ATA_TFLAG_LBA48 = (1 << 0), /* enable 48-bit LBA and "HOB" */ 209 ATA_TFLAG_LBA48 = (1 << 0), /* enable 48-bit LBA and "HOB" */
197 ATA_TFLAG_ISADDR = (1 << 1), /* enable r/w to nsect/lba regs */ 210 ATA_TFLAG_ISADDR = (1 << 1), /* enable r/w to nsect/lba regs */
diff --git a/include/linux/libata.h b/include/linux/libata.h
index bf86ee474533..db17723e23fb 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -108,7 +108,9 @@ enum {
108 LIBATA_MAX_PRD = ATA_MAX_PRD / 2, 108 LIBATA_MAX_PRD = ATA_MAX_PRD / 2,
109 ATA_MAX_PORTS = 8, 109 ATA_MAX_PORTS = 8,
110 ATA_DEF_QUEUE = 1, 110 ATA_DEF_QUEUE = 1,
111 ATA_MAX_QUEUE = 1, 111 /* tag ATA_MAX_QUEUE - 1 is reserved for internal commands */
112 ATA_MAX_QUEUE = 2,
113 ATA_TAG_INTERNAL = ATA_MAX_QUEUE - 1,
112 ATA_MAX_SECTORS = 200, /* FIXME */ 114 ATA_MAX_SECTORS = 200, /* FIXME */
113 ATA_MAX_BUS = 2, 115 ATA_MAX_BUS = 2,
114 ATA_DEF_BUSY_WAIT = 10000, 116 ATA_DEF_BUSY_WAIT = 10000,
@@ -149,11 +151,15 @@ enum {
149 ATA_FLAG_PIO_POLLING = (1 << 10), /* use polling PIO if LLD 151 ATA_FLAG_PIO_POLLING = (1 << 10), /* use polling PIO if LLD
150 * doesn't handle PIO interrupts */ 152 * doesn't handle PIO interrupts */
151 153
152 ATA_FLAG_DEBUGMSG = (1 << 17), 154 ATA_FLAG_DEBUGMSG = (1 << 14),
153 ATA_FLAG_FLUSH_PORT_TASK = (1 << 18), /* flush port task */ 155 ATA_FLAG_FLUSH_PORT_TASK = (1 << 15), /* flush port task */
154 156
155 ATA_FLAG_DISABLED = (1 << 19), /* port is disabled, ignore it */ 157 ATA_FLAG_EH_PENDING = (1 << 16), /* EH pending */
156 ATA_FLAG_SUSPENDED = (1 << 20), /* port is suspended */ 158 ATA_FLAG_FROZEN = (1 << 17), /* port is frozen */
159 ATA_FLAG_RECOVERED = (1 << 18), /* recovery action performed */
160
161 ATA_FLAG_DISABLED = (1 << 22), /* port is disabled, ignore it */
162 ATA_FLAG_SUSPENDED = (1 << 23), /* port is suspended (power) */
157 163
158 /* bits 24:31 of ap->flags are reserved for LLDD specific flags */ 164 /* bits 24:31 of ap->flags are reserved for LLDD specific flags */
159 165
@@ -163,7 +169,11 @@ enum {
163 ATA_QCFLAG_SINGLE = (1 << 2), /* no s/g, just a single buffer */ 169 ATA_QCFLAG_SINGLE = (1 << 2), /* no s/g, just a single buffer */
164 ATA_QCFLAG_DMAMAP = ATA_QCFLAG_SG | ATA_QCFLAG_SINGLE, 170 ATA_QCFLAG_DMAMAP = ATA_QCFLAG_SG | ATA_QCFLAG_SINGLE,
165 ATA_QCFLAG_IO = (1 << 3), /* standard IO command */ 171 ATA_QCFLAG_IO = (1 << 3), /* standard IO command */
166 ATA_QCFLAG_EH_SCHEDULED = (1 << 4), /* EH scheduled */ 172 ATA_QCFLAG_RESULT_TF = (1 << 4), /* result TF requested */
173
174 ATA_QCFLAG_FAILED = (1 << 16), /* cmd failed and is owned by EH */
175 ATA_QCFLAG_SENSE_VALID = (1 << 17), /* sense data valid */
176 ATA_QCFLAG_EH_SCHEDULED = (1 << 18), /* EH scheduled (obsolete) */
167 177
168 /* host set flags */ 178 /* host set flags */
169 ATA_HOST_SIMPLEX = (1 << 0), /* Host is simplex, one DMA channel per host_set only */ 179 ATA_HOST_SIMPLEX = (1 << 0), /* Host is simplex, one DMA channel per host_set only */
@@ -214,8 +224,29 @@ enum {
214 ATA_PORT_PRIMARY = (1 << 0), 224 ATA_PORT_PRIMARY = (1 << 0),
215 ATA_PORT_SECONDARY = (1 << 1), 225 ATA_PORT_SECONDARY = (1 << 1),
216 226
227 /* ering size */
228 ATA_ERING_SIZE = 32,
229
230 /* desc_len for ata_eh_info and context */
231 ATA_EH_DESC_LEN = 80,
232
233 /* reset / recovery action types */
234 ATA_EH_REVALIDATE = (1 << 0),
235 ATA_EH_SOFTRESET = (1 << 1),
236 ATA_EH_HARDRESET = (1 << 2),
237
238 ATA_EH_RESET_MASK = ATA_EH_SOFTRESET | ATA_EH_HARDRESET,
239
240 /* ata_eh_info->flags */
241 ATA_EHI_DID_RESET = (1 << 0), /* already reset this port */
242
243 /* max repeat if error condition is still set after ->error_handler */
244 ATA_EH_MAX_REPEAT = 5,
245
217 /* how hard are we gonna try to probe/recover devices */ 246 /* how hard are we gonna try to probe/recover devices */
218 ATA_PROBE_MAX_TRIES = 3, 247 ATA_PROBE_MAX_TRIES = 3,
248 ATA_EH_RESET_TRIES = 3,
249 ATA_EH_DEV_TRIES = 3,
219}; 250};
220 251
221enum hsm_task_states { 252enum hsm_task_states {
@@ -340,7 +371,7 @@ struct ata_queued_cmd {
340 struct scatterlist *__sg; 371 struct scatterlist *__sg;
341 372
342 unsigned int err_mask; 373 unsigned int err_mask;
343 374 struct ata_taskfile result_tf;
344 ata_qc_cb_t complete_fn; 375 ata_qc_cb_t complete_fn;
345 376
346 void *private_data; 377 void *private_data;
@@ -352,12 +383,24 @@ struct ata_host_stats {
352 unsigned long rw_reqbuf; 383 unsigned long rw_reqbuf;
353}; 384};
354 385
386struct ata_ering_entry {
387 int is_io;
388 unsigned int err_mask;
389 u64 timestamp;
390};
391
392struct ata_ering {
393 int cursor;
394 struct ata_ering_entry ring[ATA_ERING_SIZE];
395};
396
355struct ata_device { 397struct ata_device {
398 struct ata_port *ap;
356 u64 n_sectors; /* size of device, if ATA */ 399 u64 n_sectors; /* size of device, if ATA */
357 unsigned long flags; /* ATA_DFLAG_xxx */ 400 unsigned long flags; /* ATA_DFLAG_xxx */
358 unsigned int class; /* ATA_DEV_xxx */ 401 unsigned int class; /* ATA_DEV_xxx */
359 unsigned int devno; /* 0 or 1 */ 402 unsigned int devno; /* 0 or 1 */
360 u16 *id; /* IDENTIFY xxx DEVICE data */ 403 u16 id[ATA_ID_WORDS]; /* IDENTIFY xxx DEVICE data */
361 u8 pio_mode; 404 u8 pio_mode;
362 u8 dma_mode; 405 u8 dma_mode;
363 u8 xfer_mode; 406 u8 xfer_mode;
@@ -377,6 +420,24 @@ struct ata_device {
377 u16 cylinders; /* Number of cylinders */ 420 u16 cylinders; /* Number of cylinders */
378 u16 heads; /* Number of heads */ 421 u16 heads; /* Number of heads */
379 u16 sectors; /* Number of sectors per track */ 422 u16 sectors; /* Number of sectors per track */
423
424 /* error history */
425 struct ata_ering ering;
426};
427
428struct ata_eh_info {
429 struct ata_device *dev; /* offending device */
430 u32 serror; /* SError from LLDD */
431 unsigned int err_mask; /* port-wide err_mask */
432 unsigned int action; /* ATA_EH_* action mask */
433 unsigned int flags; /* ATA_EHI_* flags */
434 char desc[ATA_EH_DESC_LEN];
435 int desc_len;
436};
437
438struct ata_eh_context {
439 struct ata_eh_info i;
440 int tries[ATA_MAX_DEVICES];
380}; 441};
381 442
382struct ata_port { 443struct ata_port {
@@ -403,6 +464,11 @@ struct ata_port {
403 unsigned int cbl; /* cable type; ATA_CBL_xxx */ 464 unsigned int cbl; /* cable type; ATA_CBL_xxx */
404 unsigned int sata_spd_limit; /* SATA PHY speed limit */ 465 unsigned int sata_spd_limit; /* SATA PHY speed limit */
405 466
467 /* record runtime error info, protected by host_set lock */
468 struct ata_eh_info eh_info;
469 /* EH context owned by EH */
470 struct ata_eh_context eh_context;
471
406 struct ata_device device[ATA_MAX_DEVICES]; 472 struct ata_device device[ATA_MAX_DEVICES];
407 473
408 struct ata_queued_cmd qcmd[ATA_MAX_QUEUE]; 474 struct ata_queued_cmd qcmd[ATA_MAX_QUEUE];
@@ -421,6 +487,8 @@ struct ata_port {
421 struct list_head eh_done_q; 487 struct list_head eh_done_q;
422 488
423 void *private_data; 489 void *private_data;
490
491 u8 sector_buf[ATA_SECT_SIZE]; /* owned by EH */
424}; 492};
425 493
426struct ata_port_operations { 494struct ata_port_operations {
@@ -454,7 +522,15 @@ struct ata_port_operations {
454 void (*qc_prep) (struct ata_queued_cmd *qc); 522 void (*qc_prep) (struct ata_queued_cmd *qc);
455 unsigned int (*qc_issue) (struct ata_queued_cmd *qc); 523 unsigned int (*qc_issue) (struct ata_queued_cmd *qc);
456 524
457 void (*eng_timeout) (struct ata_port *ap); 525 /* Error handlers. ->error_handler overrides ->eng_timeout and
526 * indicates that new-style EH is in place.
527 */
528 void (*eng_timeout) (struct ata_port *ap); /* obsolete */
529
530 void (*freeze) (struct ata_port *ap);
531 void (*thaw) (struct ata_port *ap);
532 void (*error_handler) (struct ata_port *ap);
533 void (*post_internal_cmd) (struct ata_queued_cmd *qc);
458 534
459 irqreturn_t (*irq_handler)(int, void *, struct pt_regs *); 535 irqreturn_t (*irq_handler)(int, void *, struct pt_regs *);
460 void (*irq_clear) (struct ata_port *); 536 void (*irq_clear) (struct ata_port *);
@@ -500,7 +576,7 @@ extern void ata_port_probe(struct ata_port *);
500extern void __sata_phy_reset(struct ata_port *ap); 576extern void __sata_phy_reset(struct ata_port *ap);
501extern void sata_phy_reset(struct ata_port *ap); 577extern void sata_phy_reset(struct ata_port *ap);
502extern void ata_bus_reset(struct ata_port *ap); 578extern void ata_bus_reset(struct ata_port *ap);
503extern int ata_set_sata_spd(struct ata_port *ap); 579extern int sata_set_spd(struct ata_port *ap);
504extern int ata_drive_probe_reset(struct ata_port *ap, 580extern int ata_drive_probe_reset(struct ata_port *ap,
505 ata_probeinit_fn_t probeinit, 581 ata_probeinit_fn_t probeinit,
506 ata_reset_fn_t softreset, ata_reset_fn_t hardreset, 582 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
@@ -509,8 +585,7 @@ extern void ata_std_probeinit(struct ata_port *ap);
509extern int ata_std_softreset(struct ata_port *ap, unsigned int *classes); 585extern int ata_std_softreset(struct ata_port *ap, unsigned int *classes);
510extern int sata_std_hardreset(struct ata_port *ap, unsigned int *class); 586extern int sata_std_hardreset(struct ata_port *ap, unsigned int *class);
511extern void ata_std_postreset(struct ata_port *ap, unsigned int *classes); 587extern void ata_std_postreset(struct ata_port *ap, unsigned int *classes);
512extern int ata_dev_revalidate(struct ata_port *ap, struct ata_device *dev, 588extern int ata_dev_revalidate(struct ata_device *dev, int post_reset);
513 int post_reset);
514extern void ata_port_disable(struct ata_port *); 589extern void ata_port_disable(struct ata_port *);
515extern void ata_std_ports(struct ata_ioports *ioaddr); 590extern void ata_std_ports(struct ata_ioports *ioaddr);
516#ifdef CONFIG_PCI 591#ifdef CONFIG_PCI
@@ -526,14 +601,18 @@ extern void ata_host_set_remove(struct ata_host_set *host_set);
526extern int ata_scsi_detect(struct scsi_host_template *sht); 601extern int ata_scsi_detect(struct scsi_host_template *sht);
527extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg); 602extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
528extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)); 603extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
529extern void ata_eh_qc_complete(struct ata_queued_cmd *qc);
530extern void ata_eh_qc_retry(struct ata_queued_cmd *qc);
531extern int ata_scsi_release(struct Scsi_Host *host); 604extern int ata_scsi_release(struct Scsi_Host *host);
532extern unsigned int ata_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc); 605extern unsigned int ata_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc);
606extern int sata_scr_valid(struct ata_port *ap);
607extern int sata_scr_read(struct ata_port *ap, int reg, u32 *val);
608extern int sata_scr_write(struct ata_port *ap, int reg, u32 val);
609extern int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val);
610extern int ata_port_online(struct ata_port *ap);
611extern int ata_port_offline(struct ata_port *ap);
533extern int ata_scsi_device_resume(struct scsi_device *); 612extern int ata_scsi_device_resume(struct scsi_device *);
534extern int ata_scsi_device_suspend(struct scsi_device *, pm_message_t state); 613extern int ata_scsi_device_suspend(struct scsi_device *, pm_message_t state);
535extern int ata_device_resume(struct ata_port *, struct ata_device *); 614extern int ata_device_resume(struct ata_device *);
536extern int ata_device_suspend(struct ata_port *, struct ata_device *, pm_message_t state); 615extern int ata_device_suspend(struct ata_device *, pm_message_t state);
537extern int ata_ratelimit(void); 616extern int ata_ratelimit(void);
538extern unsigned int ata_busy_sleep(struct ata_port *ap, 617extern unsigned int ata_busy_sleep(struct ata_port *ap,
539 unsigned long timeout_pat, 618 unsigned long timeout_pat,
@@ -578,16 +657,22 @@ extern void ata_bmdma_start (struct ata_queued_cmd *qc);
578extern void ata_bmdma_stop(struct ata_queued_cmd *qc); 657extern void ata_bmdma_stop(struct ata_queued_cmd *qc);
579extern u8 ata_bmdma_status(struct ata_port *ap); 658extern u8 ata_bmdma_status(struct ata_port *ap);
580extern void ata_bmdma_irq_clear(struct ata_port *ap); 659extern void ata_bmdma_irq_clear(struct ata_port *ap);
581extern void __ata_qc_complete(struct ata_queued_cmd *qc); 660extern void ata_bmdma_freeze(struct ata_port *ap);
582extern void ata_scsi_simulate(struct ata_port *ap, struct ata_device *dev, 661extern void ata_bmdma_thaw(struct ata_port *ap);
583 struct scsi_cmnd *cmd, 662extern void ata_bmdma_drive_eh(struct ata_port *ap,
663 ata_reset_fn_t softreset,
664 ata_reset_fn_t hardreset,
665 ata_postreset_fn_t postreset);
666extern void ata_bmdma_error_handler(struct ata_port *ap);
667extern void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc);
668extern void ata_qc_complete(struct ata_queued_cmd *qc);
669extern void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd,
584 void (*done)(struct scsi_cmnd *)); 670 void (*done)(struct scsi_cmnd *));
585extern int ata_std_bios_param(struct scsi_device *sdev, 671extern int ata_std_bios_param(struct scsi_device *sdev,
586 struct block_device *bdev, 672 struct block_device *bdev,
587 sector_t capacity, int geom[]); 673 sector_t capacity, int geom[]);
588extern int ata_scsi_slave_config(struct scsi_device *sdev); 674extern int ata_scsi_slave_config(struct scsi_device *sdev);
589extern struct ata_device *ata_dev_pair(struct ata_port *ap, 675extern struct ata_device *ata_dev_pair(struct ata_device *adev);
590 struct ata_device *adev);
591 676
592/* 677/*
593 * Timing helpers 678 * Timing helpers
@@ -637,10 +722,46 @@ extern unsigned long ata_pci_default_filter(const struct ata_port *, struct ata_
637 * EH 722 * EH
638 */ 723 */
639extern void ata_eng_timeout(struct ata_port *ap); 724extern void ata_eng_timeout(struct ata_port *ap);
725
726extern void ata_port_schedule_eh(struct ata_port *ap);
727extern int ata_port_abort(struct ata_port *ap);
728extern int ata_port_freeze(struct ata_port *ap);
729
730extern void ata_eh_freeze_port(struct ata_port *ap);
731extern void ata_eh_thaw_port(struct ata_port *ap);
732
640extern void ata_eh_qc_complete(struct ata_queued_cmd *qc); 733extern void ata_eh_qc_complete(struct ata_queued_cmd *qc);
641extern void ata_eh_qc_retry(struct ata_queued_cmd *qc); 734extern void ata_eh_qc_retry(struct ata_queued_cmd *qc);
642 735
736extern void ata_do_eh(struct ata_port *ap, ata_reset_fn_t softreset,
737 ata_reset_fn_t hardreset, ata_postreset_fn_t postreset);
738
739/*
740 * printk helpers
741 */
742#define ata_port_printk(ap, lv, fmt, args...) \
743 printk(lv"ata%u: "fmt, (ap)->id , ##args)
744
745#define ata_dev_printk(dev, lv, fmt, args...) \
746 printk(lv"ata%u.%02u: "fmt, (dev)->ap->id, (dev)->devno , ##args)
643 747
748/*
749 * ata_eh_info helpers
750 */
751#define ata_ehi_push_desc(ehi, fmt, args...) do { \
752 (ehi)->desc_len += scnprintf((ehi)->desc + (ehi)->desc_len, \
753 ATA_EH_DESC_LEN - (ehi)->desc_len, \
754 fmt , ##args); \
755} while (0)
756
757#define ata_ehi_clear_desc(ehi) do { \
758 (ehi)->desc[0] = '\0'; \
759 (ehi)->desc_len = 0; \
760} while (0)
761
762/*
763 * qc helpers
764 */
644static inline int 765static inline int
645ata_sg_is_last(struct scatterlist *sg, struct ata_queued_cmd *qc) 766ata_sg_is_last(struct scatterlist *sg, struct ata_queued_cmd *qc)
646{ 767{
@@ -683,6 +804,11 @@ static inline unsigned int ata_tag_valid(unsigned int tag)
683 return (tag < ATA_MAX_QUEUE) ? 1 : 0; 804 return (tag < ATA_MAX_QUEUE) ? 1 : 0;
684} 805}
685 806
807static inline unsigned int ata_tag_internal(unsigned int tag)
808{
809 return tag == ATA_MAX_QUEUE - 1;
810}
811
686static inline unsigned int ata_class_enabled(unsigned int class) 812static inline unsigned int ata_class_enabled(unsigned int class)
687{ 813{
688 return class == ATA_DEV_ATA || class == ATA_DEV_ATAPI; 814 return class == ATA_DEV_ATA || class == ATA_DEV_ATAPI;
@@ -791,20 +917,35 @@ static inline void ata_qc_set_polling(struct ata_queued_cmd *qc)
791 qc->tf.ctl |= ATA_NIEN; 917 qc->tf.ctl |= ATA_NIEN;
792} 918}
793 919
794static inline struct ata_queued_cmd *ata_qc_from_tag (struct ata_port *ap, 920static inline struct ata_queued_cmd *__ata_qc_from_tag(struct ata_port *ap,
795 unsigned int tag) 921 unsigned int tag)
796{ 922{
797 if (likely(ata_tag_valid(tag))) 923 if (likely(ata_tag_valid(tag)))
798 return &ap->qcmd[tag]; 924 return &ap->qcmd[tag];
799 return NULL; 925 return NULL;
800} 926}
801 927
802static inline void ata_tf_init(struct ata_port *ap, struct ata_taskfile *tf, unsigned int device) 928static inline struct ata_queued_cmd *ata_qc_from_tag(struct ata_port *ap,
929 unsigned int tag)
930{
931 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
932
933 if (unlikely(!qc) || !ap->ops->error_handler)
934 return qc;
935
936 if ((qc->flags & (ATA_QCFLAG_ACTIVE |
937 ATA_QCFLAG_FAILED)) == ATA_QCFLAG_ACTIVE)
938 return qc;
939
940 return NULL;
941}
942
943static inline void ata_tf_init(struct ata_device *dev, struct ata_taskfile *tf)
803{ 944{
804 memset(tf, 0, sizeof(*tf)); 945 memset(tf, 0, sizeof(*tf));
805 946
806 tf->ctl = ap->ctl; 947 tf->ctl = dev->ap->ctl;
807 if (device == 0) 948 if (dev->devno == 0)
808 tf->device = ATA_DEVICE_OBS; 949 tf->device = ATA_DEVICE_OBS;
809 else 950 else
810 tf->device = ATA_DEVICE_OBS | ATA_DEV1; 951 tf->device = ATA_DEVICE_OBS | ATA_DEV1;
@@ -819,26 +960,11 @@ static inline void ata_qc_reinit(struct ata_queued_cmd *qc)
819 qc->nbytes = qc->curbytes = 0; 960 qc->nbytes = qc->curbytes = 0;
820 qc->err_mask = 0; 961 qc->err_mask = 0;
821 962
822 ata_tf_init(qc->ap, &qc->tf, qc->dev->devno); 963 ata_tf_init(qc->dev, &qc->tf);
823}
824
825/**
826 * ata_qc_complete - Complete an active ATA command
827 * @qc: Command to complete
828 * @err_mask: ATA Status register contents
829 *
830 * Indicate to the mid and upper layers that an ATA
831 * command has completed, with either an ok or not-ok status.
832 *
833 * LOCKING:
834 * spin_lock_irqsave(host_set lock)
835 */
836static inline void ata_qc_complete(struct ata_queued_cmd *qc)
837{
838 if (unlikely(qc->flags & ATA_QCFLAG_EH_SCHEDULED))
839 return;
840 964
841 __ata_qc_complete(qc); 965 /* init result_tf such that it indicates normal completion */
966 qc->result_tf.command = ATA_DRDY;
967 qc->result_tf.feature = 0;
842} 968}
843 969
844/** 970/**
@@ -917,28 +1043,6 @@ static inline u8 ata_irq_ack(struct ata_port *ap, unsigned int chk_drq)
917 return status; 1043 return status;
918} 1044}
919 1045
920static inline u32 scr_read(struct ata_port *ap, unsigned int reg)
921{
922 return ap->ops->scr_read(ap, reg);
923}
924
925static inline void scr_write(struct ata_port *ap, unsigned int reg, u32 val)
926{
927 ap->ops->scr_write(ap, reg, val);
928}
929
930static inline void scr_write_flush(struct ata_port *ap, unsigned int reg,
931 u32 val)
932{
933 ap->ops->scr_write(ap, reg, val);
934 (void) ap->ops->scr_read(ap, reg);
935}
936
937static inline unsigned int sata_dev_present(struct ata_port *ap)
938{
939 return ((scr_read(ap, SCR_STATUS) & 0xf) == 0x3) ? 1 : 0;
940}
941
942static inline int ata_try_flush_cache(const struct ata_device *dev) 1046static inline int ata_try_flush_cache(const struct ata_device *dev)
943{ 1047{
944 return ata_id_wcache_enabled(dev->id) || 1048 return ata_id_wcache_enabled(dev->id) ||
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index 1ace1b9fe537..88c6c4da6c05 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -151,5 +151,6 @@ extern struct scsi_cmnd *scsi_get_command(struct scsi_device *, gfp_t);
151extern void scsi_put_command(struct scsi_cmnd *); 151extern void scsi_put_command(struct scsi_cmnd *);
152extern void scsi_io_completion(struct scsi_cmnd *, unsigned int, unsigned int); 152extern void scsi_io_completion(struct scsi_cmnd *, unsigned int, unsigned int);
153extern void scsi_finish_command(struct scsi_cmnd *cmd); 153extern void scsi_finish_command(struct scsi_cmnd *cmd);
154extern void scsi_req_abort_cmd(struct scsi_cmnd *cmd);
154 155
155#endif /* _SCSI_SCSI_CMND_H */ 156#endif /* _SCSI_SCSI_CMND_H */
diff --git a/include/scsi/scsi_eh.h b/include/scsi/scsi_eh.h
index d160880b2a87..212c983a6a18 100644
--- a/include/scsi/scsi_eh.h
+++ b/include/scsi/scsi_eh.h
@@ -35,6 +35,7 @@ static inline int scsi_sense_valid(struct scsi_sense_hdr *sshdr)
35} 35}
36 36
37 37
38extern void scsi_eh_wakeup(struct Scsi_Host *shost);
38extern void scsi_eh_finish_cmd(struct scsi_cmnd *scmd, 39extern void scsi_eh_finish_cmd(struct scsi_cmnd *scmd,
39 struct list_head *done_q); 40 struct list_head *done_q);
40extern void scsi_eh_flush_done_q(struct list_head *done_q); 41extern void scsi_eh_flush_done_q(struct list_head *done_q);
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index de6ce541a046..a42efd6e4be8 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -472,6 +472,7 @@ struct Scsi_Host {
472 */ 472 */
473 unsigned int host_busy; /* commands actually active on low-level */ 473 unsigned int host_busy; /* commands actually active on low-level */
474 unsigned int host_failed; /* commands that failed. */ 474 unsigned int host_failed; /* commands that failed. */
475 unsigned int host_eh_scheduled; /* EH scheduled without command */
475 476
476 unsigned short host_no; /* Used for IOCTL_GET_IDLUN, /proc/scsi et al. */ 477 unsigned short host_no; /* Used for IOCTL_GET_IDLUN, /proc/scsi et al. */
477 int resetting; /* if set, it means that last_reset is a valid value */ 478 int resetting; /* if set, it means that last_reset is a valid value */