author:    Jeff Garzik <jgarzik@pobox.com>  2006-02-11 18:02:04 -0500
committer: Jeff Garzik <jgarzik@pobox.com>  2006-02-11 18:02:04 -0500
commit:    1cb9d721d9df9182a42d1ff59427a100c6522efc
tree:      3c7d47828f89853de49e979f97ffe78fd1dba7d6 /drivers/scsi
parent:    ca7d5e42d783e54f0057317c9226262d68ab7717
parent:    bef4a456b8dc8b3638f4d49a25a89e1467da9483
Merge branch 'upstream'
Diffstat (limited to 'drivers/scsi')
-rw-r--r--  drivers/scsi/ahci.c          53
-rw-r--r--  drivers/scsi/libata-core.c  120
-rw-r--r--  drivers/scsi/libata-scsi.c   14
-rw-r--r--  drivers/scsi/libata.h         1
-rw-r--r--  drivers/scsi/sata_mv.c       50
-rw-r--r--  drivers/scsi/sata_qstor.c     4
-rw-r--r--  drivers/scsi/sata_sil.c       9
-rw-r--r--  drivers/scsi/sata_sil24.c    61
-rw-r--r--  drivers/scsi/sata_sx4.c       2
9 files changed, 162 insertions, 152 deletions
diff --git a/drivers/scsi/ahci.c b/drivers/scsi/ahci.c
index 98ce6bb62ff8..24a54a5a91b8 100644
--- a/drivers/scsi/ahci.c
+++ b/drivers/scsi/ahci.c
@@ -190,7 +190,7 @@ static void ahci_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
 static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
 static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
 static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *regs);
-static void ahci_phy_reset(struct ata_port *ap);
+static int ahci_probe_reset(struct ata_port *ap, unsigned int *classes);
 static void ahci_irq_clear(struct ata_port *ap);
 static void ahci_eng_timeout(struct ata_port *ap);
 static int ahci_port_start(struct ata_port *ap);
@@ -230,7 +230,7 @@ static const struct ata_port_operations ahci_ops = {
 
 	.tf_read		= ahci_tf_read,
 
-	.phy_reset		= ahci_phy_reset,
+	.probe_reset		= ahci_probe_reset,
 
 	.qc_prep		= ahci_qc_prep,
 	.qc_issue		= ahci_qc_issue,
@@ -252,8 +252,7 @@ static const struct ata_port_info ahci_port_info[] = {
 	{
 		.sht		= &ahci_sht,
 		.host_flags	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
-				  ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
-				  ATA_FLAG_PIO_DMA,
+				  ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.udma_mask	= 0x7f,	/* udma0-6 ; FIXME */
 		.port_ops	= &ahci_ops,
@@ -507,37 +506,43 @@ static unsigned int ahci_dev_classify(struct ata_port *ap)
 	return ata_dev_classify(&tf);
 }
 
-static void ahci_fill_cmd_slot(struct ata_port *ap, u32 opts)
+static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, u32 opts)
 {
-	struct ahci_port_priv *pp = ap->private_data;
 	pp->cmd_slot[0].opts = cpu_to_le32(opts);
 	pp->cmd_slot[0].status = 0;
 	pp->cmd_slot[0].tbl_addr = cpu_to_le32(pp->cmd_tbl_dma & 0xffffffff);
 	pp->cmd_slot[0].tbl_addr_hi = cpu_to_le32((pp->cmd_tbl_dma >> 16) >> 16);
 }
 
-static void ahci_phy_reset(struct ata_port *ap)
+static int ahci_hardreset(struct ata_port *ap, int verbose, unsigned int *class)
 {
-	void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
-	struct ata_device *dev = &ap->device[0];
-	u32 new_tmp, tmp;
+	int rc;
+
+	DPRINTK("ENTER\n");
 
 	ahci_stop_engine(ap);
-	__sata_phy_reset(ap);
+	rc = sata_std_hardreset(ap, verbose, class);
 	ahci_start_engine(ap);
 
-	if (ap->flags & ATA_FLAG_PORT_DISABLED)
-		return;
+	if (rc == 0)
+		*class = ahci_dev_classify(ap);
+	if (*class == ATA_DEV_UNKNOWN)
+		*class = ATA_DEV_NONE;
 
-	dev->class = ahci_dev_classify(ap);
-	if (!ata_dev_present(dev)) {
-		ata_port_disable(ap);
-		return;
-	}
+	DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
+	return rc;
+}
+
+static void ahci_postreset(struct ata_port *ap, unsigned int *class)
+{
+	void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
+	u32 new_tmp, tmp;
+
+	ata_std_postreset(ap, class);
 
 	/* Make sure port's ATAPI bit is set appropriately */
 	new_tmp = tmp = readl(port_mmio + PORT_CMD);
-	if (dev->class == ATA_DEV_ATAPI)
+	if (*class == ATA_DEV_ATAPI)
 		new_tmp |= PORT_CMD_ATAPI;
 	else
 		new_tmp &= ~PORT_CMD_ATAPI;
@@ -547,6 +552,12 @@ static void ahci_phy_reset(struct ata_port *ap)
 	}
 }
 
+static int ahci_probe_reset(struct ata_port *ap, unsigned int *classes)
+{
+	return ata_drive_probe_reset(ap, NULL, NULL, ahci_hardreset,
+				     ahci_postreset, classes);
+}
+
 static u8 ahci_check_status(struct ata_port *ap)
 {
 	void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr;
@@ -622,7 +633,7 @@ static void ahci_qc_prep(struct ata_queued_cmd *qc)
 	if (is_atapi)
 		opts |= AHCI_CMD_ATAPI;
 
-	ahci_fill_cmd_slot(ap, opts);
+	ahci_fill_cmd_slot(pp, opts);
 }
 
 static void ahci_restart_port(struct ata_port *ap, u32 irq_stat)
@@ -703,7 +714,7 @@ static inline int ahci_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
 	ci = readl(port_mmio + PORT_CMD_ISSUE);
 	if (likely((ci & 0x1) == 0)) {
 		if (qc) {
-			assert(qc->err_mask == 0);
+			WARN_ON(qc->err_mask);
 			ata_qc_complete(qc);
 			qc = NULL;
 		}
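The ahci.c hunks above are part of a wider switch from the old ->phy_reset port operation to the new ->probe_reset hook, which is driven by ata_drive_probe_reset() and a set of per-stage callbacks (probeinit, softreset, hardreset, postreset, plus a classes[] array). A minimal sketch of the wiring, assuming only the callback order visible in the calls above and the standard helpers this series references; it is illustrative and not part of the commit:

	/*
	 * Sketch only, not from this commit: a low-level driver with no
	 * controller-specific reset quirks could route the new hook
	 * straight through the standard helpers.  ahci instead passes
	 * NULL probeinit/softreset and supplies ahci_hardreset() and
	 * ahci_postreset(), as shown in the hunk above.
	 */
	static int my_probe_reset(struct ata_port *ap, unsigned int *classes)
	{
		return ata_drive_probe_reset(ap, ata_std_probeinit,
					     ata_std_softreset, sata_std_hardreset,
					     ata_std_postreset, classes);
	}

sata_sil.c further down takes exactly that fully-standard route by pointing .probe_reset at ata_std_probe_reset.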
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index 592d3a86c840..3fd55ef5410f 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -897,8 +897,8 @@ static void ata_dev_identify(struct ata_port *ap, unsigned int device)
 
 	DPRINTK("ENTER, host %u, dev %u\n", ap->id, device);
 
-	assert (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ATAPI ||
-		dev->class == ATA_DEV_NONE);
+	WARN_ON(dev->class != ATA_DEV_ATA && dev->class != ATA_DEV_ATAPI &&
+		dev->class != ATA_DEV_NONE);
 
 	ata_dev_select(ap, device, 1, 1); /* select device 0/1 */
 
@@ -1926,11 +1926,20 @@ static int sata_phy_resume(struct ata_port *ap)
  *
  *	@ap is about to be probed.  Initialize it.  This function is
  *	to be used as standard callback for ata_drive_probe_reset().
+ *
+ *	NOTE!!! Do not use this function as probeinit if a low level
+ *	driver implements only hardreset.  Just pass NULL as probeinit
+ *	in that case.  Using this function is probably okay but doing
+ *	so makes reset sequence different from the original
+ *	->phy_reset implementation and Jeff nervous.  :-P
 */
 extern void ata_std_probeinit(struct ata_port *ap)
 {
-	if (ap->flags & ATA_FLAG_SATA && ap->ops->scr_read)
+	if (ap->flags & ATA_FLAG_SATA && ap->ops->scr_read) {
 		sata_phy_resume(ap);
+		if (sata_dev_present(ap))
+			ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
+	}
 }
 
 /**
@@ -1956,20 +1965,17 @@ int ata_std_softreset(struct ata_port *ap, int verbose, unsigned int *classes)
 
 	DPRINTK("ENTER\n");
 
+	if (ap->ops->scr_read && !sata_dev_present(ap)) {
+		classes[0] = ATA_DEV_NONE;
+		goto out;
+	}
+
 	/* determine if device 0/1 are present */
 	if (ata_devchk(ap, 0))
 		devmask |= (1 << 0);
 	if (slave_possible && ata_devchk(ap, 1))
 		devmask |= (1 << 1);
 
-	/* devchk reports device presence without actual device on
-	 * most SATA controllers.  Check SStatus and turn devmask off
-	 * if link is offline.  Note that we should continue resetting
-	 * even when it seems like there's no device.
-	 */
-	if (ap->ops->scr_read && !sata_dev_present(ap))
-		devmask = 0;
-
 	/* select device 0 again */
 	ap->ops->dev_select(ap, 0);
 
@@ -1991,6 +1997,7 @@ int ata_std_softreset(struct ata_port *ap, int verbose, unsigned int *classes)
 	if (slave_possible && err != 0x81)
 		classes[1] = ata_dev_try_classify(ap, 1, &err);
 
+ out:
 	DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
 	return 0;
 }
@@ -2013,8 +2020,6 @@ int ata_std_softreset(struct ata_port *ap, int verbose, unsigned int *classes)
 */
 int sata_std_hardreset(struct ata_port *ap, int verbose, unsigned int *class)
 {
-	u32 serror;
-
 	DPRINTK("ENTER\n");
 
 	/* Issue phy wake/reset */
@@ -2029,10 +2034,6 @@ int sata_std_hardreset(struct ata_port *ap, int verbose, unsigned int *class)
 	/* Bring phy back */
 	sata_phy_resume(ap);
 
-	/* Clear SError */
-	serror = scr_read(ap, SCR_ERROR);
-	scr_write(ap, SCR_ERROR, serror);
-
 	/* TODO: phy layer with polling, timeouts, etc. */
 	if (!sata_dev_present(ap)) {
 		*class = ATA_DEV_NONE;
@@ -2049,6 +2050,8 @@ int sata_std_hardreset(struct ata_port *ap, int verbose, unsigned int *class)
 		return -EIO;
 	}
 
+	ap->ops->dev_select(ap, 0);	/* probably unnecessary */
+
 	*class = ata_dev_try_classify(ap, 0, NULL);
 
 	DPRINTK("EXIT, class=%u\n", *class);
@@ -2083,11 +2086,9 @@ void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
 	if (ap->cbl == ATA_CBL_SATA)
 		sata_print_link_status(ap);
 
-	/* bail out if no device is present */
-	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
-		DPRINTK("EXIT, no device\n");
-		return;
-	}
+	/* re-enable interrupts */
+	if (ap->ioaddr.ctl_addr)	/* FIXME: hack. create a hook instead */
+		ata_irq_on(ap);
 
 	/* is double-select really necessary? */
 	if (classes[0] != ATA_DEV_NONE)
@@ -2095,9 +2096,19 @@ void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
 	if (classes[1] != ATA_DEV_NONE)
 		ap->ops->dev_select(ap, 0);
 
-	/* re-enable interrupts & set up device control */
-	if (ap->ioaddr.ctl_addr)	/* FIXME: hack. create a hook instead */
-		ata_irq_on(ap);
+	/* bail out if no device is present */
+	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
+		DPRINTK("EXIT, no device\n");
+		return;
+	}
+
+	/* set up device control */
+	if (ap->ioaddr.ctl_addr) {
+		if (ap->flags & ATA_FLAG_MMIO)
+			writeb(ap->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
+		else
+			outb(ap->ctl, ap->ioaddr.ctl_addr);
+	}
 
 	DPRINTK("EXIT\n");
 }
@@ -2292,7 +2303,7 @@ static unsigned int ata_get_mode_mask(const struct ata_port *ap, int shift)
 	master = &ap->device[0];
 	slave = &ap->device[1];
 
-	assert (ata_dev_present(master) || ata_dev_present(slave));
+	WARN_ON(!ata_dev_present(master) && !ata_dev_present(slave));
 
 	if (shift == ATA_SHIFT_UDMA) {
 		mask = ap->udma_mask;
@@ -2538,11 +2549,11 @@ static void ata_sg_clean(struct ata_queued_cmd *qc)
 	int dir = qc->dma_dir;
 	void *pad_buf = NULL;
 
-	assert(qc->flags & ATA_QCFLAG_DMAMAP);
-	assert(sg != NULL);
+	WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
+	WARN_ON(sg == NULL);
 
 	if (qc->flags & ATA_QCFLAG_SINGLE)
-		assert(qc->n_elem == 1);
+		WARN_ON(qc->n_elem != 1);
 
 	VPRINTK("unmapping %u sg elements\n", qc->n_elem);
 
@@ -2597,8 +2608,8 @@ static void ata_fill_sg(struct ata_queued_cmd *qc)
 	struct scatterlist *sg;
 	unsigned int idx;
 
-	assert(qc->__sg != NULL);
-	assert(qc->n_elem > 0);
+	WARN_ON(qc->__sg == NULL);
+	WARN_ON(qc->n_elem == 0);
 
 	idx = 0;
 	ata_for_each_sg(sg, qc) {
@@ -2750,7 +2761,7 @@ static int ata_sg_setup_one(struct ata_queued_cmd *qc)
 		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
 		struct scatterlist *psg = &qc->pad_sgent;
 
-		assert(qc->dev->class == ATA_DEV_ATAPI);
+		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
 
 		memset(pad_buf, 0, ATA_DMA_PAD_SZ);
 
@@ -2812,7 +2823,7 @@ static int ata_sg_setup(struct ata_queued_cmd *qc)
 	int n_elem, pre_n_elem, dir, trim_sg = 0;
 
 	VPRINTK("ENTER, ata%u\n", ap->id);
-	assert(qc->flags & ATA_QCFLAG_SG);
+	WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
 
 	/* we must lengthen transfers to end on a 32-bit boundary */
 	qc->pad_len = lsg->length & 3;
@@ -2821,7 +2832,7 @@ static int ata_sg_setup(struct ata_queued_cmd *qc)
 		struct scatterlist *psg = &qc->pad_sgent;
 		unsigned int offset;
 
-		assert(qc->dev->class == ATA_DEV_ATAPI);
+		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
 
 		memset(pad_buf, 0, ATA_DMA_PAD_SZ);
 
@@ -2914,7 +2925,7 @@ static unsigned long ata_pio_poll(struct ata_port *ap)
 	unsigned int reg_state = HSM_ST_UNKNOWN;
 
 	qc = ata_qc_from_tag(ap, ap->active_tag);
-	assert(qc != NULL);
+	WARN_ON(qc == NULL);
 
 	switch (ap->hsm_task_state) {
 	case HSM_ST:
@@ -2983,7 +2994,7 @@ static int ata_pio_complete (struct ata_port *ap)
 	}
 
 	qc = ata_qc_from_tag(ap, ap->active_tag);
-	assert(qc != NULL);
+	WARN_ON(qc == NULL);
 
 	drv_stat = ata_wait_idle(ap);
 	if (!ata_ok(drv_stat)) {
@@ -2994,7 +3005,7 @@ static int ata_pio_complete (struct ata_port *ap)
 
 	ap->hsm_task_state = HSM_ST_IDLE;
 
-	assert(qc->err_mask == 0);
+	WARN_ON(qc->err_mask);
 	ata_poll_qc_complete(qc);
 
 	/* another command may start at this point */
@@ -3517,7 +3528,7 @@ static void ata_pio_block(struct ata_port *ap)
 	}
 
 	qc = ata_qc_from_tag(ap, ap->active_tag);
-	assert(qc != NULL);
+	WARN_ON(qc == NULL);
 
 	/* check error */
 	if (status & (ATA_ERR | ATA_DF)) {
@@ -3554,7 +3565,7 @@ static void ata_pio_error(struct ata_port *ap)
 	struct ata_queued_cmd *qc;
 
 	qc = ata_qc_from_tag(ap, ap->active_tag);
-	assert(qc != NULL);
+	WARN_ON(qc == NULL);
 
 	if (qc->tf.command != ATA_CMD_PACKET)
 		printk(KERN_WARNING "ata%u: PIO error\n", ap->id);
@@ -3562,7 +3573,7 @@ static void ata_pio_error(struct ata_port *ap)
 	/* make sure qc->err_mask is available to
 	 * know what's wrong and recover
 	 */
-	assert(qc->err_mask);
+	WARN_ON(qc->err_mask == 0);
 
 	ap->hsm_task_state = HSM_ST_IDLE;
 
@@ -3776,7 +3787,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
 	struct ata_port *ap = qc->ap;
 	unsigned int tag;
 
-	assert(qc != NULL);	/* ata_qc_from_tag _might_ return NULL */
+	WARN_ON(qc == NULL);	/* ata_qc_from_tag _might_ return NULL */
 
 	qc->flags = 0;
 	tag = qc->tag;
@@ -3788,10 +3799,10 @@ void ata_qc_free(struct ata_queued_cmd *qc)
 	}
 }
 
-inline void __ata_qc_complete(struct ata_queued_cmd *qc)
+void __ata_qc_complete(struct ata_queued_cmd *qc)
 {
-	assert(qc != NULL);	/* ata_qc_from_tag _might_ return NULL */
-	assert(qc->flags & ATA_QCFLAG_ACTIVE);
+	WARN_ON(qc == NULL);	/* ata_qc_from_tag _might_ return NULL */
+	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
 
 	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
 		ata_sg_clean(qc);
@@ -3806,25 +3817,6 @@ inline void __ata_qc_complete(struct ata_queued_cmd *qc)
 	qc->complete_fn(qc);
 }
 
-/**
- *	ata_qc_complete - Complete an active ATA command
- *	@qc: Command to complete
- *	@err_mask: ATA Status register contents
- *
- *	Indicate to the mid and upper layers that an ATA
- *	command has completed, with either an ok or not-ok status.
- *
- *	LOCKING:
- *	spin_lock_irqsave(host_set lock)
- */
-void ata_qc_complete(struct ata_queued_cmd *qc)
-{
-	if (unlikely(qc->flags & ATA_QCFLAG_EH_SCHEDULED))
-		return;
-
-	__ata_qc_complete(qc);
-}
-
 static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
@@ -5143,7 +5135,7 @@ EXPORT_SYMBOL_GPL(ata_device_add);
 EXPORT_SYMBOL_GPL(ata_host_set_remove);
 EXPORT_SYMBOL_GPL(ata_sg_init);
 EXPORT_SYMBOL_GPL(ata_sg_init_one);
-EXPORT_SYMBOL_GPL(ata_qc_complete);
+EXPORT_SYMBOL_GPL(__ata_qc_complete);
 EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
 EXPORT_SYMBOL_GPL(ata_eng_timeout);
 EXPORT_SYMBOL_GPL(ata_tf_load);
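Much of the libata-core.c churn above is a mechanical assert() to WARN_ON() conversion. The two macros have opposite polarity: the old assert(expr) complained when expr was false, while WARN_ON(x) logs a kernel warning with a stack trace when x is non-zero and lets execution continue, so every converted call negates the original predicate. Using the ata_sg_clean() check from the hunk above as the example:

	/* old style: complain unless the command was DMA-mapped */
	assert(qc->flags & ATA_QCFLAG_DMAMAP);

	/* new style: warn if the command was NOT DMA-mapped */
	WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));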
diff --git a/drivers/scsi/libata-scsi.c b/drivers/scsi/libata-scsi.c
index 9d67c6768335..26f07a2617f9 100644
--- a/drivers/scsi/libata-scsi.c
+++ b/drivers/scsi/libata-scsi.c
@@ -553,7 +553,7 @@ void ata_gen_ata_desc_sense(struct ata_queued_cmd *qc)
 	/*
 	 * Read the controller registers.
 	 */
-	assert(NULL != qc->ap->ops->tf_read);
+	WARN_ON(qc->ap->ops->tf_read == NULL);
 	qc->ap->ops->tf_read(qc->ap, tf);
 
 	/*
@@ -628,7 +628,7 @@ void ata_gen_fixed_sense(struct ata_queued_cmd *qc)
 	/*
 	 * Read the controller registers.
 	 */
-	assert(NULL != qc->ap->ops->tf_read);
+	WARN_ON(qc->ap->ops->tf_read == NULL);
 	qc->ap->ops->tf_read(qc->ap, tf);
 
 	/*
@@ -746,7 +746,7 @@ enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
 	spin_lock_irqsave(&ap->host_set->lock, flags);
 	qc = ata_qc_from_tag(ap, ap->active_tag);
 	if (qc) {
-		assert(qc->scsicmd == cmd);
+		WARN_ON(qc->scsicmd != cmd);
 		qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
 		qc->err_mask |= AC_ERR_TIMEOUT;
 		ret = EH_NOT_HANDLED;
@@ -780,14 +780,14 @@ int ata_scsi_error(struct Scsi_Host *host)
 	ap = (struct ata_port *) &host->hostdata[0];
 
 	spin_lock_irqsave(&ap->host_set->lock, flags);
-	assert(!(ap->flags & ATA_FLAG_IN_EH));
+	WARN_ON(ap->flags & ATA_FLAG_IN_EH);
 	ap->flags |= ATA_FLAG_IN_EH;
-	assert(ata_qc_from_tag(ap, ap->active_tag) != NULL);
+	WARN_ON(ata_qc_from_tag(ap, ap->active_tag) == NULL);
 	spin_unlock_irqrestore(&ap->host_set->lock, flags);
 
 	ap->ops->eng_timeout(ap);
 
-	assert(host->host_failed == 0 && list_empty(&host->eh_cmd_q));
+	WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q));
 
 	scsi_eh_flush_done_q(&ap->eh_done_q);
 
@@ -813,7 +813,7 @@ static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
 	spin_lock_irqsave(&ap->host_set->lock, flags);
 	qc->scsidone = ata_eh_scsidone;
 	__ata_qc_complete(qc);
-	assert(!ata_tag_valid(qc->tag));
+	WARN_ON(ata_tag_valid(qc->tag));
 	spin_unlock_irqrestore(&ap->host_set->lock, flags);
 
 	scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
diff --git a/drivers/scsi/libata.h b/drivers/scsi/libata.h
index 1cd071a32e93..9d76923a2253 100644
--- a/drivers/scsi/libata.h
+++ b/drivers/scsi/libata.h
@@ -46,7 +46,6 @@ extern struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
 extern int ata_rwcmd_protocol(struct ata_queued_cmd *qc);
 extern void ata_qc_free(struct ata_queued_cmd *qc);
 extern unsigned int ata_qc_issue(struct ata_queued_cmd *qc);
-extern void __ata_qc_complete(struct ata_queued_cmd *qc);
 extern int ata_check_atapi_dma(struct ata_queued_cmd *qc);
 extern void ata_dev_select(struct ata_port *ap, unsigned int device,
 			   unsigned int wait, unsigned int can_sleep);
diff --git a/drivers/scsi/sata_mv.c b/drivers/scsi/sata_mv.c
index cda4c495c10f..d35460ff5275 100644
--- a/drivers/scsi/sata_mv.c
+++ b/drivers/scsi/sata_mv.c
@@ -632,8 +632,8 @@ static void mv_irq_clear(struct ata_port *ap)
  *      @base: port base address
  *      @pp: port private data
  *
- *      Verify the local cache of the eDMA state is accurate with an
- *      assert.
+ *      Verify the local cache of the eDMA state is accurate with a
+ *      WARN_ON.
  *
  *      LOCKING:
  *      Inherited from caller.
@@ -644,15 +644,15 @@ static void mv_start_dma(void __iomem *base, struct mv_port_priv *pp)
 		writelfl(EDMA_EN, base + EDMA_CMD_OFS);
 		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
 	}
-	assert(EDMA_EN & readl(base + EDMA_CMD_OFS));
+	WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));
 }
 
 /**
  *      mv_stop_dma - Disable eDMA engine
  *      @ap: ATA channel to manipulate
  *
- *      Verify the local cache of the eDMA state is accurate with an
- *      assert.
+ *      Verify the local cache of the eDMA state is accurate with a
+ *      WARN_ON.
  *
  *      LOCKING:
  *      Inherited from caller.
@@ -670,7 +670,7 @@ static void mv_stop_dma(struct ata_port *ap)
 		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
 		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
 	} else {
-		assert(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
+		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
 	}
 
 	/* now properly wait for the eDMA to stop */
@@ -1061,15 +1061,15 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
 		return;
 
 	/* the req producer index should be the same as we remember it */
-	assert(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >>
-		EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
+	WARN_ON(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >>
+		 EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
 		pp->req_producer);
 
 	/* Fill in command request block
 	 */
 	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
 		flags |= CRQB_FLAG_READ;
-	assert(MV_MAX_Q_DEPTH > qc->tag);
+	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
 	flags |= qc->tag << CRQB_TAG_SHIFT;
 
 	pp->crqb[pp->req_producer].sg_addr =
@@ -1152,16 +1152,16 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
 		return;
 
 	/* the req producer index should be the same as we remember it */
-	assert(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >>
-		EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
+	WARN_ON(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >>
+		 EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
 		pp->req_producer);
 
 	/* Fill in Gen IIE command request block
 	 */
 	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
 		flags |= CRQB_FLAG_READ;
 
-	assert(MV_MAX_Q_DEPTH > qc->tag);
+	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
 	flags |= qc->tag << CRQB_TAG_SHIFT;
 
 	crqb = (struct mv_crqb_iie *) &pp->crqb[pp->req_producer];
@@ -1226,12 +1226,12 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
 	in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
 
 	/* the req producer index should be the same as we remember it */
-	assert(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
+	WARN_ON(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
 		pp->req_producer);
 	/* until we do queuing, the queue should be empty at this point */
-	assert(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
+	WARN_ON(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
 		((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS) >>
 		  EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
 
 	mv_inc_q_index(&pp->req_producer);	/* now incr producer index */
 
@@ -1251,7 +1251,7 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
  *
  *      This routine is for use when the port is in DMA mode, when it
  *      will be using the CRPB (command response block) method of
- *      returning command completion information.  We assert indices
+ *      returning command completion information.  We check indices
  *      are good, grab status, and bump the response consumer index to
 *      prove that we're up to date.
 *
@@ -1267,16 +1267,16 @@ static u8 mv_get_crpb_status(struct ata_port *ap)
 	out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
 
 	/* the response consumer index should be the same as we remember it */
-	assert(((out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
+	WARN_ON(((out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
 		pp->rsp_consumer);
 
 	/* increment our consumer index... */
 	pp->rsp_consumer = mv_inc_q_index(&pp->rsp_consumer);
 
 	/* and, until we do NCQ, there should only be 1 CRPB waiting */
-	assert(((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS) >>
-		EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
+	WARN_ON(((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS) >>
+		 EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
 		pp->rsp_consumer);
 
 	/* write out our inc'd consumer index so EDMA knows we're caught up */
 	out_ptr &= EDMA_RSP_Q_BASE_LO_MASK;
diff --git a/drivers/scsi/sata_qstor.c b/drivers/scsi/sata_qstor.c
index d5c912763d0c..5730167d2e74 100644
--- a/drivers/scsi/sata_qstor.c
+++ b/drivers/scsi/sata_qstor.c
@@ -277,8 +277,8 @@ static unsigned int qs_fill_sg(struct ata_queued_cmd *qc)
 	unsigned int nelem;
 	u8 *prd = pp->pkt + QS_CPB_BYTES;
 
-	assert(qc->__sg != NULL);
-	assert(qc->n_elem > 0);
+	WARN_ON(qc->__sg == NULL);
+	WARN_ON(qc->n_elem == 0);
 
 	nelem = 0;
 	ata_for_each_sg(sg, qc) {
diff --git a/drivers/scsi/sata_sil.c b/drivers/scsi/sata_sil.c
index f40f25edbb11..bd2887741d78 100644
--- a/drivers/scsi/sata_sil.c
+++ b/drivers/scsi/sata_sil.c
@@ -158,7 +158,7 @@ static const struct ata_port_operations sil_ops = {
 	.check_status		= ata_check_status,
 	.exec_command		= ata_exec_command,
 	.dev_select		= ata_std_dev_select,
-	.phy_reset		= sata_phy_reset,
+	.probe_reset		= ata_std_probe_reset,
 	.post_set_mode		= sil_post_set_mode,
 	.bmdma_setup		= ata_bmdma_setup,
 	.bmdma_start		= ata_bmdma_start,
@@ -181,7 +181,7 @@ static const struct ata_port_info sil_port_info[] = {
 	{
 		.sht		= &sil_sht,
 		.host_flags	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
-				  ATA_FLAG_SRST | ATA_FLAG_MMIO,
+				  ATA_FLAG_MMIO,
 		.pio_mask	= 0x1f,			/* pio0-4 */
 		.mwdma_mask	= 0x07,			/* mwdma0-2 */
 		.udma_mask	= 0x3f,			/* udma0-5 */
@@ -190,8 +190,7 @@ static const struct ata_port_info sil_port_info[] = {
 	{
 		.sht		= &sil_sht,
 		.host_flags	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
-				  ATA_FLAG_SRST | ATA_FLAG_MMIO |
-				  SIL_FLAG_MOD15WRITE,
+				  ATA_FLAG_MMIO | SIL_FLAG_MOD15WRITE,
 		.pio_mask	= 0x1f,			/* pio0-4 */
 		.mwdma_mask	= 0x07,			/* mwdma0-2 */
 		.udma_mask	= 0x3f,			/* udma0-5 */
@@ -200,7 +199,7 @@ static const struct ata_port_info sil_port_info[] = {
 	{
 		.sht		= &sil_sht,
 		.host_flags	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
-				  ATA_FLAG_SRST | ATA_FLAG_MMIO,
+				  ATA_FLAG_MMIO,
 		.pio_mask	= 0x1f,			/* pio0-4 */
 		.mwdma_mask	= 0x07,			/* mwdma0-2 */
 		.udma_mask	= 0x3f,			/* udma0-5 */
diff --git a/drivers/scsi/sata_sil24.c b/drivers/scsi/sata_sil24.c
index 962396b36f61..228a7fabffff 100644
--- a/drivers/scsi/sata_sil24.c
+++ b/drivers/scsi/sata_sil24.c
@@ -249,7 +249,7 @@ static u8 sil24_check_status(struct ata_port *ap);
 static u32 sil24_scr_read(struct ata_port *ap, unsigned sc_reg);
 static void sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val);
 static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
-static void sil24_phy_reset(struct ata_port *ap);
+static int sil24_probe_reset(struct ata_port *ap, unsigned int *classes);
 static void sil24_qc_prep(struct ata_queued_cmd *qc);
 static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc);
 static void sil24_irq_clear(struct ata_port *ap);
@@ -306,7 +306,7 @@ static const struct ata_port_operations sil24_ops = {
 
 	.tf_read		= sil24_tf_read,
 
-	.phy_reset		= sil24_phy_reset,
+	.probe_reset		= sil24_probe_reset,
 
 	.qc_prep		= sil24_qc_prep,
 	.qc_issue		= sil24_qc_issue,
@@ -336,8 +336,8 @@ static struct ata_port_info sil24_port_info[] = {
 	{
 		.sht		= &sil24_sht,
 		.host_flags	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
-				  ATA_FLAG_SRST | ATA_FLAG_MMIO |
-				  ATA_FLAG_PIO_DMA | SIL24_NPORTS2FLAG(4),
+				  ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
+				  SIL24_NPORTS2FLAG(4),
 		.pio_mask	= 0x1f,			/* pio0-4 */
 		.mwdma_mask	= 0x07,			/* mwdma0-2 */
 		.udma_mask	= 0x3f,			/* udma0-5 */
@@ -347,8 +347,8 @@ static struct ata_port_info sil24_port_info[] = {
 	{
 		.sht		= &sil24_sht,
 		.host_flags	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
-				  ATA_FLAG_SRST | ATA_FLAG_MMIO |
-				  ATA_FLAG_PIO_DMA | SIL24_NPORTS2FLAG(2),
+				  ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
+				  SIL24_NPORTS2FLAG(2),
 		.pio_mask	= 0x1f,			/* pio0-4 */
 		.mwdma_mask	= 0x07,			/* mwdma0-2 */
 		.udma_mask	= 0x3f,			/* udma0-5 */
@@ -358,8 +358,8 @@ static struct ata_port_info sil24_port_info[] = {
 	{
 		.sht		= &sil24_sht,
 		.host_flags	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
-				  ATA_FLAG_SRST | ATA_FLAG_MMIO |
-				  ATA_FLAG_PIO_DMA | SIL24_NPORTS2FLAG(1),
+				  ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
+				  SIL24_NPORTS2FLAG(1),
 		.pio_mask	= 0x1f,			/* pio0-4 */
 		.mwdma_mask	= 0x07,			/* mwdma0-2 */
 		.udma_mask	= 0x3f,			/* udma0-5 */
@@ -428,7 +428,8 @@ static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
 	*tf = pp->tf;
 }
 
-static int sil24_issue_SRST(struct ata_port *ap)
+static int sil24_softreset(struct ata_port *ap, int verbose,
+			   unsigned int *class)
 {
 	void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
 	struct sil24_port_priv *pp = ap->private_data;
@@ -437,6 +438,8 @@ static int sil24_issue_SRST(struct ata_port *ap)
 	u32 irq_enable, irq_stat;
 	int cnt;
 
+	DPRINTK("ENTER\n");
+
 	/* temporarily turn off IRQs during SRST */
 	irq_enable = readl(port + PORT_IRQ_ENABLE_SET);
 	writel(irq_enable, port + PORT_IRQ_ENABLE_CLR);
@@ -466,30 +469,36 @@ static int sil24_issue_SRST(struct ata_port *ap)
 	/* restore IRQs */
 	writel(irq_enable, port + PORT_IRQ_ENABLE_SET);
 
-	if (!(irq_stat & PORT_IRQ_COMPLETE))
-		return -1;
+	if (sata_dev_present(ap)) {
+		if (!(irq_stat & PORT_IRQ_COMPLETE)) {
+			DPRINTK("EXIT, srst failed\n");
+			return -EIO;
+		}
 
-	/* update TF */
-	sil24_update_tf(ap);
+		sil24_update_tf(ap);
+		*class = ata_dev_classify(&pp->tf);
+	}
+	if (*class == ATA_DEV_UNKNOWN)
+		*class = ATA_DEV_NONE;
+
+	DPRINTK("EXIT, class=%u\n", *class);
 	return 0;
 }
 
-static void sil24_phy_reset(struct ata_port *ap)
+static int sil24_hardreset(struct ata_port *ap, int verbose,
+			   unsigned int *class)
 {
-	struct sil24_port_priv *pp = ap->private_data;
+	unsigned int dummy_class;
 
-	__sata_phy_reset(ap);
-	if (ap->flags & ATA_FLAG_PORT_DISABLED)
-		return;
-
-	if (sil24_issue_SRST(ap) < 0) {
-		printk(KERN_ERR DRV_NAME
-		       " ata%u: SRST failed, disabling port\n", ap->id);
-		ap->ops->port_disable(ap);
-		return;
-	}
+	/* sil24 doesn't report device signature after hard reset */
+	return sata_std_hardreset(ap, verbose, &dummy_class);
+}
 
-	ap->device->class = ata_dev_classify(&pp->tf);
+static int sil24_probe_reset(struct ata_port *ap, unsigned int *classes)
+{
+	return ata_drive_probe_reset(ap, ata_std_probeinit,
				     sil24_softreset, sil24_hardreset,
				     ata_std_postreset, classes);
 }
 
 static inline void sil24_fill_sg(struct ata_queued_cmd *qc,
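Both the ahci and sil24 conversions in this merge follow the same classification contract for the new reset callbacks: on completion the callback stores the detected ATA_DEV_* value through *class and maps ATA_DEV_UNKNOWN to ATA_DEV_NONE before returning. A hedged sketch of that shared epilogue, with the controller-specific reset and signature read elided:

	/* Sketch only: the common tail the new reset callbacks share. */
	static int my_reset(struct ata_port *ap, int verbose, unsigned int *class)
	{
		/* ... issue the reset and classify the attached device ... */

		if (*class == ATA_DEV_UNKNOWN)
			*class = ATA_DEV_NONE;
		return 0;
	}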
diff --git a/drivers/scsi/sata_sx4.c b/drivers/scsi/sata_sx4.c
index b7cf279deeb2..04465fb86e1d 100644
--- a/drivers/scsi/sata_sx4.c
+++ b/drivers/scsi/sata_sx4.c
@@ -461,7 +461,7 @@ static void pdc20621_dma_prep(struct ata_queued_cmd *qc)
 	unsigned int i, idx, total_len = 0, sgt_len;
 	u32 *buf = (u32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ];
 
-	assert(qc->flags & ATA_QCFLAG_DMAMAP);
+	WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
 
 	VPRINTK("ata%u: ENTER\n", ap->id);
 