aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--drivers/scsi/Makefile2
-rw-r--r--drivers/scsi/ahci.c196
-rw-r--r--drivers/scsi/ata_piix.c133
-rw-r--r--drivers/scsi/libata-bmdma.c703
-rw-r--r--drivers/scsi/libata-core.c1895
-rw-r--r--drivers/scsi/libata-scsi.c238
-rw-r--r--drivers/scsi/libata.h2
-rw-r--r--drivers/scsi/pdc_adma.c6
-rw-r--r--drivers/scsi/sata_mv.c279
-rw-r--r--drivers/scsi/sata_nv.c2
-rw-r--r--drivers/scsi/sata_promise.c127
-rw-r--r--drivers/scsi/sata_qstor.c10
-rw-r--r--drivers/scsi/sata_sil.c33
-rw-r--r--drivers/scsi/sata_sil24.c88
-rw-r--r--drivers/scsi/sata_sis.c2
-rw-r--r--drivers/scsi/sata_svw.c2
-rw-r--r--drivers/scsi/sata_sx4.c25
-rw-r--r--drivers/scsi/sata_uli.c2
-rw-r--r--drivers/scsi/sata_via.c2
-rw-r--r--drivers/scsi/sata_vsc.c2
-rw-r--r--drivers/scsi/scsi_error.c7
-rw-r--r--include/linux/ata.h22
-rw-r--r--include/linux/libata.h146
-rw-r--r--include/scsi/scsi_eh.h3
24 files changed, 2428 insertions, 1499 deletions
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 320e765fa0cd..15dc2e00e1b2 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -163,7 +163,7 @@ ncr53c8xx-flags-$(CONFIG_SCSI_ZALON) \
163CFLAGS_ncr53c8xx.o := $(ncr53c8xx-flags-y) $(ncr53c8xx-flags-m) 163CFLAGS_ncr53c8xx.o := $(ncr53c8xx-flags-y) $(ncr53c8xx-flags-m)
164zalon7xx-objs := zalon.o ncr53c8xx.o 164zalon7xx-objs := zalon.o ncr53c8xx.o
165NCR_Q720_mod-objs := NCR_Q720.o ncr53c8xx.o 165NCR_Q720_mod-objs := NCR_Q720.o ncr53c8xx.o
166libata-objs := libata-core.o libata-scsi.o 166libata-objs := libata-core.o libata-scsi.o libata-bmdma.o
167oktagon_esp_mod-objs := oktagon_esp.o oktagon_io.o 167oktagon_esp_mod-objs := oktagon_esp.o oktagon_io.o
168 168
169# Files generated that shall be removed upon make clean 169# Files generated that shall be removed upon make clean
diff --git a/drivers/scsi/ahci.c b/drivers/scsi/ahci.c
index a800fb51168b..1c2ab3dede71 100644
--- a/drivers/scsi/ahci.c
+++ b/drivers/scsi/ahci.c
@@ -66,6 +66,8 @@ enum {
66 AHCI_IRQ_ON_SG = (1 << 31), 66 AHCI_IRQ_ON_SG = (1 << 31),
67 AHCI_CMD_ATAPI = (1 << 5), 67 AHCI_CMD_ATAPI = (1 << 5),
68 AHCI_CMD_WRITE = (1 << 6), 68 AHCI_CMD_WRITE = (1 << 6),
69 AHCI_CMD_RESET = (1 << 8),
70 AHCI_CMD_CLR_BUSY = (1 << 10),
69 71
70 RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */ 72 RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */
71 73
@@ -85,6 +87,7 @@ enum {
85 87
86 /* HOST_CAP bits */ 88 /* HOST_CAP bits */
87 HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */ 89 HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */
90 HOST_CAP_CLO = (1 << 24), /* Command List Override support */
88 91
89 /* registers for each SATA port */ 92 /* registers for each SATA port */
90 PORT_LST_ADDR = 0x00, /* command list DMA addr */ 93 PORT_LST_ADDR = 0x00, /* command list DMA addr */
@@ -138,6 +141,7 @@ enum {
138 PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */ 141 PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */
139 PORT_CMD_FIS_ON = (1 << 14), /* FIS DMA engine running */ 142 PORT_CMD_FIS_ON = (1 << 14), /* FIS DMA engine running */
140 PORT_CMD_FIS_RX = (1 << 4), /* Enable FIS receive DMA engine */ 143 PORT_CMD_FIS_RX = (1 << 4), /* Enable FIS receive DMA engine */
144 PORT_CMD_CLO = (1 << 3), /* Command list override */
141 PORT_CMD_POWER_ON = (1 << 2), /* Power up device */ 145 PORT_CMD_POWER_ON = (1 << 2), /* Power up device */
142 PORT_CMD_SPIN_UP = (1 << 1), /* Spin up device */ 146 PORT_CMD_SPIN_UP = (1 << 1), /* Spin up device */
143 PORT_CMD_START = (1 << 0), /* Enable port DMA engine */ 147 PORT_CMD_START = (1 << 0), /* Enable port DMA engine */
@@ -184,9 +188,9 @@ struct ahci_port_priv {
184static u32 ahci_scr_read (struct ata_port *ap, unsigned int sc_reg); 188static u32 ahci_scr_read (struct ata_port *ap, unsigned int sc_reg);
185static void ahci_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); 189static void ahci_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
186static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); 190static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
187static int ahci_qc_issue(struct ata_queued_cmd *qc); 191static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
188static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *regs); 192static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *regs);
189static void ahci_phy_reset(struct ata_port *ap); 193static int ahci_probe_reset(struct ata_port *ap, unsigned int *classes);
190static void ahci_irq_clear(struct ata_port *ap); 194static void ahci_irq_clear(struct ata_port *ap);
191static void ahci_eng_timeout(struct ata_port *ap); 195static void ahci_eng_timeout(struct ata_port *ap);
192static int ahci_port_start(struct ata_port *ap); 196static int ahci_port_start(struct ata_port *ap);
@@ -202,11 +206,11 @@ static struct scsi_host_template ahci_sht = {
202 .name = DRV_NAME, 206 .name = DRV_NAME,
203 .ioctl = ata_scsi_ioctl, 207 .ioctl = ata_scsi_ioctl,
204 .queuecommand = ata_scsi_queuecmd, 208 .queuecommand = ata_scsi_queuecmd,
209 .eh_timed_out = ata_scsi_timed_out,
205 .eh_strategy_handler = ata_scsi_error, 210 .eh_strategy_handler = ata_scsi_error,
206 .can_queue = ATA_DEF_QUEUE, 211 .can_queue = ATA_DEF_QUEUE,
207 .this_id = ATA_SHT_THIS_ID, 212 .this_id = ATA_SHT_THIS_ID,
208 .sg_tablesize = AHCI_MAX_SG, 213 .sg_tablesize = AHCI_MAX_SG,
209 .max_sectors = ATA_MAX_SECTORS,
210 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 214 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
211 .emulated = ATA_SHT_EMULATED, 215 .emulated = ATA_SHT_EMULATED,
212 .use_clustering = AHCI_USE_CLUSTERING, 216 .use_clustering = AHCI_USE_CLUSTERING,
@@ -225,7 +229,7 @@ static const struct ata_port_operations ahci_ops = {
225 229
226 .tf_read = ahci_tf_read, 230 .tf_read = ahci_tf_read,
227 231
228 .phy_reset = ahci_phy_reset, 232 .probe_reset = ahci_probe_reset,
229 233
230 .qc_prep = ahci_qc_prep, 234 .qc_prep = ahci_qc_prep,
231 .qc_issue = ahci_qc_issue, 235 .qc_issue = ahci_qc_issue,
@@ -247,8 +251,7 @@ static const struct ata_port_info ahci_port_info[] = {
247 { 251 {
248 .sht = &ahci_sht, 252 .sht = &ahci_sht,
249 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 253 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
250 ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO | 254 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
251 ATA_FLAG_PIO_DMA,
252 .pio_mask = 0x1f, /* pio0-4 */ 255 .pio_mask = 0x1f, /* pio0-4 */
253 .udma_mask = 0x7f, /* udma0-6 ; FIXME */ 256 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
254 .port_ops = &ahci_ops, 257 .port_ops = &ahci_ops,
@@ -450,17 +453,48 @@ static void ahci_scr_write (struct ata_port *ap, unsigned int sc_reg_in,
450 writel(val, (void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4)); 453 writel(val, (void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
451} 454}
452 455
453static void ahci_phy_reset(struct ata_port *ap) 456static int ahci_stop_engine(struct ata_port *ap)
454{ 457{
455 void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr; 458 void __iomem *mmio = ap->host_set->mmio_base;
456 struct ata_taskfile tf; 459 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
457 struct ata_device *dev = &ap->device[0]; 460 int work;
458 u32 new_tmp, tmp; 461 u32 tmp;
459 462
460 __sata_phy_reset(ap); 463 tmp = readl(port_mmio + PORT_CMD);
464 tmp &= ~PORT_CMD_START;
465 writel(tmp, port_mmio + PORT_CMD);
461 466
462 if (ap->flags & ATA_FLAG_PORT_DISABLED) 467 /* wait for engine to stop. TODO: this could be
463 return; 468 * as long as 500 msec
469 */
470 work = 1000;
471 while (work-- > 0) {
472 tmp = readl(port_mmio + PORT_CMD);
473 if ((tmp & PORT_CMD_LIST_ON) == 0)
474 return 0;
475 udelay(10);
476 }
477
478 return -EIO;
479}
480
481static void ahci_start_engine(struct ata_port *ap)
482{
483 void __iomem *mmio = ap->host_set->mmio_base;
484 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
485 u32 tmp;
486
487 tmp = readl(port_mmio + PORT_CMD);
488 tmp |= PORT_CMD_START;
489 writel(tmp, port_mmio + PORT_CMD);
490 readl(port_mmio + PORT_CMD); /* flush */
491}
492
493static unsigned int ahci_dev_classify(struct ata_port *ap)
494{
495 void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
496 struct ata_taskfile tf;
497 u32 tmp;
464 498
465 tmp = readl(port_mmio + PORT_SIG); 499 tmp = readl(port_mmio + PORT_SIG);
466 tf.lbah = (tmp >> 24) & 0xff; 500 tf.lbah = (tmp >> 24) & 0xff;
@@ -468,15 +502,46 @@ static void ahci_phy_reset(struct ata_port *ap)
468 tf.lbal = (tmp >> 8) & 0xff; 502 tf.lbal = (tmp >> 8) & 0xff;
469 tf.nsect = (tmp) & 0xff; 503 tf.nsect = (tmp) & 0xff;
470 504
471 dev->class = ata_dev_classify(&tf); 505 return ata_dev_classify(&tf);
472 if (!ata_dev_present(dev)) { 506}
473 ata_port_disable(ap); 507
474 return; 508static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, u32 opts)
475 } 509{
510 pp->cmd_slot[0].opts = cpu_to_le32(opts);
511 pp->cmd_slot[0].status = 0;
512 pp->cmd_slot[0].tbl_addr = cpu_to_le32(pp->cmd_tbl_dma & 0xffffffff);
513 pp->cmd_slot[0].tbl_addr_hi = cpu_to_le32((pp->cmd_tbl_dma >> 16) >> 16);
514}
515
516static int ahci_hardreset(struct ata_port *ap, int verbose, unsigned int *class)
517{
518 int rc;
519
520 DPRINTK("ENTER\n");
521
522 ahci_stop_engine(ap);
523 rc = sata_std_hardreset(ap, verbose, class);
524 ahci_start_engine(ap);
525
526 if (rc == 0)
527 *class = ahci_dev_classify(ap);
528 if (*class == ATA_DEV_UNKNOWN)
529 *class = ATA_DEV_NONE;
530
531 DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
532 return rc;
533}
534
535static void ahci_postreset(struct ata_port *ap, unsigned int *class)
536{
537 void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
538 u32 new_tmp, tmp;
539
540 ata_std_postreset(ap, class);
476 541
477 /* Make sure port's ATAPI bit is set appropriately */ 542 /* Make sure port's ATAPI bit is set appropriately */
478 new_tmp = tmp = readl(port_mmio + PORT_CMD); 543 new_tmp = tmp = readl(port_mmio + PORT_CMD);
479 if (dev->class == ATA_DEV_ATAPI) 544 if (*class == ATA_DEV_ATAPI)
480 new_tmp |= PORT_CMD_ATAPI; 545 new_tmp |= PORT_CMD_ATAPI;
481 else 546 else
482 new_tmp &= ~PORT_CMD_ATAPI; 547 new_tmp &= ~PORT_CMD_ATAPI;
@@ -486,6 +551,12 @@ static void ahci_phy_reset(struct ata_port *ap)
486 } 551 }
487} 552}
488 553
554static int ahci_probe_reset(struct ata_port *ap, unsigned int *classes)
555{
556 return ata_drive_probe_reset(ap, NULL, NULL, ahci_hardreset,
557 ahci_postreset, classes);
558}
559
489static u8 ahci_check_status(struct ata_port *ap) 560static u8 ahci_check_status(struct ata_port *ap)
490{ 561{
491 void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr; 562 void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr;
@@ -533,42 +604,36 @@ static void ahci_qc_prep(struct ata_queued_cmd *qc)
533{ 604{
534 struct ata_port *ap = qc->ap; 605 struct ata_port *ap = qc->ap;
535 struct ahci_port_priv *pp = ap->private_data; 606 struct ahci_port_priv *pp = ap->private_data;
607 int is_atapi = is_atapi_taskfile(&qc->tf);
536 u32 opts; 608 u32 opts;
537 const u32 cmd_fis_len = 5; /* five dwords */ 609 const u32 cmd_fis_len = 5; /* five dwords */
538 unsigned int n_elem; 610 unsigned int n_elem;
539 611
540 /* 612 /*
541 * Fill in command slot information (currently only one slot,
542 * slot 0, is currently since we don't do queueing)
543 */
544
545 opts = cmd_fis_len;
546 if (qc->tf.flags & ATA_TFLAG_WRITE)
547 opts |= AHCI_CMD_WRITE;
548 if (is_atapi_taskfile(&qc->tf))
549 opts |= AHCI_CMD_ATAPI;
550
551 pp->cmd_slot[0].opts = cpu_to_le32(opts);
552 pp->cmd_slot[0].status = 0;
553 pp->cmd_slot[0].tbl_addr = cpu_to_le32(pp->cmd_tbl_dma & 0xffffffff);
554 pp->cmd_slot[0].tbl_addr_hi = cpu_to_le32((pp->cmd_tbl_dma >> 16) >> 16);
555
556 /*
557 * Fill in command table information. First, the header, 613 * Fill in command table information. First, the header,
558 * a SATA Register - Host to Device command FIS. 614 * a SATA Register - Host to Device command FIS.
559 */ 615 */
560 ata_tf_to_fis(&qc->tf, pp->cmd_tbl, 0); 616 ata_tf_to_fis(&qc->tf, pp->cmd_tbl, 0);
561 if (opts & AHCI_CMD_ATAPI) { 617 if (is_atapi) {
562 memset(pp->cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32); 618 memset(pp->cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
563 memcpy(pp->cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, ap->cdb_len); 619 memcpy(pp->cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb,
620 qc->dev->cdb_len);
564 } 621 }
565 622
566 if (!(qc->flags & ATA_QCFLAG_DMAMAP)) 623 n_elem = 0;
567 return; 624 if (qc->flags & ATA_QCFLAG_DMAMAP)
625 n_elem = ahci_fill_sg(qc);
568 626
569 n_elem = ahci_fill_sg(qc); 627 /*
628 * Fill in command slot information.
629 */
630 opts = cmd_fis_len | n_elem << 16;
631 if (qc->tf.flags & ATA_TFLAG_WRITE)
632 opts |= AHCI_CMD_WRITE;
633 if (is_atapi)
634 opts |= AHCI_CMD_ATAPI;
570 635
571 pp->cmd_slot[0].opts |= cpu_to_le32(n_elem << 16); 636 ahci_fill_cmd_slot(pp, opts);
572} 637}
573 638
574static void ahci_restart_port(struct ata_port *ap, u32 irq_stat) 639static void ahci_restart_port(struct ata_port *ap, u32 irq_stat)
@@ -576,7 +641,6 @@ static void ahci_restart_port(struct ata_port *ap, u32 irq_stat)
576 void __iomem *mmio = ap->host_set->mmio_base; 641 void __iomem *mmio = ap->host_set->mmio_base;
577 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no); 642 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
578 u32 tmp; 643 u32 tmp;
579 int work;
580 644
581 if ((ap->device[0].class != ATA_DEV_ATAPI) || 645 if ((ap->device[0].class != ATA_DEV_ATAPI) ||
582 ((irq_stat & PORT_IRQ_TF_ERR) == 0)) 646 ((irq_stat & PORT_IRQ_TF_ERR) == 0))
@@ -592,20 +656,7 @@ static void ahci_restart_port(struct ata_port *ap, u32 irq_stat)
592 readl(port_mmio + PORT_SCR_ERR)); 656 readl(port_mmio + PORT_SCR_ERR));
593 657
594 /* stop DMA */ 658 /* stop DMA */
595 tmp = readl(port_mmio + PORT_CMD); 659 ahci_stop_engine(ap);
596 tmp &= ~PORT_CMD_START;
597 writel(tmp, port_mmio + PORT_CMD);
598
599 /* wait for engine to stop. TODO: this could be
600 * as long as 500 msec
601 */
602 work = 1000;
603 while (work-- > 0) {
604 tmp = readl(port_mmio + PORT_CMD);
605 if ((tmp & PORT_CMD_LIST_ON) == 0)
606 break;
607 udelay(10);
608 }
609 660
610 /* clear SATA phy error, if any */ 661 /* clear SATA phy error, if any */
611 tmp = readl(port_mmio + PORT_SCR_ERR); 662 tmp = readl(port_mmio + PORT_SCR_ERR);
@@ -624,10 +675,7 @@ static void ahci_restart_port(struct ata_port *ap, u32 irq_stat)
624 } 675 }
625 676
626 /* re-start DMA */ 677 /* re-start DMA */
627 tmp = readl(port_mmio + PORT_CMD); 678 ahci_start_engine(ap);
628 tmp |= PORT_CMD_START;
629 writel(tmp, port_mmio + PORT_CMD);
630 readl(port_mmio + PORT_CMD); /* flush */
631} 679}
632 680
633static void ahci_eng_timeout(struct ata_port *ap) 681static void ahci_eng_timeout(struct ata_port *ap)
@@ -642,25 +690,13 @@ static void ahci_eng_timeout(struct ata_port *ap)
642 690
643 spin_lock_irqsave(&host_set->lock, flags); 691 spin_lock_irqsave(&host_set->lock, flags);
644 692
693 ahci_restart_port(ap, readl(port_mmio + PORT_IRQ_STAT));
645 qc = ata_qc_from_tag(ap, ap->active_tag); 694 qc = ata_qc_from_tag(ap, ap->active_tag);
646 if (!qc) { 695 qc->err_mask |= AC_ERR_TIMEOUT;
647 printk(KERN_ERR "ata%u: BUG: timeout without command\n",
648 ap->id);
649 } else {
650 ahci_restart_port(ap, readl(port_mmio + PORT_IRQ_STAT));
651
652 /* hack alert! We cannot use the supplied completion
653 * function from inside the ->eh_strategy_handler() thread.
654 * libata is the only user of ->eh_strategy_handler() in
655 * any kernel, so the default scsi_done() assumes it is
656 * not being called from the SCSI EH.
657 */
658 qc->scsidone = scsi_finish_command;
659 qc->err_mask |= AC_ERR_OTHER;
660 ata_qc_complete(qc);
661 }
662 696
663 spin_unlock_irqrestore(&host_set->lock, flags); 697 spin_unlock_irqrestore(&host_set->lock, flags);
698
699 ata_eh_qc_complete(qc);
664} 700}
665 701
666static inline int ahci_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc) 702static inline int ahci_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
@@ -678,7 +714,7 @@ static inline int ahci_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
678 ci = readl(port_mmio + PORT_CMD_ISSUE); 714 ci = readl(port_mmio + PORT_CMD_ISSUE);
679 if (likely((ci & 0x1) == 0)) { 715 if (likely((ci & 0x1) == 0)) {
680 if (qc) { 716 if (qc) {
681 assert(qc->err_mask == 0); 717 WARN_ON(qc->err_mask);
682 ata_qc_complete(qc); 718 ata_qc_complete(qc);
683 qc = NULL; 719 qc = NULL;
684 } 720 }
@@ -697,7 +733,7 @@ static inline int ahci_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
697 ahci_restart_port(ap, status); 733 ahci_restart_port(ap, status);
698 734
699 if (qc) { 735 if (qc) {
700 qc->err_mask |= AC_ERR_OTHER; 736 qc->err_mask |= err_mask;
701 ata_qc_complete(qc); 737 ata_qc_complete(qc);
702 } 738 }
703 } 739 }
@@ -776,7 +812,7 @@ static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *
776 return IRQ_RETVAL(handled); 812 return IRQ_RETVAL(handled);
777} 813}
778 814
779static int ahci_qc_issue(struct ata_queued_cmd *qc) 815static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
780{ 816{
781 struct ata_port *ap = qc->ap; 817 struct ata_port *ap = qc->ap;
782 void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr; 818 void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
diff --git a/drivers/scsi/ata_piix.c b/drivers/scsi/ata_piix.c
index fc3ca051ceed..c662bf531514 100644
--- a/drivers/scsi/ata_piix.c
+++ b/drivers/scsi/ata_piix.c
@@ -101,9 +101,11 @@ enum {
101 ICH5_PCS = 0x92, /* port control and status */ 101 ICH5_PCS = 0x92, /* port control and status */
102 PIIX_SCC = 0x0A, /* sub-class code register */ 102 PIIX_SCC = 0x0A, /* sub-class code register */
103 103
104 PIIX_FLAG_AHCI = (1 << 28), /* AHCI possible */ 104 PIIX_FLAG_AHCI = (1 << 27), /* AHCI possible */
105 PIIX_FLAG_CHECKINTR = (1 << 29), /* make sure PCI INTx enabled */ 105 PIIX_FLAG_CHECKINTR = (1 << 28), /* make sure PCI INTx enabled */
106 PIIX_FLAG_COMBINED = (1 << 30), /* combined mode possible */ 106 PIIX_FLAG_COMBINED = (1 << 29), /* combined mode possible */
107 /* ICH6/7 use different scheme for map value */
108 PIIX_FLAG_COMBINED_ICH6 = PIIX_FLAG_COMBINED | (1 << 30),
107 109
108 /* combined mode. if set, PATA is channel 0. 110 /* combined mode. if set, PATA is channel 0.
109 * if clear, PATA is channel 1. 111 * if clear, PATA is channel 1.
@@ -129,8 +131,8 @@ enum {
129static int piix_init_one (struct pci_dev *pdev, 131static int piix_init_one (struct pci_dev *pdev,
130 const struct pci_device_id *ent); 132 const struct pci_device_id *ent);
131 133
132static void piix_pata_phy_reset(struct ata_port *ap); 134static int piix_pata_probe_reset(struct ata_port *ap, unsigned int *classes);
133static void piix_sata_phy_reset(struct ata_port *ap); 135static int piix_sata_probe_reset(struct ata_port *ap, unsigned int *classes);
134static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev); 136static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev);
135static void piix_set_dmamode (struct ata_port *ap, struct ata_device *adev); 137static void piix_set_dmamode (struct ata_port *ap, struct ata_device *adev);
136 138
@@ -178,11 +180,11 @@ static struct scsi_host_template piix_sht = {
178 .name = DRV_NAME, 180 .name = DRV_NAME,
179 .ioctl = ata_scsi_ioctl, 181 .ioctl = ata_scsi_ioctl,
180 .queuecommand = ata_scsi_queuecmd, 182 .queuecommand = ata_scsi_queuecmd,
183 .eh_timed_out = ata_scsi_timed_out,
181 .eh_strategy_handler = ata_scsi_error, 184 .eh_strategy_handler = ata_scsi_error,
182 .can_queue = ATA_DEF_QUEUE, 185 .can_queue = ATA_DEF_QUEUE,
183 .this_id = ATA_SHT_THIS_ID, 186 .this_id = ATA_SHT_THIS_ID,
184 .sg_tablesize = LIBATA_MAX_PRD, 187 .sg_tablesize = LIBATA_MAX_PRD,
185 .max_sectors = ATA_MAX_SECTORS,
186 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 188 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
187 .emulated = ATA_SHT_EMULATED, 189 .emulated = ATA_SHT_EMULATED,
188 .use_clustering = ATA_SHT_USE_CLUSTERING, 190 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -205,7 +207,7 @@ static const struct ata_port_operations piix_pata_ops = {
205 .exec_command = ata_exec_command, 207 .exec_command = ata_exec_command,
206 .dev_select = ata_std_dev_select, 208 .dev_select = ata_std_dev_select,
207 209
208 .phy_reset = piix_pata_phy_reset, 210 .probe_reset = piix_pata_probe_reset,
209 211
210 .bmdma_setup = ata_bmdma_setup, 212 .bmdma_setup = ata_bmdma_setup,
211 .bmdma_start = ata_bmdma_start, 213 .bmdma_start = ata_bmdma_start,
@@ -233,7 +235,7 @@ static const struct ata_port_operations piix_sata_ops = {
233 .exec_command = ata_exec_command, 235 .exec_command = ata_exec_command,
234 .dev_select = ata_std_dev_select, 236 .dev_select = ata_std_dev_select,
235 237
236 .phy_reset = piix_sata_phy_reset, 238 .probe_reset = piix_sata_probe_reset,
237 239
238 .bmdma_setup = ata_bmdma_setup, 240 .bmdma_setup = ata_bmdma_setup,
239 .bmdma_start = ata_bmdma_start, 241 .bmdma_start = ata_bmdma_start,
@@ -256,8 +258,7 @@ static struct ata_port_info piix_port_info[] = {
256 /* ich5_pata */ 258 /* ich5_pata */
257 { 259 {
258 .sht = &piix_sht, 260 .sht = &piix_sht,
259 .host_flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST | 261 .host_flags = ATA_FLAG_SLAVE_POSS | PIIX_FLAG_CHECKINTR,
260 PIIX_FLAG_CHECKINTR,
261 .pio_mask = 0x1f, /* pio0-4 */ 262 .pio_mask = 0x1f, /* pio0-4 */
262#if 0 263#if 0
263 .mwdma_mask = 0x06, /* mwdma1-2 */ 264 .mwdma_mask = 0x06, /* mwdma1-2 */
@@ -271,8 +272,8 @@ static struct ata_port_info piix_port_info[] = {
271 /* ich5_sata */ 272 /* ich5_sata */
272 { 273 {
273 .sht = &piix_sht, 274 .sht = &piix_sht,
274 .host_flags = ATA_FLAG_SATA | ATA_FLAG_SRST | 275 .host_flags = ATA_FLAG_SATA | PIIX_FLAG_COMBINED |
275 PIIX_FLAG_COMBINED | PIIX_FLAG_CHECKINTR, 276 PIIX_FLAG_CHECKINTR,
276 .pio_mask = 0x1f, /* pio0-4 */ 277 .pio_mask = 0x1f, /* pio0-4 */
277 .mwdma_mask = 0x07, /* mwdma0-2 */ 278 .mwdma_mask = 0x07, /* mwdma0-2 */
278 .udma_mask = 0x7f, /* udma0-6 */ 279 .udma_mask = 0x7f, /* udma0-6 */
@@ -282,7 +283,7 @@ static struct ata_port_info piix_port_info[] = {
282 /* piix4_pata */ 283 /* piix4_pata */
283 { 284 {
284 .sht = &piix_sht, 285 .sht = &piix_sht,
285 .host_flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, 286 .host_flags = ATA_FLAG_SLAVE_POSS,
286 .pio_mask = 0x1f, /* pio0-4 */ 287 .pio_mask = 0x1f, /* pio0-4 */
287#if 0 288#if 0
288 .mwdma_mask = 0x06, /* mwdma1-2 */ 289 .mwdma_mask = 0x06, /* mwdma1-2 */
@@ -296,9 +297,8 @@ static struct ata_port_info piix_port_info[] = {
296 /* ich6_sata */ 297 /* ich6_sata */
297 { 298 {
298 .sht = &piix_sht, 299 .sht = &piix_sht,
299 .host_flags = ATA_FLAG_SATA | ATA_FLAG_SRST | 300 .host_flags = ATA_FLAG_SATA | PIIX_FLAG_COMBINED_ICH6 |
300 PIIX_FLAG_COMBINED | PIIX_FLAG_CHECKINTR | 301 PIIX_FLAG_CHECKINTR | ATA_FLAG_SLAVE_POSS,
301 ATA_FLAG_SLAVE_POSS,
302 .pio_mask = 0x1f, /* pio0-4 */ 302 .pio_mask = 0x1f, /* pio0-4 */
303 .mwdma_mask = 0x07, /* mwdma0-2 */ 303 .mwdma_mask = 0x07, /* mwdma0-2 */
304 .udma_mask = 0x7f, /* udma0-6 */ 304 .udma_mask = 0x7f, /* udma0-6 */
@@ -308,9 +308,9 @@ static struct ata_port_info piix_port_info[] = {
308 /* ich6_sata_ahci */ 308 /* ich6_sata_ahci */
309 { 309 {
310 .sht = &piix_sht, 310 .sht = &piix_sht,
311 .host_flags = ATA_FLAG_SATA | ATA_FLAG_SRST | 311 .host_flags = ATA_FLAG_SATA | PIIX_FLAG_COMBINED_ICH6 |
312 PIIX_FLAG_COMBINED | PIIX_FLAG_CHECKINTR | 312 PIIX_FLAG_CHECKINTR | ATA_FLAG_SLAVE_POSS |
313 ATA_FLAG_SLAVE_POSS | PIIX_FLAG_AHCI, 313 PIIX_FLAG_AHCI,
314 .pio_mask = 0x1f, /* pio0-4 */ 314 .pio_mask = 0x1f, /* pio0-4 */
315 .mwdma_mask = 0x07, /* mwdma0-2 */ 315 .mwdma_mask = 0x07, /* mwdma0-2 */
316 .udma_mask = 0x7f, /* udma0-6 */ 316 .udma_mask = 0x7f, /* udma0-6 */
@@ -363,30 +363,42 @@ cbl40:
363} 363}
364 364
365/** 365/**
366 * piix_pata_phy_reset - Probe specified port on PATA host controller 366 * piix_pata_probeinit - probeinit for PATA host controller
367 * @ap: Port to probe 367 * @ap: Target port
368 * 368 *
369 * Probe PATA phy. 369 * Probeinit including cable detection.
370 * 370 *
371 * LOCKING: 371 * LOCKING:
372 * None (inherited from caller). 372 * None (inherited from caller).
373 */ 373 */
374static void piix_pata_probeinit(struct ata_port *ap)
375{
376 piix_pata_cbl_detect(ap);
377 ata_std_probeinit(ap);
378}
374 379
375static void piix_pata_phy_reset(struct ata_port *ap) 380/**
381 * piix_pata_probe_reset - Perform reset on PATA port and classify
382 * @ap: Port to reset
383 * @classes: Resulting classes of attached devices
384 *
385 * Reset PATA phy and classify attached devices.
386 *
387 * LOCKING:
388 * None (inherited from caller).
389 */
390static int piix_pata_probe_reset(struct ata_port *ap, unsigned int *classes)
376{ 391{
377 struct pci_dev *pdev = to_pci_dev(ap->host_set->dev); 392 struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
378 393
379 if (!pci_test_config_bits(pdev, &piix_enable_bits[ap->hard_port_no])) { 394 if (!pci_test_config_bits(pdev, &piix_enable_bits[ap->hard_port_no])) {
380 ata_port_disable(ap);
381 printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id); 395 printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id);
382 return; 396 return 0;
383 } 397 }
384 398
385 piix_pata_cbl_detect(ap); 399 return ata_drive_probe_reset(ap, piix_pata_probeinit,
386 400 ata_std_softreset, NULL,
387 ata_port_probe(ap); 401 ata_std_postreset, classes);
388
389 ata_bus_reset(ap);
390} 402}
391 403
392/** 404/**
@@ -411,9 +423,6 @@ static int piix_sata_probe (struct ata_port *ap)
411 int orig_mask, mask, i; 423 int orig_mask, mask, i;
412 u8 pcs; 424 u8 pcs;
413 425
414 mask = (PIIX_PORT_PRESENT << ap->hard_port_no) |
415 (PIIX_PORT_ENABLED << ap->hard_port_no);
416
417 pci_read_config_byte(pdev, ICH5_PCS, &pcs); 426 pci_read_config_byte(pdev, ICH5_PCS, &pcs);
418 orig_mask = (int) pcs & 0xff; 427 orig_mask = (int) pcs & 0xff;
419 428
@@ -437,28 +446,25 @@ static int piix_sata_probe (struct ata_port *ap)
437} 446}
438 447
439/** 448/**
440 * piix_sata_phy_reset - Probe specified port on SATA host controller 449 * piix_sata_probe_reset - Perform reset on SATA port and classify
441 * @ap: Port to probe 450 * @ap: Port to reset
451 * @classes: Resulting classes of attached devices
442 * 452 *
443 * Probe SATA phy. 453 * Reset SATA phy and classify attached devices.
444 * 454 *
445 * LOCKING: 455 * LOCKING:
446 * None (inherited from caller). 456 * None (inherited from caller).
447 */ 457 */
448 458static int piix_sata_probe_reset(struct ata_port *ap, unsigned int *classes)
449static void piix_sata_phy_reset(struct ata_port *ap)
450{ 459{
451 if (!piix_sata_probe(ap)) { 460 if (!piix_sata_probe(ap)) {
452 ata_port_disable(ap);
453 printk(KERN_INFO "ata%u: SATA port has no device.\n", ap->id); 461 printk(KERN_INFO "ata%u: SATA port has no device.\n", ap->id);
454 return; 462 return 0;
455 } 463 }
456 464
457 ap->cbl = ATA_CBL_SATA; 465 return ata_drive_probe_reset(ap, ata_std_probeinit,
458 466 ata_std_softreset, NULL,
459 ata_port_probe(ap); 467 ata_std_postreset, classes);
460
461 ata_bus_reset(ap);
462} 468}
463 469
464/** 470/**
@@ -627,6 +633,7 @@ static int piix_disable_ahci(struct pci_dev *pdev)
627 633
628/** 634/**
629 * piix_check_450nx_errata - Check for problem 450NX setup 635 * piix_check_450nx_errata - Check for problem 450NX setup
636 * @ata_dev: the PCI device to check
630 * 637 *
631 * Check for the present of 450NX errata #19 and errata #25. If 638 * Check for the present of 450NX errata #19 and errata #25. If
632 * they are found return an error code so we can turn off DMA 639 * they are found return an error code so we can turn off DMA
@@ -680,6 +687,7 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
680 struct ata_port_info *port_info[2]; 687 struct ata_port_info *port_info[2];
681 unsigned int combined = 0; 688 unsigned int combined = 0;
682 unsigned int pata_chan = 0, sata_chan = 0; 689 unsigned int pata_chan = 0, sata_chan = 0;
690 unsigned long host_flags;
683 691
684 if (!printed_version++) 692 if (!printed_version++)
685 dev_printk(KERN_DEBUG, &pdev->dev, 693 dev_printk(KERN_DEBUG, &pdev->dev,
@@ -692,7 +700,9 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
692 port_info[0] = &piix_port_info[ent->driver_data]; 700 port_info[0] = &piix_port_info[ent->driver_data];
693 port_info[1] = &piix_port_info[ent->driver_data]; 701 port_info[1] = &piix_port_info[ent->driver_data];
694 702
695 if (port_info[0]->host_flags & PIIX_FLAG_AHCI) { 703 host_flags = port_info[0]->host_flags;
704
705 if (host_flags & PIIX_FLAG_AHCI) {
696 u8 tmp; 706 u8 tmp;
697 pci_read_config_byte(pdev, PIIX_SCC, &tmp); 707 pci_read_config_byte(pdev, PIIX_SCC, &tmp);
698 if (tmp == PIIX_AHCI_DEVICE) { 708 if (tmp == PIIX_AHCI_DEVICE) {
@@ -702,16 +712,35 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
702 } 712 }
703 } 713 }
704 714
705 if (port_info[0]->host_flags & PIIX_FLAG_COMBINED) { 715 if (host_flags & PIIX_FLAG_COMBINED) {
706 u8 tmp; 716 u8 tmp;
707 pci_read_config_byte(pdev, ICH5_PMR, &tmp); 717 pci_read_config_byte(pdev, ICH5_PMR, &tmp);
708 718
709 if (tmp & PIIX_COMB) { 719 if (host_flags & PIIX_FLAG_COMBINED_ICH6) {
710 combined = 1; 720 switch (tmp & 0x3) {
711 if (tmp & PIIX_COMB_PATA_P0) 721 case 0:
722 break;
723 case 1:
724 combined = 1;
712 sata_chan = 1; 725 sata_chan = 1;
713 else 726 break;
727 case 2:
728 combined = 1;
714 pata_chan = 1; 729 pata_chan = 1;
730 break;
731 case 3:
732 dev_printk(KERN_WARNING, &pdev->dev,
733 "invalid MAP value %u\n", tmp);
734 break;
735 }
736 } else {
737 if (tmp & PIIX_COMB) {
738 combined = 1;
739 if (tmp & PIIX_COMB_PATA_P0)
740 sata_chan = 1;
741 else
742 pata_chan = 1;
743 }
715 } 744 }
716 } 745 }
717 746
@@ -721,7 +750,7 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
721 * MSI is disabled (and it is disabled, as we don't use 750 * MSI is disabled (and it is disabled, as we don't use
722 * message-signalled interrupts currently). 751 * message-signalled interrupts currently).
723 */ 752 */
724 if (port_info[0]->host_flags & PIIX_FLAG_CHECKINTR) 753 if (host_flags & PIIX_FLAG_CHECKINTR)
725 pci_intx(pdev, 1); 754 pci_intx(pdev, 1);
726 755
727 if (combined) { 756 if (combined) {
diff --git a/drivers/scsi/libata-bmdma.c b/drivers/scsi/libata-bmdma.c
new file mode 100644
index 000000000000..a93336adcd23
--- /dev/null
+++ b/drivers/scsi/libata-bmdma.c
@@ -0,0 +1,703 @@
1/*
2 * libata-bmdma.c - helper library for PCI IDE BMDMA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2006 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2006 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
33 */
34
35#include <linux/config.h>
36#include <linux/kernel.h>
37#include <linux/pci.h>
38#include <linux/libata.h>
39
40#include "libata.h"
41
42/**
43 * ata_tf_load_pio - send taskfile registers to host controller
44 * @ap: Port to which output is sent
45 * @tf: ATA taskfile register set
46 *
47 * Outputs ATA taskfile to standard ATA host controller.
48 *
49 * LOCKING:
50 * Inherited from caller.
51 */
52
53static void ata_tf_load_pio(struct ata_port *ap, const struct ata_taskfile *tf)
54{
55 struct ata_ioports *ioaddr = &ap->ioaddr;
56 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
57
58 if (tf->ctl != ap->last_ctl) {
59 outb(tf->ctl, ioaddr->ctl_addr);
60 ap->last_ctl = tf->ctl;
61 ata_wait_idle(ap);
62 }
63
64 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
65 outb(tf->hob_feature, ioaddr->feature_addr);
66 outb(tf->hob_nsect, ioaddr->nsect_addr);
67 outb(tf->hob_lbal, ioaddr->lbal_addr);
68 outb(tf->hob_lbam, ioaddr->lbam_addr);
69 outb(tf->hob_lbah, ioaddr->lbah_addr);
70 VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
71 tf->hob_feature,
72 tf->hob_nsect,
73 tf->hob_lbal,
74 tf->hob_lbam,
75 tf->hob_lbah);
76 }
77
78 if (is_addr) {
79 outb(tf->feature, ioaddr->feature_addr);
80 outb(tf->nsect, ioaddr->nsect_addr);
81 outb(tf->lbal, ioaddr->lbal_addr);
82 outb(tf->lbam, ioaddr->lbam_addr);
83 outb(tf->lbah, ioaddr->lbah_addr);
84 VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
85 tf->feature,
86 tf->nsect,
87 tf->lbal,
88 tf->lbam,
89 tf->lbah);
90 }
91
92 if (tf->flags & ATA_TFLAG_DEVICE) {
93 outb(tf->device, ioaddr->device_addr);
94 VPRINTK("device 0x%X\n", tf->device);
95 }
96
97 ata_wait_idle(ap);
98}
99
100/**
101 * ata_tf_load_mmio - send taskfile registers to host controller
102 * @ap: Port to which output is sent
103 * @tf: ATA taskfile register set
104 *
105 * Outputs ATA taskfile to standard ATA host controller using MMIO.
106 *
107 * LOCKING:
108 * Inherited from caller.
109 */
110
111static void ata_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
112{
113 struct ata_ioports *ioaddr = &ap->ioaddr;
114 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
115
116 if (tf->ctl != ap->last_ctl) {
117 writeb(tf->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
118 ap->last_ctl = tf->ctl;
119 ata_wait_idle(ap);
120 }
121
122 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
123 writeb(tf->hob_feature, (void __iomem *) ioaddr->feature_addr);
124 writeb(tf->hob_nsect, (void __iomem *) ioaddr->nsect_addr);
125 writeb(tf->hob_lbal, (void __iomem *) ioaddr->lbal_addr);
126 writeb(tf->hob_lbam, (void __iomem *) ioaddr->lbam_addr);
127 writeb(tf->hob_lbah, (void __iomem *) ioaddr->lbah_addr);
128 VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
129 tf->hob_feature,
130 tf->hob_nsect,
131 tf->hob_lbal,
132 tf->hob_lbam,
133 tf->hob_lbah);
134 }
135
136 if (is_addr) {
137 writeb(tf->feature, (void __iomem *) ioaddr->feature_addr);
138 writeb(tf->nsect, (void __iomem *) ioaddr->nsect_addr);
139 writeb(tf->lbal, (void __iomem *) ioaddr->lbal_addr);
140 writeb(tf->lbam, (void __iomem *) ioaddr->lbam_addr);
141 writeb(tf->lbah, (void __iomem *) ioaddr->lbah_addr);
142 VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
143 tf->feature,
144 tf->nsect,
145 tf->lbal,
146 tf->lbam,
147 tf->lbah);
148 }
149
150 if (tf->flags & ATA_TFLAG_DEVICE) {
151 writeb(tf->device, (void __iomem *) ioaddr->device_addr);
152 VPRINTK("device 0x%X\n", tf->device);
153 }
154
155 ata_wait_idle(ap);
156}
157
158
159/**
160 * ata_tf_load - send taskfile registers to host controller
161 * @ap: Port to which output is sent
162 * @tf: ATA taskfile register set
163 *
164 * Outputs ATA taskfile to standard ATA host controller using MMIO
165 * or PIO as indicated by the ATA_FLAG_MMIO flag.
166 * Writes the control, feature, nsect, lbal, lbam, and lbah registers.
167 * Optionally (ATA_TFLAG_LBA48) writes hob_feature, hob_nsect,
168 * hob_lbal, hob_lbam, and hob_lbah.
169 *
170 * This function waits for idle (!BUSY and !DRQ) after writing
171 * registers. If the control register has a new value, this
172 * function also waits for idle after writing control and before
173 * writing the remaining registers.
174 *
175 * May be used as the tf_load() entry in ata_port_operations.
176 *
177 * LOCKING:
178 * Inherited from caller.
179 */
180void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
181{
182 if (ap->flags & ATA_FLAG_MMIO)
183 ata_tf_load_mmio(ap, tf);
184 else
185 ata_tf_load_pio(ap, tf);
186}
187
188/**
189 * ata_exec_command_pio - issue ATA command to host controller
190 * @ap: port to which command is being issued
191 * @tf: ATA taskfile register set
192 *
193 * Issues PIO write to ATA command register, with proper
194 * synchronization with interrupt handler / other threads.
195 *
196 * LOCKING:
197 * spin_lock_irqsave(host_set lock)
198 */
199
200static void ata_exec_command_pio(struct ata_port *ap, const struct ata_taskfile *tf)
201{
202 DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
203
204 outb(tf->command, ap->ioaddr.command_addr);
205 ata_pause(ap);
206}
207
208
209/**
210 * ata_exec_command_mmio - issue ATA command to host controller
211 * @ap: port to which command is being issued
212 * @tf: ATA taskfile register set
213 *
214 * Issues MMIO write to ATA command register, with proper
215 * synchronization with interrupt handler / other threads.
216 *
217 * LOCKING:
218 * spin_lock_irqsave(host_set lock)
219 */
220
221static void ata_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
222{
223 DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
224
225 writeb(tf->command, (void __iomem *) ap->ioaddr.command_addr);
226 ata_pause(ap);
227}
228
229
230/**
231 * ata_exec_command - issue ATA command to host controller
232 * @ap: port to which command is being issued
233 * @tf: ATA taskfile register set
234 *
235 * Issues PIO/MMIO write to ATA command register, with proper
236 * synchronization with interrupt handler / other threads.
237 *
238 * LOCKING:
239 * spin_lock_irqsave(host_set lock)
240 */
241void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
242{
243 if (ap->flags & ATA_FLAG_MMIO)
244 ata_exec_command_mmio(ap, tf);
245 else
246 ata_exec_command_pio(ap, tf);
247}
248
249/**
250 * ata_tf_read_pio - input device's ATA taskfile shadow registers
251 * @ap: Port from which input is read
252 * @tf: ATA taskfile register set for storing input
253 *
254 * Reads ATA taskfile registers for currently-selected device
255 * into @tf.
256 *
257 * LOCKING:
258 * Inherited from caller.
259 */
260
261static void ata_tf_read_pio(struct ata_port *ap, struct ata_taskfile *tf)
262{
263 struct ata_ioports *ioaddr = &ap->ioaddr;
264
265 tf->command = ata_check_status(ap);
266 tf->feature = inb(ioaddr->error_addr);
267 tf->nsect = inb(ioaddr->nsect_addr);
268 tf->lbal = inb(ioaddr->lbal_addr);
269 tf->lbam = inb(ioaddr->lbam_addr);
270 tf->lbah = inb(ioaddr->lbah_addr);
271 tf->device = inb(ioaddr->device_addr);
272
273 if (tf->flags & ATA_TFLAG_LBA48) {
274 outb(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
275 tf->hob_feature = inb(ioaddr->error_addr);
276 tf->hob_nsect = inb(ioaddr->nsect_addr);
277 tf->hob_lbal = inb(ioaddr->lbal_addr);
278 tf->hob_lbam = inb(ioaddr->lbam_addr);
279 tf->hob_lbah = inb(ioaddr->lbah_addr);
280 }
281}
282
283/**
284 * ata_tf_read_mmio - input device's ATA taskfile shadow registers
285 * @ap: Port from which input is read
286 * @tf: ATA taskfile register set for storing input
287 *
288 * Reads ATA taskfile registers for currently-selected device
289 * into @tf via MMIO.
290 *
291 * LOCKING:
292 * Inherited from caller.
293 */
294
295static void ata_tf_read_mmio(struct ata_port *ap, struct ata_taskfile *tf)
296{
297 struct ata_ioports *ioaddr = &ap->ioaddr;
298
299 tf->command = ata_check_status(ap);
300 tf->feature = readb((void __iomem *)ioaddr->error_addr);
301 tf->nsect = readb((void __iomem *)ioaddr->nsect_addr);
302 tf->lbal = readb((void __iomem *)ioaddr->lbal_addr);
303 tf->lbam = readb((void __iomem *)ioaddr->lbam_addr);
304 tf->lbah = readb((void __iomem *)ioaddr->lbah_addr);
305 tf->device = readb((void __iomem *)ioaddr->device_addr);
306
307 if (tf->flags & ATA_TFLAG_LBA48) {
308 writeb(tf->ctl | ATA_HOB, (void __iomem *) ap->ioaddr.ctl_addr);
309 tf->hob_feature = readb((void __iomem *)ioaddr->error_addr);
310 tf->hob_nsect = readb((void __iomem *)ioaddr->nsect_addr);
311 tf->hob_lbal = readb((void __iomem *)ioaddr->lbal_addr);
312 tf->hob_lbam = readb((void __iomem *)ioaddr->lbam_addr);
313 tf->hob_lbah = readb((void __iomem *)ioaddr->lbah_addr);
314 }
315}
316
317
318/**
319 * ata_tf_read - input device's ATA taskfile shadow registers
320 * @ap: Port from which input is read
321 * @tf: ATA taskfile register set for storing input
322 *
323 * Reads ATA taskfile registers for currently-selected device
324 * into @tf.
325 *
326 * Reads nsect, lbal, lbam, lbah, and device. If ATA_TFLAG_LBA48
327 * is set, also reads the hob registers.
328 *
329 * May be used as the tf_read() entry in ata_port_operations.
330 *
331 * LOCKING:
332 * Inherited from caller.
333 */
334void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
335{
336 if (ap->flags & ATA_FLAG_MMIO)
337 ata_tf_read_mmio(ap, tf);
338 else
339 ata_tf_read_pio(ap, tf);
340}
341
342/**
343 * ata_check_status_pio - Read device status reg & clear interrupt
344 * @ap: port where the device is
345 *
346 * Reads ATA taskfile status register for currently-selected device
347 * and return its value. This also clears pending interrupts
348 * from this device
349 *
350 * LOCKING:
351 * Inherited from caller.
352 */
353static u8 ata_check_status_pio(struct ata_port *ap)
354{
355 return inb(ap->ioaddr.status_addr);
356}
357
358/**
359 * ata_check_status_mmio - Read device status reg & clear interrupt
360 * @ap: port where the device is
361 *
362 * Reads ATA taskfile status register for currently-selected device
363 * via MMIO and return its value. This also clears pending interrupts
364 * from this device
365 *
366 * LOCKING:
367 * Inherited from caller.
368 */
369static u8 ata_check_status_mmio(struct ata_port *ap)
370{
371 return readb((void __iomem *) ap->ioaddr.status_addr);
372}
373
374
375/**
376 * ata_check_status - Read device status reg & clear interrupt
377 * @ap: port where the device is
378 *
379 * Reads ATA taskfile status register for currently-selected device
380 * and return its value. This also clears pending interrupts
381 * from this device
382 *
383 * May be used as the check_status() entry in ata_port_operations.
384 *
385 * LOCKING:
386 * Inherited from caller.
387 */
388u8 ata_check_status(struct ata_port *ap)
389{
390 if (ap->flags & ATA_FLAG_MMIO)
391 return ata_check_status_mmio(ap);
392 return ata_check_status_pio(ap);
393}
394
395
396/**
397 * ata_altstatus - Read device alternate status reg
398 * @ap: port where the device is
399 *
400 * Reads ATA taskfile alternate status register for
401 * currently-selected device and return its value.
402 *
403 * Note: may NOT be used as the check_altstatus() entry in
404 * ata_port_operations.
405 *
406 * LOCKING:
407 * Inherited from caller.
408 */
409u8 ata_altstatus(struct ata_port *ap)
410{
411 if (ap->ops->check_altstatus)
412 return ap->ops->check_altstatus(ap);
413
414 if (ap->flags & ATA_FLAG_MMIO)
415 return readb((void __iomem *)ap->ioaddr.altstatus_addr);
416 return inb(ap->ioaddr.altstatus_addr);
417}
418
419#ifdef CONFIG_PCI
420static struct ata_probe_ent *
421ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
422{
423 struct ata_probe_ent *probe_ent;
424
425 probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
426 if (!probe_ent) {
427 printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
428 kobject_name(&(dev->kobj)));
429 return NULL;
430 }
431
432 INIT_LIST_HEAD(&probe_ent->node);
433 probe_ent->dev = dev;
434
435 probe_ent->sht = port->sht;
436 probe_ent->host_flags = port->host_flags;
437 probe_ent->pio_mask = port->pio_mask;
438 probe_ent->mwdma_mask = port->mwdma_mask;
439 probe_ent->udma_mask = port->udma_mask;
440 probe_ent->port_ops = port->port_ops;
441
442 return probe_ent;
443}
444
445
446/**
447 * ata_pci_init_native_mode - Initialize native-mode driver
448 * @pdev: pci device to be initialized
449 * @port: array[2] of pointers to port info structures.
450 * @ports: bitmap of ports present
451 *
452 * Utility function which allocates and initializes an
453 * ata_probe_ent structure for a standard dual-port
454 * PIO-based IDE controller. The returned ata_probe_ent
455 * structure can be passed to ata_device_add(). The returned
456 * ata_probe_ent structure should then be freed with kfree().
457 *
458 * The caller need only pass the address of the primary port, the
459 * secondary will be deduced automatically. If the device has non
460 * standard secondary port mappings this function can be called twice,
461 * once for each interface.
462 */
463
464struct ata_probe_ent *
465ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int ports)
466{
467 struct ata_probe_ent *probe_ent =
468 ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
469 int p = 0;
470
471 if (!probe_ent)
472 return NULL;
473
474 probe_ent->irq = pdev->irq;
475 probe_ent->irq_flags = SA_SHIRQ;
476 probe_ent->private_data = port[0]->private_data;
477
478 if (ports & ATA_PORT_PRIMARY) {
479 probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 0);
480 probe_ent->port[p].altstatus_addr =
481 probe_ent->port[p].ctl_addr =
482 pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS;
483 probe_ent->port[p].bmdma_addr = pci_resource_start(pdev, 4);
484 ata_std_ports(&probe_ent->port[p]);
485 p++;
486 }
487
488 if (ports & ATA_PORT_SECONDARY) {
489 probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 2);
490 probe_ent->port[p].altstatus_addr =
491 probe_ent->port[p].ctl_addr =
492 pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS;
493 probe_ent->port[p].bmdma_addr = pci_resource_start(pdev, 4) + 8;
494 ata_std_ports(&probe_ent->port[p]);
495 p++;
496 }
497
498 probe_ent->n_ports = p;
499 return probe_ent;
500}
501
502
503static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev,
504 struct ata_port_info *port, int port_num)
505{
506 struct ata_probe_ent *probe_ent;
507
508 probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port);
509 if (!probe_ent)
510 return NULL;
511
512 probe_ent->legacy_mode = 1;
513 probe_ent->n_ports = 1;
514 probe_ent->hard_port_no = port_num;
515 probe_ent->private_data = port->private_data;
516
517 switch(port_num)
518 {
519 case 0:
520 probe_ent->irq = 14;
521 probe_ent->port[0].cmd_addr = 0x1f0;
522 probe_ent->port[0].altstatus_addr =
523 probe_ent->port[0].ctl_addr = 0x3f6;
524 break;
525 case 1:
526 probe_ent->irq = 15;
527 probe_ent->port[0].cmd_addr = 0x170;
528 probe_ent->port[0].altstatus_addr =
529 probe_ent->port[0].ctl_addr = 0x376;
530 break;
531 }
532
533 probe_ent->port[0].bmdma_addr =
534 pci_resource_start(pdev, 4) + 8 * port_num;
535 ata_std_ports(&probe_ent->port[0]);
536
537 return probe_ent;
538}
539
540
541/**
542 * ata_pci_init_one - Initialize/register PCI IDE host controller
543 * @pdev: Controller to be initialized
544 * @port_info: Information from low-level host driver
545 * @n_ports: Number of ports attached to host controller
546 *
547 * This is a helper function which can be called from a driver's
548 * xxx_init_one() probe function if the hardware uses traditional
549 * IDE taskfile registers.
550 *
551 * This function calls pci_enable_device(), reserves its register
552 * regions, sets the dma mask, enables bus master mode, and calls
553 * ata_device_add()
554 *
555 * LOCKING:
556 * Inherited from PCI layer (may sleep).
557 *
558 * RETURNS:
559 * Zero on success, negative on errno-based value on error.
560 */
561
562int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
563 unsigned int n_ports)
564{
565 struct ata_probe_ent *probe_ent = NULL, *probe_ent2 = NULL;
566 struct ata_port_info *port[2];
567 u8 tmp8, mask;
568 unsigned int legacy_mode = 0;
569 int disable_dev_on_err = 1;
570 int rc;
571
572 DPRINTK("ENTER\n");
573
574 port[0] = port_info[0];
575 if (n_ports > 1)
576 port[1] = port_info[1];
577 else
578 port[1] = port[0];
579
580 if ((port[0]->host_flags & ATA_FLAG_NO_LEGACY) == 0
581 && (pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
582 /* TODO: What if one channel is in native mode ... */
583 pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
584 mask = (1 << 2) | (1 << 0);
585 if ((tmp8 & mask) != mask)
586 legacy_mode = (1 << 3);
587 }
588
589 /* FIXME... */
590 if ((!legacy_mode) && (n_ports > 2)) {
591 printk(KERN_ERR "ata: BUG: native mode, n_ports > 2\n");
592 n_ports = 2;
593 /* For now */
594 }
595
596 /* FIXME: Really for ATA it isn't safe because the device may be
597 multi-purpose and we want to leave it alone if it was already
598 enabled. Secondly for shared use as Arjan says we want refcounting
599
600 Checking dev->is_enabled is insufficient as this is not set at
601 boot for the primary video which is BIOS enabled
602 */
603
604 rc = pci_enable_device(pdev);
605 if (rc)
606 return rc;
607
608 rc = pci_request_regions(pdev, DRV_NAME);
609 if (rc) {
610 disable_dev_on_err = 0;
611 goto err_out;
612 }
613
614 /* FIXME: Should use platform specific mappers for legacy port ranges */
615 if (legacy_mode) {
616 if (!request_region(0x1f0, 8, "libata")) {
617 struct resource *conflict, res;
618 res.start = 0x1f0;
619 res.end = 0x1f0 + 8 - 1;
620 conflict = ____request_resource(&ioport_resource, &res);
621 if (!strcmp(conflict->name, "libata"))
622 legacy_mode |= (1 << 0);
623 else {
624 disable_dev_on_err = 0;
625 printk(KERN_WARNING "ata: 0x1f0 IDE port busy\n");
626 }
627 } else
628 legacy_mode |= (1 << 0);
629
630 if (!request_region(0x170, 8, "libata")) {
631 struct resource *conflict, res;
632 res.start = 0x170;
633 res.end = 0x170 + 8 - 1;
634 conflict = ____request_resource(&ioport_resource, &res);
635 if (!strcmp(conflict->name, "libata"))
636 legacy_mode |= (1 << 1);
637 else {
638 disable_dev_on_err = 0;
639 printk(KERN_WARNING "ata: 0x170 IDE port busy\n");
640 }
641 } else
642 legacy_mode |= (1 << 1);
643 }
644
645 /* we have legacy mode, but all ports are unavailable */
646 if (legacy_mode == (1 << 3)) {
647 rc = -EBUSY;
648 goto err_out_regions;
649 }
650
651 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
652 if (rc)
653 goto err_out_regions;
654 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
655 if (rc)
656 goto err_out_regions;
657
658 if (legacy_mode) {
659 if (legacy_mode & (1 << 0))
660 probe_ent = ata_pci_init_legacy_port(pdev, port[0], 0);
661 if (legacy_mode & (1 << 1))
662 probe_ent2 = ata_pci_init_legacy_port(pdev, port[1], 1);
663 } else {
664 if (n_ports == 2)
665 probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
666 else
667 probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY);
668 }
669 if (!probe_ent && !probe_ent2) {
670 rc = -ENOMEM;
671 goto err_out_regions;
672 }
673
674 pci_set_master(pdev);
675
676 /* FIXME: check ata_device_add return */
677 if (legacy_mode) {
678 if (legacy_mode & (1 << 0))
679 ata_device_add(probe_ent);
680 if (legacy_mode & (1 << 1))
681 ata_device_add(probe_ent2);
682 } else
683 ata_device_add(probe_ent);
684
685 kfree(probe_ent);
686 kfree(probe_ent2);
687
688 return 0;
689
690err_out_regions:
691 if (legacy_mode & (1 << 0))
692 release_region(0x1f0, 8);
693 if (legacy_mode & (1 << 1))
694 release_region(0x170, 8);
695 pci_release_regions(pdev);
696err_out:
697 if (disable_dev_on_err)
698 pci_disable_device(pdev);
699 return rc;
700}
701
702#endif /* CONFIG_PCI */
703
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index 4f91b0dc572b..17c1df435cc8 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -61,11 +61,8 @@
61 61
62#include "libata.h" 62#include "libata.h"
63 63
64static unsigned int ata_busy_sleep (struct ata_port *ap, 64static unsigned int ata_dev_init_params(struct ata_port *ap,
65 unsigned long tmout_pat, 65 struct ata_device *dev);
66 unsigned long tmout);
67static void ata_dev_reread_id(struct ata_port *ap, struct ata_device *dev);
68static void ata_dev_init_params(struct ata_port *ap, struct ata_device *dev);
69static void ata_set_mode(struct ata_port *ap); 66static void ata_set_mode(struct ata_port *ap);
70static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev); 67static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev);
71static unsigned int ata_get_mode_mask(const struct ata_port *ap, int shift); 68static unsigned int ata_get_mode_mask(const struct ata_port *ap, int shift);
@@ -73,7 +70,6 @@ static int fgb(u32 bitmap);
73static int ata_choose_xfer_mode(const struct ata_port *ap, 70static int ata_choose_xfer_mode(const struct ata_port *ap,
74 u8 *xfer_mode_out, 71 u8 *xfer_mode_out,
75 unsigned int *xfer_shift_out); 72 unsigned int *xfer_shift_out);
76static void __ata_qc_complete(struct ata_queued_cmd *qc);
77 73
78static unsigned int ata_unique_id = 1; 74static unsigned int ata_unique_id = 1;
79static struct workqueue_struct *ata_wq; 75static struct workqueue_struct *ata_wq;
@@ -91,403 +87,6 @@ MODULE_DESCRIPTION("Library module for ATA devices");
91MODULE_LICENSE("GPL"); 87MODULE_LICENSE("GPL");
92MODULE_VERSION(DRV_VERSION); 88MODULE_VERSION(DRV_VERSION);
93 89
94/**
95 * ata_tf_load_pio - send taskfile registers to host controller
96 * @ap: Port to which output is sent
97 * @tf: ATA taskfile register set
98 *
99 * Outputs ATA taskfile to standard ATA host controller.
100 *
101 * LOCKING:
102 * Inherited from caller.
103 */
104
105static void ata_tf_load_pio(struct ata_port *ap, const struct ata_taskfile *tf)
106{
107 struct ata_ioports *ioaddr = &ap->ioaddr;
108 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
109
110 if (tf->ctl != ap->last_ctl) {
111 outb(tf->ctl, ioaddr->ctl_addr);
112 ap->last_ctl = tf->ctl;
113 ata_wait_idle(ap);
114 }
115
116 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
117 outb(tf->hob_feature, ioaddr->feature_addr);
118 outb(tf->hob_nsect, ioaddr->nsect_addr);
119 outb(tf->hob_lbal, ioaddr->lbal_addr);
120 outb(tf->hob_lbam, ioaddr->lbam_addr);
121 outb(tf->hob_lbah, ioaddr->lbah_addr);
122 VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
123 tf->hob_feature,
124 tf->hob_nsect,
125 tf->hob_lbal,
126 tf->hob_lbam,
127 tf->hob_lbah);
128 }
129
130 if (is_addr) {
131 outb(tf->feature, ioaddr->feature_addr);
132 outb(tf->nsect, ioaddr->nsect_addr);
133 outb(tf->lbal, ioaddr->lbal_addr);
134 outb(tf->lbam, ioaddr->lbam_addr);
135 outb(tf->lbah, ioaddr->lbah_addr);
136 VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
137 tf->feature,
138 tf->nsect,
139 tf->lbal,
140 tf->lbam,
141 tf->lbah);
142 }
143
144 if (tf->flags & ATA_TFLAG_DEVICE) {
145 outb(tf->device, ioaddr->device_addr);
146 VPRINTK("device 0x%X\n", tf->device);
147 }
148
149 ata_wait_idle(ap);
150}
151
152/**
153 * ata_tf_load_mmio - send taskfile registers to host controller
154 * @ap: Port to which output is sent
155 * @tf: ATA taskfile register set
156 *
157 * Outputs ATA taskfile to standard ATA host controller using MMIO.
158 *
159 * LOCKING:
160 * Inherited from caller.
161 */
162
163static void ata_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
164{
165 struct ata_ioports *ioaddr = &ap->ioaddr;
166 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
167
168 if (tf->ctl != ap->last_ctl) {
169 writeb(tf->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
170 ap->last_ctl = tf->ctl;
171 ata_wait_idle(ap);
172 }
173
174 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
175 writeb(tf->hob_feature, (void __iomem *) ioaddr->feature_addr);
176 writeb(tf->hob_nsect, (void __iomem *) ioaddr->nsect_addr);
177 writeb(tf->hob_lbal, (void __iomem *) ioaddr->lbal_addr);
178 writeb(tf->hob_lbam, (void __iomem *) ioaddr->lbam_addr);
179 writeb(tf->hob_lbah, (void __iomem *) ioaddr->lbah_addr);
180 VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
181 tf->hob_feature,
182 tf->hob_nsect,
183 tf->hob_lbal,
184 tf->hob_lbam,
185 tf->hob_lbah);
186 }
187
188 if (is_addr) {
189 writeb(tf->feature, (void __iomem *) ioaddr->feature_addr);
190 writeb(tf->nsect, (void __iomem *) ioaddr->nsect_addr);
191 writeb(tf->lbal, (void __iomem *) ioaddr->lbal_addr);
192 writeb(tf->lbam, (void __iomem *) ioaddr->lbam_addr);
193 writeb(tf->lbah, (void __iomem *) ioaddr->lbah_addr);
194 VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
195 tf->feature,
196 tf->nsect,
197 tf->lbal,
198 tf->lbam,
199 tf->lbah);
200 }
201
202 if (tf->flags & ATA_TFLAG_DEVICE) {
203 writeb(tf->device, (void __iomem *) ioaddr->device_addr);
204 VPRINTK("device 0x%X\n", tf->device);
205 }
206
207 ata_wait_idle(ap);
208}
209
210
211/**
212 * ata_tf_load - send taskfile registers to host controller
213 * @ap: Port to which output is sent
214 * @tf: ATA taskfile register set
215 *
216 * Outputs ATA taskfile to standard ATA host controller using MMIO
217 * or PIO as indicated by the ATA_FLAG_MMIO flag.
218 * Writes the control, feature, nsect, lbal, lbam, and lbah registers.
219 * Optionally (ATA_TFLAG_LBA48) writes hob_feature, hob_nsect,
220 * hob_lbal, hob_lbam, and hob_lbah.
221 *
222 * This function waits for idle (!BUSY and !DRQ) after writing
223 * registers. If the control register has a new value, this
224 * function also waits for idle after writing control and before
225 * writing the remaining registers.
226 *
227 * May be used as the tf_load() entry in ata_port_operations.
228 *
229 * LOCKING:
230 * Inherited from caller.
231 */
232void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
233{
234 if (ap->flags & ATA_FLAG_MMIO)
235 ata_tf_load_mmio(ap, tf);
236 else
237 ata_tf_load_pio(ap, tf);
238}
239
240/**
241 * ata_exec_command_pio - issue ATA command to host controller
242 * @ap: port to which command is being issued
243 * @tf: ATA taskfile register set
244 *
245 * Issues PIO write to ATA command register, with proper
246 * synchronization with interrupt handler / other threads.
247 *
248 * LOCKING:
249 * spin_lock_irqsave(host_set lock)
250 */
251
252static void ata_exec_command_pio(struct ata_port *ap, const struct ata_taskfile *tf)
253{
254 DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
255
256 outb(tf->command, ap->ioaddr.command_addr);
257 ata_pause(ap);
258}
259
260
261/**
262 * ata_exec_command_mmio - issue ATA command to host controller
263 * @ap: port to which command is being issued
264 * @tf: ATA taskfile register set
265 *
266 * Issues MMIO write to ATA command register, with proper
267 * synchronization with interrupt handler / other threads.
268 *
269 * LOCKING:
270 * spin_lock_irqsave(host_set lock)
271 */
272
273static void ata_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
274{
275 DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
276
277 writeb(tf->command, (void __iomem *) ap->ioaddr.command_addr);
278 ata_pause(ap);
279}
280
281
282/**
283 * ata_exec_command - issue ATA command to host controller
284 * @ap: port to which command is being issued
285 * @tf: ATA taskfile register set
286 *
287 * Issues PIO/MMIO write to ATA command register, with proper
288 * synchronization with interrupt handler / other threads.
289 *
290 * LOCKING:
291 * spin_lock_irqsave(host_set lock)
292 */
293void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
294{
295 if (ap->flags & ATA_FLAG_MMIO)
296 ata_exec_command_mmio(ap, tf);
297 else
298 ata_exec_command_pio(ap, tf);
299}
300
301/**
302 * ata_tf_to_host - issue ATA taskfile to host controller
303 * @ap: port to which command is being issued
304 * @tf: ATA taskfile register set
305 *
306 * Issues ATA taskfile register set to ATA host controller,
307 * with proper synchronization with interrupt handler and
308 * other threads.
309 *
310 * LOCKING:
311 * spin_lock_irqsave(host_set lock)
312 */
313
314static inline void ata_tf_to_host(struct ata_port *ap,
315 const struct ata_taskfile *tf)
316{
317 ap->ops->tf_load(ap, tf);
318 ap->ops->exec_command(ap, tf);
319}
320
321/**
322 * ata_tf_read_pio - input device's ATA taskfile shadow registers
323 * @ap: Port from which input is read
324 * @tf: ATA taskfile register set for storing input
325 *
326 * Reads ATA taskfile registers for currently-selected device
327 * into @tf.
328 *
329 * LOCKING:
330 * Inherited from caller.
331 */
332
333static void ata_tf_read_pio(struct ata_port *ap, struct ata_taskfile *tf)
334{
335 struct ata_ioports *ioaddr = &ap->ioaddr;
336
337 tf->command = ata_check_status(ap);
338 tf->feature = inb(ioaddr->error_addr);
339 tf->nsect = inb(ioaddr->nsect_addr);
340 tf->lbal = inb(ioaddr->lbal_addr);
341 tf->lbam = inb(ioaddr->lbam_addr);
342 tf->lbah = inb(ioaddr->lbah_addr);
343 tf->device = inb(ioaddr->device_addr);
344
345 if (tf->flags & ATA_TFLAG_LBA48) {
346 outb(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
347 tf->hob_feature = inb(ioaddr->error_addr);
348 tf->hob_nsect = inb(ioaddr->nsect_addr);
349 tf->hob_lbal = inb(ioaddr->lbal_addr);
350 tf->hob_lbam = inb(ioaddr->lbam_addr);
351 tf->hob_lbah = inb(ioaddr->lbah_addr);
352 }
353}
354
355/**
356 * ata_tf_read_mmio - input device's ATA taskfile shadow registers
357 * @ap: Port from which input is read
358 * @tf: ATA taskfile register set for storing input
359 *
360 * Reads ATA taskfile registers for currently-selected device
361 * into @tf via MMIO.
362 *
363 * LOCKING:
364 * Inherited from caller.
365 */
366
367static void ata_tf_read_mmio(struct ata_port *ap, struct ata_taskfile *tf)
368{
369 struct ata_ioports *ioaddr = &ap->ioaddr;
370
371 tf->command = ata_check_status(ap);
372 tf->feature = readb((void __iomem *)ioaddr->error_addr);
373 tf->nsect = readb((void __iomem *)ioaddr->nsect_addr);
374 tf->lbal = readb((void __iomem *)ioaddr->lbal_addr);
375 tf->lbam = readb((void __iomem *)ioaddr->lbam_addr);
376 tf->lbah = readb((void __iomem *)ioaddr->lbah_addr);
377 tf->device = readb((void __iomem *)ioaddr->device_addr);
378
379 if (tf->flags & ATA_TFLAG_LBA48) {
380 writeb(tf->ctl | ATA_HOB, (void __iomem *) ap->ioaddr.ctl_addr);
381 tf->hob_feature = readb((void __iomem *)ioaddr->error_addr);
382 tf->hob_nsect = readb((void __iomem *)ioaddr->nsect_addr);
383 tf->hob_lbal = readb((void __iomem *)ioaddr->lbal_addr);
384 tf->hob_lbam = readb((void __iomem *)ioaddr->lbam_addr);
385 tf->hob_lbah = readb((void __iomem *)ioaddr->lbah_addr);
386 }
387}
388
389
390/**
391 * ata_tf_read - input device's ATA taskfile shadow registers
392 * @ap: Port from which input is read
393 * @tf: ATA taskfile register set for storing input
394 *
395 * Reads ATA taskfile registers for currently-selected device
396 * into @tf.
397 *
398 * Reads nsect, lbal, lbam, lbah, and device. If ATA_TFLAG_LBA48
399 * is set, also reads the hob registers.
400 *
401 * May be used as the tf_read() entry in ata_port_operations.
402 *
403 * LOCKING:
404 * Inherited from caller.
405 */
406void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
407{
408 if (ap->flags & ATA_FLAG_MMIO)
409 ata_tf_read_mmio(ap, tf);
410 else
411 ata_tf_read_pio(ap, tf);
412}
413
414/**
415 * ata_check_status_pio - Read device status reg & clear interrupt
416 * @ap: port where the device is
417 *
418 * Reads ATA taskfile status register for currently-selected device
419 * and return its value. This also clears pending interrupts
420 * from this device
421 *
422 * LOCKING:
423 * Inherited from caller.
424 */
425static u8 ata_check_status_pio(struct ata_port *ap)
426{
427 return inb(ap->ioaddr.status_addr);
428}
429
430/**
431 * ata_check_status_mmio - Read device status reg & clear interrupt
432 * @ap: port where the device is
433 *
434 * Reads ATA taskfile status register for currently-selected device
435 * via MMIO and return its value. This also clears pending interrupts
436 * from this device
437 *
438 * LOCKING:
439 * Inherited from caller.
440 */
441static u8 ata_check_status_mmio(struct ata_port *ap)
442{
443 return readb((void __iomem *) ap->ioaddr.status_addr);
444}
445
446
447/**
448 * ata_check_status - Read device status reg & clear interrupt
449 * @ap: port where the device is
450 *
451 * Reads ATA taskfile status register for currently-selected device
452 * and return its value. This also clears pending interrupts
453 * from this device
454 *
455 * May be used as the check_status() entry in ata_port_operations.
456 *
457 * LOCKING:
458 * Inherited from caller.
459 */
460u8 ata_check_status(struct ata_port *ap)
461{
462 if (ap->flags & ATA_FLAG_MMIO)
463 return ata_check_status_mmio(ap);
464 return ata_check_status_pio(ap);
465}
466
467
468/**
469 * ata_altstatus - Read device alternate status reg
470 * @ap: port where the device is
471 *
472 * Reads ATA taskfile alternate status register for
473 * currently-selected device and return its value.
474 *
475 * Note: may NOT be used as the check_altstatus() entry in
476 * ata_port_operations.
477 *
478 * LOCKING:
479 * Inherited from caller.
480 */
481u8 ata_altstatus(struct ata_port *ap)
482{
483 if (ap->ops->check_altstatus)
484 return ap->ops->check_altstatus(ap);
485
486 if (ap->flags & ATA_FLAG_MMIO)
487 return readb((void __iomem *)ap->ioaddr.altstatus_addr);
488 return inb(ap->ioaddr.altstatus_addr);
489}
490
491 90
492/** 91/**
493 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure 92 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
@@ -838,6 +437,7 @@ unsigned int ata_dev_classify(const struct ata_taskfile *tf)
838 * ata_dev_try_classify - Parse returned ATA device signature 437 * ata_dev_try_classify - Parse returned ATA device signature
839 * @ap: ATA channel to examine 438 * @ap: ATA channel to examine
840 * @device: Device to examine (starting at zero) 439 * @device: Device to examine (starting at zero)
440 * @r_err: Value of error register on completion
841 * 441 *
842 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs, 442 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
843 * an ATA/ATAPI-defined set of values is placed in the ATA 443 * an ATA/ATAPI-defined set of values is placed in the ATA
@@ -850,11 +450,14 @@ unsigned int ata_dev_classify(const struct ata_taskfile *tf)
850 * 450 *
851 * LOCKING: 451 * LOCKING:
852 * caller. 452 * caller.
453 *
454 * RETURNS:
455 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
853 */ 456 */
854 457
855static u8 ata_dev_try_classify(struct ata_port *ap, unsigned int device) 458static unsigned int
459ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
856{ 460{
857 struct ata_device *dev = &ap->device[device];
858 struct ata_taskfile tf; 461 struct ata_taskfile tf;
859 unsigned int class; 462 unsigned int class;
860 u8 err; 463 u8 err;
@@ -865,8 +468,8 @@ static u8 ata_dev_try_classify(struct ata_port *ap, unsigned int device)
865 468
866 ap->ops->tf_read(ap, &tf); 469 ap->ops->tf_read(ap, &tf);
867 err = tf.feature; 470 err = tf.feature;
868 471 if (r_err)
869 dev->class = ATA_DEV_NONE; 472 *r_err = err;
870 473
871 /* see if device passed diags */ 474 /* see if device passed diags */
872 if (err == 1) 475 if (err == 1)
@@ -874,22 +477,20 @@ static u8 ata_dev_try_classify(struct ata_port *ap, unsigned int device)
874 else if ((device == 0) && (err == 0x81)) 477 else if ((device == 0) && (err == 0x81))
875 /* do nothing */ ; 478 /* do nothing */ ;
876 else 479 else
877 return err; 480 return ATA_DEV_NONE;
878 481
879 /* determine if device if ATA or ATAPI */ 482 /* determine if device is ATA or ATAPI */
880 class = ata_dev_classify(&tf); 483 class = ata_dev_classify(&tf);
484
881 if (class == ATA_DEV_UNKNOWN) 485 if (class == ATA_DEV_UNKNOWN)
882 return err; 486 return ATA_DEV_NONE;
883 if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0)) 487 if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
884 return err; 488 return ATA_DEV_NONE;
885 489 return class;
886 dev->class = class;
887
888 return err;
889} 490}
890 491
891/** 492/**
892 * ata_dev_id_string - Convert IDENTIFY DEVICE page into string 493 * ata_id_string - Convert IDENTIFY DEVICE page into string
893 * @id: IDENTIFY DEVICE results we will examine 494 * @id: IDENTIFY DEVICE results we will examine
894 * @s: string into which data is output 495 * @s: string into which data is output
895 * @ofs: offset into identify device page 496 * @ofs: offset into identify device page
@@ -903,8 +504,8 @@ static u8 ata_dev_try_classify(struct ata_port *ap, unsigned int device)
903 * caller. 504 * caller.
904 */ 505 */
905 506
906void ata_dev_id_string(const u16 *id, unsigned char *s, 507void ata_id_string(const u16 *id, unsigned char *s,
907 unsigned int ofs, unsigned int len) 508 unsigned int ofs, unsigned int len)
908{ 509{
909 unsigned int c; 510 unsigned int c;
910 511
@@ -922,6 +523,49 @@ void ata_dev_id_string(const u16 *id, unsigned char *s,
922 } 523 }
923} 524}
924 525
526/**
527 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
528 * @id: IDENTIFY DEVICE results we will examine
529 * @s: string into which data is output
530 * @ofs: offset into identify device page
531 * @len: length of string to return. must be an odd number.
532 *
533 * This function is identical to ata_id_string except that it
534 * trims trailing spaces and terminates the resulting string with
535 * null. @len must be actual maximum length (even number) + 1.
536 *
537 * LOCKING:
538 * caller.
539 */
540void ata_id_c_string(const u16 *id, unsigned char *s,
541 unsigned int ofs, unsigned int len)
542{
543 unsigned char *p;
544
545 WARN_ON(!(len & 1));
546
547 ata_id_string(id, s, ofs, len - 1);
548
549 p = s + strnlen(s, len - 1);
550 while (p > s && p[-1] == ' ')
551 p--;
552 *p = '\0';
553}
554
555static u64 ata_id_n_sectors(const u16 *id)
556{
557 if (ata_id_has_lba(id)) {
558 if (ata_id_has_lba48(id))
559 return ata_id_u64(id, 100);
560 else
561 return ata_id_u32(id, 60);
562 } else {
563 if (ata_id_current_chs_valid(id))
564 return ata_id_u32(id, 57);
565 else
566 return id[1] * id[3] * id[6];
567 }
568}
925 569
926/** 570/**
927 * ata_noop_dev_select - Select device 0/1 on ATA bus 571 * ata_noop_dev_select - Select device 0/1 on ATA bus
@@ -1011,41 +655,41 @@ void ata_dev_select(struct ata_port *ap, unsigned int device,
1011 655
1012/** 656/**
1013 * ata_dump_id - IDENTIFY DEVICE info debugging output 657 * ata_dump_id - IDENTIFY DEVICE info debugging output
1014 * @dev: Device whose IDENTIFY DEVICE page we will dump 658 * @id: IDENTIFY DEVICE page to dump
1015 * 659 *
1016 * Dump selected 16-bit words from a detected device's 660 * Dump selected 16-bit words from the given IDENTIFY DEVICE
1017 * IDENTIFY PAGE page. 661 * page.
1018 * 662 *
1019 * LOCKING: 663 * LOCKING:
1020 * caller. 664 * caller.
1021 */ 665 */
1022 666
1023static inline void ata_dump_id(const struct ata_device *dev) 667static inline void ata_dump_id(const u16 *id)
1024{ 668{
1025 DPRINTK("49==0x%04x " 669 DPRINTK("49==0x%04x "
1026 "53==0x%04x " 670 "53==0x%04x "
1027 "63==0x%04x " 671 "63==0x%04x "
1028 "64==0x%04x " 672 "64==0x%04x "
1029 "75==0x%04x \n", 673 "75==0x%04x \n",
1030 dev->id[49], 674 id[49],
1031 dev->id[53], 675 id[53],
1032 dev->id[63], 676 id[63],
1033 dev->id[64], 677 id[64],
1034 dev->id[75]); 678 id[75]);
1035 DPRINTK("80==0x%04x " 679 DPRINTK("80==0x%04x "
1036 "81==0x%04x " 680 "81==0x%04x "
1037 "82==0x%04x " 681 "82==0x%04x "
1038 "83==0x%04x " 682 "83==0x%04x "
1039 "84==0x%04x \n", 683 "84==0x%04x \n",
1040 dev->id[80], 684 id[80],
1041 dev->id[81], 685 id[81],
1042 dev->id[82], 686 id[82],
1043 dev->id[83], 687 id[83],
1044 dev->id[84]); 688 id[84]);
1045 DPRINTK("88==0x%04x " 689 DPRINTK("88==0x%04x "
1046 "93==0x%04x\n", 690 "93==0x%04x\n",
1047 dev->id[88], 691 id[88],
1048 dev->id[93]); 692 id[93]);
1049} 693}
1050 694
1051/* 695/*
@@ -1077,24 +721,77 @@ static unsigned int ata_pio_modes(const struct ata_device *adev)
1077 timing API will get this right anyway */ 721 timing API will get this right anyway */
1078} 722}
1079 723
1080struct ata_exec_internal_arg { 724static inline void
1081 unsigned int err_mask; 725ata_queue_packet_task(struct ata_port *ap)
1082 struct ata_taskfile *tf; 726{
1083 struct completion *waiting; 727 if (!(ap->flags & ATA_FLAG_FLUSH_PIO_TASK))
1084}; 728 queue_work(ata_wq, &ap->packet_task);
729}
1085 730
1086int ata_qc_complete_internal(struct ata_queued_cmd *qc) 731static inline void
732ata_queue_pio_task(struct ata_port *ap)
1087{ 733{
1088 struct ata_exec_internal_arg *arg = qc->private_data; 734 if (!(ap->flags & ATA_FLAG_FLUSH_PIO_TASK))
1089 struct completion *waiting = arg->waiting; 735 queue_work(ata_wq, &ap->pio_task);
736}
1090 737
1091 if (!(qc->err_mask & ~AC_ERR_DEV)) 738static inline void
1092 qc->ap->ops->tf_read(qc->ap, arg->tf); 739ata_queue_delayed_pio_task(struct ata_port *ap, unsigned long delay)
1093 arg->err_mask = qc->err_mask; 740{
1094 arg->waiting = NULL; 741 if (!(ap->flags & ATA_FLAG_FLUSH_PIO_TASK))
1095 complete(waiting); 742 queue_delayed_work(ata_wq, &ap->pio_task, delay);
743}
1096 744
1097 return 0; 745/**
 *	ata_flush_pio_tasks - Flush pio_task and packet_task
 *	@ap: the target ata_port
 *
 *	After this function completes, pio_task and packet_task are
 *	guaranteed not to be running or scheduled.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */

static void ata_flush_pio_tasks(struct ata_port *ap)
{
	int tmp = 0;
	unsigned long flags;

	DPRINTK("ENTER\n");

	/* Raise the FLUSH flag under the host_set lock so the
	 * ata_queue_*_task() helpers stop scheduling new work while
	 * the flush is in progress. */
	spin_lock_irqsave(&ap->host_set->lock, flags);
	ap->flags |= ATA_FLAG_FLUSH_PIO_TASK;
	spin_unlock_irqrestore(&ap->host_set->lock, flags);

	DPRINTK("flush #1\n");
	flush_workqueue(ata_wq);

	/*
	 * At this point, if a task is running, it's guaranteed to see
	 * the FLUSH flag; thus, it will never queue pio tasks again.
	 * Cancel and flush.
	 */
	tmp |= cancel_delayed_work(&ap->pio_task);
	tmp |= cancel_delayed_work(&ap->packet_task);
	if (!tmp) {
		/* Neither work item was pending, so one of them may
		 * have been executing when we cancelled; flush once
		 * more to wait for it to finish. */
		DPRINTK("flush #2\n");
		flush_workqueue(ata_wq);
	}

	spin_lock_irqsave(&ap->host_set->lock, flags);
	ap->flags &= ~ATA_FLAG_FLUSH_PIO_TASK;
	spin_unlock_irqrestore(&ap->host_set->lock, flags);

	DPRINTK("EXIT\n");
}
788
789void ata_qc_complete_internal(struct ata_queued_cmd *qc)
790{
791 struct completion *waiting = qc->private_data;
792
793 qc->ap->ops->tf_read(qc->ap, &qc->tf);
794 complete(waiting);
1098} 795}
1099 796
1100/** 797/**
@@ -1125,7 +822,7 @@ ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
1125 struct ata_queued_cmd *qc; 822 struct ata_queued_cmd *qc;
1126 DECLARE_COMPLETION(wait); 823 DECLARE_COMPLETION(wait);
1127 unsigned long flags; 824 unsigned long flags;
1128 struct ata_exec_internal_arg arg; 825 unsigned int err_mask;
1129 826
1130 spin_lock_irqsave(&ap->host_set->lock, flags); 827 spin_lock_irqsave(&ap->host_set->lock, flags);
1131 828
@@ -1139,13 +836,12 @@ ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
1139 qc->nsect = buflen / ATA_SECT_SIZE; 836 qc->nsect = buflen / ATA_SECT_SIZE;
1140 } 837 }
1141 838
1142 arg.waiting = &wait; 839 qc->private_data = &wait;
1143 arg.tf = tf;
1144 qc->private_data = &arg;
1145 qc->complete_fn = ata_qc_complete_internal; 840 qc->complete_fn = ata_qc_complete_internal;
1146 841
1147 if (ata_qc_issue(qc)) 842 qc->err_mask = ata_qc_issue(qc);
1148 goto issue_fail; 843 if (qc->err_mask)
844 ata_qc_complete(qc);
1149 845
1150 spin_unlock_irqrestore(&ap->host_set->lock, flags); 846 spin_unlock_irqrestore(&ap->host_set->lock, flags);
1151 847
@@ -1158,8 +854,8 @@ ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
1158 * before the caller cleans up, it will result in a 854 * before the caller cleans up, it will result in a
1159 * spurious interrupt. We can live with that. 855 * spurious interrupt. We can live with that.
1160 */ 856 */
1161 if (arg.waiting) { 857 if (qc->flags & ATA_QCFLAG_ACTIVE) {
1162 qc->err_mask = AC_ERR_OTHER; 858 qc->err_mask = AC_ERR_TIMEOUT;
1163 ata_qc_complete(qc); 859 ata_qc_complete(qc);
1164 printk(KERN_WARNING "ata%u: qc timeout (cmd 0x%x)\n", 860 printk(KERN_WARNING "ata%u: qc timeout (cmd 0x%x)\n",
1165 ap->id, command); 861 ap->id, command);
@@ -1168,12 +864,12 @@ ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
1168 spin_unlock_irqrestore(&ap->host_set->lock, flags); 864 spin_unlock_irqrestore(&ap->host_set->lock, flags);
1169 } 865 }
1170 866
1171 return arg.err_mask; 867 *tf = qc->tf;
868 err_mask = qc->err_mask;
1172 869
1173 issue_fail:
1174 ata_qc_free(qc); 870 ata_qc_free(qc);
1175 spin_unlock_irqrestore(&ap->host_set->lock, flags); 871
1176 return AC_ERR_OTHER; 872 return err_mask;
1177} 873}
1178 874
1179/** 875/**
@@ -1210,73 +906,70 @@ unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1210} 906}
1211 907
1212/** 908/**
1213 * ata_dev_identify - obtain IDENTIFY x DEVICE page 909 * ata_dev_read_id - Read ID data from the specified device
1214 * @ap: port on which device we wish to probe resides 910 * @ap: port on which target device resides
1215 * @device: device bus address, starting at zero 911 * @dev: target device
1216 * 912 * @p_class: pointer to class of the target device (may be changed)
1217 * Following bus reset, we issue the IDENTIFY [PACKET] DEVICE 913 * @post_reset: is this read ID post-reset?
1218 * command, and read back the 512-byte device information page. 914 * @id: buffer to fill IDENTIFY page into
1219 * The device information page is fed to us via the standard 915 *
1220 * PIO-IN protocol, but we hand-code it here. (TODO: investigate 916 * Read ID data from the specified device. ATA_CMD_ID_ATA is
1221 * using standard PIO-IN paths) 917 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1222 * 918 * devices. This function also takes care of EDD signature
1223 * After reading the device information page, we use several 919 * misreporting (to be removed once EDD support is gone) and
1224 * bits of information from it to initialize data structures 920 * issues ATA_CMD_INIT_DEV_PARAMS for pre-ATA4 drives.
1225 * that will be used during the lifetime of the ata_device.
1226 * Other data from the info page is used to disqualify certain
1227 * older ATA devices we do not wish to support.
1228 * 921 *
1229 * LOCKING: 922 * LOCKING:
1230 * Inherited from caller. Some functions called by this function 923 * Kernel thread context (may sleep)
1231 * obtain the host_set lock. 924 *
925 * RETURNS:
926 * 0 on success, -errno otherwise.
1232 */ 927 */
1233 928static int ata_dev_read_id(struct ata_port *ap, struct ata_device *dev,
1234static void ata_dev_identify(struct ata_port *ap, unsigned int device) 929 unsigned int *p_class, int post_reset, u16 *id)
1235{ 930{
1236 struct ata_device *dev = &ap->device[device]; 931 unsigned int class = *p_class;
1237 unsigned int major_version;
1238 u16 tmp;
1239 unsigned long xfer_modes;
1240 unsigned int using_edd; 932 unsigned int using_edd;
1241 struct ata_taskfile tf; 933 struct ata_taskfile tf;
1242 unsigned int err_mask; 934 unsigned int err_mask = 0;
935 const char *reason;
1243 int rc; 936 int rc;
1244 937
1245 if (!ata_dev_present(dev)) { 938 DPRINTK("ENTER, host %u, dev %u\n", ap->id, dev->devno);
1246 DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n",
1247 ap->id, device);
1248 return;
1249 }
1250 939
1251 if (ap->flags & (ATA_FLAG_SRST | ATA_FLAG_SATA_RESET)) 940 if (ap->ops->probe_reset ||
941 ap->flags & (ATA_FLAG_SRST | ATA_FLAG_SATA_RESET))
1252 using_edd = 0; 942 using_edd = 0;
1253 else 943 else
1254 using_edd = 1; 944 using_edd = 1;
1255 945
1256 DPRINTK("ENTER, host %u, dev %u\n", ap->id, device); 946 ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
1257
1258 assert (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ATAPI ||
1259 dev->class == ATA_DEV_NONE);
1260 947
1261 ata_dev_select(ap, device, 1, 1); /* select device 0/1 */ 948 retry:
1262 949 ata_tf_init(ap, &tf, dev->devno);
1263retry:
1264 ata_tf_init(ap, &tf, device);
1265 950
1266 if (dev->class == ATA_DEV_ATA) { 951 switch (class) {
952 case ATA_DEV_ATA:
1267 tf.command = ATA_CMD_ID_ATA; 953 tf.command = ATA_CMD_ID_ATA;
1268 DPRINTK("do ATA identify\n"); 954 break;
1269 } else { 955 case ATA_DEV_ATAPI:
1270 tf.command = ATA_CMD_ID_ATAPI; 956 tf.command = ATA_CMD_ID_ATAPI;
1271 DPRINTK("do ATAPI identify\n"); 957 break;
958 default:
959 rc = -ENODEV;
960 reason = "unsupported class";
961 goto err_out;
1272 } 962 }
1273 963
1274 tf.protocol = ATA_PROT_PIO; 964 tf.protocol = ATA_PROT_PIO;
1275 965
1276 err_mask = ata_exec_internal(ap, dev, &tf, DMA_FROM_DEVICE, 966 err_mask = ata_exec_internal(ap, dev, &tf, DMA_FROM_DEVICE,
1277 dev->id, sizeof(dev->id)); 967 id, sizeof(id[0]) * ATA_ID_WORDS);
1278 968
1279 if (err_mask) { 969 if (err_mask) {
970 rc = -EIO;
971 reason = "I/O error";
972
1280 if (err_mask & ~AC_ERR_DEV) 973 if (err_mask & ~AC_ERR_DEV)
1281 goto err_out; 974 goto err_out;
1282 975
@@ -1291,25 +984,105 @@ retry:
1291 * ATA software reset (SRST, the default) does not appear 984 * ATA software reset (SRST, the default) does not appear
1292 * to have this problem. 985 * to have this problem.
1293 */ 986 */
1294 if ((using_edd) && (dev->class == ATA_DEV_ATA)) { 987 if ((using_edd) && (class == ATA_DEV_ATA)) {
1295 u8 err = tf.feature; 988 u8 err = tf.feature;
1296 if (err & ATA_ABORTED) { 989 if (err & ATA_ABORTED) {
1297 dev->class = ATA_DEV_ATAPI; 990 class = ATA_DEV_ATAPI;
1298 goto retry; 991 goto retry;
1299 } 992 }
1300 } 993 }
1301 goto err_out; 994 goto err_out;
1302 } 995 }
1303 996
1304 swap_buf_le16(dev->id, ATA_ID_WORDS); 997 swap_buf_le16(id, ATA_ID_WORDS);
1305 998
1306 /* print device capabilities */ 999 /* print device capabilities */
1307 printk(KERN_DEBUG "ata%u: dev %u cfg " 1000 printk(KERN_DEBUG "ata%u: dev %u cfg "
1308 "49:%04x 82:%04x 83:%04x 84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n", 1001 "49:%04x 82:%04x 83:%04x 84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n",
1309 ap->id, device, dev->id[49], 1002 ap->id, dev->devno,
1310 dev->id[82], dev->id[83], dev->id[84], 1003 id[49], id[82], id[83], id[84], id[85], id[86], id[87], id[88]);
1311 dev->id[85], dev->id[86], dev->id[87], 1004
1312 dev->id[88]); 1005 /* sanity check */
1006 if ((class == ATA_DEV_ATA) != ata_id_is_ata(id)) {
1007 rc = -EINVAL;
1008 reason = "device reports illegal type";
1009 goto err_out;
1010 }
1011
1012 if (post_reset && class == ATA_DEV_ATA) {
1013 /*
1014 * The exact sequence expected by certain pre-ATA4 drives is:
1015 * SRST RESET
1016 * IDENTIFY
1017 * INITIALIZE DEVICE PARAMETERS
1018 * anything else..
1019 * Some drives were very specific about that exact sequence.
1020 */
1021 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
1022 err_mask = ata_dev_init_params(ap, dev);
1023 if (err_mask) {
1024 rc = -EIO;
1025 reason = "INIT_DEV_PARAMS failed";
1026 goto err_out;
1027 }
1028
1029 /* current CHS translation info (id[53-58]) might be
1030 * changed. reread the identify device info.
1031 */
1032 post_reset = 0;
1033 goto retry;
1034 }
1035 }
1036
1037 *p_class = class;
1038 return 0;
1039
1040 err_out:
1041 printk(KERN_WARNING "ata%u: dev %u failed to IDENTIFY (%s)\n",
1042 ap->id, dev->devno, reason);
1043 kfree(id);
1044 return rc;
1045}
1046
1047/**
1048 * ata_dev_identify - obtain IDENTIFY x DEVICE page
1049 * @ap: port on which device we wish to probe resides
1050 * @device: device bus address, starting at zero
1051 *
1052 * Following bus reset, we issue the IDENTIFY [PACKET] DEVICE
1053 * command, and read back the 512-byte device information page.
1054 * The device information page is fed to us via the standard
1055 * PIO-IN protocol, but we hand-code it here. (TODO: investigate
1056 * using standard PIO-IN paths)
1057 *
1058 * After reading the device information page, we use several
1059 * bits of information from it to initialize data structures
1060 * that will be used during the lifetime of the ata_device.
1061 * Other data from the info page is used to disqualify certain
1062 * older ATA devices we do not wish to support.
1063 *
1064 * LOCKING:
1065 * Inherited from caller. Some functions called by this function
1066 * obtain the host_set lock.
1067 */
1068
1069static void ata_dev_identify(struct ata_port *ap, unsigned int device)
1070{
1071 struct ata_device *dev = &ap->device[device];
1072 unsigned long xfer_modes;
1073 int i, rc;
1074
1075 if (!ata_dev_present(dev)) {
1076 DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n",
1077 ap->id, device);
1078 return;
1079 }
1080
1081 DPRINTK("ENTER, host %u, dev %u\n", ap->id, device);
1082
1083 rc = ata_dev_read_id(ap, dev, &dev->class, 1, dev->id);
1084 if (rc)
1085 goto err_out;
1313 1086
1314 /* 1087 /*
1315 * common ATA, ATAPI feature tests 1088 * common ATA, ATAPI feature tests
@@ -1328,50 +1101,22 @@ retry:
1328 if (!xfer_modes) 1101 if (!xfer_modes)
1329 xfer_modes = ata_pio_modes(dev); 1102 xfer_modes = ata_pio_modes(dev);
1330 1103
1331 ata_dump_id(dev); 1104 ata_dump_id(dev->id);
1332 1105
1333 /* ATA-specific feature tests */ 1106 /* ATA-specific feature tests */
1334 if (dev->class == ATA_DEV_ATA) { 1107 if (dev->class == ATA_DEV_ATA) {
1335 if (!ata_id_is_ata(dev->id)) /* sanity check */ 1108 dev->n_sectors = ata_id_n_sectors(dev->id);
1336 goto err_out_nosup;
1337
1338 /* get major version */
1339 tmp = dev->id[ATA_ID_MAJOR_VER];
1340 for (major_version = 14; major_version >= 1; major_version--)
1341 if (tmp & (1 << major_version))
1342 break;
1343
1344 /*
1345 * The exact sequence expected by certain pre-ATA4 drives is:
1346 * SRST RESET
1347 * IDENTIFY
1348 * INITIALIZE DEVICE PARAMETERS
1349 * anything else..
1350 * Some drives were very specific about that exact sequence.
1351 */
1352 if (major_version < 4 || (!ata_id_has_lba(dev->id))) {
1353 ata_dev_init_params(ap, dev);
1354
1355 /* current CHS translation info (id[53-58]) might be
1356 * changed. reread the identify device info.
1357 */
1358 ata_dev_reread_id(ap, dev);
1359 }
1360 1109
1361 if (ata_id_has_lba(dev->id)) { 1110 if (ata_id_has_lba(dev->id)) {
1362 dev->flags |= ATA_DFLAG_LBA; 1111 dev->flags |= ATA_DFLAG_LBA;
1363 1112
1364 if (ata_id_has_lba48(dev->id)) { 1113 if (ata_id_has_lba48(dev->id))
1365 dev->flags |= ATA_DFLAG_LBA48; 1114 dev->flags |= ATA_DFLAG_LBA48;
1366 dev->n_sectors = ata_id_u64(dev->id, 100);
1367 } else {
1368 dev->n_sectors = ata_id_u32(dev->id, 60);
1369 }
1370 1115
1371 /* print device info to dmesg */ 1116 /* print device info to dmesg */
1372 printk(KERN_INFO "ata%u: dev %u ATA-%d, max %s, %Lu sectors:%s\n", 1117 printk(KERN_INFO "ata%u: dev %u ATA-%d, max %s, %Lu sectors:%s\n",
1373 ap->id, device, 1118 ap->id, device,
1374 major_version, 1119 ata_id_major_version(dev->id),
1375 ata_mode_string(xfer_modes), 1120 ata_mode_string(xfer_modes),
1376 (unsigned long long)dev->n_sectors, 1121 (unsigned long long)dev->n_sectors,
1377 dev->flags & ATA_DFLAG_LBA48 ? " LBA48" : " LBA"); 1122 dev->flags & ATA_DFLAG_LBA48 ? " LBA48" : " LBA");
@@ -1382,42 +1127,35 @@ retry:
1382 dev->cylinders = dev->id[1]; 1127 dev->cylinders = dev->id[1];
1383 dev->heads = dev->id[3]; 1128 dev->heads = dev->id[3];
1384 dev->sectors = dev->id[6]; 1129 dev->sectors = dev->id[6];
1385 dev->n_sectors = dev->cylinders * dev->heads * dev->sectors;
1386 1130
1387 if (ata_id_current_chs_valid(dev->id)) { 1131 if (ata_id_current_chs_valid(dev->id)) {
1388 /* Current CHS translation is valid. */ 1132 /* Current CHS translation is valid. */
1389 dev->cylinders = dev->id[54]; 1133 dev->cylinders = dev->id[54];
1390 dev->heads = dev->id[55]; 1134 dev->heads = dev->id[55];
1391 dev->sectors = dev->id[56]; 1135 dev->sectors = dev->id[56];
1392
1393 dev->n_sectors = ata_id_u32(dev->id, 57);
1394 } 1136 }
1395 1137
1396 /* print device info to dmesg */ 1138 /* print device info to dmesg */
1397 printk(KERN_INFO "ata%u: dev %u ATA-%d, max %s, %Lu sectors: CHS %d/%d/%d\n", 1139 printk(KERN_INFO "ata%u: dev %u ATA-%d, max %s, %Lu sectors: CHS %d/%d/%d\n",
1398 ap->id, device, 1140 ap->id, device,
1399 major_version, 1141 ata_id_major_version(dev->id),
1400 ata_mode_string(xfer_modes), 1142 ata_mode_string(xfer_modes),
1401 (unsigned long long)dev->n_sectors, 1143 (unsigned long long)dev->n_sectors,
1402 (int)dev->cylinders, (int)dev->heads, (int)dev->sectors); 1144 (int)dev->cylinders, (int)dev->heads, (int)dev->sectors);
1403 1145
1404 } 1146 }
1405 1147
1406 ap->host->max_cmd_len = 16; 1148 dev->cdb_len = 16;
1407 } 1149 }
1408 1150
1409 /* ATAPI-specific feature tests */ 1151 /* ATAPI-specific feature tests */
1410 else if (dev->class == ATA_DEV_ATAPI) { 1152 else if (dev->class == ATA_DEV_ATAPI) {
1411 if (ata_id_is_ata(dev->id)) /* sanity check */
1412 goto err_out_nosup;
1413
1414 rc = atapi_cdb_len(dev->id); 1153 rc = atapi_cdb_len(dev->id);
1415 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) { 1154 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
1416 printk(KERN_WARNING "ata%u: unsupported CDB len\n", ap->id); 1155 printk(KERN_WARNING "ata%u: unsupported CDB len\n", ap->id);
1417 goto err_out_nosup; 1156 goto err_out_nosup;
1418 } 1157 }
1419 ap->cdb_len = (unsigned int) rc; 1158 dev->cdb_len = (unsigned int) rc;
1420 ap->host->max_cmd_len = (unsigned char) ap->cdb_len;
1421 1159
1422 /* print device info to dmesg */ 1160 /* print device info to dmesg */
1423 printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n", 1161 printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n",
@@ -1425,6 +1163,12 @@ retry:
1425 ata_mode_string(xfer_modes)); 1163 ata_mode_string(xfer_modes));
1426 } 1164 }
1427 1165
1166 ap->host->max_cmd_len = 0;
1167 for (i = 0; i < ATA_MAX_DEVICES; i++)
1168 ap->host->max_cmd_len = max_t(unsigned int,
1169 ap->host->max_cmd_len,
1170 ap->device[i].cdb_len);
1171
1428 DPRINTK("EXIT, drv_stat = 0x%x\n", ata_chk_status(ap)); 1172 DPRINTK("EXIT, drv_stat = 0x%x\n", ata_chk_status(ap));
1429 return; 1173 return;
1430 1174
@@ -1437,30 +1181,28 @@ err_out:
1437} 1181}
1438 1182
1439 1183
1440static inline u8 ata_dev_knobble(const struct ata_port *ap) 1184static inline u8 ata_dev_knobble(const struct ata_port *ap,
1185 struct ata_device *dev)
1441{ 1186{
1442 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(ap->device->id))); 1187 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
1443} 1188}
1444 1189
1445/** 1190/**
1446 * ata_dev_config - Run device specific handlers and check for 1191 * ata_dev_config - Run device specific handlers & check for SATA->PATA bridges
1447 * SATA->PATA bridges 1192 * @ap: Bus
1448 * @ap: Bus 1193 * @i: Device
1449 * @i: Device
1450 * 1194 *
1451 * LOCKING: 1195 * LOCKING:
1452 */ 1196 */
1453 1197
1454void ata_dev_config(struct ata_port *ap, unsigned int i) 1198void ata_dev_config(struct ata_port *ap, unsigned int i)
1455{ 1199{
1456 /* limit bridge transfers to udma5, 200 sectors */ 1200 /* limit bridge transfers to udma5, 200 sectors */
1457 if (ata_dev_knobble(ap)) { 1201 if (ata_dev_knobble(ap, &ap->device[i])) {
1458 printk(KERN_INFO "ata%u(%u): applying bridge limits\n", 1202 printk(KERN_INFO "ata%u(%u): applying bridge limits\n",
1459 ap->id, ap->device->devno); 1203 ap->id, i);
1460 ap->udma_mask &= ATA_UDMA5; 1204 ap->udma_mask &= ATA_UDMA5;
1461 ap->host->max_sectors = ATA_MAX_SECTORS; 1205 ap->device[i].max_sectors = ATA_MAX_SECTORS;
1462 ap->host->hostt->max_sectors = ATA_MAX_SECTORS;
1463 ap->device[i].flags |= ATA_DFLAG_LOCK_SECTORS;
1464 } 1206 }
1465 1207
1466 if (ap->ops->dev_config) 1208 if (ap->ops->dev_config)
@@ -1486,7 +1228,27 @@ static int ata_bus_probe(struct ata_port *ap)
1486{ 1228{
1487 unsigned int i, found = 0; 1229 unsigned int i, found = 0;
1488 1230
1489 ap->ops->phy_reset(ap); 1231 if (ap->ops->probe_reset) {
1232 unsigned int classes[ATA_MAX_DEVICES];
1233 int rc;
1234
1235 ata_port_probe(ap);
1236
1237 rc = ap->ops->probe_reset(ap, classes);
1238 if (rc == 0) {
1239 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1240 if (classes[i] == ATA_DEV_UNKNOWN)
1241 classes[i] = ATA_DEV_NONE;
1242 ap->device[i].class = classes[i];
1243 }
1244 } else {
1245 printk(KERN_ERR "ata%u: probe reset failed, "
1246 "disabling port\n", ap->id);
1247 ata_port_disable(ap);
1248 }
1249 } else
1250 ap->ops->phy_reset(ap);
1251
1490 if (ap->flags & ATA_FLAG_PORT_DISABLED) 1252 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1491 goto err_out; 1253 goto err_out;
1492 1254
@@ -1530,6 +1292,41 @@ void ata_port_probe(struct ata_port *ap)
1530} 1292}
1531 1293
1532/** 1294/**
1295 * sata_print_link_status - Print SATA link status
1296 * @ap: SATA port to printk link status about
1297 *
1298 * This function prints link speed and status of a SATA link.
1299 *
1300 * LOCKING:
1301 * None.
1302 */
1303static void sata_print_link_status(struct ata_port *ap)
1304{
1305 u32 sstatus, tmp;
1306 const char *speed;
1307
1308 if (!ap->ops->scr_read)
1309 return;
1310
1311 sstatus = scr_read(ap, SCR_STATUS);
1312
1313 if (sata_dev_present(ap)) {
1314 tmp = (sstatus >> 4) & 0xf;
1315 if (tmp & (1 << 0))
1316 speed = "1.5";
1317 else if (tmp & (1 << 1))
1318 speed = "3.0";
1319 else
1320 speed = "<unknown>";
1321 printk(KERN_INFO "ata%u: SATA link up %s Gbps (SStatus %X)\n",
1322 ap->id, speed, sstatus);
1323 } else {
1324 printk(KERN_INFO "ata%u: SATA link down (SStatus %X)\n",
1325 ap->id, sstatus);
1326 }
1327}
1328
1329/**
1533 * __sata_phy_reset - Wake/reset a low-level SATA PHY 1330 * __sata_phy_reset - Wake/reset a low-level SATA PHY
1534 * @ap: SATA port associated with target SATA PHY. 1331 * @ap: SATA port associated with target SATA PHY.
1535 * 1332 *
@@ -1563,27 +1360,14 @@ void __sata_phy_reset(struct ata_port *ap)
1563 break; 1360 break;
1564 } while (time_before(jiffies, timeout)); 1361 } while (time_before(jiffies, timeout));
1565 1362
1566 /* TODO: phy layer with polling, timeouts, etc. */ 1363 /* print link status */
1567 sstatus = scr_read(ap, SCR_STATUS); 1364 sata_print_link_status(ap);
1568 if (sata_dev_present(ap)) {
1569 const char *speed;
1570 u32 tmp;
1571 1365
1572 tmp = (sstatus >> 4) & 0xf; 1366 /* TODO: phy layer with polling, timeouts, etc. */
1573 if (tmp & (1 << 0)) 1367 if (sata_dev_present(ap))
1574 speed = "1.5";
1575 else if (tmp & (1 << 1))
1576 speed = "3.0";
1577 else
1578 speed = "<unknown>";
1579 printk(KERN_INFO "ata%u: SATA link up %s Gbps (SStatus %X)\n",
1580 ap->id, speed, sstatus);
1581 ata_port_probe(ap); 1368 ata_port_probe(ap);
1582 } else { 1369 else
1583 printk(KERN_INFO "ata%u: SATA link down (SStatus %X)\n",
1584 ap->id, sstatus);
1585 ata_port_disable(ap); 1370 ata_port_disable(ap);
1586 }
1587 1371
1588 if (ap->flags & ATA_FLAG_PORT_DISABLED) 1372 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1589 return; 1373 return;
@@ -1756,9 +1540,9 @@ int ata_timing_compute(struct ata_device *adev, unsigned short speed,
1756 ata_timing_quantize(t, t, T, UT); 1540 ata_timing_quantize(t, t, T, UT);
1757 1541
1758 /* 1542 /*
1759 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY, S.M.A.R.T 1543 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
1760 * and some other commands. We have to ensure that the DMA cycle timing is 1544 * S.M.A.R.T. and some other commands. We have to ensure that the
1761 * slower/equal than the fastest PIO timing. 1545 * DMA cycle timing is slower/equal than the fastest PIO timing.
1762 */ 1546 */
1763 1547
1764 if (speed > XFER_PIO_4) { 1548 if (speed > XFER_PIO_4) {
@@ -1767,7 +1551,7 @@ int ata_timing_compute(struct ata_device *adev, unsigned short speed,
1767 } 1551 }
1768 1552
1769 /* 1553 /*
1770 * Lenghten active & recovery time so that cycle time is correct. 1554 * Lengthen active & recovery time so that cycle time is correct.
1771 */ 1555 */
1772 1556
1773 if (t->act8b + t->rec8b < t->cyc8b) { 1557 if (t->act8b + t->rec8b < t->cyc8b) {
@@ -1886,7 +1670,6 @@ static void ata_host_set_dma(struct ata_port *ap, u8 xfer_mode,
1886 * 1670 *
1887 * LOCKING: 1671 * LOCKING:
1888 * PCI/etc. bus probe sem. 1672 * PCI/etc. bus probe sem.
1889 *
1890 */ 1673 */
1891static void ata_set_mode(struct ata_port *ap) 1674static void ata_set_mode(struct ata_port *ap)
1892{ 1675{
@@ -1926,6 +1709,26 @@ err_out:
1926} 1709}
1927 1710
1928/** 1711/**
1712 * ata_tf_to_host - issue ATA taskfile to host controller
1713 * @ap: port to which command is being issued
1714 * @tf: ATA taskfile register set
1715 *
1716 * Issues ATA taskfile register set to ATA host controller,
1717 * with proper synchronization with interrupt handler and
1718 * other threads.
1719 *
1720 * LOCKING:
1721 * spin_lock_irqsave(host_set lock)
1722 */
1723
1724static inline void ata_tf_to_host(struct ata_port *ap,
1725 const struct ata_taskfile *tf)
1726{
1727 ap->ops->tf_load(ap, tf);
1728 ap->ops->exec_command(ap, tf);
1729}
1730
1731/**
1929 * ata_busy_sleep - sleep until BSY clears, or timeout 1732 * ata_busy_sleep - sleep until BSY clears, or timeout
1930 * @ap: port containing status register to be polled 1733 * @ap: port containing status register to be polled
1931 * @tmout_pat: impatience timeout 1734 * @tmout_pat: impatience timeout
@@ -1935,12 +1738,10 @@ err_out:
1935 * or a timeout occurs. 1738 * or a timeout occurs.
1936 * 1739 *
1937 * LOCKING: None. 1740 * LOCKING: None.
1938 *
1939 */ 1741 */
1940 1742
1941static unsigned int ata_busy_sleep (struct ata_port *ap, 1743unsigned int ata_busy_sleep (struct ata_port *ap,
1942 unsigned long tmout_pat, 1744 unsigned long tmout_pat, unsigned long tmout)
1943 unsigned long tmout)
1944{ 1745{
1945 unsigned long timer_start, timeout; 1746 unsigned long timer_start, timeout;
1946 u8 status; 1747 u8 status;
@@ -2159,9 +1960,9 @@ void ata_bus_reset(struct ata_port *ap)
2159 /* 1960 /*
2160 * determine by signature whether we have ATA or ATAPI devices 1961 * determine by signature whether we have ATA or ATAPI devices
2161 */ 1962 */
2162 err = ata_dev_try_classify(ap, 0); 1963 ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
2163 if ((slave_possible) && (err != 0x81)) 1964 if ((slave_possible) && (err != 0x81))
2164 ata_dev_try_classify(ap, 1); 1965 ap->device[1].class = ata_dev_try_classify(ap, 1, &err);
2165 1966
2166 /* re-enable interrupts */ 1967 /* re-enable interrupts */
2167 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */ 1968 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
@@ -2196,6 +1997,334 @@ err_out:
2196 DPRINTK("EXIT\n"); 1997 DPRINTK("EXIT\n");
2197} 1998}
2198 1999
2000static int sata_phy_resume(struct ata_port *ap)
2001{
2002 unsigned long timeout = jiffies + (HZ * 5);
2003 u32 sstatus;
2004
2005 scr_write_flush(ap, SCR_CONTROL, 0x300);
2006
2007 /* Wait for phy to become ready, if necessary. */
2008 do {
2009 msleep(200);
2010 sstatus = scr_read(ap, SCR_STATUS);
2011 if ((sstatus & 0xf) != 1)
2012 return 0;
2013 } while (time_before(jiffies, timeout));
2014
2015 return -1;
2016}
2017
2018/**
2019 * ata_std_probeinit - initialize probing
2020 * @ap: port to be probed
2021 *
2022 * @ap is about to be probed. Initialize it. This function is
2023 * to be used as standard callback for ata_drive_probe_reset().
2024 *
2025 * NOTE!!! Do not use this function as probeinit if a low level
2026 * driver implements only hardreset. Just pass NULL as probeinit
2027 * in that case. Using this function is probably okay but doing
2028 * so makes reset sequence different from the original
2029 * ->phy_reset implementation and Jeff nervous. :-P
2030 */
2031extern void ata_std_probeinit(struct ata_port *ap)
2032{
2033 if (ap->flags & ATA_FLAG_SATA && ap->ops->scr_read) {
2034 sata_phy_resume(ap);
2035 if (sata_dev_present(ap))
2036 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2037 }
2038}
2039
2040/**
2041 * ata_std_softreset - reset host port via ATA SRST
2042 * @ap: port to reset
2043 * @verbose: fail verbosely
2044 * @classes: resulting classes of attached devices
2045 *
2046 * Reset host port using ATA SRST. This function is to be used
2047 * as standard callback for ata_drive_*_reset() functions.
2048 *
2049 * LOCKING:
2050 * Kernel thread context (may sleep)
2051 *
2052 * RETURNS:
2053 * 0 on success, -errno otherwise.
2054 */
2055int ata_std_softreset(struct ata_port *ap, int verbose, unsigned int *classes)
2056{
2057 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2058 unsigned int devmask = 0, err_mask;
2059 u8 err;
2060
2061 DPRINTK("ENTER\n");
2062
2063 if (ap->ops->scr_read && !sata_dev_present(ap)) {
2064 classes[0] = ATA_DEV_NONE;
2065 goto out;
2066 }
2067
2068 /* determine if device 0/1 are present */
2069 if (ata_devchk(ap, 0))
2070 devmask |= (1 << 0);
2071 if (slave_possible && ata_devchk(ap, 1))
2072 devmask |= (1 << 1);
2073
2074 /* select device 0 again */
2075 ap->ops->dev_select(ap, 0);
2076
2077 /* issue bus reset */
2078 DPRINTK("about to softreset, devmask=%x\n", devmask);
2079 err_mask = ata_bus_softreset(ap, devmask);
2080 if (err_mask) {
2081 if (verbose)
2082 printk(KERN_ERR "ata%u: SRST failed (err_mask=0x%x)\n",
2083 ap->id, err_mask);
2084 else
2085 DPRINTK("EXIT, softreset failed (err_mask=0x%x)\n",
2086 err_mask);
2087 return -EIO;
2088 }
2089
2090 /* determine by signature whether we have ATA or ATAPI devices */
2091 classes[0] = ata_dev_try_classify(ap, 0, &err);
2092 if (slave_possible && err != 0x81)
2093 classes[1] = ata_dev_try_classify(ap, 1, &err);
2094
2095 out:
2096 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
2097 return 0;
2098}
2099
2100/**
2101 * sata_std_hardreset - reset host port via SATA phy reset
2102 * @ap: port to reset
2103 * @verbose: fail verbosely
2104 * @class: resulting class of attached device
2105 *
2106 * SATA phy-reset host port using DET bits of SControl register.
2107 * This function is to be used as standard callback for
2108 * ata_drive_*_reset().
2109 *
2110 * LOCKING:
2111 * Kernel thread context (may sleep)
2112 *
2113 * RETURNS:
2114 * 0 on success, -errno otherwise.
2115 */
2116int sata_std_hardreset(struct ata_port *ap, int verbose, unsigned int *class)
2117{
2118 DPRINTK("ENTER\n");
2119
2120 /* Issue phy wake/reset */
2121 scr_write_flush(ap, SCR_CONTROL, 0x301);
2122
2123 /*
2124 * Couldn't find anything in SATA I/II specs, but AHCI-1.1
2125 * 10.4.2 says at least 1 ms.
2126 */
2127 msleep(1);
2128
2129 /* Bring phy back */
2130 sata_phy_resume(ap);
2131
2132 /* TODO: phy layer with polling, timeouts, etc. */
2133 if (!sata_dev_present(ap)) {
2134 *class = ATA_DEV_NONE;
2135 DPRINTK("EXIT, link offline\n");
2136 return 0;
2137 }
2138
2139 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
2140 if (verbose)
2141 printk(KERN_ERR "ata%u: COMRESET failed "
2142 "(device not ready)\n", ap->id);
2143 else
2144 DPRINTK("EXIT, device not ready\n");
2145 return -EIO;
2146 }
2147
2148 ap->ops->dev_select(ap, 0); /* probably unnecessary */
2149
2150 *class = ata_dev_try_classify(ap, 0, NULL);
2151
2152 DPRINTK("EXIT, class=%u\n", *class);
2153 return 0;
2154}
2155
2156/**
2157 * ata_std_postreset - standard postreset callback
2158 * @ap: the target ata_port
2159 * @classes: classes of attached devices
2160 *
2161 * This function is invoked after a successful reset. Note that
2162 * the device might have been reset more than once using
2163 * different reset methods before postreset is invoked.
2164 *
2165 * This function is to be used as standard callback for
2166 * ata_drive_*_reset().
2167 *
2168 * LOCKING:
2169 * Kernel thread context (may sleep)
2170 */
2171void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
2172{
2173 DPRINTK("ENTER\n");
2174
2175 /* set cable type if it isn't already set */
2176 if (ap->cbl == ATA_CBL_NONE && ap->flags & ATA_FLAG_SATA)
2177 ap->cbl = ATA_CBL_SATA;
2178
2179 /* print link status */
2180 if (ap->cbl == ATA_CBL_SATA)
2181 sata_print_link_status(ap);
2182
2183 /* re-enable interrupts */
2184 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
2185 ata_irq_on(ap);
2186
2187 /* is double-select really necessary? */
2188 if (classes[0] != ATA_DEV_NONE)
2189 ap->ops->dev_select(ap, 1);
2190 if (classes[1] != ATA_DEV_NONE)
2191 ap->ops->dev_select(ap, 0);
2192
2193 /* bail out if no device is present */
2194 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2195 DPRINTK("EXIT, no device\n");
2196 return;
2197 }
2198
2199 /* set up device control */
2200 if (ap->ioaddr.ctl_addr) {
2201 if (ap->flags & ATA_FLAG_MMIO)
2202 writeb(ap->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
2203 else
2204 outb(ap->ctl, ap->ioaddr.ctl_addr);
2205 }
2206
2207 DPRINTK("EXIT\n");
2208}
2209
2210/**
2211 * ata_std_probe_reset - standard probe reset method
2212 * @ap: prot to perform probe-reset
2213 * @classes: resulting classes of attached devices
2214 *
2215 * The stock off-the-shelf ->probe_reset method.
2216 *
2217 * LOCKING:
2218 * Kernel thread context (may sleep)
2219 *
2220 * RETURNS:
2221 * 0 on success, -errno otherwise.
2222 */
2223int ata_std_probe_reset(struct ata_port *ap, unsigned int *classes)
2224{
2225 ata_reset_fn_t hardreset;
2226
2227 hardreset = NULL;
2228 if (ap->flags & ATA_FLAG_SATA && ap->ops->scr_read)
2229 hardreset = sata_std_hardreset;
2230
2231 return ata_drive_probe_reset(ap, ata_std_probeinit,
2232 ata_std_softreset, hardreset,
2233 ata_std_postreset, classes);
2234}
2235
2236static int do_probe_reset(struct ata_port *ap, ata_reset_fn_t reset,
2237 ata_postreset_fn_t postreset,
2238 unsigned int *classes)
2239{
2240 int i, rc;
2241
2242 for (i = 0; i < ATA_MAX_DEVICES; i++)
2243 classes[i] = ATA_DEV_UNKNOWN;
2244
2245 rc = reset(ap, 0, classes);
2246 if (rc)
2247 return rc;
2248
2249 /* If any class isn't ATA_DEV_UNKNOWN, consider classification
2250 * is complete and convert all ATA_DEV_UNKNOWN to
2251 * ATA_DEV_NONE.
2252 */
2253 for (i = 0; i < ATA_MAX_DEVICES; i++)
2254 if (classes[i] != ATA_DEV_UNKNOWN)
2255 break;
2256
2257 if (i < ATA_MAX_DEVICES)
2258 for (i = 0; i < ATA_MAX_DEVICES; i++)
2259 if (classes[i] == ATA_DEV_UNKNOWN)
2260 classes[i] = ATA_DEV_NONE;
2261
2262 if (postreset)
2263 postreset(ap, classes);
2264
2265 return classes[0] != ATA_DEV_UNKNOWN ? 0 : -ENODEV;
2266}
2267
2268/**
2269 * ata_drive_probe_reset - Perform probe reset with given methods
2270 * @ap: port to reset
2271 * @probeinit: probeinit method (can be NULL)
2272 * @softreset: softreset method (can be NULL)
2273 * @hardreset: hardreset method (can be NULL)
2274 * @postreset: postreset method (can be NULL)
2275 * @classes: resulting classes of attached devices
2276 *
2277 * Reset the specified port and classify attached devices using
2278 * given methods. This function prefers softreset but tries all
2279 * possible reset sequences to reset and classify devices. This
2280 * function is intended to be used for constructing ->probe_reset
2281 * callback by low level drivers.
2282 *
2283 * Reset methods should follow the following rules.
2284 *
2285 * - Return 0 on sucess, -errno on failure.
2286 * - If classification is supported, fill classes[] with
2287 * recognized class codes.
2288 * - If classification is not supported, leave classes[] alone.
2289 * - If verbose is non-zero, print error message on failure;
2290 * otherwise, shut up.
2291 *
2292 * LOCKING:
2293 * Kernel thread context (may sleep)
2294 *
2295 * RETURNS:
2296 * 0 on success, -EINVAL if no reset method is avaliable, -ENODEV
2297 * if classification fails, and any error code from reset
2298 * methods.
2299 */
2300int ata_drive_probe_reset(struct ata_port *ap, ata_probeinit_fn_t probeinit,
2301 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
2302 ata_postreset_fn_t postreset, unsigned int *classes)
2303{
2304 int rc = -EINVAL;
2305
2306 if (probeinit)
2307 probeinit(ap);
2308
2309 if (softreset) {
2310 rc = do_probe_reset(ap, softreset, postreset, classes);
2311 if (rc == 0)
2312 return 0;
2313 }
2314
2315 if (!hardreset)
2316 return rc;
2317
2318 rc = do_probe_reset(ap, hardreset, postreset, classes);
2319 if (rc == 0 || rc != -ENODEV)
2320 return rc;
2321
2322 if (softreset)
2323 rc = do_probe_reset(ap, softreset, postreset, classes);
2324
2325 return rc;
2326}
2327
2199static void ata_pr_blacklisted(const struct ata_port *ap, 2328static void ata_pr_blacklisted(const struct ata_port *ap,
2200 const struct ata_device *dev) 2329 const struct ata_device *dev)
2201{ 2330{
@@ -2237,24 +2366,13 @@ static const char * const ata_dma_blacklist [] = {
2237 2366
2238static int ata_dma_blacklisted(const struct ata_device *dev) 2367static int ata_dma_blacklisted(const struct ata_device *dev)
2239{ 2368{
2240 unsigned char model_num[40]; 2369 unsigned char model_num[41];
2241 char *s;
2242 unsigned int len;
2243 int i; 2370 int i;
2244 2371
2245 ata_dev_id_string(dev->id, model_num, ATA_ID_PROD_OFS, 2372 ata_id_c_string(dev->id, model_num, ATA_ID_PROD_OFS, sizeof(model_num));
2246 sizeof(model_num));
2247 s = &model_num[0];
2248 len = strnlen(s, sizeof(model_num));
2249
2250 /* ATAPI specifies that empty space is blank-filled; remove blanks */
2251 while ((len > 0) && (s[len - 1] == ' ')) {
2252 len--;
2253 s[len] = 0;
2254 }
2255 2373
2256 for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i++) 2374 for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i++)
2257 if (!strncmp(ata_dma_blacklist[i], s, len)) 2375 if (!strcmp(ata_dma_blacklist[i], model_num))
2258 return 1; 2376 return 1;
2259 2377
2260 return 0; 2378 return 0;
@@ -2268,7 +2386,7 @@ static unsigned int ata_get_mode_mask(const struct ata_port *ap, int shift)
2268 master = &ap->device[0]; 2386 master = &ap->device[0];
2269 slave = &ap->device[1]; 2387 slave = &ap->device[1];
2270 2388
2271 assert (ata_dev_present(master) || ata_dev_present(slave)); 2389 WARN_ON(!ata_dev_present(master) && !ata_dev_present(slave));
2272 2390
2273 if (shift == ATA_SHIFT_UDMA) { 2391 if (shift == ATA_SHIFT_UDMA) {
2274 mask = ap->udma_mask; 2392 mask = ap->udma_mask;
@@ -2420,63 +2538,28 @@ static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev)
2420} 2538}
2421 2539
2422/** 2540/**
2423 * ata_dev_reread_id - Reread the device identify device info
2424 * @ap: port where the device is
2425 * @dev: device to reread the identify device info
2426 *
2427 * LOCKING:
2428 */
2429
2430static void ata_dev_reread_id(struct ata_port *ap, struct ata_device *dev)
2431{
2432 struct ata_taskfile tf;
2433
2434 ata_tf_init(ap, &tf, dev->devno);
2435
2436 if (dev->class == ATA_DEV_ATA) {
2437 tf.command = ATA_CMD_ID_ATA;
2438 DPRINTK("do ATA identify\n");
2439 } else {
2440 tf.command = ATA_CMD_ID_ATAPI;
2441 DPRINTK("do ATAPI identify\n");
2442 }
2443
2444 tf.flags |= ATA_TFLAG_DEVICE;
2445 tf.protocol = ATA_PROT_PIO;
2446
2447 if (ata_exec_internal(ap, dev, &tf, DMA_FROM_DEVICE,
2448 dev->id, sizeof(dev->id)))
2449 goto err_out;
2450
2451 swap_buf_le16(dev->id, ATA_ID_WORDS);
2452
2453 ata_dump_id(dev);
2454
2455 DPRINTK("EXIT\n");
2456
2457 return;
2458err_out:
2459 printk(KERN_ERR "ata%u: failed to reread ID, disabled\n", ap->id);
2460 ata_port_disable(ap);
2461}
2462
2463/**
2464 * ata_dev_init_params - Issue INIT DEV PARAMS command 2541 * ata_dev_init_params - Issue INIT DEV PARAMS command
2465 * @ap: Port associated with device @dev 2542 * @ap: Port associated with device @dev
2466 * @dev: Device to which command will be sent 2543 * @dev: Device to which command will be sent
2467 * 2544 *
2468 * LOCKING: 2545 * LOCKING:
2546 * Kernel thread context (may sleep)
2547 *
2548 * RETURNS:
2549 * 0 on success, AC_ERR_* mask otherwise.
2469 */ 2550 */
2470 2551
2471static void ata_dev_init_params(struct ata_port *ap, struct ata_device *dev) 2552static unsigned int ata_dev_init_params(struct ata_port *ap,
2553 struct ata_device *dev)
2472{ 2554{
2473 struct ata_taskfile tf; 2555 struct ata_taskfile tf;
2556 unsigned int err_mask;
2474 u16 sectors = dev->id[6]; 2557 u16 sectors = dev->id[6];
2475 u16 heads = dev->id[3]; 2558 u16 heads = dev->id[3];
2476 2559
2477 /* Number of sectors per track 1-255. Number of heads 1-16 */ 2560 /* Number of sectors per track 1-255. Number of heads 1-16 */
2478 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16) 2561 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
2479 return; 2562 return 0;
2480 2563
2481 /* set up init dev params taskfile */ 2564 /* set up init dev params taskfile */
2482 DPRINTK("init dev params \n"); 2565 DPRINTK("init dev params \n");
@@ -2488,13 +2571,10 @@ static void ata_dev_init_params(struct ata_port *ap, struct ata_device *dev)
2488 tf.nsect = sectors; 2571 tf.nsect = sectors;
2489 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */ 2572 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
2490 2573
2491 if (ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0)) { 2574 err_mask = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
2492 printk(KERN_ERR "ata%u: failed to init parameters, disabled\n",
2493 ap->id);
2494 ata_port_disable(ap);
2495 }
2496 2575
2497 DPRINTK("EXIT\n"); 2576 DPRINTK("EXIT, err_mask=%x\n", err_mask);
2577 return err_mask;
2498} 2578}
2499 2579
2500/** 2580/**
@@ -2514,11 +2594,11 @@ static void ata_sg_clean(struct ata_queued_cmd *qc)
2514 int dir = qc->dma_dir; 2594 int dir = qc->dma_dir;
2515 void *pad_buf = NULL; 2595 void *pad_buf = NULL;
2516 2596
2517 assert(qc->flags & ATA_QCFLAG_DMAMAP); 2597 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
2518 assert(sg != NULL); 2598 WARN_ON(sg == NULL);
2519 2599
2520 if (qc->flags & ATA_QCFLAG_SINGLE) 2600 if (qc->flags & ATA_QCFLAG_SINGLE)
2521 assert(qc->n_elem <= 1); 2601 WARN_ON(qc->n_elem > 1);
2522 2602
2523 VPRINTK("unmapping %u sg elements\n", qc->n_elem); 2603 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
2524 2604
@@ -2573,8 +2653,8 @@ static void ata_fill_sg(struct ata_queued_cmd *qc)
2573 struct scatterlist *sg; 2653 struct scatterlist *sg;
2574 unsigned int idx; 2654 unsigned int idx;
2575 2655
2576 assert(qc->__sg != NULL); 2656 WARN_ON(qc->__sg == NULL);
2577 assert(qc->n_elem > 0 || qc->pad_len > 0); 2657 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
2578 2658
2579 idx = 0; 2659 idx = 0;
2580 ata_for_each_sg(sg, qc) { 2660 ata_for_each_sg(sg, qc) {
@@ -2727,7 +2807,7 @@ static int ata_sg_setup_one(struct ata_queued_cmd *qc)
2727 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ); 2807 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
2728 struct scatterlist *psg = &qc->pad_sgent; 2808 struct scatterlist *psg = &qc->pad_sgent;
2729 2809
2730 assert(qc->dev->class == ATA_DEV_ATAPI); 2810 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
2731 2811
2732 memset(pad_buf, 0, ATA_DMA_PAD_SZ); 2812 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
2733 2813
@@ -2791,7 +2871,7 @@ static int ata_sg_setup(struct ata_queued_cmd *qc)
2791 int n_elem, pre_n_elem, dir, trim_sg = 0; 2871 int n_elem, pre_n_elem, dir, trim_sg = 0;
2792 2872
2793 VPRINTK("ENTER, ata%u\n", ap->id); 2873 VPRINTK("ENTER, ata%u\n", ap->id);
2794 assert(qc->flags & ATA_QCFLAG_SG); 2874 WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
2795 2875
2796 /* we must lengthen transfers to end on a 32-bit boundary */ 2876 /* we must lengthen transfers to end on a 32-bit boundary */
2797 qc->pad_len = lsg->length & 3; 2877 qc->pad_len = lsg->length & 3;
@@ -2800,7 +2880,7 @@ static int ata_sg_setup(struct ata_queued_cmd *qc)
2800 struct scatterlist *psg = &qc->pad_sgent; 2880 struct scatterlist *psg = &qc->pad_sgent;
2801 unsigned int offset; 2881 unsigned int offset;
2802 2882
2803 assert(qc->dev->class == ATA_DEV_ATAPI); 2883 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
2804 2884
2805 memset(pad_buf, 0, ATA_DMA_PAD_SZ); 2885 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
2806 2886
@@ -2876,7 +2956,7 @@ void ata_poll_qc_complete(struct ata_queued_cmd *qc)
2876} 2956}
2877 2957
2878/** 2958/**
2879 * ata_pio_poll - 2959 * ata_pio_poll - poll using PIO, depending on current state
2880 * @ap: the target ata_port 2960 * @ap: the target ata_port
2881 * 2961 *
2882 * LOCKING: 2962 * LOCKING:
@@ -2894,7 +2974,7 @@ static unsigned long ata_pio_poll(struct ata_port *ap)
2894 unsigned int reg_state = HSM_ST_UNKNOWN; 2974 unsigned int reg_state = HSM_ST_UNKNOWN;
2895 2975
2896 qc = ata_qc_from_tag(ap, ap->active_tag); 2976 qc = ata_qc_from_tag(ap, ap->active_tag);
2897 assert(qc != NULL); 2977 WARN_ON(qc == NULL);
2898 2978
2899 switch (ap->hsm_task_state) { 2979 switch (ap->hsm_task_state) {
2900 case HSM_ST: 2980 case HSM_ST:
@@ -2915,7 +2995,7 @@ static unsigned long ata_pio_poll(struct ata_port *ap)
2915 status = ata_chk_status(ap); 2995 status = ata_chk_status(ap);
2916 if (status & ATA_BUSY) { 2996 if (status & ATA_BUSY) {
2917 if (time_after(jiffies, ap->pio_task_timeout)) { 2997 if (time_after(jiffies, ap->pio_task_timeout)) {
2918 qc->err_mask |= AC_ERR_ATA_BUS; 2998 qc->err_mask |= AC_ERR_TIMEOUT;
2919 ap->hsm_task_state = HSM_ST_TMOUT; 2999 ap->hsm_task_state = HSM_ST_TMOUT;
2920 return 0; 3000 return 0;
2921 } 3001 }
@@ -2962,7 +3042,7 @@ static int ata_pio_complete (struct ata_port *ap)
2962 } 3042 }
2963 3043
2964 qc = ata_qc_from_tag(ap, ap->active_tag); 3044 qc = ata_qc_from_tag(ap, ap->active_tag);
2965 assert(qc != NULL); 3045 WARN_ON(qc == NULL);
2966 3046
2967 drv_stat = ata_wait_idle(ap); 3047 drv_stat = ata_wait_idle(ap);
2968 if (!ata_ok(drv_stat)) { 3048 if (!ata_ok(drv_stat)) {
@@ -2973,7 +3053,7 @@ static int ata_pio_complete (struct ata_port *ap)
2973 3053
2974 ap->hsm_task_state = HSM_ST_IDLE; 3054 ap->hsm_task_state = HSM_ST_IDLE;
2975 3055
2976 assert(qc->err_mask == 0); 3056 WARN_ON(qc->err_mask);
2977 ata_poll_qc_complete(qc); 3057 ata_poll_qc_complete(qc);
2978 3058
2979 /* another command may start at this point */ 3059 /* another command may start at this point */
@@ -2983,7 +3063,7 @@ static int ata_pio_complete (struct ata_port *ap)
2983 3063
2984 3064
2985/** 3065/**
2986 * swap_buf_le16 - swap halves of 16-words in place 3066 * swap_buf_le16 - swap halves of 16-bit words in place
2987 * @buf: Buffer to swap 3067 * @buf: Buffer to swap
2988 * @buf_words: Number of 16-bit words in buffer. 3068 * @buf_words: Number of 16-bit words in buffer.
2989 * 3069 *
@@ -3293,7 +3373,7 @@ static void atapi_pio_bytes(struct ata_queued_cmd *qc)
3293err_out: 3373err_out:
3294 printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n", 3374 printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n",
3295 ap->id, dev->devno); 3375 ap->id, dev->devno);
3296 qc->err_mask |= AC_ERR_ATA_BUS; 3376 qc->err_mask |= AC_ERR_HSM;
3297 ap->hsm_task_state = HSM_ST_ERR; 3377 ap->hsm_task_state = HSM_ST_ERR;
3298} 3378}
3299 3379
@@ -3330,7 +3410,7 @@ static void ata_pio_block(struct ata_port *ap)
3330 } 3410 }
3331 3411
3332 qc = ata_qc_from_tag(ap, ap->active_tag); 3412 qc = ata_qc_from_tag(ap, ap->active_tag);
3333 assert(qc != NULL); 3413 WARN_ON(qc == NULL);
3334 3414
3335 /* check error */ 3415 /* check error */
3336 if (status & (ATA_ERR | ATA_DF)) { 3416 if (status & (ATA_ERR | ATA_DF)) {
@@ -3351,7 +3431,7 @@ static void ata_pio_block(struct ata_port *ap)
3351 } else { 3431 } else {
3352 /* handle BSY=0, DRQ=0 as error */ 3432 /* handle BSY=0, DRQ=0 as error */
3353 if ((status & ATA_DRQ) == 0) { 3433 if ((status & ATA_DRQ) == 0) {
3354 qc->err_mask |= AC_ERR_ATA_BUS; 3434 qc->err_mask |= AC_ERR_HSM;
3355 ap->hsm_task_state = HSM_ST_ERR; 3435 ap->hsm_task_state = HSM_ST_ERR;
3356 return; 3436 return;
3357 } 3437 }
@@ -3365,7 +3445,7 @@ static void ata_pio_error(struct ata_port *ap)
3365 struct ata_queued_cmd *qc; 3445 struct ata_queued_cmd *qc;
3366 3446
3367 qc = ata_qc_from_tag(ap, ap->active_tag); 3447 qc = ata_qc_from_tag(ap, ap->active_tag);
3368 assert(qc != NULL); 3448 WARN_ON(qc == NULL);
3369 3449
3370 if (qc->tf.command != ATA_CMD_PACKET) 3450 if (qc->tf.command != ATA_CMD_PACKET)
3371 printk(KERN_WARNING "ata%u: PIO error\n", ap->id); 3451 printk(KERN_WARNING "ata%u: PIO error\n", ap->id);
@@ -3373,7 +3453,7 @@ static void ata_pio_error(struct ata_port *ap)
3373 /* make sure qc->err_mask is available to 3453 /* make sure qc->err_mask is available to
3374 * know what's wrong and recover 3454 * know what's wrong and recover
3375 */ 3455 */
3376 assert(qc->err_mask); 3456 WARN_ON(qc->err_mask == 0);
3377 3457
3378 ap->hsm_task_state = HSM_ST_IDLE; 3458 ap->hsm_task_state = HSM_ST_IDLE;
3379 3459
@@ -3414,7 +3494,7 @@ fsm_start:
3414 } 3494 }
3415 3495
3416 if (timeout) 3496 if (timeout)
3417 queue_delayed_work(ata_wq, &ap->pio_task, timeout); 3497 ata_queue_delayed_pio_task(ap, timeout);
3418 else if (!qc_completed) 3498 else if (!qc_completed)
3419 goto fsm_start; 3499 goto fsm_start;
3420} 3500}
@@ -3447,15 +3527,10 @@ static void ata_qc_timeout(struct ata_queued_cmd *qc)
3447 3527
3448 DPRINTK("ENTER\n"); 3528 DPRINTK("ENTER\n");
3449 3529
3450 spin_lock_irqsave(&host_set->lock, flags); 3530 ata_flush_pio_tasks(ap);
3531 ap->hsm_task_state = HSM_ST_IDLE;
3451 3532
3452 /* hack alert! We cannot use the supplied completion 3533 spin_lock_irqsave(&host_set->lock, flags);
3453 * function from inside the ->eh_strategy_handler() thread.
3454 * libata is the only user of ->eh_strategy_handler() in
3455 * any kernel, so the default scsi_done() assumes it is
3456 * not being called from the SCSI EH.
3457 */
3458 qc->scsidone = scsi_finish_command;
3459 3534
3460 switch (qc->tf.protocol) { 3535 switch (qc->tf.protocol) {
3461 3536
@@ -3480,12 +3555,13 @@ static void ata_qc_timeout(struct ata_queued_cmd *qc)
3480 3555
3481 /* complete taskfile transaction */ 3556 /* complete taskfile transaction */
3482 qc->err_mask |= ac_err_mask(drv_stat); 3557 qc->err_mask |= ac_err_mask(drv_stat);
3483 ata_qc_complete(qc);
3484 break; 3558 break;
3485 } 3559 }
3486 3560
3487 spin_unlock_irqrestore(&host_set->lock, flags); 3561 spin_unlock_irqrestore(&host_set->lock, flags);
3488 3562
3563 ata_eh_qc_complete(qc);
3564
3489 DPRINTK("EXIT\n"); 3565 DPRINTK("EXIT\n");
3490} 3566}
3491 3567
@@ -3510,20 +3586,10 @@ static void ata_qc_timeout(struct ata_queued_cmd *qc)
3510 3586
3511void ata_eng_timeout(struct ata_port *ap) 3587void ata_eng_timeout(struct ata_port *ap)
3512{ 3588{
3513 struct ata_queued_cmd *qc;
3514
3515 DPRINTK("ENTER\n"); 3589 DPRINTK("ENTER\n");
3516 3590
3517 qc = ata_qc_from_tag(ap, ap->active_tag); 3591 ata_qc_timeout(ata_qc_from_tag(ap, ap->active_tag));
3518 if (qc)
3519 ata_qc_timeout(qc);
3520 else {
3521 printk(KERN_ERR "ata%u: BUG: timeout without command\n",
3522 ap->id);
3523 goto out;
3524 }
3525 3592
3526out:
3527 DPRINTK("EXIT\n"); 3593 DPRINTK("EXIT\n");
3528} 3594}
3529 3595
@@ -3579,21 +3645,6 @@ struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
3579 return qc; 3645 return qc;
3580} 3646}
3581 3647
3582static void __ata_qc_complete(struct ata_queued_cmd *qc)
3583{
3584 struct ata_port *ap = qc->ap;
3585 unsigned int tag;
3586
3587 qc->flags = 0;
3588 tag = qc->tag;
3589 if (likely(ata_tag_valid(tag))) {
3590 if (tag == ap->active_tag)
3591 ap->active_tag = ATA_TAG_POISON;
3592 qc->tag = ATA_TAG_POISON;
3593 clear_bit(tag, &ap->qactive);
3594 }
3595}
3596
3597/** 3648/**
3598 * ata_qc_free - free unused ata_queued_cmd 3649 * ata_qc_free - free unused ata_queued_cmd
3599 * @qc: Command to complete 3650 * @qc: Command to complete
@@ -3606,29 +3657,25 @@ static void __ata_qc_complete(struct ata_queued_cmd *qc)
3606 */ 3657 */
3607void ata_qc_free(struct ata_queued_cmd *qc) 3658void ata_qc_free(struct ata_queued_cmd *qc)
3608{ 3659{
3609 assert(qc != NULL); /* ata_qc_from_tag _might_ return NULL */ 3660 struct ata_port *ap = qc->ap;
3661 unsigned int tag;
3610 3662
3611 __ata_qc_complete(qc); 3663 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
3612}
3613 3664
3614/** 3665 qc->flags = 0;
3615 * ata_qc_complete - Complete an active ATA command 3666 tag = qc->tag;
3616 * @qc: Command to complete 3667 if (likely(ata_tag_valid(tag))) {
3617 * @err_mask: ATA Status register contents 3668 if (tag == ap->active_tag)
3618 * 3669 ap->active_tag = ATA_TAG_POISON;
3619 * Indicate to the mid and upper layers that an ATA 3670 qc->tag = ATA_TAG_POISON;
3620 * command has completed, with either an ok or not-ok status. 3671 clear_bit(tag, &ap->qactive);
3621 * 3672 }
3622 * LOCKING: 3673}
3623 * spin_lock_irqsave(host_set lock)
3624 */
3625 3674
3626void ata_qc_complete(struct ata_queued_cmd *qc) 3675void __ata_qc_complete(struct ata_queued_cmd *qc)
3627{ 3676{
3628 int rc; 3677 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
3629 3678 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
3630 assert(qc != NULL); /* ata_qc_from_tag _might_ return NULL */
3631 assert(qc->flags & ATA_QCFLAG_ACTIVE);
3632 3679
3633 if (likely(qc->flags & ATA_QCFLAG_DMAMAP)) 3680 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
3634 ata_sg_clean(qc); 3681 ata_sg_clean(qc);
@@ -3640,17 +3687,7 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
3640 qc->flags &= ~ATA_QCFLAG_ACTIVE; 3687 qc->flags &= ~ATA_QCFLAG_ACTIVE;
3641 3688
3642 /* call completion callback */ 3689 /* call completion callback */
3643 rc = qc->complete_fn(qc); 3690 qc->complete_fn(qc);
3644
3645 /* if callback indicates not to complete command (non-zero),
3646 * return immediately
3647 */
3648 if (rc != 0)
3649 return;
3650
3651 __ata_qc_complete(qc);
3652
3653 VPRINTK("EXIT\n");
3654} 3691}
3655 3692
3656static inline int ata_should_dma_map(struct ata_queued_cmd *qc) 3693static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
@@ -3690,20 +3727,20 @@ static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
3690 * spin_lock_irqsave(host_set lock) 3727 * spin_lock_irqsave(host_set lock)
3691 * 3728 *
3692 * RETURNS: 3729 * RETURNS:
3693 * Zero on success, negative on error. 3730 * Zero on success, AC_ERR_* mask on failure
3694 */ 3731 */
3695 3732
3696int ata_qc_issue(struct ata_queued_cmd *qc) 3733unsigned int ata_qc_issue(struct ata_queued_cmd *qc)
3697{ 3734{
3698 struct ata_port *ap = qc->ap; 3735 struct ata_port *ap = qc->ap;
3699 3736
3700 if (ata_should_dma_map(qc)) { 3737 if (ata_should_dma_map(qc)) {
3701 if (qc->flags & ATA_QCFLAG_SG) { 3738 if (qc->flags & ATA_QCFLAG_SG) {
3702 if (ata_sg_setup(qc)) 3739 if (ata_sg_setup(qc))
3703 goto err_out; 3740 goto sg_err;
3704 } else if (qc->flags & ATA_QCFLAG_SINGLE) { 3741 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
3705 if (ata_sg_setup_one(qc)) 3742 if (ata_sg_setup_one(qc))
3706 goto err_out; 3743 goto sg_err;
3707 } 3744 }
3708 } else { 3745 } else {
3709 qc->flags &= ~ATA_QCFLAG_DMAMAP; 3746 qc->flags &= ~ATA_QCFLAG_DMAMAP;
@@ -3716,8 +3753,9 @@ int ata_qc_issue(struct ata_queued_cmd *qc)
3716 3753
3717 return ap->ops->qc_issue(qc); 3754 return ap->ops->qc_issue(qc);
3718 3755
3719err_out: 3756sg_err:
3720 return -1; 3757 qc->flags &= ~ATA_QCFLAG_DMAMAP;
3758 return AC_ERR_SYSTEM;
3721} 3759}
3722 3760
3723 3761
@@ -3736,10 +3774,10 @@ err_out:
3736 * spin_lock_irqsave(host_set lock) 3774 * spin_lock_irqsave(host_set lock)
3737 * 3775 *
3738 * RETURNS: 3776 * RETURNS:
3739 * Zero on success, negative on error. 3777 * Zero on success, AC_ERR_* mask on failure
3740 */ 3778 */
3741 3779
3742int ata_qc_issue_prot(struct ata_queued_cmd *qc) 3780unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
3743{ 3781{
3744 struct ata_port *ap = qc->ap; 3782 struct ata_port *ap = qc->ap;
3745 3783
@@ -3760,31 +3798,31 @@ int ata_qc_issue_prot(struct ata_queued_cmd *qc)
3760 ata_qc_set_polling(qc); 3798 ata_qc_set_polling(qc);
3761 ata_tf_to_host(ap, &qc->tf); 3799 ata_tf_to_host(ap, &qc->tf);
3762 ap->hsm_task_state = HSM_ST; 3800 ap->hsm_task_state = HSM_ST;
3763 queue_work(ata_wq, &ap->pio_task); 3801 ata_queue_pio_task(ap);
3764 break; 3802 break;
3765 3803
3766 case ATA_PROT_ATAPI: 3804 case ATA_PROT_ATAPI:
3767 ata_qc_set_polling(qc); 3805 ata_qc_set_polling(qc);
3768 ata_tf_to_host(ap, &qc->tf); 3806 ata_tf_to_host(ap, &qc->tf);
3769 queue_work(ata_wq, &ap->packet_task); 3807 ata_queue_packet_task(ap);
3770 break; 3808 break;
3771 3809
3772 case ATA_PROT_ATAPI_NODATA: 3810 case ATA_PROT_ATAPI_NODATA:
3773 ap->flags |= ATA_FLAG_NOINTR; 3811 ap->flags |= ATA_FLAG_NOINTR;
3774 ata_tf_to_host(ap, &qc->tf); 3812 ata_tf_to_host(ap, &qc->tf);
3775 queue_work(ata_wq, &ap->packet_task); 3813 ata_queue_packet_task(ap);
3776 break; 3814 break;
3777 3815
3778 case ATA_PROT_ATAPI_DMA: 3816 case ATA_PROT_ATAPI_DMA:
3779 ap->flags |= ATA_FLAG_NOINTR; 3817 ap->flags |= ATA_FLAG_NOINTR;
3780 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */ 3818 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
3781 ap->ops->bmdma_setup(qc); /* set up bmdma */ 3819 ap->ops->bmdma_setup(qc); /* set up bmdma */
3782 queue_work(ata_wq, &ap->packet_task); 3820 ata_queue_packet_task(ap);
3783 break; 3821 break;
3784 3822
3785 default: 3823 default:
3786 WARN_ON(1); 3824 WARN_ON(1);
3787 return -1; 3825 return AC_ERR_SYSTEM;
3788 } 3826 }
3789 3827
3790 return 0; 3828 return 0;
@@ -4168,26 +4206,26 @@ static void atapi_packet_task(void *_data)
4168 u8 status; 4206 u8 status;
4169 4207
4170 qc = ata_qc_from_tag(ap, ap->active_tag); 4208 qc = ata_qc_from_tag(ap, ap->active_tag);
4171 assert(qc != NULL); 4209 WARN_ON(qc == NULL);
4172 assert(qc->flags & ATA_QCFLAG_ACTIVE); 4210 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
4173 4211
4174 /* sleep-wait for BSY to clear */ 4212 /* sleep-wait for BSY to clear */
4175 DPRINTK("busy wait\n"); 4213 DPRINTK("busy wait\n");
4176 if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB)) { 4214 if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB)) {
4177 qc->err_mask |= AC_ERR_ATA_BUS; 4215 qc->err_mask |= AC_ERR_TIMEOUT;
4178 goto err_out; 4216 goto err_out;
4179 } 4217 }
4180 4218
4181 /* make sure DRQ is set */ 4219 /* make sure DRQ is set */
4182 status = ata_chk_status(ap); 4220 status = ata_chk_status(ap);
4183 if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ) { 4221 if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ) {
4184 qc->err_mask |= AC_ERR_ATA_BUS; 4222 qc->err_mask |= AC_ERR_HSM;
4185 goto err_out; 4223 goto err_out;
4186 } 4224 }
4187 4225
4188 /* send SCSI cdb */ 4226 /* send SCSI cdb */
4189 DPRINTK("send cdb\n"); 4227 DPRINTK("send cdb\n");
4190 assert(ap->cdb_len >= 12); 4228 WARN_ON(qc->dev->cdb_len < 12);
4191 4229
4192 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA || 4230 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA ||
4193 qc->tf.protocol == ATA_PROT_ATAPI_NODATA) { 4231 qc->tf.protocol == ATA_PROT_ATAPI_NODATA) {
@@ -4201,16 +4239,16 @@ static void atapi_packet_task(void *_data)
4201 */ 4239 */
4202 spin_lock_irqsave(&ap->host_set->lock, flags); 4240 spin_lock_irqsave(&ap->host_set->lock, flags);
4203 ap->flags &= ~ATA_FLAG_NOINTR; 4241 ap->flags &= ~ATA_FLAG_NOINTR;
4204 ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1); 4242 ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);
4205 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA) 4243 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA)
4206 ap->ops->bmdma_start(qc); /* initiate bmdma */ 4244 ap->ops->bmdma_start(qc); /* initiate bmdma */
4207 spin_unlock_irqrestore(&ap->host_set->lock, flags); 4245 spin_unlock_irqrestore(&ap->host_set->lock, flags);
4208 } else { 4246 } else {
4209 ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1); 4247 ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);
4210 4248
4211 /* PIO commands are handled by polling */ 4249 /* PIO commands are handled by polling */
4212 ap->hsm_task_state = HSM_ST; 4250 ap->hsm_task_state = HSM_ST;
4213 queue_work(ata_wq, &ap->pio_task); 4251 ata_queue_pio_task(ap);
4214 } 4252 }
4215 4253
4216 return; 4254 return;
@@ -4220,19 +4258,6 @@ err_out:
4220} 4258}
4221 4259
4222 4260
4223/**
4224 * ata_port_start - Set port up for dma.
4225 * @ap: Port to initialize
4226 *
4227 * Called just after data structures for each port are
4228 * initialized. Allocates space for PRD table.
4229 *
4230 * May be used as the port_start() entry in ata_port_operations.
4231 *
4232 * LOCKING:
4233 * Inherited from caller.
4234 */
4235
4236/* 4261/*
4237 * Execute a 'simple' command, that only consists of the opcode 'cmd' itself, 4262 * Execute a 'simple' command, that only consists of the opcode 'cmd' itself,
4238 * without filling any other registers 4263 * without filling any other registers
@@ -4284,6 +4309,8 @@ static int ata_start_drive(struct ata_port *ap, struct ata_device *dev)
4284 4309
4285/** 4310/**
4286 * ata_device_resume - wakeup a previously suspended devices 4311 * ata_device_resume - wakeup a previously suspended devices
4312 * @ap: port the device is connected to
4313 * @dev: the device to resume
4287 * 4314 *
4288 * Kick the drive back into action, by sending it an idle immediate 4315 * Kick the drive back into action, by sending it an idle immediate
4289 * command and making sure its transfer mode matches between drive 4316 * command and making sure its transfer mode matches between drive
@@ -4306,10 +4333,11 @@ int ata_device_resume(struct ata_port *ap, struct ata_device *dev)
4306 4333
4307/** 4334/**
4308 * ata_device_suspend - prepare a device for suspend 4335 * ata_device_suspend - prepare a device for suspend
4336 * @ap: port the device is connected to
4337 * @dev: the device to suspend
4309 * 4338 *
4310 * Flush the cache on the drive, if appropriate, then issue a 4339 * Flush the cache on the drive, if appropriate, then issue a
4311 * standbynow command. 4340 * standbynow command.
4312 *
4313 */ 4341 */
4314int ata_device_suspend(struct ata_port *ap, struct ata_device *dev) 4342int ata_device_suspend(struct ata_port *ap, struct ata_device *dev)
4315{ 4343{
@@ -4323,6 +4351,19 @@ int ata_device_suspend(struct ata_port *ap, struct ata_device *dev)
4323 return 0; 4351 return 0;
4324} 4352}
4325 4353
4354/**
4355 * ata_port_start - Set port up for dma.
4356 * @ap: Port to initialize
4357 *
4358 * Called just after data structures for each port are
4359 * initialized. Allocates space for PRD table.
4360 *
4361 * May be used as the port_start() entry in ata_port_operations.
4362 *
4363 * LOCKING:
4364 * Inherited from caller.
4365 */
4366
4326int ata_port_start (struct ata_port *ap) 4367int ata_port_start (struct ata_port *ap)
4327{ 4368{
4328 struct device *dev = ap->host_set->dev; 4369 struct device *dev = ap->host_set->dev;
@@ -4438,6 +4479,7 @@ static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
4438 4479
4439 INIT_WORK(&ap->packet_task, atapi_packet_task, ap); 4480 INIT_WORK(&ap->packet_task, atapi_packet_task, ap);
4440 INIT_WORK(&ap->pio_task, ata_pio_task, ap); 4481 INIT_WORK(&ap->pio_task, ata_pio_task, ap);
4482 INIT_LIST_HEAD(&ap->eh_done_q);
4441 4483
4442 for (i = 0; i < ATA_MAX_DEVICES; i++) 4484 for (i = 0; i < ATA_MAX_DEVICES; i++)
4443 ap->device[i].devno = i; 4485 ap->device[i].devno = i;
@@ -4579,9 +4621,9 @@ int ata_device_add(const struct ata_probe_ent *ent)
4579 4621
4580 ap = host_set->ports[i]; 4622 ap = host_set->ports[i];
4581 4623
4582 DPRINTK("ata%u: probe begin\n", ap->id); 4624 DPRINTK("ata%u: bus probe begin\n", ap->id);
4583 rc = ata_bus_probe(ap); 4625 rc = ata_bus_probe(ap);
4584 DPRINTK("ata%u: probe end\n", ap->id); 4626 DPRINTK("ata%u: bus probe end\n", ap->id);
4585 4627
4586 if (rc) { 4628 if (rc) {
4587 /* FIXME: do something useful here? 4629 /* FIXME: do something useful here?
@@ -4605,7 +4647,7 @@ int ata_device_add(const struct ata_probe_ent *ent)
4605 } 4647 }
4606 4648
4607 /* probes are done, now scan each port's disk(s) */ 4649 /* probes are done, now scan each port's disk(s) */
4608 DPRINTK("probe begin\n"); 4650 DPRINTK("host probe begin\n");
4609 for (i = 0; i < count; i++) { 4651 for (i = 0; i < count; i++) {
4610 struct ata_port *ap = host_set->ports[i]; 4652 struct ata_port *ap = host_set->ports[i];
4611 4653
@@ -4727,32 +4769,6 @@ void ata_std_ports(struct ata_ioports *ioaddr)
4727 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD; 4769 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
4728} 4770}
4729 4771
4730static struct ata_probe_ent *
4731ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
4732{
4733 struct ata_probe_ent *probe_ent;
4734
4735 probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
4736 if (!probe_ent) {
4737 printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
4738 kobject_name(&(dev->kobj)));
4739 return NULL;
4740 }
4741
4742 INIT_LIST_HEAD(&probe_ent->node);
4743 probe_ent->dev = dev;
4744
4745 probe_ent->sht = port->sht;
4746 probe_ent->host_flags = port->host_flags;
4747 probe_ent->pio_mask = port->pio_mask;
4748 probe_ent->mwdma_mask = port->mwdma_mask;
4749 probe_ent->udma_mask = port->udma_mask;
4750 probe_ent->port_ops = port->port_ops;
4751
4752 return probe_ent;
4753}
4754
4755
4756 4772
4757#ifdef CONFIG_PCI 4773#ifdef CONFIG_PCI
4758 4774
@@ -4764,256 +4780,6 @@ void ata_pci_host_stop (struct ata_host_set *host_set)
4764} 4780}
4765 4781
4766/** 4782/**
4767 * ata_pci_init_native_mode - Initialize native-mode driver
4768 * @pdev: pci device to be initialized
4769 * @port: array[2] of pointers to port info structures.
4770 * @ports: bitmap of ports present
4771 *
4772 * Utility function which allocates and initializes an
4773 * ata_probe_ent structure for a standard dual-port
4774 * PIO-based IDE controller. The returned ata_probe_ent
4775 * structure can be passed to ata_device_add(). The returned
4776 * ata_probe_ent structure should then be freed with kfree().
4777 *
4778 * The caller need only pass the address of the primary port, the
4779 * secondary will be deduced automatically. If the device has non
4780 * standard secondary port mappings this function can be called twice,
4781 * once for each interface.
4782 */
4783
4784struct ata_probe_ent *
4785ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int ports)
4786{
4787 struct ata_probe_ent *probe_ent =
4788 ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
4789 int p = 0;
4790
4791 if (!probe_ent)
4792 return NULL;
4793
4794 probe_ent->irq = pdev->irq;
4795 probe_ent->irq_flags = SA_SHIRQ;
4796 probe_ent->private_data = port[0]->private_data;
4797
4798 if (ports & ATA_PORT_PRIMARY) {
4799 probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 0);
4800 probe_ent->port[p].altstatus_addr =
4801 probe_ent->port[p].ctl_addr =
4802 pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS;
4803 probe_ent->port[p].bmdma_addr = pci_resource_start(pdev, 4);
4804 ata_std_ports(&probe_ent->port[p]);
4805 p++;
4806 }
4807
4808 if (ports & ATA_PORT_SECONDARY) {
4809 probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 2);
4810 probe_ent->port[p].altstatus_addr =
4811 probe_ent->port[p].ctl_addr =
4812 pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS;
4813 probe_ent->port[p].bmdma_addr = pci_resource_start(pdev, 4) + 8;
4814 ata_std_ports(&probe_ent->port[p]);
4815 p++;
4816 }
4817
4818 probe_ent->n_ports = p;
4819 return probe_ent;
4820}
4821
4822static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev, struct ata_port_info *port, int port_num)
4823{
4824 struct ata_probe_ent *probe_ent;
4825
4826 probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port);
4827 if (!probe_ent)
4828 return NULL;
4829
4830 probe_ent->legacy_mode = 1;
4831 probe_ent->n_ports = 1;
4832 probe_ent->hard_port_no = port_num;
4833 probe_ent->private_data = port->private_data;
4834
4835 switch(port_num)
4836 {
4837 case 0:
4838 probe_ent->irq = 14;
4839 probe_ent->port[0].cmd_addr = 0x1f0;
4840 probe_ent->port[0].altstatus_addr =
4841 probe_ent->port[0].ctl_addr = 0x3f6;
4842 break;
4843 case 1:
4844 probe_ent->irq = 15;
4845 probe_ent->port[0].cmd_addr = 0x170;
4846 probe_ent->port[0].altstatus_addr =
4847 probe_ent->port[0].ctl_addr = 0x376;
4848 break;
4849 }
4850 probe_ent->port[0].bmdma_addr = pci_resource_start(pdev, 4) + 8 * port_num;
4851 ata_std_ports(&probe_ent->port[0]);
4852 return probe_ent;
4853}
4854
4855/**
4856 * ata_pci_init_one - Initialize/register PCI IDE host controller
4857 * @pdev: Controller to be initialized
4858 * @port_info: Information from low-level host driver
4859 * @n_ports: Number of ports attached to host controller
4860 *
4861 * This is a helper function which can be called from a driver's
4862 * xxx_init_one() probe function if the hardware uses traditional
4863 * IDE taskfile registers.
4864 *
4865 * This function calls pci_enable_device(), reserves its register
4866 * regions, sets the dma mask, enables bus master mode, and calls
4867 * ata_device_add()
4868 *
4869 * LOCKING:
4870 * Inherited from PCI layer (may sleep).
4871 *
4872 * RETURNS:
4873 * Zero on success, negative on errno-based value on error.
4874 */
4875
4876int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
4877 unsigned int n_ports)
4878{
4879 struct ata_probe_ent *probe_ent = NULL, *probe_ent2 = NULL;
4880 struct ata_port_info *port[2];
4881 u8 tmp8, mask;
4882 unsigned int legacy_mode = 0;
4883 int disable_dev_on_err = 1;
4884 int rc;
4885
4886 DPRINTK("ENTER\n");
4887
4888 port[0] = port_info[0];
4889 if (n_ports > 1)
4890 port[1] = port_info[1];
4891 else
4892 port[1] = port[0];
4893
4894 if ((port[0]->host_flags & ATA_FLAG_NO_LEGACY) == 0
4895 && (pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
4896 /* TODO: What if one channel is in native mode ... */
4897 pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
4898 mask = (1 << 2) | (1 << 0);
4899 if ((tmp8 & mask) != mask)
4900 legacy_mode = (1 << 3);
4901 }
4902
4903 /* FIXME... */
4904 if ((!legacy_mode) && (n_ports > 2)) {
4905 printk(KERN_ERR "ata: BUG: native mode, n_ports > 2\n");
4906 n_ports = 2;
4907 /* For now */
4908 }
4909
4910 /* FIXME: Really for ATA it isn't safe because the device may be
4911 multi-purpose and we want to leave it alone if it was already
4912 enabled. Secondly for shared use as Arjan says we want refcounting
4913
4914 Checking dev->is_enabled is insufficient as this is not set at
4915 boot for the primary video which is BIOS enabled
4916 */
4917
4918 rc = pci_enable_device(pdev);
4919 if (rc)
4920 return rc;
4921
4922 rc = pci_request_regions(pdev, DRV_NAME);
4923 if (rc) {
4924 disable_dev_on_err = 0;
4925 goto err_out;
4926 }
4927
4928 /* FIXME: Should use platform specific mappers for legacy port ranges */
4929 if (legacy_mode) {
4930 if (!request_region(0x1f0, 8, "libata")) {
4931 struct resource *conflict, res;
4932 res.start = 0x1f0;
4933 res.end = 0x1f0 + 8 - 1;
4934 conflict = ____request_resource(&ioport_resource, &res);
4935 if (!strcmp(conflict->name, "libata"))
4936 legacy_mode |= (1 << 0);
4937 else {
4938 disable_dev_on_err = 0;
4939 printk(KERN_WARNING "ata: 0x1f0 IDE port busy\n");
4940 }
4941 } else
4942 legacy_mode |= (1 << 0);
4943
4944 if (!request_region(0x170, 8, "libata")) {
4945 struct resource *conflict, res;
4946 res.start = 0x170;
4947 res.end = 0x170 + 8 - 1;
4948 conflict = ____request_resource(&ioport_resource, &res);
4949 if (!strcmp(conflict->name, "libata"))
4950 legacy_mode |= (1 << 1);
4951 else {
4952 disable_dev_on_err = 0;
4953 printk(KERN_WARNING "ata: 0x170 IDE port busy\n");
4954 }
4955 } else
4956 legacy_mode |= (1 << 1);
4957 }
4958
4959 /* we have legacy mode, but all ports are unavailable */
4960 if (legacy_mode == (1 << 3)) {
4961 rc = -EBUSY;
4962 goto err_out_regions;
4963 }
4964
4965 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
4966 if (rc)
4967 goto err_out_regions;
4968 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
4969 if (rc)
4970 goto err_out_regions;
4971
4972 if (legacy_mode) {
4973 if (legacy_mode & (1 << 0))
4974 probe_ent = ata_pci_init_legacy_port(pdev, port[0], 0);
4975 if (legacy_mode & (1 << 1))
4976 probe_ent2 = ata_pci_init_legacy_port(pdev, port[1], 1);
4977 } else {
4978 if (n_ports == 2)
4979 probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
4980 else
4981 probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY);
4982 }
4983 if (!probe_ent && !probe_ent2) {
4984 rc = -ENOMEM;
4985 goto err_out_regions;
4986 }
4987
4988 pci_set_master(pdev);
4989
4990 /* FIXME: check ata_device_add return */
4991 if (legacy_mode) {
4992 if (legacy_mode & (1 << 0))
4993 ata_device_add(probe_ent);
4994 if (legacy_mode & (1 << 1))
4995 ata_device_add(probe_ent2);
4996 } else
4997 ata_device_add(probe_ent);
4998
4999 kfree(probe_ent);
5000 kfree(probe_ent2);
5001
5002 return 0;
5003
5004err_out_regions:
5005 if (legacy_mode & (1 << 0))
5006 release_region(0x1f0, 8);
5007 if (legacy_mode & (1 << 1))
5008 release_region(0x170, 8);
5009 pci_release_regions(pdev);
5010err_out:
5011 if (disable_dev_on_err)
5012 pci_disable_device(pdev);
5013 return rc;
5014}
5015
5016/**
5017 * ata_pci_remove_one - PCI layer callback for device removal 4783 * ata_pci_remove_one - PCI layer callback for device removal
5018 * @pdev: PCI device that was removed 4784 * @pdev: PCI device that was removed
5019 * 4785 *
@@ -5143,7 +4909,7 @@ EXPORT_SYMBOL_GPL(ata_device_add);
5143EXPORT_SYMBOL_GPL(ata_host_set_remove); 4909EXPORT_SYMBOL_GPL(ata_host_set_remove);
5144EXPORT_SYMBOL_GPL(ata_sg_init); 4910EXPORT_SYMBOL_GPL(ata_sg_init);
5145EXPORT_SYMBOL_GPL(ata_sg_init_one); 4911EXPORT_SYMBOL_GPL(ata_sg_init_one);
5146EXPORT_SYMBOL_GPL(ata_qc_complete); 4912EXPORT_SYMBOL_GPL(__ata_qc_complete);
5147EXPORT_SYMBOL_GPL(ata_qc_issue_prot); 4913EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
5148EXPORT_SYMBOL_GPL(ata_eng_timeout); 4914EXPORT_SYMBOL_GPL(ata_eng_timeout);
5149EXPORT_SYMBOL_GPL(ata_tf_load); 4915EXPORT_SYMBOL_GPL(ata_tf_load);
@@ -5169,18 +4935,29 @@ EXPORT_SYMBOL_GPL(ata_port_probe);
5169EXPORT_SYMBOL_GPL(sata_phy_reset); 4935EXPORT_SYMBOL_GPL(sata_phy_reset);
5170EXPORT_SYMBOL_GPL(__sata_phy_reset); 4936EXPORT_SYMBOL_GPL(__sata_phy_reset);
5171EXPORT_SYMBOL_GPL(ata_bus_reset); 4937EXPORT_SYMBOL_GPL(ata_bus_reset);
4938EXPORT_SYMBOL_GPL(ata_std_probeinit);
4939EXPORT_SYMBOL_GPL(ata_std_softreset);
4940EXPORT_SYMBOL_GPL(sata_std_hardreset);
4941EXPORT_SYMBOL_GPL(ata_std_postreset);
4942EXPORT_SYMBOL_GPL(ata_std_probe_reset);
4943EXPORT_SYMBOL_GPL(ata_drive_probe_reset);
5172EXPORT_SYMBOL_GPL(ata_port_disable); 4944EXPORT_SYMBOL_GPL(ata_port_disable);
5173EXPORT_SYMBOL_GPL(ata_ratelimit); 4945EXPORT_SYMBOL_GPL(ata_ratelimit);
4946EXPORT_SYMBOL_GPL(ata_busy_sleep);
5174EXPORT_SYMBOL_GPL(ata_scsi_ioctl); 4947EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
5175EXPORT_SYMBOL_GPL(ata_scsi_queuecmd); 4948EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
4949EXPORT_SYMBOL_GPL(ata_scsi_timed_out);
5176EXPORT_SYMBOL_GPL(ata_scsi_error); 4950EXPORT_SYMBOL_GPL(ata_scsi_error);
5177EXPORT_SYMBOL_GPL(ata_scsi_slave_config); 4951EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
5178EXPORT_SYMBOL_GPL(ata_scsi_release); 4952EXPORT_SYMBOL_GPL(ata_scsi_release);
5179EXPORT_SYMBOL_GPL(ata_host_intr); 4953EXPORT_SYMBOL_GPL(ata_host_intr);
5180EXPORT_SYMBOL_GPL(ata_dev_classify); 4954EXPORT_SYMBOL_GPL(ata_dev_classify);
5181EXPORT_SYMBOL_GPL(ata_dev_id_string); 4955EXPORT_SYMBOL_GPL(ata_id_string);
4956EXPORT_SYMBOL_GPL(ata_id_c_string);
5182EXPORT_SYMBOL_GPL(ata_dev_config); 4957EXPORT_SYMBOL_GPL(ata_dev_config);
5183EXPORT_SYMBOL_GPL(ata_scsi_simulate); 4958EXPORT_SYMBOL_GPL(ata_scsi_simulate);
4959EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
4960EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
5184 4961
5185EXPORT_SYMBOL_GPL(ata_pio_need_iordy); 4962EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
5186EXPORT_SYMBOL_GPL(ata_timing_compute); 4963EXPORT_SYMBOL_GPL(ata_timing_compute);
diff --git a/drivers/scsi/libata-scsi.c b/drivers/scsi/libata-scsi.c
index 59503c9ccac9..d0bd94abb413 100644
--- a/drivers/scsi/libata-scsi.c
+++ b/drivers/scsi/libata-scsi.c
@@ -151,7 +151,7 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
151 struct scsi_sense_hdr sshdr; 151 struct scsi_sense_hdr sshdr;
152 enum dma_data_direction data_dir; 152 enum dma_data_direction data_dir;
153 153
154 if (NULL == (void *)arg) 154 if (arg == NULL)
155 return -EINVAL; 155 return -EINVAL;
156 156
157 if (copy_from_user(args, arg, sizeof(args))) 157 if (copy_from_user(args, arg, sizeof(args)))
@@ -201,7 +201,7 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
201 /* Need code to retrieve data from check condition? */ 201 /* Need code to retrieve data from check condition? */
202 202
203 if ((argbuf) 203 if ((argbuf)
204 && copy_to_user((void *)(arg + sizeof(args)), argbuf, argsize)) 204 && copy_to_user(arg + sizeof(args), argbuf, argsize))
205 rc = -EFAULT; 205 rc = -EFAULT;
206error: 206error:
207 if (argbuf) 207 if (argbuf)
@@ -228,7 +228,7 @@ int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg)
228 u8 args[7]; 228 u8 args[7];
229 struct scsi_sense_hdr sshdr; 229 struct scsi_sense_hdr sshdr;
230 230
231 if (NULL == (void *)arg) 231 if (arg == NULL)
232 return -EINVAL; 232 return -EINVAL;
233 233
234 if (copy_from_user(args, arg, sizeof(args))) 234 if (copy_from_user(args, arg, sizeof(args)))
@@ -553,7 +553,7 @@ void ata_gen_ata_desc_sense(struct ata_queued_cmd *qc)
553 /* 553 /*
554 * Read the controller registers. 554 * Read the controller registers.
555 */ 555 */
556 assert(NULL != qc->ap->ops->tf_read); 556 WARN_ON(qc->ap->ops->tf_read == NULL);
557 qc->ap->ops->tf_read(qc->ap, tf); 557 qc->ap->ops->tf_read(qc->ap, tf);
558 558
559 /* 559 /*
@@ -628,7 +628,7 @@ void ata_gen_fixed_sense(struct ata_queued_cmd *qc)
628 /* 628 /*
629 * Read the controller registers. 629 * Read the controller registers.
630 */ 630 */
631 assert(NULL != qc->ap->ops->tf_read); 631 WARN_ON(qc->ap->ops->tf_read == NULL);
632 qc->ap->ops->tf_read(qc->ap, tf); 632 qc->ap->ops->tf_read(qc->ap, tf);
633 633
634 /* 634 /*
@@ -684,23 +684,23 @@ int ata_scsi_slave_config(struct scsi_device *sdev)
684 if (sdev->id < ATA_MAX_DEVICES) { 684 if (sdev->id < ATA_MAX_DEVICES) {
685 struct ata_port *ap; 685 struct ata_port *ap;
686 struct ata_device *dev; 686 struct ata_device *dev;
687 unsigned int max_sectors;
687 688
688 ap = (struct ata_port *) &sdev->host->hostdata[0]; 689 ap = (struct ata_port *) &sdev->host->hostdata[0];
689 dev = &ap->device[sdev->id]; 690 dev = &ap->device[sdev->id];
690 691
691 /* TODO: 1024 is an arbitrary number, not the 692 /* TODO: 2048 is an arbitrary number, not the
692 * hardware maximum. This should be increased to 693 * hardware maximum. This should be increased to
693 * 65534 when Jens Axboe's patch for dynamically 694 * 65534 when Jens Axboe's patch for dynamically
694 * determining max_sectors is merged. 695 * determining max_sectors is merged.
695 */ 696 */
696 if ((dev->flags & ATA_DFLAG_LBA48) && 697 max_sectors = ATA_MAX_SECTORS;
697 ((dev->flags & ATA_DFLAG_LOCK_SECTORS) == 0)) { 698 if (dev->flags & ATA_DFLAG_LBA48)
698 /* 699 max_sectors = 2048;
699 * do not overwrite sdev->host->max_sectors, since 700 if (dev->max_sectors)
700 * other drives on this host may not support LBA48 701 max_sectors = dev->max_sectors;
701 */ 702
702 blk_queue_max_sectors(sdev->request_queue, 2048); 703 blk_queue_max_sectors(sdev->request_queue, max_sectors);
703 }
704 704
705 /* 705 /*
706 * SATA DMA transfers must be multiples of 4 byte, so 706 * SATA DMA transfers must be multiples of 4 byte, so
@@ -717,6 +717,47 @@ int ata_scsi_slave_config(struct scsi_device *sdev)
717} 717}
718 718
719/** 719/**
720 * ata_scsi_timed_out - SCSI layer time out callback
721 * @cmd: timed out SCSI command
722 *
723 * Handles SCSI layer timeout. We race with normal completion of
724 * the qc for @cmd. If the qc is already gone, we lose and let
725 * the scsi command finish (EH_HANDLED). Otherwise, the qc has
726 * timed out and EH should be invoked. Prevent ata_qc_complete()
727 * from finishing it by setting EH_SCHEDULED and return
728 * EH_NOT_HANDLED.
729 *
730 * LOCKING:
731 * Called from timer context
732 *
733 * RETURNS:
734 * EH_HANDLED or EH_NOT_HANDLED
735 */
736enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
737{
738 struct Scsi_Host *host = cmd->device->host;
739 struct ata_port *ap = (struct ata_port *) &host->hostdata[0];
740 unsigned long flags;
741 struct ata_queued_cmd *qc;
742 enum scsi_eh_timer_return ret = EH_HANDLED;
743
744 DPRINTK("ENTER\n");
745
746 spin_lock_irqsave(&ap->host_set->lock, flags);
747 qc = ata_qc_from_tag(ap, ap->active_tag);
748 if (qc) {
749 WARN_ON(qc->scsicmd != cmd);
750 qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
751 qc->err_mask |= AC_ERR_TIMEOUT;
752 ret = EH_NOT_HANDLED;
753 }
754 spin_unlock_irqrestore(&ap->host_set->lock, flags);
755
756 DPRINTK("EXIT, ret=%d\n", ret);
757 return ret;
758}
759
760/**
720 * ata_scsi_error - SCSI layer error handler callback 761 * ata_scsi_error - SCSI layer error handler callback
721 * @host: SCSI host on which error occurred 762 * @host: SCSI host on which error occurred
722 * 763 *
@@ -732,23 +773,82 @@ int ata_scsi_slave_config(struct scsi_device *sdev)
732int ata_scsi_error(struct Scsi_Host *host) 773int ata_scsi_error(struct Scsi_Host *host)
733{ 774{
734 struct ata_port *ap; 775 struct ata_port *ap;
776 unsigned long flags;
735 777
736 DPRINTK("ENTER\n"); 778 DPRINTK("ENTER\n");
737 779
738 ap = (struct ata_port *) &host->hostdata[0]; 780 ap = (struct ata_port *) &host->hostdata[0];
781
782 spin_lock_irqsave(&ap->host_set->lock, flags);
783 WARN_ON(ap->flags & ATA_FLAG_IN_EH);
784 ap->flags |= ATA_FLAG_IN_EH;
785 WARN_ON(ata_qc_from_tag(ap, ap->active_tag) == NULL);
786 spin_unlock_irqrestore(&ap->host_set->lock, flags);
787
739 ap->ops->eng_timeout(ap); 788 ap->ops->eng_timeout(ap);
740 789
741 /* TODO: this is per-command; when queueing is supported 790 WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q));
742 * this code will either change or move to a more 791
743 * appropriate place 792 scsi_eh_flush_done_q(&ap->eh_done_q);
744 */ 793
745 host->host_failed--; 794 spin_lock_irqsave(&ap->host_set->lock, flags);
746 INIT_LIST_HEAD(&host->eh_cmd_q); 795 ap->flags &= ~ATA_FLAG_IN_EH;
796 spin_unlock_irqrestore(&ap->host_set->lock, flags);
747 797
748 DPRINTK("EXIT\n"); 798 DPRINTK("EXIT\n");
749 return 0; 799 return 0;
750} 800}
751 801
802static void ata_eh_scsidone(struct scsi_cmnd *scmd)
803{
804 /* nada */
805}
806
807static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
808{
809 struct ata_port *ap = qc->ap;
810 struct scsi_cmnd *scmd = qc->scsicmd;
811 unsigned long flags;
812
813 spin_lock_irqsave(&ap->host_set->lock, flags);
814 qc->scsidone = ata_eh_scsidone;
815 __ata_qc_complete(qc);
816 WARN_ON(ata_tag_valid(qc->tag));
817 spin_unlock_irqrestore(&ap->host_set->lock, flags);
818
819 scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
820}
821
822/**
823 * ata_eh_qc_complete - Complete an active ATA command from EH
824 * @qc: Command to complete
825 *
826 * Indicate to the mid and upper layers that an ATA command has
827 * completed. To be used from EH.
828 */
829void ata_eh_qc_complete(struct ata_queued_cmd *qc)
830{
831 struct scsi_cmnd *scmd = qc->scsicmd;
832 scmd->retries = scmd->allowed;
833 __ata_eh_qc_complete(qc);
834}
835
836/**
837 * ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
838 * @qc: Command to retry
839 *
840 * Indicate to the mid and upper layers that an ATA command
841 * should be retried. To be used from EH.
842 *
843 * SCSI midlayer limits the number of retries to scmd->allowed.
844 * This function might need to adjust scmd->retries for commands
845 * which get retried due to unrelated NCQ failures.
846 */
847void ata_eh_qc_retry(struct ata_queued_cmd *qc)
848{
849 __ata_eh_qc_complete(qc);
850}
851
752/** 852/**
753 * ata_scsi_start_stop_xlat - Translate SCSI START STOP UNIT command 853 * ata_scsi_start_stop_xlat - Translate SCSI START STOP UNIT command
754 * @qc: Storage for translated ATA taskfile 854 * @qc: Storage for translated ATA taskfile
@@ -985,9 +1085,13 @@ static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc, const u8 *sc
985 if (dev->flags & ATA_DFLAG_LBA) { 1085 if (dev->flags & ATA_DFLAG_LBA) {
986 tf->flags |= ATA_TFLAG_LBA; 1086 tf->flags |= ATA_TFLAG_LBA;
987 1087
988 if (dev->flags & ATA_DFLAG_LBA48) { 1088 if (lba_28_ok(block, n_block)) {
989 if (n_block > (64 * 1024)) 1089 /* use LBA28 */
990 goto invalid_fld; 1090 tf->command = ATA_CMD_VERIFY;
1091 tf->device |= (block >> 24) & 0xf;
1092 } else if (lba_48_ok(block, n_block)) {
1093 if (!(dev->flags & ATA_DFLAG_LBA48))
1094 goto out_of_range;
991 1095
992 /* use LBA48 */ 1096 /* use LBA48 */
993 tf->flags |= ATA_TFLAG_LBA48; 1097 tf->flags |= ATA_TFLAG_LBA48;
@@ -998,15 +1102,9 @@ static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc, const u8 *sc
998 tf->hob_lbah = (block >> 40) & 0xff; 1102 tf->hob_lbah = (block >> 40) & 0xff;
999 tf->hob_lbam = (block >> 32) & 0xff; 1103 tf->hob_lbam = (block >> 32) & 0xff;
1000 tf->hob_lbal = (block >> 24) & 0xff; 1104 tf->hob_lbal = (block >> 24) & 0xff;
1001 } else { 1105 } else
1002 if (n_block > 256) 1106 /* request too large even for LBA48 */
1003 goto invalid_fld; 1107 goto out_of_range;
1004
1005 /* use LBA28 */
1006 tf->command = ATA_CMD_VERIFY;
1007
1008 tf->device |= (block >> 24) & 0xf;
1009 }
1010 1108
1011 tf->nsect = n_block & 0xff; 1109 tf->nsect = n_block & 0xff;
1012 1110
@@ -1019,8 +1117,8 @@ static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc, const u8 *sc
1019 /* CHS */ 1117 /* CHS */
1020 u32 sect, head, cyl, track; 1118 u32 sect, head, cyl, track;
1021 1119
1022 if (n_block > 256) 1120 if (!lba_28_ok(block, n_block))
1023 goto invalid_fld; 1121 goto out_of_range;
1024 1122
1025 /* Convert LBA to CHS */ 1123 /* Convert LBA to CHS */
1026 track = (u32)block / dev->sectors; 1124 track = (u32)block / dev->sectors;
@@ -1139,9 +1237,11 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, const u8 *scsicm
1139 if (dev->flags & ATA_DFLAG_LBA) { 1237 if (dev->flags & ATA_DFLAG_LBA) {
1140 tf->flags |= ATA_TFLAG_LBA; 1238 tf->flags |= ATA_TFLAG_LBA;
1141 1239
1142 if (dev->flags & ATA_DFLAG_LBA48) { 1240 if (lba_28_ok(block, n_block)) {
1143 /* The request -may- be too large for LBA48. */ 1241 /* use LBA28 */
1144 if ((block >> 48) || (n_block > 65536)) 1242 tf->device |= (block >> 24) & 0xf;
1243 } else if (lba_48_ok(block, n_block)) {
1244 if (!(dev->flags & ATA_DFLAG_LBA48))
1145 goto out_of_range; 1245 goto out_of_range;
1146 1246
1147 /* use LBA48 */ 1247 /* use LBA48 */
@@ -1152,15 +1252,9 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, const u8 *scsicm
1152 tf->hob_lbah = (block >> 40) & 0xff; 1252 tf->hob_lbah = (block >> 40) & 0xff;
1153 tf->hob_lbam = (block >> 32) & 0xff; 1253 tf->hob_lbam = (block >> 32) & 0xff;
1154 tf->hob_lbal = (block >> 24) & 0xff; 1254 tf->hob_lbal = (block >> 24) & 0xff;
1155 } else { 1255 } else
1156 /* use LBA28 */ 1256 /* request too large even for LBA48 */
1157 1257 goto out_of_range;
1158 /* The request -may- be too large for LBA28. */
1159 if ((block >> 28) || (n_block > 256))
1160 goto out_of_range;
1161
1162 tf->device |= (block >> 24) & 0xf;
1163 }
1164 1258
1165 if (unlikely(ata_rwcmd_protocol(qc) < 0)) 1259 if (unlikely(ata_rwcmd_protocol(qc) < 0))
1166 goto invalid_fld; 1260 goto invalid_fld;
@@ -1178,7 +1272,7 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, const u8 *scsicm
1178 u32 sect, head, cyl, track; 1272 u32 sect, head, cyl, track;
1179 1273
1180 /* The request -may- be too large for CHS addressing. */ 1274 /* The request -may- be too large for CHS addressing. */
1181 if ((block >> 28) || (n_block > 256)) 1275 if (!lba_28_ok(block, n_block))
1182 goto out_of_range; 1276 goto out_of_range;
1183 1277
1184 if (unlikely(ata_rwcmd_protocol(qc) < 0)) 1278 if (unlikely(ata_rwcmd_protocol(qc) < 0))
@@ -1225,7 +1319,7 @@ nothing_to_do:
1225 return 1; 1319 return 1;
1226} 1320}
1227 1321
1228static int ata_scsi_qc_complete(struct ata_queued_cmd *qc) 1322static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
1229{ 1323{
1230 struct scsi_cmnd *cmd = qc->scsicmd; 1324 struct scsi_cmnd *cmd = qc->scsicmd;
1231 u8 *cdb = cmd->cmnd; 1325 u8 *cdb = cmd->cmnd;
@@ -1262,7 +1356,7 @@ static int ata_scsi_qc_complete(struct ata_queued_cmd *qc)
1262 1356
1263 qc->scsidone(cmd); 1357 qc->scsidone(cmd);
1264 1358
1265 return 0; 1359 ata_qc_free(qc);
1266} 1360}
1267 1361
1268/** 1362/**
@@ -1328,8 +1422,9 @@ static void ata_scsi_translate(struct ata_port *ap, struct ata_device *dev,
1328 goto early_finish; 1422 goto early_finish;
1329 1423
1330 /* select device, send command to hardware */ 1424 /* select device, send command to hardware */
1331 if (ata_qc_issue(qc)) 1425 qc->err_mask = ata_qc_issue(qc);
1332 goto err_did; 1426 if (qc->err_mask)
1427 ata_qc_complete(qc);
1333 1428
1334 VPRINTK("EXIT\n"); 1429 VPRINTK("EXIT\n");
1335 return; 1430 return;
@@ -1472,8 +1567,8 @@ unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf,
1472 1567
1473 if (buflen > 35) { 1568 if (buflen > 35) {
1474 memcpy(&rbuf[8], "ATA ", 8); 1569 memcpy(&rbuf[8], "ATA ", 8);
1475 ata_dev_id_string(args->id, &rbuf[16], ATA_ID_PROD_OFS, 16); 1570 ata_id_string(args->id, &rbuf[16], ATA_ID_PROD_OFS, 16);
1476 ata_dev_id_string(args->id, &rbuf[32], ATA_ID_FW_REV_OFS, 4); 1571 ata_id_string(args->id, &rbuf[32], ATA_ID_FW_REV_OFS, 4);
1477 if (rbuf[32] == 0 || rbuf[32] == ' ') 1572 if (rbuf[32] == 0 || rbuf[32] == ' ')
1478 memcpy(&rbuf[32], "n/a ", 4); 1573 memcpy(&rbuf[32], "n/a ", 4);
1479 } 1574 }
@@ -1547,8 +1642,8 @@ unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf,
1547 memcpy(rbuf, hdr, sizeof(hdr)); 1642 memcpy(rbuf, hdr, sizeof(hdr));
1548 1643
1549 if (buflen > (ATA_SERNO_LEN + 4 - 1)) 1644 if (buflen > (ATA_SERNO_LEN + 4 - 1))
1550 ata_dev_id_string(args->id, (unsigned char *) &rbuf[4], 1645 ata_id_string(args->id, (unsigned char *) &rbuf[4],
1551 ATA_ID_SERNO_OFS, ATA_SERNO_LEN); 1646 ATA_ID_SERNO_OFS, ATA_SERNO_LEN);
1552 1647
1553 return 0; 1648 return 0;
1554} 1649}
@@ -1713,15 +1808,12 @@ static int ata_dev_supports_fua(u16 *id)
1713 if (!ata_id_has_fua(id)) 1808 if (!ata_id_has_fua(id))
1714 return 0; 1809 return 0;
1715 1810
1716 model[40] = '\0'; 1811 ata_id_c_string(id, model, ATA_ID_PROD_OFS, sizeof(model));
1717 fw[8] = '\0'; 1812 ata_id_c_string(id, fw, ATA_ID_FW_REV_OFS, sizeof(fw));
1718
1719 ata_dev_id_string(id, model, ATA_ID_PROD_OFS, sizeof(model) - 1);
1720 ata_dev_id_string(id, fw, ATA_ID_FW_REV_OFS, sizeof(fw) - 1);
1721 1813
1722 if (strncmp(model, "Maxtor", 6)) 1814 if (strcmp(model, "Maxtor"))
1723 return 1; 1815 return 1;
1724 if (strncmp(fw, "BANC1G10", 8)) 1816 if (strcmp(fw, "BANC1G10"))
1725 return 1; 1817 return 1;
1726 1818
1727 return 0; /* blacklisted */ 1819 return 0; /* blacklisted */
@@ -2015,7 +2107,7 @@ void ata_scsi_badcmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *), u8
2015 done(cmd); 2107 done(cmd);
2016} 2108}
2017 2109
2018static int atapi_sense_complete(struct ata_queued_cmd *qc) 2110static void atapi_sense_complete(struct ata_queued_cmd *qc)
2019{ 2111{
2020 if (qc->err_mask && ((qc->err_mask & AC_ERR_DEV) == 0)) 2112 if (qc->err_mask && ((qc->err_mask & AC_ERR_DEV) == 0))
2021 /* FIXME: not quite right; we don't want the 2113 /* FIXME: not quite right; we don't want the
@@ -2026,7 +2118,7 @@ static int atapi_sense_complete(struct ata_queued_cmd *qc)
2026 ata_gen_ata_desc_sense(qc); 2118 ata_gen_ata_desc_sense(qc);
2027 2119
2028 qc->scsidone(qc->scsicmd); 2120 qc->scsidone(qc->scsicmd);
2029 return 0; 2121 ata_qc_free(qc);
2030} 2122}
2031 2123
2032/* is it pointless to prefer PIO for "safety reasons"? */ 2124/* is it pointless to prefer PIO for "safety reasons"? */
@@ -2056,7 +2148,7 @@ static void atapi_request_sense(struct ata_queued_cmd *qc)
2056 ata_sg_init_one(qc, cmd->sense_buffer, sizeof(cmd->sense_buffer)); 2148 ata_sg_init_one(qc, cmd->sense_buffer, sizeof(cmd->sense_buffer));
2057 qc->dma_dir = DMA_FROM_DEVICE; 2149 qc->dma_dir = DMA_FROM_DEVICE;
2058 2150
2059 memset(&qc->cdb, 0, ap->cdb_len); 2151 memset(&qc->cdb, 0, qc->dev->cdb_len);
2060 qc->cdb[0] = REQUEST_SENSE; 2152 qc->cdb[0] = REQUEST_SENSE;
2061 qc->cdb[4] = SCSI_SENSE_BUFFERSIZE; 2153 qc->cdb[4] = SCSI_SENSE_BUFFERSIZE;
2062 2154
@@ -2075,15 +2167,14 @@ static void atapi_request_sense(struct ata_queued_cmd *qc)
2075 2167
2076 qc->complete_fn = atapi_sense_complete; 2168 qc->complete_fn = atapi_sense_complete;
2077 2169
2078 if (ata_qc_issue(qc)) { 2170 qc->err_mask = ata_qc_issue(qc);
2079 qc->err_mask |= AC_ERR_OTHER; 2171 if (qc->err_mask)
2080 ata_qc_complete(qc); 2172 ata_qc_complete(qc);
2081 }
2082 2173
2083 DPRINTK("EXIT\n"); 2174 DPRINTK("EXIT\n");
2084} 2175}
2085 2176
2086static int atapi_qc_complete(struct ata_queued_cmd *qc) 2177static void atapi_qc_complete(struct ata_queued_cmd *qc)
2087{ 2178{
2088 struct scsi_cmnd *cmd = qc->scsicmd; 2179 struct scsi_cmnd *cmd = qc->scsicmd;
2089 unsigned int err_mask = qc->err_mask; 2180 unsigned int err_mask = qc->err_mask;
@@ -2093,7 +2184,7 @@ static int atapi_qc_complete(struct ata_queued_cmd *qc)
2093 if (unlikely(err_mask & AC_ERR_DEV)) { 2184 if (unlikely(err_mask & AC_ERR_DEV)) {
2094 cmd->result = SAM_STAT_CHECK_CONDITION; 2185 cmd->result = SAM_STAT_CHECK_CONDITION;
2095 atapi_request_sense(qc); 2186 atapi_request_sense(qc);
2096 return 1; 2187 return;
2097 } 2188 }
2098 2189
2099 else if (unlikely(err_mask)) 2190 else if (unlikely(err_mask))
@@ -2133,7 +2224,7 @@ static int atapi_qc_complete(struct ata_queued_cmd *qc)
2133 } 2224 }
2134 2225
2135 qc->scsidone(cmd); 2226 qc->scsidone(cmd);
2136 return 0; 2227 ata_qc_free(qc);
2137} 2228}
2138/** 2229/**
2139 * atapi_xlat - Initialize PACKET taskfile 2230 * atapi_xlat - Initialize PACKET taskfile
@@ -2159,7 +2250,7 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc, const u8 *scsicmd)
2159 if (ata_check_atapi_dma(qc)) 2250 if (ata_check_atapi_dma(qc))
2160 using_pio = 1; 2251 using_pio = 1;
2161 2252
2162 memcpy(&qc->cdb, scsicmd, qc->ap->cdb_len); 2253 memcpy(&qc->cdb, scsicmd, dev->cdb_len);
2163 2254
2164 qc->complete_fn = atapi_qc_complete; 2255 qc->complete_fn = atapi_qc_complete;
2165 2256
@@ -2519,7 +2610,8 @@ out_unlock:
2519 2610
2520/** 2611/**
2521 * ata_scsi_simulate - simulate SCSI command on ATA device 2612 * ata_scsi_simulate - simulate SCSI command on ATA device
2522 * @id: current IDENTIFY data for target device. 2613 * @ap: port the device is connected to
2614 * @dev: the target device
2523 * @cmd: SCSI command being sent to device. 2615 * @cmd: SCSI command being sent to device.
2524 * @done: SCSI command completion function. 2616 * @done: SCSI command completion function.
2525 * 2617 *
diff --git a/drivers/scsi/libata.h b/drivers/scsi/libata.h
index fddaf479a544..d822eba05f3c 100644
--- a/drivers/scsi/libata.h
+++ b/drivers/scsi/libata.h
@@ -46,7 +46,7 @@ extern struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
46 struct ata_device *dev); 46 struct ata_device *dev);
47extern int ata_rwcmd_protocol(struct ata_queued_cmd *qc); 47extern int ata_rwcmd_protocol(struct ata_queued_cmd *qc);
48extern void ata_qc_free(struct ata_queued_cmd *qc); 48extern void ata_qc_free(struct ata_queued_cmd *qc);
49extern int ata_qc_issue(struct ata_queued_cmd *qc); 49extern unsigned int ata_qc_issue(struct ata_queued_cmd *qc);
50extern int ata_check_atapi_dma(struct ata_queued_cmd *qc); 50extern int ata_check_atapi_dma(struct ata_queued_cmd *qc);
51extern void ata_dev_select(struct ata_port *ap, unsigned int device, 51extern void ata_dev_select(struct ata_port *ap, unsigned int device,
52 unsigned int wait, unsigned int can_sleep); 52 unsigned int wait, unsigned int can_sleep);
diff --git a/drivers/scsi/pdc_adma.c b/drivers/scsi/pdc_adma.c
index e8df0c9ec1e6..5f33cc932e70 100644
--- a/drivers/scsi/pdc_adma.c
+++ b/drivers/scsi/pdc_adma.c
@@ -131,7 +131,7 @@ static void adma_host_stop(struct ata_host_set *host_set);
131static void adma_port_stop(struct ata_port *ap); 131static void adma_port_stop(struct ata_port *ap);
132static void adma_phy_reset(struct ata_port *ap); 132static void adma_phy_reset(struct ata_port *ap);
133static void adma_qc_prep(struct ata_queued_cmd *qc); 133static void adma_qc_prep(struct ata_queued_cmd *qc);
134static int adma_qc_issue(struct ata_queued_cmd *qc); 134static unsigned int adma_qc_issue(struct ata_queued_cmd *qc);
135static int adma_check_atapi_dma(struct ata_queued_cmd *qc); 135static int adma_check_atapi_dma(struct ata_queued_cmd *qc);
136static void adma_bmdma_stop(struct ata_queued_cmd *qc); 136static void adma_bmdma_stop(struct ata_queued_cmd *qc);
137static u8 adma_bmdma_status(struct ata_port *ap); 137static u8 adma_bmdma_status(struct ata_port *ap);
@@ -143,11 +143,11 @@ static struct scsi_host_template adma_ata_sht = {
143 .name = DRV_NAME, 143 .name = DRV_NAME,
144 .ioctl = ata_scsi_ioctl, 144 .ioctl = ata_scsi_ioctl,
145 .queuecommand = ata_scsi_queuecmd, 145 .queuecommand = ata_scsi_queuecmd,
146 .eh_timed_out = ata_scsi_timed_out,
146 .eh_strategy_handler = ata_scsi_error, 147 .eh_strategy_handler = ata_scsi_error,
147 .can_queue = ATA_DEF_QUEUE, 148 .can_queue = ATA_DEF_QUEUE,
148 .this_id = ATA_SHT_THIS_ID, 149 .this_id = ATA_SHT_THIS_ID,
149 .sg_tablesize = LIBATA_MAX_PRD, 150 .sg_tablesize = LIBATA_MAX_PRD,
150 .max_sectors = ATA_MAX_SECTORS,
151 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 151 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
152 .emulated = ATA_SHT_EMULATED, 152 .emulated = ATA_SHT_EMULATED,
153 .use_clustering = ENABLE_CLUSTERING, 153 .use_clustering = ENABLE_CLUSTERING,
@@ -419,7 +419,7 @@ static inline void adma_packet_start(struct ata_queued_cmd *qc)
419 writew(aPIOMD4 | aGO, chan + ADMA_CONTROL); 419 writew(aPIOMD4 | aGO, chan + ADMA_CONTROL);
420} 420}
421 421
422static int adma_qc_issue(struct ata_queued_cmd *qc) 422static unsigned int adma_qc_issue(struct ata_queued_cmd *qc)
423{ 423{
424 struct adma_port_priv *pp = qc->ap->private_data; 424 struct adma_port_priv *pp = qc->ap->private_data;
425 425
diff --git a/drivers/scsi/sata_mv.c b/drivers/scsi/sata_mv.c
index 2770005324b4..aceaf56999a5 100644
--- a/drivers/scsi/sata_mv.c
+++ b/drivers/scsi/sata_mv.c
@@ -37,7 +37,7 @@
37#include <asm/io.h> 37#include <asm/io.h>
38 38
39#define DRV_NAME "sata_mv" 39#define DRV_NAME "sata_mv"
40#define DRV_VERSION "0.5" 40#define DRV_VERSION "0.6"
41 41
42enum { 42enum {
43 /* BAR's are enumerated in terms of pci_resource_start() terms */ 43 /* BAR's are enumerated in terms of pci_resource_start() terms */
@@ -228,7 +228,9 @@ enum {
228 MV_HP_ERRATA_50XXB2 = (1 << 2), 228 MV_HP_ERRATA_50XXB2 = (1 << 2),
229 MV_HP_ERRATA_60X1B2 = (1 << 3), 229 MV_HP_ERRATA_60X1B2 = (1 << 3),
230 MV_HP_ERRATA_60X1C0 = (1 << 4), 230 MV_HP_ERRATA_60X1C0 = (1 << 4),
231 MV_HP_50XX = (1 << 5), 231 MV_HP_ERRATA_XX42A0 = (1 << 5),
232 MV_HP_50XX = (1 << 6),
233 MV_HP_GEN_IIE = (1 << 7),
232 234
233 /* Port private flags (pp_flags) */ 235 /* Port private flags (pp_flags) */
234 MV_PP_FLAG_EDMA_EN = (1 << 0), 236 MV_PP_FLAG_EDMA_EN = (1 << 0),
@@ -237,6 +239,9 @@ enum {
237 239
238#define IS_50XX(hpriv) ((hpriv)->hp_flags & MV_HP_50XX) 240#define IS_50XX(hpriv) ((hpriv)->hp_flags & MV_HP_50XX)
239#define IS_60XX(hpriv) (((hpriv)->hp_flags & MV_HP_50XX) == 0) 241#define IS_60XX(hpriv) (((hpriv)->hp_flags & MV_HP_50XX) == 0)
242#define IS_GEN_I(hpriv) IS_50XX(hpriv)
243#define IS_GEN_II(hpriv) IS_60XX(hpriv)
244#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
240 245
241enum { 246enum {
242 /* Our DMA boundary is determined by an ePRD being unable to handle 247 /* Our DMA boundary is determined by an ePRD being unable to handle
@@ -255,6 +260,8 @@ enum chip_type {
255 chip_5080, 260 chip_5080,
256 chip_604x, 261 chip_604x,
257 chip_608x, 262 chip_608x,
263 chip_6042,
264 chip_7042,
258}; 265};
259 266
260/* Command ReQuest Block: 32B */ 267/* Command ReQuest Block: 32B */
@@ -265,6 +272,14 @@ struct mv_crqb {
265 u16 ata_cmd[11]; 272 u16 ata_cmd[11];
266}; 273};
267 274
275struct mv_crqb_iie {
276 u32 addr;
277 u32 addr_hi;
278 u32 flags;
279 u32 len;
280 u32 ata_cmd[4];
281};
282
268/* Command ResPonse Block: 8B */ 283/* Command ResPonse Block: 8B */
269struct mv_crpb { 284struct mv_crpb {
270 u16 id; 285 u16 id;
@@ -328,7 +343,8 @@ static void mv_host_stop(struct ata_host_set *host_set);
328static int mv_port_start(struct ata_port *ap); 343static int mv_port_start(struct ata_port *ap);
329static void mv_port_stop(struct ata_port *ap); 344static void mv_port_stop(struct ata_port *ap);
330static void mv_qc_prep(struct ata_queued_cmd *qc); 345static void mv_qc_prep(struct ata_queued_cmd *qc);
331static int mv_qc_issue(struct ata_queued_cmd *qc); 346static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
347static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
332static irqreturn_t mv_interrupt(int irq, void *dev_instance, 348static irqreturn_t mv_interrupt(int irq, void *dev_instance,
333 struct pt_regs *regs); 349 struct pt_regs *regs);
334static void mv_eng_timeout(struct ata_port *ap); 350static void mv_eng_timeout(struct ata_port *ap);
@@ -362,11 +378,11 @@ static struct scsi_host_template mv_sht = {
362 .name = DRV_NAME, 378 .name = DRV_NAME,
363 .ioctl = ata_scsi_ioctl, 379 .ioctl = ata_scsi_ioctl,
364 .queuecommand = ata_scsi_queuecmd, 380 .queuecommand = ata_scsi_queuecmd,
381 .eh_timed_out = ata_scsi_timed_out,
365 .eh_strategy_handler = ata_scsi_error, 382 .eh_strategy_handler = ata_scsi_error,
366 .can_queue = MV_USE_Q_DEPTH, 383 .can_queue = MV_USE_Q_DEPTH,
367 .this_id = ATA_SHT_THIS_ID, 384 .this_id = ATA_SHT_THIS_ID,
368 .sg_tablesize = MV_MAX_SG_CT / 2, 385 .sg_tablesize = MV_MAX_SG_CT / 2,
369 .max_sectors = ATA_MAX_SECTORS,
370 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 386 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
371 .emulated = ATA_SHT_EMULATED, 387 .emulated = ATA_SHT_EMULATED,
372 .use_clustering = ATA_SHT_USE_CLUSTERING, 388 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -430,6 +446,33 @@ static const struct ata_port_operations mv6_ops = {
430 .host_stop = mv_host_stop, 446 .host_stop = mv_host_stop,
431}; 447};
432 448
449static const struct ata_port_operations mv_iie_ops = {
450 .port_disable = ata_port_disable,
451
452 .tf_load = ata_tf_load,
453 .tf_read = ata_tf_read,
454 .check_status = ata_check_status,
455 .exec_command = ata_exec_command,
456 .dev_select = ata_std_dev_select,
457
458 .phy_reset = mv_phy_reset,
459
460 .qc_prep = mv_qc_prep_iie,
461 .qc_issue = mv_qc_issue,
462
463 .eng_timeout = mv_eng_timeout,
464
465 .irq_handler = mv_interrupt,
466 .irq_clear = mv_irq_clear,
467
468 .scr_read = mv_scr_read,
469 .scr_write = mv_scr_write,
470
471 .port_start = mv_port_start,
472 .port_stop = mv_port_stop,
473 .host_stop = mv_host_stop,
474};
475
433static const struct ata_port_info mv_port_info[] = { 476static const struct ata_port_info mv_port_info[] = {
434 { /* chip_504x */ 477 { /* chip_504x */
435 .sht = &mv_sht, 478 .sht = &mv_sht,
@@ -467,6 +510,21 @@ static const struct ata_port_info mv_port_info[] = {
467 .udma_mask = 0x7f, /* udma0-6 */ 510 .udma_mask = 0x7f, /* udma0-6 */
468 .port_ops = &mv6_ops, 511 .port_ops = &mv6_ops,
469 }, 512 },
513 { /* chip_6042 */
514 .sht = &mv_sht,
515 .host_flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
516 .pio_mask = 0x1f, /* pio0-4 */
517 .udma_mask = 0x7f, /* udma0-6 */
518 .port_ops = &mv_iie_ops,
519 },
520 { /* chip_7042 */
521 .sht = &mv_sht,
522 .host_flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
523 MV_FLAG_DUAL_HC),
524 .pio_mask = 0x1f, /* pio0-4 */
525 .udma_mask = 0x7f, /* udma0-6 */
526 .port_ops = &mv_iie_ops,
527 },
470}; 528};
471 529
472static const struct pci_device_id mv_pci_tbl[] = { 530static const struct pci_device_id mv_pci_tbl[] = {
@@ -477,6 +535,7 @@ static const struct pci_device_id mv_pci_tbl[] = {
477 535
478 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6040), 0, 0, chip_604x}, 536 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6040), 0, 0, chip_604x},
479 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6041), 0, 0, chip_604x}, 537 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6041), 0, 0, chip_604x},
538 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6042), 0, 0, chip_6042},
480 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6080), 0, 0, chip_608x}, 539 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6080), 0, 0, chip_608x},
481 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6081), 0, 0, chip_608x}, 540 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6081), 0, 0, chip_608x},
482 541
@@ -572,8 +631,8 @@ static void mv_irq_clear(struct ata_port *ap)
572 * @base: port base address 631 * @base: port base address
573 * @pp: port private data 632 * @pp: port private data
574 * 633 *
575 * Verify the local cache of the eDMA state is accurate with an 634 * Verify the local cache of the eDMA state is accurate with a
576 * assert. 635 * WARN_ON.
577 * 636 *
578 * LOCKING: 637 * LOCKING:
579 * Inherited from caller. 638 * Inherited from caller.
@@ -584,15 +643,15 @@ static void mv_start_dma(void __iomem *base, struct mv_port_priv *pp)
584 writelfl(EDMA_EN, base + EDMA_CMD_OFS); 643 writelfl(EDMA_EN, base + EDMA_CMD_OFS);
585 pp->pp_flags |= MV_PP_FLAG_EDMA_EN; 644 pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
586 } 645 }
587 assert(EDMA_EN & readl(base + EDMA_CMD_OFS)); 646 WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));
588} 647}
589 648
590/** 649/**
591 * mv_stop_dma - Disable eDMA engine 650 * mv_stop_dma - Disable eDMA engine
592 * @ap: ATA channel to manipulate 651 * @ap: ATA channel to manipulate
593 * 652 *
594 * Verify the local cache of the eDMA state is accurate with an 653 * Verify the local cache of the eDMA state is accurate with a
595 * assert. 654 * WARN_ON.
596 * 655 *
597 * LOCKING: 656 * LOCKING:
598 * Inherited from caller. 657 * Inherited from caller.
@@ -610,7 +669,7 @@ static void mv_stop_dma(struct ata_port *ap)
610 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS); 669 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
611 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; 670 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
612 } else { 671 } else {
613 assert(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS))); 672 WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
614 } 673 }
615 674
616 /* now properly wait for the eDMA to stop */ 675 /* now properly wait for the eDMA to stop */
@@ -773,6 +832,33 @@ static inline void mv_priv_free(struct mv_port_priv *pp, struct device *dev)
773 dma_free_coherent(dev, MV_PORT_PRIV_DMA_SZ, pp->crpb, pp->crpb_dma); 832 dma_free_coherent(dev, MV_PORT_PRIV_DMA_SZ, pp->crpb, pp->crpb_dma);
774} 833}
775 834
835static void mv_edma_cfg(struct mv_host_priv *hpriv, void __iomem *port_mmio)
836{
837 u32 cfg = readl(port_mmio + EDMA_CFG_OFS);
838
839 /* set up non-NCQ EDMA configuration */
840 cfg &= ~0x1f; /* clear queue depth */
841 cfg &= ~EDMA_CFG_NCQ; /* clear NCQ mode */
842 cfg &= ~(1 << 9); /* disable equeue */
843
844 if (IS_GEN_I(hpriv))
845 cfg |= (1 << 8); /* enab config burst size mask */
846
847 else if (IS_GEN_II(hpriv))
848 cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
849
850 else if (IS_GEN_IIE(hpriv)) {
851 cfg |= (1 << 23); /* dis RX PM port mask */
852 cfg &= ~(1 << 16); /* dis FIS-based switching (for now) */
853 cfg &= ~(1 << 19); /* dis 128-entry queue (for now?) */
854 cfg |= (1 << 18); /* enab early completion */
855 cfg |= (1 << 17); /* enab host q cache */
856 cfg |= (1 << 22); /* enab cutthrough */
857 }
858
859 writelfl(cfg, port_mmio + EDMA_CFG_OFS);
860}
861
776/** 862/**
777 * mv_port_start - Port specific init/start routine. 863 * mv_port_start - Port specific init/start routine.
778 * @ap: ATA channel to manipulate 864 * @ap: ATA channel to manipulate
@@ -786,6 +872,7 @@ static inline void mv_priv_free(struct mv_port_priv *pp, struct device *dev)
786static int mv_port_start(struct ata_port *ap) 872static int mv_port_start(struct ata_port *ap)
787{ 873{
788 struct device *dev = ap->host_set->dev; 874 struct device *dev = ap->host_set->dev;
875 struct mv_host_priv *hpriv = ap->host_set->private_data;
789 struct mv_port_priv *pp; 876 struct mv_port_priv *pp;
790 void __iomem *port_mmio = mv_ap_base(ap); 877 void __iomem *port_mmio = mv_ap_base(ap);
791 void *mem; 878 void *mem;
@@ -829,17 +916,26 @@ static int mv_port_start(struct ata_port *ap)
829 pp->sg_tbl = mem; 916 pp->sg_tbl = mem;
830 pp->sg_tbl_dma = mem_dma; 917 pp->sg_tbl_dma = mem_dma;
831 918
832 writelfl(EDMA_CFG_Q_DEPTH | EDMA_CFG_RD_BRST_EXT | 919 mv_edma_cfg(hpriv, port_mmio);
833 EDMA_CFG_WR_BUFF_LEN, port_mmio + EDMA_CFG_OFS);
834 920
835 writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS); 921 writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
836 writelfl(pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK, 922 writelfl(pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK,
837 port_mmio + EDMA_REQ_Q_IN_PTR_OFS); 923 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
838 924
839 writelfl(0, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS); 925 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
840 writelfl(0, port_mmio + EDMA_RSP_Q_IN_PTR_OFS); 926 writelfl(pp->crqb_dma & 0xffffffff,
927 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
928 else
929 writelfl(0, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
841 930
842 writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS); 931 writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
932
933 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
934 writelfl(pp->crpb_dma & 0xffffffff,
935 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
936 else
937 writelfl(0, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
938
843 writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK, 939 writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK,
844 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); 940 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
845 941
@@ -960,21 +1056,19 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
960 struct ata_taskfile *tf; 1056 struct ata_taskfile *tf;
961 u16 flags = 0; 1057 u16 flags = 0;
962 1058
963 if (ATA_PROT_DMA != qc->tf.protocol) { 1059 if (ATA_PROT_DMA != qc->tf.protocol)
964 return; 1060 return;
965 }
966 1061
967 /* the req producer index should be the same as we remember it */ 1062 /* the req producer index should be the same as we remember it */
968 assert(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >> 1063 WARN_ON(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >>
969 EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) == 1064 EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
970 pp->req_producer); 1065 pp->req_producer);
971 1066
972 /* Fill in command request block 1067 /* Fill in command request block
973 */ 1068 */
974 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) { 1069 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
975 flags |= CRQB_FLAG_READ; 1070 flags |= CRQB_FLAG_READ;
976 } 1071 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
977 assert(MV_MAX_Q_DEPTH > qc->tag);
978 flags |= qc->tag << CRQB_TAG_SHIFT; 1072 flags |= qc->tag << CRQB_TAG_SHIFT;
979 1073
980 pp->crqb[pp->req_producer].sg_addr = 1074 pp->crqb[pp->req_producer].sg_addr =
@@ -1029,9 +1123,76 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
1029 mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0); 1123 mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
1030 mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */ 1124 mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */
1031 1125
1032 if (!(qc->flags & ATA_QCFLAG_DMAMAP)) { 1126 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1127 return;
1128 mv_fill_sg(qc);
1129}
1130
1131/**
1132 * mv_qc_prep_iie - Host specific command preparation.
1133 * @qc: queued command to prepare
1134 *
1135 * This routine simply redirects to the general purpose routine
1136 * if command is not DMA. Else, it handles prep of the CRQB
1137 * (command request block), does some sanity checking, and calls
1138 * the SG load routine.
1139 *
1140 * LOCKING:
1141 * Inherited from caller.
1142 */
1143static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
1144{
1145 struct ata_port *ap = qc->ap;
1146 struct mv_port_priv *pp = ap->private_data;
1147 struct mv_crqb_iie *crqb;
1148 struct ata_taskfile *tf;
1149 u32 flags = 0;
1150
1151 if (ATA_PROT_DMA != qc->tf.protocol)
1152 return;
1153
1154 /* the req producer index should be the same as we remember it */
1155 WARN_ON(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >>
1156 EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
1157 pp->req_producer);
1158
1159 /* Fill in Gen IIE command request block
1160 */
1161 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1162 flags |= CRQB_FLAG_READ;
1163
1164 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1165 flags |= qc->tag << CRQB_TAG_SHIFT;
1166
1167 crqb = (struct mv_crqb_iie *) &pp->crqb[pp->req_producer];
1168 crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
1169 crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
1170 crqb->flags = cpu_to_le32(flags);
1171
1172 tf = &qc->tf;
1173 crqb->ata_cmd[0] = cpu_to_le32(
1174 (tf->command << 16) |
1175 (tf->feature << 24)
1176 );
1177 crqb->ata_cmd[1] = cpu_to_le32(
1178 (tf->lbal << 0) |
1179 (tf->lbam << 8) |
1180 (tf->lbah << 16) |
1181 (tf->device << 24)
1182 );
1183 crqb->ata_cmd[2] = cpu_to_le32(
1184 (tf->hob_lbal << 0) |
1185 (tf->hob_lbam << 8) |
1186 (tf->hob_lbah << 16) |
1187 (tf->hob_feature << 24)
1188 );
1189 crqb->ata_cmd[3] = cpu_to_le32(
1190 (tf->nsect << 0) |
1191 (tf->hob_nsect << 8)
1192 );
1193
1194 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1033 return; 1195 return;
1034 }
1035 mv_fill_sg(qc); 1196 mv_fill_sg(qc);
1036} 1197}
1037 1198
@@ -1047,7 +1208,7 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
1047 * LOCKING: 1208 * LOCKING:
1048 * Inherited from caller. 1209 * Inherited from caller.
1049 */ 1210 */
1050static int mv_qc_issue(struct ata_queued_cmd *qc) 1211static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
1051{ 1212{
1052 void __iomem *port_mmio = mv_ap_base(qc->ap); 1213 void __iomem *port_mmio = mv_ap_base(qc->ap);
1053 struct mv_port_priv *pp = qc->ap->private_data; 1214 struct mv_port_priv *pp = qc->ap->private_data;
@@ -1065,12 +1226,12 @@ static int mv_qc_issue(struct ata_queued_cmd *qc)
1065 in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS); 1226 in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
1066 1227
1067 /* the req producer index should be the same as we remember it */ 1228 /* the req producer index should be the same as we remember it */
1068 assert(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) == 1229 WARN_ON(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
1069 pp->req_producer); 1230 pp->req_producer);
1070 /* until we do queuing, the queue should be empty at this point */ 1231 /* until we do queuing, the queue should be empty at this point */
1071 assert(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) == 1232 WARN_ON(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
1072 ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS) >> 1233 ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS) >>
1073 EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK)); 1234 EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
1074 1235
1075 mv_inc_q_index(&pp->req_producer); /* now incr producer index */ 1236 mv_inc_q_index(&pp->req_producer); /* now incr producer index */
1076 1237
@@ -1090,7 +1251,7 @@ static int mv_qc_issue(struct ata_queued_cmd *qc)
1090 * 1251 *
1091 * This routine is for use when the port is in DMA mode, when it 1252 * This routine is for use when the port is in DMA mode, when it
1092 * will be using the CRPB (command response block) method of 1253 * will be using the CRPB (command response block) method of
1093 * returning command completion information. We assert indices 1254 * returning command completion information. We check indices
1094 * are good, grab status, and bump the response consumer index to 1255 * are good, grab status, and bump the response consumer index to
1095 * prove that we're up to date. 1256 * prove that we're up to date.
1096 * 1257 *
@@ -1106,16 +1267,16 @@ static u8 mv_get_crpb_status(struct ata_port *ap)
1106 out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); 1267 out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
1107 1268
1108 /* the response consumer index should be the same as we remember it */ 1269 /* the response consumer index should be the same as we remember it */
1109 assert(((out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) == 1270 WARN_ON(((out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
1110 pp->rsp_consumer); 1271 pp->rsp_consumer);
1111 1272
1112 /* increment our consumer index... */ 1273 /* increment our consumer index... */
1113 pp->rsp_consumer = mv_inc_q_index(&pp->rsp_consumer); 1274 pp->rsp_consumer = mv_inc_q_index(&pp->rsp_consumer);
1114 1275
1115 /* and, until we do NCQ, there should only be 1 CRPB waiting */ 1276 /* and, until we do NCQ, there should only be 1 CRPB waiting */
1116 assert(((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS) >> 1277 WARN_ON(((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS) >>
1117 EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) == 1278 EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
1118 pp->rsp_consumer); 1279 pp->rsp_consumer);
1119 1280
1120 /* write out our inc'd consumer index so EDMA knows we're caught up */ 1281 /* write out our inc'd consumer index so EDMA knows we're caught up */
1121 out_ptr &= EDMA_RSP_Q_BASE_LO_MASK; 1282 out_ptr &= EDMA_RSP_Q_BASE_LO_MASK;
@@ -1681,6 +1842,12 @@ static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1681 m2 |= hpriv->signal[port].pre; 1842 m2 |= hpriv->signal[port].pre;
1682 m2 &= ~(1 << 16); 1843 m2 &= ~(1 << 16);
1683 1844
1845 /* according to mvSata 3.6.1, some IIE values are fixed */
1846 if (IS_GEN_IIE(hpriv)) {
1847 m2 &= ~0xC30FF01F;
1848 m2 |= 0x0000900F;
1849 }
1850
1684 writel(m2, port_mmio + PHY_MODE2); 1851 writel(m2, port_mmio + PHY_MODE2);
1685} 1852}
1686 1853
@@ -1846,7 +2013,6 @@ static void mv_phy_reset(struct ata_port *ap)
1846static void mv_eng_timeout(struct ata_port *ap) 2013static void mv_eng_timeout(struct ata_port *ap)
1847{ 2014{
1848 struct ata_queued_cmd *qc; 2015 struct ata_queued_cmd *qc;
1849 unsigned long flags;
1850 2016
1851 printk(KERN_ERR "ata%u: Entering mv_eng_timeout\n",ap->id); 2017 printk(KERN_ERR "ata%u: Entering mv_eng_timeout\n",ap->id);
1852 DPRINTK("All regs @ start of eng_timeout\n"); 2018 DPRINTK("All regs @ start of eng_timeout\n");
@@ -1861,22 +2027,8 @@ static void mv_eng_timeout(struct ata_port *ap)
1861 mv_err_intr(ap); 2027 mv_err_intr(ap);
1862 mv_stop_and_reset(ap); 2028 mv_stop_and_reset(ap);
1863 2029
1864 if (!qc) { 2030 qc->err_mask |= AC_ERR_TIMEOUT;
1865 printk(KERN_ERR "ata%u: BUG: timeout without command\n", 2031 ata_eh_qc_complete(qc);
1866 ap->id);
1867 } else {
1868 /* hack alert! We cannot use the supplied completion
1869 * function from inside the ->eh_strategy_handler() thread.
1870 * libata is the only user of ->eh_strategy_handler() in
1871 * any kernel, so the default scsi_done() assumes it is
1872 * not being called from the SCSI EH.
1873 */
1874 spin_lock_irqsave(&ap->host_set->lock, flags);
1875 qc->scsidone = scsi_finish_command;
1876 qc->err_mask |= AC_ERR_OTHER;
1877 ata_qc_complete(qc);
1878 spin_unlock_irqrestore(&ap->host_set->lock, flags);
1879 }
1880} 2032}
1881 2033
1882/** 2034/**
@@ -1995,6 +2147,27 @@ static int mv_chip_id(struct pci_dev *pdev, struct mv_host_priv *hpriv,
1995 } 2147 }
1996 break; 2148 break;
1997 2149
2150 case chip_7042:
2151 case chip_6042:
2152 hpriv->ops = &mv6xxx_ops;
2153
2154 hp_flags |= MV_HP_GEN_IIE;
2155
2156 switch (rev_id) {
2157 case 0x0:
2158 hp_flags |= MV_HP_ERRATA_XX42A0;
2159 break;
2160 case 0x1:
2161 hp_flags |= MV_HP_ERRATA_60X1C0;
2162 break;
2163 default:
2164 dev_printk(KERN_WARNING, &pdev->dev,
2165 "Applying 60X1C0 workarounds to unknown rev\n");
2166 hp_flags |= MV_HP_ERRATA_60X1C0;
2167 break;
2168 }
2169 break;
2170
1998 default: 2171 default:
1999 printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n", board_idx); 2172 printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n", board_idx);
2000 return 1; 2173 return 1;
diff --git a/drivers/scsi/sata_nv.c b/drivers/scsi/sata_nv.c
index bbbb55eeb73a..caffadc2e0ae 100644
--- a/drivers/scsi/sata_nv.c
+++ b/drivers/scsi/sata_nv.c
@@ -229,11 +229,11 @@ static struct scsi_host_template nv_sht = {
229 .name = DRV_NAME, 229 .name = DRV_NAME,
230 .ioctl = ata_scsi_ioctl, 230 .ioctl = ata_scsi_ioctl,
231 .queuecommand = ata_scsi_queuecmd, 231 .queuecommand = ata_scsi_queuecmd,
232 .eh_timed_out = ata_scsi_timed_out,
232 .eh_strategy_handler = ata_scsi_error, 233 .eh_strategy_handler = ata_scsi_error,
233 .can_queue = ATA_DEF_QUEUE, 234 .can_queue = ATA_DEF_QUEUE,
234 .this_id = ATA_SHT_THIS_ID, 235 .this_id = ATA_SHT_THIS_ID,
235 .sg_tablesize = LIBATA_MAX_PRD, 236 .sg_tablesize = LIBATA_MAX_PRD,
236 .max_sectors = ATA_MAX_SECTORS,
237 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 237 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
238 .emulated = ATA_SHT_EMULATED, 238 .emulated = ATA_SHT_EMULATED,
239 .use_clustering = ATA_SHT_USE_CLUSTERING, 239 .use_clustering = ATA_SHT_USE_CLUSTERING,
diff --git a/drivers/scsi/sata_promise.c b/drivers/scsi/sata_promise.c
index b0b0a69b3563..ba2b7a0983db 100644
--- a/drivers/scsi/sata_promise.c
+++ b/drivers/scsi/sata_promise.c
@@ -46,7 +46,7 @@
46#include "sata_promise.h" 46#include "sata_promise.h"
47 47
48#define DRV_NAME "sata_promise" 48#define DRV_NAME "sata_promise"
49#define DRV_VERSION "1.03" 49#define DRV_VERSION "1.04"
50 50
51 51
52enum { 52enum {
@@ -58,6 +58,7 @@ enum {
58 PDC_GLOBAL_CTL = 0x48, /* Global control/status (per port) */ 58 PDC_GLOBAL_CTL = 0x48, /* Global control/status (per port) */
59 PDC_CTLSTAT = 0x60, /* IDE control and status (per port) */ 59 PDC_CTLSTAT = 0x60, /* IDE control and status (per port) */
60 PDC_SATA_PLUG_CSR = 0x6C, /* SATA Plug control/status reg */ 60 PDC_SATA_PLUG_CSR = 0x6C, /* SATA Plug control/status reg */
61 PDC2_SATA_PLUG_CSR = 0x60, /* SATAII Plug control/status reg */
61 PDC_SLEW_CTL = 0x470, /* slew rate control reg */ 62 PDC_SLEW_CTL = 0x470, /* slew rate control reg */
62 63
63 PDC_ERR_MASK = (1<<19) | (1<<20) | (1<<21) | (1<<22) | 64 PDC_ERR_MASK = (1<<19) | (1<<20) | (1<<21) | (1<<22) |
@@ -67,8 +68,10 @@ enum {
67 board_20319 = 1, /* FastTrak S150 TX4 */ 68 board_20319 = 1, /* FastTrak S150 TX4 */
68 board_20619 = 2, /* FastTrak TX4000 */ 69 board_20619 = 2, /* FastTrak TX4000 */
69 board_20771 = 3, /* FastTrak TX2300 */ 70 board_20771 = 3, /* FastTrak TX2300 */
71 board_2057x = 4, /* SATAII150 Tx2plus */
72 board_40518 = 5, /* SATAII150 Tx4 */
70 73
71 PDC_HAS_PATA = (1 << 1), /* PDC20375 has PATA */ 74 PDC_HAS_PATA = (1 << 1), /* PDC20375/20575 has PATA */
72 75
73 PDC_RESET = (1 << 11), /* HDMA reset */ 76 PDC_RESET = (1 << 11), /* HDMA reset */
74 77
@@ -82,6 +85,10 @@ struct pdc_port_priv {
82 dma_addr_t pkt_dma; 85 dma_addr_t pkt_dma;
83}; 86};
84 87
88struct pdc_host_priv {
89 int hotplug_offset;
90};
91
85static u32 pdc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg); 92static u32 pdc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg);
86static void pdc_sata_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); 93static void pdc_sata_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
87static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); 94static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
@@ -95,7 +102,8 @@ static void pdc_qc_prep(struct ata_queued_cmd *qc);
95static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf); 102static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
96static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf); 103static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
97static void pdc_irq_clear(struct ata_port *ap); 104static void pdc_irq_clear(struct ata_port *ap);
98static int pdc_qc_issue_prot(struct ata_queued_cmd *qc); 105static unsigned int pdc_qc_issue_prot(struct ata_queued_cmd *qc);
106static void pdc_host_stop(struct ata_host_set *host_set);
99 107
100 108
101static struct scsi_host_template pdc_ata_sht = { 109static struct scsi_host_template pdc_ata_sht = {
@@ -103,11 +111,11 @@ static struct scsi_host_template pdc_ata_sht = {
103 .name = DRV_NAME, 111 .name = DRV_NAME,
104 .ioctl = ata_scsi_ioctl, 112 .ioctl = ata_scsi_ioctl,
105 .queuecommand = ata_scsi_queuecmd, 113 .queuecommand = ata_scsi_queuecmd,
114 .eh_timed_out = ata_scsi_timed_out,
106 .eh_strategy_handler = ata_scsi_error, 115 .eh_strategy_handler = ata_scsi_error,
107 .can_queue = ATA_DEF_QUEUE, 116 .can_queue = ATA_DEF_QUEUE,
108 .this_id = ATA_SHT_THIS_ID, 117 .this_id = ATA_SHT_THIS_ID,
109 .sg_tablesize = LIBATA_MAX_PRD, 118 .sg_tablesize = LIBATA_MAX_PRD,
110 .max_sectors = ATA_MAX_SECTORS,
111 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 119 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
112 .emulated = ATA_SHT_EMULATED, 120 .emulated = ATA_SHT_EMULATED,
113 .use_clustering = ATA_SHT_USE_CLUSTERING, 121 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -137,7 +145,7 @@ static const struct ata_port_operations pdc_sata_ops = {
137 .scr_write = pdc_sata_scr_write, 145 .scr_write = pdc_sata_scr_write,
138 .port_start = pdc_port_start, 146 .port_start = pdc_port_start,
139 .port_stop = pdc_port_stop, 147 .port_stop = pdc_port_stop,
140 .host_stop = ata_pci_host_stop, 148 .host_stop = pdc_host_stop,
141}; 149};
142 150
143static const struct ata_port_operations pdc_pata_ops = { 151static const struct ata_port_operations pdc_pata_ops = {
@@ -158,7 +166,7 @@ static const struct ata_port_operations pdc_pata_ops = {
158 166
159 .port_start = pdc_port_start, 167 .port_start = pdc_port_start,
160 .port_stop = pdc_port_stop, 168 .port_stop = pdc_port_stop,
161 .host_stop = ata_pci_host_stop, 169 .host_stop = pdc_host_stop,
162}; 170};
163 171
164static const struct ata_port_info pdc_port_info[] = { 172static const struct ata_port_info pdc_port_info[] = {
@@ -201,6 +209,26 @@ static const struct ata_port_info pdc_port_info[] = {
201 .udma_mask = 0x7f, /* udma0-6 ; FIXME */ 209 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
202 .port_ops = &pdc_sata_ops, 210 .port_ops = &pdc_sata_ops,
203 }, 211 },
212
213 /* board_2057x */
214 {
215 .sht = &pdc_ata_sht,
216 .host_flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA,
217 .pio_mask = 0x1f, /* pio0-4 */
218 .mwdma_mask = 0x07, /* mwdma0-2 */
219 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
220 .port_ops = &pdc_sata_ops,
221 },
222
223 /* board_40518 */
224 {
225 .sht = &pdc_ata_sht,
226 .host_flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA,
227 .pio_mask = 0x1f, /* pio0-4 */
228 .mwdma_mask = 0x07, /* mwdma0-2 */
229 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
230 .port_ops = &pdc_sata_ops,
231 },
204}; 232};
205 233
206static const struct pci_device_id pdc_ata_pci_tbl[] = { 234static const struct pci_device_id pdc_ata_pci_tbl[] = {
@@ -217,9 +245,9 @@ static const struct pci_device_id pdc_ata_pci_tbl[] = {
217 { PCI_VENDOR_ID_PROMISE, 0x3376, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 245 { PCI_VENDOR_ID_PROMISE, 0x3376, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
218 board_2037x }, 246 board_2037x },
219 { PCI_VENDOR_ID_PROMISE, 0x3574, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 247 { PCI_VENDOR_ID_PROMISE, 0x3574, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
220 board_2037x }, 248 board_2057x },
221 { PCI_VENDOR_ID_PROMISE, 0x3d75, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 249 { PCI_VENDOR_ID_PROMISE, 0x3d75, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
222 board_2037x }, 250 board_2057x },
223 { PCI_VENDOR_ID_PROMISE, 0x3d73, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 251 { PCI_VENDOR_ID_PROMISE, 0x3d73, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
224 board_2037x }, 252 board_2037x },
225 253
@@ -232,7 +260,7 @@ static const struct pci_device_id pdc_ata_pci_tbl[] = {
232 { PCI_VENDOR_ID_PROMISE, 0x3d17, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 260 { PCI_VENDOR_ID_PROMISE, 0x3d17, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
233 board_20319 }, 261 board_20319 },
234 { PCI_VENDOR_ID_PROMISE, 0x3d18, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 262 { PCI_VENDOR_ID_PROMISE, 0x3d18, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
235 board_20319 }, 263 board_40518 },
236 264
237 { PCI_VENDOR_ID_PROMISE, 0x6629, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 265 { PCI_VENDOR_ID_PROMISE, 0x6629, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
238 board_20619 }, 266 board_20619 },
@@ -261,12 +289,11 @@ static int pdc_port_start(struct ata_port *ap)
261 if (rc) 289 if (rc)
262 return rc; 290 return rc;
263 291
264 pp = kmalloc(sizeof(*pp), GFP_KERNEL); 292 pp = kzalloc(sizeof(*pp), GFP_KERNEL);
265 if (!pp) { 293 if (!pp) {
266 rc = -ENOMEM; 294 rc = -ENOMEM;
267 goto err_out; 295 goto err_out;
268 } 296 }
269 memset(pp, 0, sizeof(*pp));
270 297
271 pp->pkt = dma_alloc_coherent(dev, 128, &pp->pkt_dma, GFP_KERNEL); 298 pp->pkt = dma_alloc_coherent(dev, 128, &pp->pkt_dma, GFP_KERNEL);
272 if (!pp->pkt) { 299 if (!pp->pkt) {
@@ -298,6 +325,16 @@ static void pdc_port_stop(struct ata_port *ap)
298} 325}
299 326
300 327
328static void pdc_host_stop(struct ata_host_set *host_set)
329{
330 struct pdc_host_priv *hp = host_set->private_data;
331
332 ata_pci_host_stop(host_set);
333
334 kfree(hp);
335}
336
337
301static void pdc_reset_port(struct ata_port *ap) 338static void pdc_reset_port(struct ata_port *ap)
302{ 339{
303 void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr + PDC_CTLSTAT; 340 void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr + PDC_CTLSTAT;
@@ -394,19 +431,6 @@ static void pdc_eng_timeout(struct ata_port *ap)
394 spin_lock_irqsave(&host_set->lock, flags); 431 spin_lock_irqsave(&host_set->lock, flags);
395 432
396 qc = ata_qc_from_tag(ap, ap->active_tag); 433 qc = ata_qc_from_tag(ap, ap->active_tag);
397 if (!qc) {
398 printk(KERN_ERR "ata%u: BUG: timeout without command\n",
399 ap->id);
400 goto out;
401 }
402
403 /* hack alert! We cannot use the supplied completion
404 * function from inside the ->eh_strategy_handler() thread.
405 * libata is the only user of ->eh_strategy_handler() in
406 * any kernel, so the default scsi_done() assumes it is
407 * not being called from the SCSI EH.
408 */
409 qc->scsidone = scsi_finish_command;
410 434
411 switch (qc->tf.protocol) { 435 switch (qc->tf.protocol) {
412 case ATA_PROT_DMA: 436 case ATA_PROT_DMA:
@@ -414,7 +438,6 @@ static void pdc_eng_timeout(struct ata_port *ap)
414 printk(KERN_ERR "ata%u: command timeout\n", ap->id); 438 printk(KERN_ERR "ata%u: command timeout\n", ap->id);
415 drv_stat = ata_wait_idle(ap); 439 drv_stat = ata_wait_idle(ap);
416 qc->err_mask |= __ac_err_mask(drv_stat); 440 qc->err_mask |= __ac_err_mask(drv_stat);
417 ata_qc_complete(qc);
418 break; 441 break;
419 442
420 default: 443 default:
@@ -424,12 +447,11 @@ static void pdc_eng_timeout(struct ata_port *ap)
424 ap->id, qc->tf.command, drv_stat); 447 ap->id, qc->tf.command, drv_stat);
425 448
426 qc->err_mask |= ac_err_mask(drv_stat); 449 qc->err_mask |= ac_err_mask(drv_stat);
427 ata_qc_complete(qc);
428 break; 450 break;
429 } 451 }
430 452
431out:
432 spin_unlock_irqrestore(&host_set->lock, flags); 453 spin_unlock_irqrestore(&host_set->lock, flags);
454 ata_eh_qc_complete(qc);
433 DPRINTK("EXIT\n"); 455 DPRINTK("EXIT\n");
434} 456}
435 457
@@ -495,14 +517,15 @@ static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *r
495 VPRINTK("QUICK EXIT 2\n"); 517 VPRINTK("QUICK EXIT 2\n");
496 return IRQ_NONE; 518 return IRQ_NONE;
497 } 519 }
520
521 spin_lock(&host_set->lock);
522
498 mask &= 0xffff; /* only 16 tags possible */ 523 mask &= 0xffff; /* only 16 tags possible */
499 if (!mask) { 524 if (!mask) {
500 VPRINTK("QUICK EXIT 3\n"); 525 VPRINTK("QUICK EXIT 3\n");
501 return IRQ_NONE; 526 goto done_irq;
502 } 527 }
503 528
504 spin_lock(&host_set->lock);
505
506 writel(mask, mmio_base + PDC_INT_SEQMASK); 529 writel(mask, mmio_base + PDC_INT_SEQMASK);
507 530
508 for (i = 0; i < host_set->n_ports; i++) { 531 for (i = 0; i < host_set->n_ports; i++) {
@@ -519,10 +542,10 @@ static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *r
519 } 542 }
520 } 543 }
521 544
522 spin_unlock(&host_set->lock);
523
524 VPRINTK("EXIT\n"); 545 VPRINTK("EXIT\n");
525 546
547done_irq:
548 spin_unlock(&host_set->lock);
526 return IRQ_RETVAL(handled); 549 return IRQ_RETVAL(handled);
527} 550}
528 551
@@ -544,7 +567,7 @@ static inline void pdc_packet_start(struct ata_queued_cmd *qc)
544 readl((void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); /* flush */ 567 readl((void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); /* flush */
545} 568}
546 569
547static int pdc_qc_issue_prot(struct ata_queued_cmd *qc) 570static unsigned int pdc_qc_issue_prot(struct ata_queued_cmd *qc)
548{ 571{
549 switch (qc->tf.protocol) { 572 switch (qc->tf.protocol) {
550 case ATA_PROT_DMA: 573 case ATA_PROT_DMA:
@@ -600,6 +623,8 @@ static void pdc_ata_setup_port(struct ata_ioports *port, unsigned long base)
600static void pdc_host_init(unsigned int chip_id, struct ata_probe_ent *pe) 623static void pdc_host_init(unsigned int chip_id, struct ata_probe_ent *pe)
601{ 624{
602 void __iomem *mmio = pe->mmio_base; 625 void __iomem *mmio = pe->mmio_base;
626 struct pdc_host_priv *hp = pe->private_data;
627 int hotplug_offset = hp->hotplug_offset;
603 u32 tmp; 628 u32 tmp;
604 629
605 /* 630 /*
@@ -614,12 +639,12 @@ static void pdc_host_init(unsigned int chip_id, struct ata_probe_ent *pe)
614 writel(tmp, mmio + PDC_FLASH_CTL); 639 writel(tmp, mmio + PDC_FLASH_CTL);
615 640
616 /* clear plug/unplug flags for all ports */ 641 /* clear plug/unplug flags for all ports */
617 tmp = readl(mmio + PDC_SATA_PLUG_CSR); 642 tmp = readl(mmio + hotplug_offset);
618 writel(tmp | 0xff, mmio + PDC_SATA_PLUG_CSR); 643 writel(tmp | 0xff, mmio + hotplug_offset);
619 644
620 /* mask plug/unplug ints */ 645 /* mask plug/unplug ints */
621 tmp = readl(mmio + PDC_SATA_PLUG_CSR); 646 tmp = readl(mmio + hotplug_offset);
622 writel(tmp | 0xff0000, mmio + PDC_SATA_PLUG_CSR); 647 writel(tmp | 0xff0000, mmio + hotplug_offset);
623 648
624 /* reduce TBG clock to 133 Mhz. */ 649 /* reduce TBG clock to 133 Mhz. */
625 tmp = readl(mmio + PDC_TBG_MODE); 650 tmp = readl(mmio + PDC_TBG_MODE);
@@ -641,6 +666,7 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
641{ 666{
642 static int printed_version; 667 static int printed_version;
643 struct ata_probe_ent *probe_ent = NULL; 668 struct ata_probe_ent *probe_ent = NULL;
669 struct pdc_host_priv *hp;
644 unsigned long base; 670 unsigned long base;
645 void __iomem *mmio_base; 671 void __iomem *mmio_base;
646 unsigned int board_idx = (unsigned int) ent->driver_data; 672 unsigned int board_idx = (unsigned int) ent->driver_data;
@@ -671,13 +697,12 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
671 if (rc) 697 if (rc)
672 goto err_out_regions; 698 goto err_out_regions;
673 699
674 probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL); 700 probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
675 if (probe_ent == NULL) { 701 if (probe_ent == NULL) {
676 rc = -ENOMEM; 702 rc = -ENOMEM;
677 goto err_out_regions; 703 goto err_out_regions;
678 } 704 }
679 705
680 memset(probe_ent, 0, sizeof(*probe_ent));
681 probe_ent->dev = pci_dev_to_dev(pdev); 706 probe_ent->dev = pci_dev_to_dev(pdev);
682 INIT_LIST_HEAD(&probe_ent->node); 707 INIT_LIST_HEAD(&probe_ent->node);
683 708
@@ -688,6 +713,16 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
688 } 713 }
689 base = (unsigned long) mmio_base; 714 base = (unsigned long) mmio_base;
690 715
716 hp = kzalloc(sizeof(*hp), GFP_KERNEL);
717 if (hp == NULL) {
718 rc = -ENOMEM;
719 goto err_out_free_ent;
720 }
721
722 /* Set default hotplug offset */
723 hp->hotplug_offset = PDC_SATA_PLUG_CSR;
724 probe_ent->private_data = hp;
725
691 probe_ent->sht = pdc_port_info[board_idx].sht; 726 probe_ent->sht = pdc_port_info[board_idx].sht;
692 probe_ent->host_flags = pdc_port_info[board_idx].host_flags; 727 probe_ent->host_flags = pdc_port_info[board_idx].host_flags;
693 probe_ent->pio_mask = pdc_port_info[board_idx].pio_mask; 728 probe_ent->pio_mask = pdc_port_info[board_idx].pio_mask;
@@ -707,6 +742,10 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
707 742
708 /* notice 4-port boards */ 743 /* notice 4-port boards */
709 switch (board_idx) { 744 switch (board_idx) {
745 case board_40518:
746 /* Override hotplug offset for SATAII150 */
747 hp->hotplug_offset = PDC2_SATA_PLUG_CSR;
748 /* Fall through */
710 case board_20319: 749 case board_20319:
711 probe_ent->n_ports = 4; 750 probe_ent->n_ports = 4;
712 751
@@ -716,6 +755,10 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
716 probe_ent->port[2].scr_addr = base + 0x600; 755 probe_ent->port[2].scr_addr = base + 0x600;
717 probe_ent->port[3].scr_addr = base + 0x700; 756 probe_ent->port[3].scr_addr = base + 0x700;
718 break; 757 break;
758 case board_2057x:
759 /* Override hotplug offset for SATAII150 */
760 hp->hotplug_offset = PDC2_SATA_PLUG_CSR;
761 /* Fall through */
719 case board_2037x: 762 case board_2037x:
720 probe_ent->n_ports = 2; 763 probe_ent->n_ports = 2;
721 break; 764 break;
@@ -741,8 +784,10 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
741 /* initialize adapter */ 784 /* initialize adapter */
742 pdc_host_init(board_idx, probe_ent); 785 pdc_host_init(board_idx, probe_ent);
743 786
744 /* FIXME: check ata_device_add return value */ 787 /* FIXME: Need any other frees than hp? */
745 ata_device_add(probe_ent); 788 if (!ata_device_add(probe_ent))
789 kfree(hp);
790
746 kfree(probe_ent); 791 kfree(probe_ent);
747 792
748 return 0; 793 return 0;
diff --git a/drivers/scsi/sata_qstor.c b/drivers/scsi/sata_qstor.c
index 80480f0fb2b8..9602f43a298e 100644
--- a/drivers/scsi/sata_qstor.c
+++ b/drivers/scsi/sata_qstor.c
@@ -120,7 +120,7 @@ static void qs_host_stop(struct ata_host_set *host_set);
120static void qs_port_stop(struct ata_port *ap); 120static void qs_port_stop(struct ata_port *ap);
121static void qs_phy_reset(struct ata_port *ap); 121static void qs_phy_reset(struct ata_port *ap);
122static void qs_qc_prep(struct ata_queued_cmd *qc); 122static void qs_qc_prep(struct ata_queued_cmd *qc);
123static int qs_qc_issue(struct ata_queued_cmd *qc); 123static unsigned int qs_qc_issue(struct ata_queued_cmd *qc);
124static int qs_check_atapi_dma(struct ata_queued_cmd *qc); 124static int qs_check_atapi_dma(struct ata_queued_cmd *qc);
125static void qs_bmdma_stop(struct ata_queued_cmd *qc); 125static void qs_bmdma_stop(struct ata_queued_cmd *qc);
126static u8 qs_bmdma_status(struct ata_port *ap); 126static u8 qs_bmdma_status(struct ata_port *ap);
@@ -132,11 +132,11 @@ static struct scsi_host_template qs_ata_sht = {
132 .name = DRV_NAME, 132 .name = DRV_NAME,
133 .ioctl = ata_scsi_ioctl, 133 .ioctl = ata_scsi_ioctl,
134 .queuecommand = ata_scsi_queuecmd, 134 .queuecommand = ata_scsi_queuecmd,
135 .eh_timed_out = ata_scsi_timed_out,
135 .eh_strategy_handler = ata_scsi_error, 136 .eh_strategy_handler = ata_scsi_error,
136 .can_queue = ATA_DEF_QUEUE, 137 .can_queue = ATA_DEF_QUEUE,
137 .this_id = ATA_SHT_THIS_ID, 138 .this_id = ATA_SHT_THIS_ID,
138 .sg_tablesize = QS_MAX_PRD, 139 .sg_tablesize = QS_MAX_PRD,
139 .max_sectors = ATA_MAX_SECTORS,
140 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 140 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
141 .emulated = ATA_SHT_EMULATED, 141 .emulated = ATA_SHT_EMULATED,
142 //FIXME .use_clustering = ATA_SHT_USE_CLUSTERING, 142 //FIXME .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -276,8 +276,8 @@ static unsigned int qs_fill_sg(struct ata_queued_cmd *qc)
276 unsigned int nelem; 276 unsigned int nelem;
277 u8 *prd = pp->pkt + QS_CPB_BYTES; 277 u8 *prd = pp->pkt + QS_CPB_BYTES;
278 278
279 assert(qc->__sg != NULL); 279 WARN_ON(qc->__sg == NULL);
280 assert(qc->n_elem > 0 || qc->pad_len > 0); 280 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
281 281
282 nelem = 0; 282 nelem = 0;
283 ata_for_each_sg(sg, qc) { 283 ata_for_each_sg(sg, qc) {
@@ -352,7 +352,7 @@ static inline void qs_packet_start(struct ata_queued_cmd *qc)
352 readl(chan + QS_CCT_CFF); /* flush */ 352 readl(chan + QS_CCT_CFF); /* flush */
353} 353}
354 354
355static int qs_qc_issue(struct ata_queued_cmd *qc) 355static unsigned int qs_qc_issue(struct ata_queued_cmd *qc)
356{ 356{
357 struct qs_port_priv *pp = qc->ap->private_data; 357 struct qs_port_priv *pp = qc->ap->private_data;
358 358
diff --git a/drivers/scsi/sata_sil.c b/drivers/scsi/sata_sil.c
index 9face3c6aa21..91ecf527bb8a 100644
--- a/drivers/scsi/sata_sil.c
+++ b/drivers/scsi/sata_sil.c
@@ -137,11 +137,11 @@ static struct scsi_host_template sil_sht = {
137 .name = DRV_NAME, 137 .name = DRV_NAME,
138 .ioctl = ata_scsi_ioctl, 138 .ioctl = ata_scsi_ioctl,
139 .queuecommand = ata_scsi_queuecmd, 139 .queuecommand = ata_scsi_queuecmd,
140 .eh_timed_out = ata_scsi_timed_out,
140 .eh_strategy_handler = ata_scsi_error, 141 .eh_strategy_handler = ata_scsi_error,
141 .can_queue = ATA_DEF_QUEUE, 142 .can_queue = ATA_DEF_QUEUE,
142 .this_id = ATA_SHT_THIS_ID, 143 .this_id = ATA_SHT_THIS_ID,
143 .sg_tablesize = LIBATA_MAX_PRD, 144 .sg_tablesize = LIBATA_MAX_PRD,
144 .max_sectors = ATA_MAX_SECTORS,
145 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 145 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
146 .emulated = ATA_SHT_EMULATED, 146 .emulated = ATA_SHT_EMULATED,
147 .use_clustering = ATA_SHT_USE_CLUSTERING, 147 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -159,7 +159,7 @@ static const struct ata_port_operations sil_ops = {
159 .check_status = ata_check_status, 159 .check_status = ata_check_status,
160 .exec_command = ata_exec_command, 160 .exec_command = ata_exec_command,
161 .dev_select = ata_std_dev_select, 161 .dev_select = ata_std_dev_select,
162 .phy_reset = sata_phy_reset, 162 .probe_reset = ata_std_probe_reset,
163 .post_set_mode = sil_post_set_mode, 163 .post_set_mode = sil_post_set_mode,
164 .bmdma_setup = ata_bmdma_setup, 164 .bmdma_setup = ata_bmdma_setup,
165 .bmdma_start = ata_bmdma_start, 165 .bmdma_start = ata_bmdma_start,
@@ -182,7 +182,7 @@ static const struct ata_port_info sil_port_info[] = {
182 { 182 {
183 .sht = &sil_sht, 183 .sht = &sil_sht,
184 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 184 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
185 ATA_FLAG_SRST | ATA_FLAG_MMIO, 185 ATA_FLAG_MMIO,
186 .pio_mask = 0x1f, /* pio0-4 */ 186 .pio_mask = 0x1f, /* pio0-4 */
187 .mwdma_mask = 0x07, /* mwdma0-2 */ 187 .mwdma_mask = 0x07, /* mwdma0-2 */
188 .udma_mask = 0x3f, /* udma0-5 */ 188 .udma_mask = 0x3f, /* udma0-5 */
@@ -192,8 +192,7 @@ static const struct ata_port_info sil_port_info[] = {
192 { 192 {
193 .sht = &sil_sht, 193 .sht = &sil_sht,
194 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 194 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
195 ATA_FLAG_SRST | ATA_FLAG_MMIO | 195 ATA_FLAG_MMIO | SIL_FLAG_MOD15WRITE,
196 SIL_FLAG_MOD15WRITE,
197 .pio_mask = 0x1f, /* pio0-4 */ 196 .pio_mask = 0x1f, /* pio0-4 */
198 .mwdma_mask = 0x07, /* mwdma0-2 */ 197 .mwdma_mask = 0x07, /* mwdma0-2 */
199 .udma_mask = 0x3f, /* udma0-5 */ 198 .udma_mask = 0x3f, /* udma0-5 */
@@ -214,7 +213,7 @@ static const struct ata_port_info sil_port_info[] = {
214 { 213 {
215 .sht = &sil_sht, 214 .sht = &sil_sht,
216 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 215 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
217 ATA_FLAG_SRST | ATA_FLAG_MMIO | 216 ATA_FLAG_MMIO |
218 SIL_FLAG_RERR_ON_DMA_ACT, 217 SIL_FLAG_RERR_ON_DMA_ACT,
219 .pio_mask = 0x1f, /* pio0-4 */ 218 .pio_mask = 0x1f, /* pio0-4 */
220 .mwdma_mask = 0x07, /* mwdma0-2 */ 219 .mwdma_mask = 0x07, /* mwdma0-2 */
@@ -354,22 +353,12 @@ static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
354static void sil_dev_config(struct ata_port *ap, struct ata_device *dev) 353static void sil_dev_config(struct ata_port *ap, struct ata_device *dev)
355{ 354{
356 unsigned int n, quirks = 0; 355 unsigned int n, quirks = 0;
357 unsigned char model_num[40]; 356 unsigned char model_num[41];
358 const char *s;
359 unsigned int len;
360
361 ata_dev_id_string(dev->id, model_num, ATA_ID_PROD_OFS,
362 sizeof(model_num));
363 s = &model_num[0];
364 len = strnlen(s, sizeof(model_num));
365 357
366 /* ATAPI specifies that empty space is blank-filled; remove blanks */ 358 ata_id_c_string(dev->id, model_num, ATA_ID_PROD_OFS, sizeof(model_num));
367 while ((len > 0) && (s[len - 1] == ' '))
368 len--;
369 359
370 for (n = 0; sil_blacklist[n].product; n++) 360 for (n = 0; sil_blacklist[n].product; n++)
371 if (!memcmp(sil_blacklist[n].product, s, 361 if (!strcmp(sil_blacklist[n].product, model_num)) {
372 strlen(sil_blacklist[n].product))) {
373 quirks = sil_blacklist[n].quirk; 362 quirks = sil_blacklist[n].quirk;
374 break; 363 break;
375 } 364 }
@@ -380,16 +369,14 @@ static void sil_dev_config(struct ata_port *ap, struct ata_device *dev)
380 (quirks & SIL_QUIRK_MOD15WRITE))) { 369 (quirks & SIL_QUIRK_MOD15WRITE))) {
381 printk(KERN_INFO "ata%u(%u): applying Seagate errata fix (mod15write workaround)\n", 370 printk(KERN_INFO "ata%u(%u): applying Seagate errata fix (mod15write workaround)\n",
382 ap->id, dev->devno); 371 ap->id, dev->devno);
383 ap->host->max_sectors = 15; 372 dev->max_sectors = 15;
384 ap->host->hostt->max_sectors = 15;
385 dev->flags |= ATA_DFLAG_LOCK_SECTORS;
386 return; 373 return;
387 } 374 }
388 375
389 /* limit to udma5 */ 376 /* limit to udma5 */
390 if (quirks & SIL_QUIRK_UDMA5MAX) { 377 if (quirks & SIL_QUIRK_UDMA5MAX) {
391 printk(KERN_INFO "ata%u(%u): applying Maxtor errata fix %s\n", 378 printk(KERN_INFO "ata%u(%u): applying Maxtor errata fix %s\n",
392 ap->id, dev->devno, s); 379 ap->id, dev->devno, model_num);
393 ap->udma_mask &= ATA_UDMA5; 380 ap->udma_mask &= ATA_UDMA5;
394 return; 381 return;
395 } 382 }
diff --git a/drivers/scsi/sata_sil24.c b/drivers/scsi/sata_sil24.c
index 923130185a9e..a0e35a262156 100644
--- a/drivers/scsi/sata_sil24.c
+++ b/drivers/scsi/sata_sil24.c
@@ -249,9 +249,9 @@ static u8 sil24_check_status(struct ata_port *ap);
249static u32 sil24_scr_read(struct ata_port *ap, unsigned sc_reg); 249static u32 sil24_scr_read(struct ata_port *ap, unsigned sc_reg);
250static void sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val); 250static void sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val);
251static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf); 251static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
252static void sil24_phy_reset(struct ata_port *ap); 252static int sil24_probe_reset(struct ata_port *ap, unsigned int *classes);
253static void sil24_qc_prep(struct ata_queued_cmd *qc); 253static void sil24_qc_prep(struct ata_queued_cmd *qc);
254static int sil24_qc_issue(struct ata_queued_cmd *qc); 254static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc);
255static void sil24_irq_clear(struct ata_port *ap); 255static void sil24_irq_clear(struct ata_port *ap);
256static void sil24_eng_timeout(struct ata_port *ap); 256static void sil24_eng_timeout(struct ata_port *ap);
257static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *regs); 257static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
@@ -280,11 +280,11 @@ static struct scsi_host_template sil24_sht = {
280 .name = DRV_NAME, 280 .name = DRV_NAME,
281 .ioctl = ata_scsi_ioctl, 281 .ioctl = ata_scsi_ioctl,
282 .queuecommand = ata_scsi_queuecmd, 282 .queuecommand = ata_scsi_queuecmd,
283 .eh_timed_out = ata_scsi_timed_out,
283 .eh_strategy_handler = ata_scsi_error, 284 .eh_strategy_handler = ata_scsi_error,
284 .can_queue = ATA_DEF_QUEUE, 285 .can_queue = ATA_DEF_QUEUE,
285 .this_id = ATA_SHT_THIS_ID, 286 .this_id = ATA_SHT_THIS_ID,
286 .sg_tablesize = LIBATA_MAX_PRD, 287 .sg_tablesize = LIBATA_MAX_PRD,
287 .max_sectors = ATA_MAX_SECTORS,
288 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 288 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
289 .emulated = ATA_SHT_EMULATED, 289 .emulated = ATA_SHT_EMULATED,
290 .use_clustering = ATA_SHT_USE_CLUSTERING, 290 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -305,7 +305,7 @@ static const struct ata_port_operations sil24_ops = {
305 305
306 .tf_read = sil24_tf_read, 306 .tf_read = sil24_tf_read,
307 307
308 .phy_reset = sil24_phy_reset, 308 .probe_reset = sil24_probe_reset,
309 309
310 .qc_prep = sil24_qc_prep, 310 .qc_prep = sil24_qc_prep,
311 .qc_issue = sil24_qc_issue, 311 .qc_issue = sil24_qc_issue,
@@ -335,8 +335,8 @@ static struct ata_port_info sil24_port_info[] = {
335 { 335 {
336 .sht = &sil24_sht, 336 .sht = &sil24_sht,
337 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 337 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
338 ATA_FLAG_SRST | ATA_FLAG_MMIO | 338 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
339 ATA_FLAG_PIO_DMA | SIL24_NPORTS2FLAG(4), 339 SIL24_NPORTS2FLAG(4),
340 .pio_mask = 0x1f, /* pio0-4 */ 340 .pio_mask = 0x1f, /* pio0-4 */
341 .mwdma_mask = 0x07, /* mwdma0-2 */ 341 .mwdma_mask = 0x07, /* mwdma0-2 */
342 .udma_mask = 0x3f, /* udma0-5 */ 342 .udma_mask = 0x3f, /* udma0-5 */
@@ -346,8 +346,8 @@ static struct ata_port_info sil24_port_info[] = {
346 { 346 {
347 .sht = &sil24_sht, 347 .sht = &sil24_sht,
348 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 348 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
349 ATA_FLAG_SRST | ATA_FLAG_MMIO | 349 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
350 ATA_FLAG_PIO_DMA | SIL24_NPORTS2FLAG(2), 350 SIL24_NPORTS2FLAG(2),
351 .pio_mask = 0x1f, /* pio0-4 */ 351 .pio_mask = 0x1f, /* pio0-4 */
352 .mwdma_mask = 0x07, /* mwdma0-2 */ 352 .mwdma_mask = 0x07, /* mwdma0-2 */
353 .udma_mask = 0x3f, /* udma0-5 */ 353 .udma_mask = 0x3f, /* udma0-5 */
@@ -357,8 +357,8 @@ static struct ata_port_info sil24_port_info[] = {
357 { 357 {
358 .sht = &sil24_sht, 358 .sht = &sil24_sht,
359 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 359 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
360 ATA_FLAG_SRST | ATA_FLAG_MMIO | 360 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
361 ATA_FLAG_PIO_DMA | SIL24_NPORTS2FLAG(1), 361 SIL24_NPORTS2FLAG(1),
362 .pio_mask = 0x1f, /* pio0-4 */ 362 .pio_mask = 0x1f, /* pio0-4 */
363 .mwdma_mask = 0x07, /* mwdma0-2 */ 363 .mwdma_mask = 0x07, /* mwdma0-2 */
364 .udma_mask = 0x3f, /* udma0-5 */ 364 .udma_mask = 0x3f, /* udma0-5 */
@@ -370,7 +370,7 @@ static void sil24_dev_config(struct ata_port *ap, struct ata_device *dev)
370{ 370{
371 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr; 371 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
372 372
373 if (ap->cdb_len == 16) 373 if (dev->cdb_len == 16)
374 writel(PORT_CS_CDB16, port + PORT_CTRL_STAT); 374 writel(PORT_CS_CDB16, port + PORT_CTRL_STAT);
375 else 375 else
376 writel(PORT_CS_CDB16, port + PORT_CTRL_CLR); 376 writel(PORT_CS_CDB16, port + PORT_CTRL_CLR);
@@ -427,7 +427,8 @@ static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
427 *tf = pp->tf; 427 *tf = pp->tf;
428} 428}
429 429
430static int sil24_issue_SRST(struct ata_port *ap) 430static int sil24_softreset(struct ata_port *ap, int verbose,
431 unsigned int *class)
431{ 432{
432 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr; 433 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
433 struct sil24_port_priv *pp = ap->private_data; 434 struct sil24_port_priv *pp = ap->private_data;
@@ -436,6 +437,8 @@ static int sil24_issue_SRST(struct ata_port *ap)
436 u32 irq_enable, irq_stat; 437 u32 irq_enable, irq_stat;
437 int cnt; 438 int cnt;
438 439
440 DPRINTK("ENTER\n");
441
439 /* temporarily turn off IRQs during SRST */ 442 /* temporarily turn off IRQs during SRST */
440 irq_enable = readl(port + PORT_IRQ_ENABLE_SET); 443 irq_enable = readl(port + PORT_IRQ_ENABLE_SET);
441 writel(irq_enable, port + PORT_IRQ_ENABLE_CLR); 444 writel(irq_enable, port + PORT_IRQ_ENABLE_CLR);
@@ -465,30 +468,36 @@ static int sil24_issue_SRST(struct ata_port *ap)
465 /* restore IRQs */ 468 /* restore IRQs */
466 writel(irq_enable, port + PORT_IRQ_ENABLE_SET); 469 writel(irq_enable, port + PORT_IRQ_ENABLE_SET);
467 470
468 if (!(irq_stat & PORT_IRQ_COMPLETE)) 471 if (sata_dev_present(ap)) {
469 return -1; 472 if (!(irq_stat & PORT_IRQ_COMPLETE)) {
473 DPRINTK("EXIT, srst failed\n");
474 return -EIO;
475 }
470 476
471 /* update TF */ 477 sil24_update_tf(ap);
472 sil24_update_tf(ap); 478 *class = ata_dev_classify(&pp->tf);
479 }
480 if (*class == ATA_DEV_UNKNOWN)
481 *class = ATA_DEV_NONE;
482
483 DPRINTK("EXIT, class=%u\n", *class);
473 return 0; 484 return 0;
474} 485}
475 486
476static void sil24_phy_reset(struct ata_port *ap) 487static int sil24_hardreset(struct ata_port *ap, int verbose,
488 unsigned int *class)
477{ 489{
478 struct sil24_port_priv *pp = ap->private_data; 490 unsigned int dummy_class;
479
480 __sata_phy_reset(ap);
481 if (ap->flags & ATA_FLAG_PORT_DISABLED)
482 return;
483 491
484 if (sil24_issue_SRST(ap) < 0) { 492 /* sil24 doesn't report device signature after hard reset */
485 printk(KERN_ERR DRV_NAME 493 return sata_std_hardreset(ap, verbose, &dummy_class);
486 " ata%u: SRST failed, disabling port\n", ap->id); 494}
487 ap->ops->port_disable(ap);
488 return;
489 }
490 495
491 ap->device->class = ata_dev_classify(&pp->tf); 496static int sil24_probe_reset(struct ata_port *ap, unsigned int *classes)
497{
498 return ata_drive_probe_reset(ap, ata_std_probeinit,
499 sil24_softreset, sil24_hardreset,
500 ata_std_postreset, classes);
492} 501}
493 502
494static inline void sil24_fill_sg(struct ata_queued_cmd *qc, 503static inline void sil24_fill_sg(struct ata_queued_cmd *qc,
@@ -533,7 +542,7 @@ static void sil24_qc_prep(struct ata_queued_cmd *qc)
533 prb = &cb->atapi.prb; 542 prb = &cb->atapi.prb;
534 sge = cb->atapi.sge; 543 sge = cb->atapi.sge;
535 memset(cb->atapi.cdb, 0, 32); 544 memset(cb->atapi.cdb, 0, 32);
536 memcpy(cb->atapi.cdb, qc->cdb, ap->cdb_len); 545 memcpy(cb->atapi.cdb, qc->cdb, qc->dev->cdb_len);
537 546
538 if (qc->tf.protocol != ATA_PROT_ATAPI_NODATA) { 547 if (qc->tf.protocol != ATA_PROT_ATAPI_NODATA) {
539 if (qc->tf.flags & ATA_TFLAG_WRITE) 548 if (qc->tf.flags & ATA_TFLAG_WRITE)
@@ -557,7 +566,7 @@ static void sil24_qc_prep(struct ata_queued_cmd *qc)
557 sil24_fill_sg(qc, sge); 566 sil24_fill_sg(qc, sge);
558} 567}
559 568
560static int sil24_qc_issue(struct ata_queued_cmd *qc) 569static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc)
561{ 570{
562 struct ata_port *ap = qc->ap; 571 struct ata_port *ap = qc->ap;
563 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr; 572 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
@@ -638,23 +647,10 @@ static void sil24_eng_timeout(struct ata_port *ap)
638 struct ata_queued_cmd *qc; 647 struct ata_queued_cmd *qc;
639 648
640 qc = ata_qc_from_tag(ap, ap->active_tag); 649 qc = ata_qc_from_tag(ap, ap->active_tag);
641 if (!qc) {
642 printk(KERN_ERR "ata%u: BUG: timeout without command\n",
643 ap->id);
644 return;
645 }
646 650
647 /*
648 * hack alert! We cannot use the supplied completion
649 * function from inside the ->eh_strategy_handler() thread.
650 * libata is the only user of ->eh_strategy_handler() in
651 * any kernel, so the default scsi_done() assumes it is
652 * not being called from the SCSI EH.
653 */
654 printk(KERN_ERR "ata%u: command timeout\n", ap->id); 651 printk(KERN_ERR "ata%u: command timeout\n", ap->id);
655 qc->scsidone = scsi_finish_command; 652 qc->err_mask |= AC_ERR_TIMEOUT;
656 qc->err_mask |= AC_ERR_OTHER; 653 ata_eh_qc_complete(qc);
657 ata_qc_complete(qc);
658 654
659 sil24_reset_controller(ap); 655 sil24_reset_controller(ap);
660} 656}
diff --git a/drivers/scsi/sata_sis.c b/drivers/scsi/sata_sis.c
index 2df8c5632ac3..7fd45f86de99 100644
--- a/drivers/scsi/sata_sis.c
+++ b/drivers/scsi/sata_sis.c
@@ -87,11 +87,11 @@ static struct scsi_host_template sis_sht = {
87 .name = DRV_NAME, 87 .name = DRV_NAME,
88 .ioctl = ata_scsi_ioctl, 88 .ioctl = ata_scsi_ioctl,
89 .queuecommand = ata_scsi_queuecmd, 89 .queuecommand = ata_scsi_queuecmd,
90 .eh_timed_out = ata_scsi_timed_out,
90 .eh_strategy_handler = ata_scsi_error, 91 .eh_strategy_handler = ata_scsi_error,
91 .can_queue = ATA_DEF_QUEUE, 92 .can_queue = ATA_DEF_QUEUE,
92 .this_id = ATA_SHT_THIS_ID, 93 .this_id = ATA_SHT_THIS_ID,
93 .sg_tablesize = ATA_MAX_PRD, 94 .sg_tablesize = ATA_MAX_PRD,
94 .max_sectors = ATA_MAX_SECTORS,
95 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 95 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
96 .emulated = ATA_SHT_EMULATED, 96 .emulated = ATA_SHT_EMULATED,
97 .use_clustering = ATA_SHT_USE_CLUSTERING, 97 .use_clustering = ATA_SHT_USE_CLUSTERING,
diff --git a/drivers/scsi/sata_svw.c b/drivers/scsi/sata_svw.c
index d8472563fde8..4aaccd53e736 100644
--- a/drivers/scsi/sata_svw.c
+++ b/drivers/scsi/sata_svw.c
@@ -288,11 +288,11 @@ static struct scsi_host_template k2_sata_sht = {
288 .name = DRV_NAME, 288 .name = DRV_NAME,
289 .ioctl = ata_scsi_ioctl, 289 .ioctl = ata_scsi_ioctl,
290 .queuecommand = ata_scsi_queuecmd, 290 .queuecommand = ata_scsi_queuecmd,
291 .eh_timed_out = ata_scsi_timed_out,
291 .eh_strategy_handler = ata_scsi_error, 292 .eh_strategy_handler = ata_scsi_error,
292 .can_queue = ATA_DEF_QUEUE, 293 .can_queue = ATA_DEF_QUEUE,
293 .this_id = ATA_SHT_THIS_ID, 294 .this_id = ATA_SHT_THIS_ID,
294 .sg_tablesize = LIBATA_MAX_PRD, 295 .sg_tablesize = LIBATA_MAX_PRD,
295 .max_sectors = ATA_MAX_SECTORS,
296 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 296 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
297 .emulated = ATA_SHT_EMULATED, 297 .emulated = ATA_SHT_EMULATED,
298 .use_clustering = ATA_SHT_USE_CLUSTERING, 298 .use_clustering = ATA_SHT_USE_CLUSTERING,
diff --git a/drivers/scsi/sata_sx4.c b/drivers/scsi/sata_sx4.c
index bc87c16c80d2..9f8a76815402 100644
--- a/drivers/scsi/sata_sx4.c
+++ b/drivers/scsi/sata_sx4.c
@@ -174,7 +174,7 @@ static void pdc20621_get_from_dimm(struct ata_probe_ent *pe,
174static void pdc20621_put_to_dimm(struct ata_probe_ent *pe, 174static void pdc20621_put_to_dimm(struct ata_probe_ent *pe,
175 void *psource, u32 offset, u32 size); 175 void *psource, u32 offset, u32 size);
176static void pdc20621_irq_clear(struct ata_port *ap); 176static void pdc20621_irq_clear(struct ata_port *ap);
177static int pdc20621_qc_issue_prot(struct ata_queued_cmd *qc); 177static unsigned int pdc20621_qc_issue_prot(struct ata_queued_cmd *qc);
178 178
179 179
180static struct scsi_host_template pdc_sata_sht = { 180static struct scsi_host_template pdc_sata_sht = {
@@ -182,11 +182,11 @@ static struct scsi_host_template pdc_sata_sht = {
182 .name = DRV_NAME, 182 .name = DRV_NAME,
183 .ioctl = ata_scsi_ioctl, 183 .ioctl = ata_scsi_ioctl,
184 .queuecommand = ata_scsi_queuecmd, 184 .queuecommand = ata_scsi_queuecmd,
185 .eh_timed_out = ata_scsi_timed_out,
185 .eh_strategy_handler = ata_scsi_error, 186 .eh_strategy_handler = ata_scsi_error,
186 .can_queue = ATA_DEF_QUEUE, 187 .can_queue = ATA_DEF_QUEUE,
187 .this_id = ATA_SHT_THIS_ID, 188 .this_id = ATA_SHT_THIS_ID,
188 .sg_tablesize = LIBATA_MAX_PRD, 189 .sg_tablesize = LIBATA_MAX_PRD,
189 .max_sectors = ATA_MAX_SECTORS,
190 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 190 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
191 .emulated = ATA_SHT_EMULATED, 191 .emulated = ATA_SHT_EMULATED,
192 .use_clustering = ATA_SHT_USE_CLUSTERING, 192 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -460,7 +460,7 @@ static void pdc20621_dma_prep(struct ata_queued_cmd *qc)
460 unsigned int i, idx, total_len = 0, sgt_len; 460 unsigned int i, idx, total_len = 0, sgt_len;
461 u32 *buf = (u32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ]; 461 u32 *buf = (u32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ];
462 462
463 assert(qc->flags & ATA_QCFLAG_DMAMAP); 463 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
464 464
465 VPRINTK("ata%u: ENTER\n", ap->id); 465 VPRINTK("ata%u: ENTER\n", ap->id);
466 466
@@ -678,7 +678,7 @@ static void pdc20621_packet_start(struct ata_queued_cmd *qc)
678 } 678 }
679} 679}
680 680
681static int pdc20621_qc_issue_prot(struct ata_queued_cmd *qc) 681static unsigned int pdc20621_qc_issue_prot(struct ata_queued_cmd *qc)
682{ 682{
683 switch (qc->tf.protocol) { 683 switch (qc->tf.protocol) {
684 case ATA_PROT_DMA: 684 case ATA_PROT_DMA:
@@ -866,26 +866,12 @@ static void pdc_eng_timeout(struct ata_port *ap)
866 spin_lock_irqsave(&host_set->lock, flags); 866 spin_lock_irqsave(&host_set->lock, flags);
867 867
868 qc = ata_qc_from_tag(ap, ap->active_tag); 868 qc = ata_qc_from_tag(ap, ap->active_tag);
869 if (!qc) {
870 printk(KERN_ERR "ata%u: BUG: timeout without command\n",
871 ap->id);
872 goto out;
873 }
874
875 /* hack alert! We cannot use the supplied completion
876 * function from inside the ->eh_strategy_handler() thread.
877 * libata is the only user of ->eh_strategy_handler() in
878 * any kernel, so the default scsi_done() assumes it is
879 * not being called from the SCSI EH.
880 */
881 qc->scsidone = scsi_finish_command;
882 869
883 switch (qc->tf.protocol) { 870 switch (qc->tf.protocol) {
884 case ATA_PROT_DMA: 871 case ATA_PROT_DMA:
885 case ATA_PROT_NODATA: 872 case ATA_PROT_NODATA:
886 printk(KERN_ERR "ata%u: command timeout\n", ap->id); 873 printk(KERN_ERR "ata%u: command timeout\n", ap->id);
887 qc->err_mask |= __ac_err_mask(ata_wait_idle(ap)); 874 qc->err_mask |= __ac_err_mask(ata_wait_idle(ap));
888 ata_qc_complete(qc);
889 break; 875 break;
890 876
891 default: 877 default:
@@ -895,12 +881,11 @@ static void pdc_eng_timeout(struct ata_port *ap)
895 ap->id, qc->tf.command, drv_stat); 881 ap->id, qc->tf.command, drv_stat);
896 882
897 qc->err_mask |= ac_err_mask(drv_stat); 883 qc->err_mask |= ac_err_mask(drv_stat);
898 ata_qc_complete(qc);
899 break; 884 break;
900 } 885 }
901 886
902out:
903 spin_unlock_irqrestore(&host_set->lock, flags); 887 spin_unlock_irqrestore(&host_set->lock, flags);
888 ata_eh_qc_complete(qc);
904 DPRINTK("EXIT\n"); 889 DPRINTK("EXIT\n");
905} 890}
906 891
diff --git a/drivers/scsi/sata_uli.c b/drivers/scsi/sata_uli.c
index 9635ca700977..37a487b7d655 100644
--- a/drivers/scsi/sata_uli.c
+++ b/drivers/scsi/sata_uli.c
@@ -75,11 +75,11 @@ static struct scsi_host_template uli_sht = {
75 .name = DRV_NAME, 75 .name = DRV_NAME,
76 .ioctl = ata_scsi_ioctl, 76 .ioctl = ata_scsi_ioctl,
77 .queuecommand = ata_scsi_queuecmd, 77 .queuecommand = ata_scsi_queuecmd,
78 .eh_timed_out = ata_scsi_timed_out,
78 .eh_strategy_handler = ata_scsi_error, 79 .eh_strategy_handler = ata_scsi_error,
79 .can_queue = ATA_DEF_QUEUE, 80 .can_queue = ATA_DEF_QUEUE,
80 .this_id = ATA_SHT_THIS_ID, 81 .this_id = ATA_SHT_THIS_ID,
81 .sg_tablesize = LIBATA_MAX_PRD, 82 .sg_tablesize = LIBATA_MAX_PRD,
82 .max_sectors = ATA_MAX_SECTORS,
83 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 83 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
84 .emulated = ATA_SHT_EMULATED, 84 .emulated = ATA_SHT_EMULATED,
85 .use_clustering = ATA_SHT_USE_CLUSTERING, 85 .use_clustering = ATA_SHT_USE_CLUSTERING,
diff --git a/drivers/scsi/sata_via.c b/drivers/scsi/sata_via.c
index 6d5b0a794cfd..ff65a0b0457f 100644
--- a/drivers/scsi/sata_via.c
+++ b/drivers/scsi/sata_via.c
@@ -94,11 +94,11 @@ static struct scsi_host_template svia_sht = {
94 .name = DRV_NAME, 94 .name = DRV_NAME,
95 .ioctl = ata_scsi_ioctl, 95 .ioctl = ata_scsi_ioctl,
96 .queuecommand = ata_scsi_queuecmd, 96 .queuecommand = ata_scsi_queuecmd,
97 .eh_timed_out = ata_scsi_timed_out,
97 .eh_strategy_handler = ata_scsi_error, 98 .eh_strategy_handler = ata_scsi_error,
98 .can_queue = ATA_DEF_QUEUE, 99 .can_queue = ATA_DEF_QUEUE,
99 .this_id = ATA_SHT_THIS_ID, 100 .this_id = ATA_SHT_THIS_ID,
100 .sg_tablesize = LIBATA_MAX_PRD, 101 .sg_tablesize = LIBATA_MAX_PRD,
101 .max_sectors = ATA_MAX_SECTORS,
102 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 102 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
103 .emulated = ATA_SHT_EMULATED, 103 .emulated = ATA_SHT_EMULATED,
104 .use_clustering = ATA_SHT_USE_CLUSTERING, 104 .use_clustering = ATA_SHT_USE_CLUSTERING,
diff --git a/drivers/scsi/sata_vsc.c b/drivers/scsi/sata_vsc.c
index e484e8db6810..b574379a7a82 100644
--- a/drivers/scsi/sata_vsc.c
+++ b/drivers/scsi/sata_vsc.c
@@ -251,11 +251,11 @@ static struct scsi_host_template vsc_sata_sht = {
251 .name = DRV_NAME, 251 .name = DRV_NAME,
252 .ioctl = ata_scsi_ioctl, 252 .ioctl = ata_scsi_ioctl,
253 .queuecommand = ata_scsi_queuecmd, 253 .queuecommand = ata_scsi_queuecmd,
254 .eh_timed_out = ata_scsi_timed_out,
254 .eh_strategy_handler = ata_scsi_error, 255 .eh_strategy_handler = ata_scsi_error,
255 .can_queue = ATA_DEF_QUEUE, 256 .can_queue = ATA_DEF_QUEUE,
256 .this_id = ATA_SHT_THIS_ID, 257 .this_id = ATA_SHT_THIS_ID,
257 .sg_tablesize = LIBATA_MAX_PRD, 258 .sg_tablesize = LIBATA_MAX_PRD,
258 .max_sectors = ATA_MAX_SECTORS,
259 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 259 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
260 .emulated = ATA_SHT_EMULATED, 260 .emulated = ATA_SHT_EMULATED,
261 .use_clustering = ATA_SHT_USE_CLUSTERING, 261 .use_clustering = ATA_SHT_USE_CLUSTERING,
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index ff82ccfbb106..5d169a2881b9 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -584,8 +584,7 @@ static int scsi_request_sense(struct scsi_cmnd *scmd)
584 * keep a list of pending commands for final completion, and once we 584 * keep a list of pending commands for final completion, and once we
585 * are ready to leave error handling we handle completion for real. 585 * are ready to leave error handling we handle completion for real.
586 **/ 586 **/
587static void scsi_eh_finish_cmd(struct scsi_cmnd *scmd, 587void scsi_eh_finish_cmd(struct scsi_cmnd *scmd, struct list_head *done_q)
588 struct list_head *done_q)
589{ 588{
590 scmd->device->host->host_failed--; 589 scmd->device->host->host_failed--;
591 scmd->eh_eflags = 0; 590 scmd->eh_eflags = 0;
@@ -597,6 +596,7 @@ static void scsi_eh_finish_cmd(struct scsi_cmnd *scmd,
597 scsi_setup_cmd_retry(scmd); 596 scsi_setup_cmd_retry(scmd);
598 list_move_tail(&scmd->eh_entry, done_q); 597 list_move_tail(&scmd->eh_entry, done_q);
599} 598}
599EXPORT_SYMBOL(scsi_eh_finish_cmd);
600 600
601/** 601/**
602 * scsi_eh_get_sense - Get device sense data. 602 * scsi_eh_get_sense - Get device sense data.
@@ -1425,7 +1425,7 @@ static void scsi_eh_ready_devs(struct Scsi_Host *shost,
1425 * @done_q: list_head of processed commands. 1425 * @done_q: list_head of processed commands.
1426 * 1426 *
1427 **/ 1427 **/
1428static void scsi_eh_flush_done_q(struct list_head *done_q) 1428void scsi_eh_flush_done_q(struct list_head *done_q)
1429{ 1429{
1430 struct scsi_cmnd *scmd, *next; 1430 struct scsi_cmnd *scmd, *next;
1431 1431
@@ -1454,6 +1454,7 @@ static void scsi_eh_flush_done_q(struct list_head *done_q)
1454 } 1454 }
1455 } 1455 }
1456} 1456}
1457EXPORT_SYMBOL(scsi_eh_flush_done_q);
1457 1458
1458/** 1459/**
1459 * scsi_unjam_host - Attempt to fix a host which has a cmd that failed. 1460 * scsi_unjam_host - Attempt to fix a host which has a cmd that failed.
diff --git a/include/linux/ata.h b/include/linux/ata.h
index 94f77cce27fa..b02a16c435e7 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -267,6 +267,16 @@ struct ata_taskfile {
267 ((u64) (id)[(n) + 1] << 16) | \ 267 ((u64) (id)[(n) + 1] << 16) | \
268 ((u64) (id)[(n) + 0]) ) 268 ((u64) (id)[(n) + 0]) )
269 269
270static inline unsigned int ata_id_major_version(const u16 *id)
271{
272 unsigned int mver;
273
274 for (mver = 14; mver >= 1; mver--)
275 if (id[ATA_ID_MAJOR_VER] & (1 << mver))
276 break;
277 return mver;
278}
279
270static inline int ata_id_current_chs_valid(const u16 *id) 280static inline int ata_id_current_chs_valid(const u16 *id)
271{ 281{
272 /* For ATA-1 devices, if the INITIALIZE DEVICE PARAMETERS command 282 /* For ATA-1 devices, if the INITIALIZE DEVICE PARAMETERS command
@@ -302,4 +312,16 @@ static inline int ata_ok(u8 status)
302 == ATA_DRDY); 312 == ATA_DRDY);
303} 313}
304 314
315static inline int lba_28_ok(u64 block, u32 n_block)
316{
317 /* check the ending block number */
318 return ((block + n_block - 1) < ((u64)1 << 28)) && (n_block <= 256);
319}
320
321static inline int lba_48_ok(u64 block, u32 n_block)
322{
323 /* check the ending block number */
324 return ((block + n_block - 1) < ((u64)1 << 48)) && (n_block <= 65536);
325}
326
305#endif /* __LINUX_ATA_H__ */ 327#endif /* __LINUX_ATA_H__ */
diff --git a/include/linux/libata.h b/include/linux/libata.h
index c91be5e64ede..66b6847225df 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -35,7 +35,8 @@
35#include <linux/workqueue.h> 35#include <linux/workqueue.h>
36 36
37/* 37/*
38 * compile-time options 38 * compile-time options: to be removed as soon as all the drivers are
39 * converted to the new debugging mechanism
39 */ 40 */
40#undef ATA_DEBUG /* debugging output */ 41#undef ATA_DEBUG /* debugging output */
41#undef ATA_VERBOSE_DEBUG /* yet more debugging output */ 42#undef ATA_VERBOSE_DEBUG /* yet more debugging output */
@@ -61,15 +62,37 @@
61 62
62#define BPRINTK(fmt, args...) if (ap->flags & ATA_FLAG_DEBUGMSG) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args) 63#define BPRINTK(fmt, args...) if (ap->flags & ATA_FLAG_DEBUGMSG) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args)
63 64
64#ifdef ATA_NDEBUG 65/* NEW: debug levels */
65#define assert(expr) 66#define HAVE_LIBATA_MSG 1
66#else 67
67#define assert(expr) \ 68enum {
68 if(unlikely(!(expr))) { \ 69 ATA_MSG_DRV = 0x0001,
69 printk(KERN_ERR "Assertion failed! %s,%s,%s,line=%d\n", \ 70 ATA_MSG_INFO = 0x0002,
70 #expr,__FILE__,__FUNCTION__,__LINE__); \ 71 ATA_MSG_PROBE = 0x0004,
71 } 72 ATA_MSG_WARN = 0x0008,
72#endif 73 ATA_MSG_MALLOC = 0x0010,
74 ATA_MSG_CTL = 0x0020,
75 ATA_MSG_INTR = 0x0040,
76 ATA_MSG_ERR = 0x0080,
77};
78
79#define ata_msg_drv(p) ((p)->msg_enable & ATA_MSG_DRV)
80#define ata_msg_info(p) ((p)->msg_enable & ATA_MSG_INFO)
81#define ata_msg_probe(p) ((p)->msg_enable & ATA_MSG_PROBE)
82#define ata_msg_warn(p) ((p)->msg_enable & ATA_MSG_WARN)
83#define ata_msg_malloc(p) ((p)->msg_enable & ATA_MSG_MALLOC)
84#define ata_msg_ctl(p) ((p)->msg_enable & ATA_MSG_CTL)
85#define ata_msg_intr(p) ((p)->msg_enable & ATA_MSG_INTR)
86#define ata_msg_err(p) ((p)->msg_enable & ATA_MSG_ERR)
87
88static inline u32 ata_msg_init(int dval, int default_msg_enable_bits)
89{
90 if (dval < 0 || dval >= (sizeof(u32) * 8))
91 return default_msg_enable_bits; /* should be 0x1 - only driver info msgs */
92 if (!dval)
93 return 0;
94 return (1 << dval) - 1;
95}
73 96
74/* defines only for the constants which don't work well as enums */ 97/* defines only for the constants which don't work well as enums */
75#define ATA_TAG_POISON 0xfafbfcfdU 98#define ATA_TAG_POISON 0xfafbfcfdU
@@ -99,8 +122,7 @@ enum {
99 /* struct ata_device stuff */ 122 /* struct ata_device stuff */
100 ATA_DFLAG_LBA48 = (1 << 0), /* device supports LBA48 */ 123 ATA_DFLAG_LBA48 = (1 << 0), /* device supports LBA48 */
101 ATA_DFLAG_PIO = (1 << 1), /* device currently in PIO mode */ 124 ATA_DFLAG_PIO = (1 << 1), /* device currently in PIO mode */
102 ATA_DFLAG_LOCK_SECTORS = (1 << 2), /* don't adjust max_sectors */ 125 ATA_DFLAG_LBA = (1 << 2), /* device supports LBA */
103 ATA_DFLAG_LBA = (1 << 3), /* device supports LBA */
104 126
105 ATA_DEV_UNKNOWN = 0, /* unknown device */ 127 ATA_DEV_UNKNOWN = 0, /* unknown device */
106 ATA_DEV_ATA = 1, /* ATA device */ 128 ATA_DEV_ATA = 1, /* ATA device */
@@ -115,9 +137,9 @@ enum {
115 ATA_FLAG_PORT_DISABLED = (1 << 2), /* port is disabled, ignore it */ 137 ATA_FLAG_PORT_DISABLED = (1 << 2), /* port is disabled, ignore it */
116 ATA_FLAG_SATA = (1 << 3), 138 ATA_FLAG_SATA = (1 << 3),
117 ATA_FLAG_NO_LEGACY = (1 << 4), /* no legacy mode check */ 139 ATA_FLAG_NO_LEGACY = (1 << 4), /* no legacy mode check */
118 ATA_FLAG_SRST = (1 << 5), /* use ATA SRST, not E.D.D. */ 140 ATA_FLAG_SRST = (1 << 5), /* (obsolete) use ATA SRST, not E.D.D. */
119 ATA_FLAG_MMIO = (1 << 6), /* use MMIO, not PIO */ 141 ATA_FLAG_MMIO = (1 << 6), /* use MMIO, not PIO */
120 ATA_FLAG_SATA_RESET = (1 << 7), /* use COMRESET */ 142 ATA_FLAG_SATA_RESET = (1 << 7), /* (obsolete) use COMRESET */
121 ATA_FLAG_PIO_DMA = (1 << 8), /* PIO cmds via DMA */ 143 ATA_FLAG_PIO_DMA = (1 << 8), /* PIO cmds via DMA */
122 ATA_FLAG_NOINTR = (1 << 9), /* FIXME: Remove this once 144 ATA_FLAG_NOINTR = (1 << 9), /* FIXME: Remove this once
123 * proper HSM is in place. */ 145 * proper HSM is in place. */
@@ -129,10 +151,14 @@ enum {
129 ATA_FLAG_PIO_LBA48 = (1 << 13), /* Host DMA engine is LBA28 only */ 151 ATA_FLAG_PIO_LBA48 = (1 << 13), /* Host DMA engine is LBA28 only */
130 ATA_FLAG_IRQ_MASK = (1 << 14), /* Mask IRQ in PIO xfers */ 152 ATA_FLAG_IRQ_MASK = (1 << 14), /* Mask IRQ in PIO xfers */
131 153
154 ATA_FLAG_FLUSH_PIO_TASK = (1 << 15), /* Flush PIO task */
155 ATA_FLAG_IN_EH = (1 << 16), /* EH in progress */
156
132	ATA_QCFLAG_ACTIVE	= (1 << 1), /* cmd not yet ack'd to scsi layer */	157	ATA_QCFLAG_ACTIVE	= (1 << 1), /* cmd not yet ack'd to scsi layer */
133 ATA_QCFLAG_SG = (1 << 3), /* have s/g table? */ 158 ATA_QCFLAG_SG = (1 << 3), /* have s/g table? */
134 ATA_QCFLAG_SINGLE = (1 << 4), /* no s/g, just a single buffer */ 159 ATA_QCFLAG_SINGLE = (1 << 4), /* no s/g, just a single buffer */
135 ATA_QCFLAG_DMAMAP = ATA_QCFLAG_SG | ATA_QCFLAG_SINGLE, 160 ATA_QCFLAG_DMAMAP = ATA_QCFLAG_SG | ATA_QCFLAG_SINGLE,
161 ATA_QCFLAG_EH_SCHEDULED = (1 << 5), /* EH scheduled */
136 162
137 /* various lengths of time */ 163 /* various lengths of time */
138 ATA_TMOUT_EDD = 5 * HZ, /* heuristic */ 164 ATA_TMOUT_EDD = 5 * HZ, /* heuristic */
@@ -189,10 +215,15 @@ enum hsm_task_states {
189}; 215};
190 216
191enum ata_completion_errors { 217enum ata_completion_errors {
192 AC_ERR_OTHER = (1 << 0), 218 AC_ERR_DEV = (1 << 0), /* device reported error */
193 AC_ERR_DEV = (1 << 1), 219 AC_ERR_HSM = (1 << 1), /* host state machine violation */
194 AC_ERR_ATA_BUS = (1 << 2), 220 AC_ERR_TIMEOUT = (1 << 2), /* timeout */
195 AC_ERR_HOST_BUS = (1 << 3), 221 AC_ERR_MEDIA = (1 << 3), /* media error */
222 AC_ERR_ATA_BUS = (1 << 4), /* ATA bus error */
223 AC_ERR_HOST_BUS = (1 << 5), /* host bus error */
224 AC_ERR_SYSTEM = (1 << 6), /* system error */
225 AC_ERR_INVALID = (1 << 7), /* invalid argument */
226 AC_ERR_OTHER = (1 << 8), /* unknown */
196}; 227};
197 228
198/* forward declarations */ 229/* forward declarations */
@@ -202,7 +233,10 @@ struct ata_port;
202struct ata_queued_cmd; 233struct ata_queued_cmd;
203 234
204/* typedefs */ 235/* typedefs */
205typedef int (*ata_qc_cb_t) (struct ata_queued_cmd *qc); 236typedef void (*ata_qc_cb_t) (struct ata_queued_cmd *qc);
237typedef void (*ata_probeinit_fn_t)(struct ata_port *);
238typedef int (*ata_reset_fn_t)(struct ata_port *, int, unsigned int *);
239typedef void (*ata_postreset_fn_t)(struct ata_port *ap, unsigned int *);
206 240
207struct ata_ioports { 241struct ata_ioports {
208 unsigned long cmd_addr; 242 unsigned long cmd_addr;
@@ -313,6 +347,8 @@ struct ata_device {
313 347
314 unsigned int multi_count; /* sectors count for 348 unsigned int multi_count; /* sectors count for
315 READ/WRITE MULTIPLE */ 349 READ/WRITE MULTIPLE */
350 unsigned int max_sectors; /* per-device max sectors */
351 unsigned int cdb_len;
316 352
317 /* for CHS addressing */ 353 /* for CHS addressing */
318 u16 cylinders; /* Number of cylinders */ 354 u16 cylinders; /* Number of cylinders */
@@ -342,7 +378,6 @@ struct ata_port {
342 unsigned int mwdma_mask; 378 unsigned int mwdma_mask;
343 unsigned int udma_mask; 379 unsigned int udma_mask;
344 unsigned int cbl; /* cable type; ATA_CBL_xxx */ 380 unsigned int cbl; /* cable type; ATA_CBL_xxx */
345 unsigned int cdb_len;
346 381
347 struct ata_device device[ATA_MAX_DEVICES]; 382 struct ata_device device[ATA_MAX_DEVICES];
348 383
@@ -359,6 +394,9 @@ struct ata_port {
359 unsigned int hsm_task_state; 394 unsigned int hsm_task_state;
360 unsigned long pio_task_timeout; 395 unsigned long pio_task_timeout;
361 396
397 u32 msg_enable;
398 struct list_head eh_done_q;
399
362 void *private_data; 400 void *private_data;
363}; 401};
364 402
@@ -378,7 +416,9 @@ struct ata_port_operations {
378 u8 (*check_altstatus)(struct ata_port *ap); 416 u8 (*check_altstatus)(struct ata_port *ap);
379 void (*dev_select)(struct ata_port *ap, unsigned int device); 417 void (*dev_select)(struct ata_port *ap, unsigned int device);
380 418
381 void (*phy_reset) (struct ata_port *ap); 419 void (*phy_reset) (struct ata_port *ap); /* obsolete */
420 int (*probe_reset) (struct ata_port *ap, unsigned int *classes);
421
382 void (*post_set_mode) (struct ata_port *ap); 422 void (*post_set_mode) (struct ata_port *ap);
383 423
384 int (*check_atapi_dma) (struct ata_queued_cmd *qc); 424 int (*check_atapi_dma) (struct ata_queued_cmd *qc);
@@ -387,7 +427,7 @@ struct ata_port_operations {
387 void (*bmdma_start) (struct ata_queued_cmd *qc); 427 void (*bmdma_start) (struct ata_queued_cmd *qc);
388 428
389 void (*qc_prep) (struct ata_queued_cmd *qc); 429 void (*qc_prep) (struct ata_queued_cmd *qc);
390 int (*qc_issue) (struct ata_queued_cmd *qc); 430 unsigned int (*qc_issue) (struct ata_queued_cmd *qc);
391 431
392 void (*eng_timeout) (struct ata_port *ap); 432 void (*eng_timeout) (struct ata_port *ap);
393 433
@@ -435,6 +475,16 @@ extern void ata_port_probe(struct ata_port *);
435extern void __sata_phy_reset(struct ata_port *ap); 475extern void __sata_phy_reset(struct ata_port *ap);
436extern void sata_phy_reset(struct ata_port *ap); 476extern void sata_phy_reset(struct ata_port *ap);
437extern void ata_bus_reset(struct ata_port *ap); 477extern void ata_bus_reset(struct ata_port *ap);
478extern int ata_drive_probe_reset(struct ata_port *ap,
479 ata_probeinit_fn_t probeinit,
480 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
481 ata_postreset_fn_t postreset, unsigned int *classes);
482extern void ata_std_probeinit(struct ata_port *ap);
483extern int ata_std_softreset(struct ata_port *ap, int verbose,
484 unsigned int *classes);
485extern int sata_std_hardreset(struct ata_port *ap, int verbose,
486 unsigned int *class);
487extern void ata_std_postreset(struct ata_port *ap, unsigned int *classes);
438extern void ata_port_disable(struct ata_port *); 488extern void ata_port_disable(struct ata_port *);
439extern void ata_std_ports(struct ata_ioports *ioaddr); 489extern void ata_std_ports(struct ata_ioports *ioaddr);
440#ifdef CONFIG_PCI 490#ifdef CONFIG_PCI
@@ -449,7 +499,10 @@ extern void ata_host_set_remove(struct ata_host_set *host_set);
449extern int ata_scsi_detect(struct scsi_host_template *sht); 499extern int ata_scsi_detect(struct scsi_host_template *sht);
450extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg); 500extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
451extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)); 501extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
502extern enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd);
452extern int ata_scsi_error(struct Scsi_Host *host); 503extern int ata_scsi_error(struct Scsi_Host *host);
504extern void ata_eh_qc_complete(struct ata_queued_cmd *qc);
505extern void ata_eh_qc_retry(struct ata_queued_cmd *qc);
453extern int ata_scsi_release(struct Scsi_Host *host); 506extern int ata_scsi_release(struct Scsi_Host *host);
454extern unsigned int ata_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc); 507extern unsigned int ata_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc);
455extern int ata_scsi_device_resume(struct scsi_device *); 508extern int ata_scsi_device_resume(struct scsi_device *);
@@ -457,6 +510,9 @@ extern int ata_scsi_device_suspend(struct scsi_device *);
457extern int ata_device_resume(struct ata_port *, struct ata_device *); 510extern int ata_device_resume(struct ata_port *, struct ata_device *);
458extern int ata_device_suspend(struct ata_port *, struct ata_device *); 511extern int ata_device_suspend(struct ata_port *, struct ata_device *);
459extern int ata_ratelimit(void); 512extern int ata_ratelimit(void);
513extern unsigned int ata_busy_sleep(struct ata_port *ap,
514 unsigned long timeout_pat,
515 unsigned long timeout);
460 516
461/* 517/*
462 * Default driver ops implementations 518 * Default driver ops implementations
@@ -470,26 +526,29 @@ extern void ata_std_dev_select (struct ata_port *ap, unsigned int device);
470extern u8 ata_check_status(struct ata_port *ap); 526extern u8 ata_check_status(struct ata_port *ap);
471extern u8 ata_altstatus(struct ata_port *ap); 527extern u8 ata_altstatus(struct ata_port *ap);
472extern void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf); 528extern void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf);
529extern int ata_std_probe_reset(struct ata_port *ap, unsigned int *classes);
473extern int ata_port_start (struct ata_port *ap); 530extern int ata_port_start (struct ata_port *ap);
474extern void ata_port_stop (struct ata_port *ap); 531extern void ata_port_stop (struct ata_port *ap);
475extern void ata_host_stop (struct ata_host_set *host_set); 532extern void ata_host_stop (struct ata_host_set *host_set);
476extern irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs); 533extern irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs);
477extern void ata_qc_prep(struct ata_queued_cmd *qc); 534extern void ata_qc_prep(struct ata_queued_cmd *qc);
478extern int ata_qc_issue_prot(struct ata_queued_cmd *qc); 535extern unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc);
479extern void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, 536extern void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf,
480 unsigned int buflen); 537 unsigned int buflen);
481extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg, 538extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
482 unsigned int n_elem); 539 unsigned int n_elem);
483extern unsigned int ata_dev_classify(const struct ata_taskfile *tf); 540extern unsigned int ata_dev_classify(const struct ata_taskfile *tf);
484extern void ata_dev_id_string(const u16 *id, unsigned char *s, 541extern void ata_id_string(const u16 *id, unsigned char *s,
485 unsigned int ofs, unsigned int len); 542 unsigned int ofs, unsigned int len);
543extern void ata_id_c_string(const u16 *id, unsigned char *s,
544 unsigned int ofs, unsigned int len);
486extern void ata_dev_config(struct ata_port *ap, unsigned int i); 545extern void ata_dev_config(struct ata_port *ap, unsigned int i);
487extern void ata_bmdma_setup (struct ata_queued_cmd *qc); 546extern void ata_bmdma_setup (struct ata_queued_cmd *qc);
488extern void ata_bmdma_start (struct ata_queued_cmd *qc); 547extern void ata_bmdma_start (struct ata_queued_cmd *qc);
489extern void ata_bmdma_stop(struct ata_queued_cmd *qc); 548extern void ata_bmdma_stop(struct ata_queued_cmd *qc);
490extern u8 ata_bmdma_status(struct ata_port *ap); 549extern u8 ata_bmdma_status(struct ata_port *ap);
491extern void ata_bmdma_irq_clear(struct ata_port *ap); 550extern void ata_bmdma_irq_clear(struct ata_port *ap);
492extern void ata_qc_complete(struct ata_queued_cmd *qc); 551extern void __ata_qc_complete(struct ata_queued_cmd *qc);
493extern void ata_eng_timeout(struct ata_port *ap); 552extern void ata_eng_timeout(struct ata_port *ap);
494extern void ata_scsi_simulate(struct ata_port *ap, struct ata_device *dev, 553extern void ata_scsi_simulate(struct ata_port *ap, struct ata_device *dev,
495 struct scsi_cmnd *cmd, 554 struct scsi_cmnd *cmd,
@@ -657,9 +716,9 @@ static inline u8 ata_wait_idle(struct ata_port *ap)
657 716
658 if (status & (ATA_BUSY | ATA_DRQ)) { 717 if (status & (ATA_BUSY | ATA_DRQ)) {
659 unsigned long l = ap->ioaddr.status_addr; 718 unsigned long l = ap->ioaddr.status_addr;
660 printk(KERN_WARNING 719 if (ata_msg_warn(ap))
661 "ATA: abnormal status 0x%X on port 0x%lX\n", 720 printk(KERN_WARNING "ATA: abnormal status 0x%X on port 0x%lX\n",
662 status, l); 721 status, l);
663 } 722 }
664 723
665 return status; 724 return status;
@@ -701,6 +760,24 @@ static inline void ata_qc_reinit(struct ata_queued_cmd *qc)
701 ata_tf_init(qc->ap, &qc->tf, qc->dev->devno); 760 ata_tf_init(qc->ap, &qc->tf, qc->dev->devno);
702} 761}
703 762
763/**
764 * ata_qc_complete - Complete an active ATA command
765 * @qc: Command to complete
766 * @err_mask: ATA Status register contents
767 *
768 * Indicate to the mid and upper layers that an ATA
769 * command has completed, with either an ok or not-ok status.
770 *
771 * LOCKING:
772 * spin_lock_irqsave(host_set lock)
773 */
774static inline void ata_qc_complete(struct ata_queued_cmd *qc)
775{
776 if (unlikely(qc->flags & ATA_QCFLAG_EH_SCHEDULED))
777 return;
778
779 __ata_qc_complete(qc);
780}
704 781
705/** 782/**
706 * ata_irq_on - Enable interrupts on a port. 783 * ata_irq_on - Enable interrupts on a port.
@@ -751,7 +828,8 @@ static inline u8 ata_irq_ack(struct ata_port *ap, unsigned int chk_drq)
751 828
752 status = ata_busy_wait(ap, bits, 1000); 829 status = ata_busy_wait(ap, bits, 1000);
753 if (status & bits) 830 if (status & bits)
754 DPRINTK("abnormal status 0x%X\n", status); 831 if (ata_msg_err(ap))
832 printk(KERN_ERR "abnormal status 0x%X\n", status);
755 833
756 /* get controller status; clear intr, err bits */ 834 /* get controller status; clear intr, err bits */
757 if (ap->flags & ATA_FLAG_MMIO) { 835 if (ap->flags & ATA_FLAG_MMIO) {
@@ -769,8 +847,10 @@ static inline u8 ata_irq_ack(struct ata_port *ap, unsigned int chk_drq)
769 post_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); 847 post_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
770 } 848 }
771 849
772 VPRINTK("irq ack: host_stat 0x%X, new host_stat 0x%X, drv_stat 0x%X\n", 850 if (ata_msg_intr(ap))
773 host_stat, post_stat, status); 851 printk(KERN_INFO "%s: irq ack: host_stat 0x%X, new host_stat 0x%X, drv_stat 0x%X\n",
852 __FUNCTION__,
853 host_stat, post_stat, status);
774 854
775 return status; 855 return status;
776} 856}
@@ -807,7 +887,7 @@ static inline int ata_try_flush_cache(const struct ata_device *dev)
807static inline unsigned int ac_err_mask(u8 status) 887static inline unsigned int ac_err_mask(u8 status)
808{ 888{
809 if (status & ATA_BUSY) 889 if (status & ATA_BUSY)
810 return AC_ERR_ATA_BUS; 890 return AC_ERR_HSM;
811 if (status & (ATA_ERR | ATA_DF)) 891 if (status & (ATA_ERR | ATA_DF))
812 return AC_ERR_DEV; 892 return AC_ERR_DEV;
813 return 0; 893 return 0;
diff --git a/include/scsi/scsi_eh.h b/include/scsi/scsi_eh.h
index fabd879c2f2e..d160880b2a87 100644
--- a/include/scsi/scsi_eh.h
+++ b/include/scsi/scsi_eh.h
@@ -35,6 +35,9 @@ static inline int scsi_sense_valid(struct scsi_sense_hdr *sshdr)
35} 35}
36 36
37 37
38extern void scsi_eh_finish_cmd(struct scsi_cmnd *scmd,
39 struct list_head *done_q);
40extern void scsi_eh_flush_done_q(struct list_head *done_q);
38extern void scsi_report_bus_reset(struct Scsi_Host *, int); 41extern void scsi_report_bus_reset(struct Scsi_Host *, int);
39extern void scsi_report_device_reset(struct Scsi_Host *, int, int); 42extern void scsi_report_device_reset(struct Scsi_Host *, int, int);
40extern int scsi_block_when_processing_errors(struct scsi_device *); 43extern int scsi_block_when_processing_errors(struct scsi_device *);