Diffstat (limited to 'drivers/scsi')
-rw-r--r--  drivers/scsi/Makefile        |    2
-rw-r--r--  drivers/scsi/ahci.c          |  196
-rw-r--r--  drivers/scsi/ata_piix.c      |   59
-rw-r--r--  drivers/scsi/libata-bmdma.c  |  703
-rw-r--r--  drivers/scsi/libata-core.c   | 1634
-rw-r--r--  drivers/scsi/libata-scsi.c   |  230
-rw-r--r--  drivers/scsi/libata.h        |    2
-rw-r--r--  drivers/scsi/pdc_adma.c      |    6
-rw-r--r--  drivers/scsi/sata_mv.c       |  279
-rw-r--r--  drivers/scsi/sata_nv.c       |    2
-rw-r--r--  drivers/scsi/sata_promise.c  |  127
-rw-r--r--  drivers/scsi/sata_qstor.c    |   10
-rw-r--r--  drivers/scsi/sata_sil.c      |   34
-rw-r--r--  drivers/scsi/sata_sil24.c    |   88
-rw-r--r--  drivers/scsi/sata_sis.c      |    2
-rw-r--r--  drivers/scsi/sata_svw.c      |    2
-rw-r--r--  drivers/scsi/sata_sx4.c      |   25
-rw-r--r--  drivers/scsi/sata_uli.c      |    2
-rw-r--r--  drivers/scsi/sata_via.c      |    2
-rw-r--r--  drivers/scsi/sata_vsc.c      |    2
-rw-r--r--  drivers/scsi/scsi_error.c    |    7
21 files changed, 2111 insertions, 1303 deletions
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 320e765fa0cd..15dc2e00e1b2 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -163,7 +163,7 @@ ncr53c8xx-flags-$(CONFIG_SCSI_ZALON) \
 CFLAGS_ncr53c8xx.o	:= $(ncr53c8xx-flags-y) $(ncr53c8xx-flags-m)
 zalon7xx-objs		:= zalon.o ncr53c8xx.o
 NCR_Q720_mod-objs	:= NCR_Q720.o ncr53c8xx.o
-libata-objs		:= libata-core.o libata-scsi.o
+libata-objs		:= libata-core.o libata-scsi.o libata-bmdma.o
 oktagon_esp_mod-objs	:= oktagon_esp.o oktagon_io.o
 
 # Files generated that shall be removed upon make clean
diff --git a/drivers/scsi/ahci.c b/drivers/scsi/ahci.c
index a800fb51168b..1c2ab3dede71 100644
--- a/drivers/scsi/ahci.c
+++ b/drivers/scsi/ahci.c
@@ -66,6 +66,8 @@ enum {
 	AHCI_IRQ_ON_SG		= (1 << 31),
 	AHCI_CMD_ATAPI		= (1 << 5),
 	AHCI_CMD_WRITE		= (1 << 6),
+	AHCI_CMD_RESET		= (1 << 8),
+	AHCI_CMD_CLR_BUSY	= (1 << 10),
 
 	RX_FIS_D2H_REG		= 0x40,	/* offset of D2H Register FIS data */
 
@@ -85,6 +87,7 @@ enum {
 
 	/* HOST_CAP bits */
 	HOST_CAP_64		= (1 << 31), /* PCI DAC (64-bit DMA) support */
+	HOST_CAP_CLO		= (1 << 24), /* Command List Override support */
 
 	/* registers for each SATA port */
 	PORT_LST_ADDR		= 0x00, /* command list DMA addr */
@@ -138,6 +141,7 @@ enum {
 	PORT_CMD_LIST_ON	= (1 << 15), /* cmd list DMA engine running */
 	PORT_CMD_FIS_ON		= (1 << 14), /* FIS DMA engine running */
 	PORT_CMD_FIS_RX		= (1 << 4), /* Enable FIS receive DMA engine */
+	PORT_CMD_CLO		= (1 << 3), /* Command list override */
 	PORT_CMD_POWER_ON	= (1 << 2), /* Power up device */
 	PORT_CMD_SPIN_UP	= (1 << 1), /* Spin up device */
 	PORT_CMD_START		= (1 << 0), /* Enable port DMA engine */
@@ -184,9 +188,9 @@ struct ahci_port_priv {
 static u32 ahci_scr_read (struct ata_port *ap, unsigned int sc_reg);
 static void ahci_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
 static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
-static int ahci_qc_issue(struct ata_queued_cmd *qc);
+static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
 static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *regs);
-static void ahci_phy_reset(struct ata_port *ap);
+static int ahci_probe_reset(struct ata_port *ap, unsigned int *classes);
 static void ahci_irq_clear(struct ata_port *ap);
 static void ahci_eng_timeout(struct ata_port *ap);
 static int ahci_port_start(struct ata_port *ap);
@@ -202,11 +206,11 @@ static struct scsi_host_template ahci_sht = {
 	.name			= DRV_NAME,
 	.ioctl			= ata_scsi_ioctl,
 	.queuecommand		= ata_scsi_queuecmd,
+	.eh_timed_out		= ata_scsi_timed_out,
 	.eh_strategy_handler	= ata_scsi_error,
 	.can_queue		= ATA_DEF_QUEUE,
 	.this_id		= ATA_SHT_THIS_ID,
 	.sg_tablesize		= AHCI_MAX_SG,
-	.max_sectors		= ATA_MAX_SECTORS,
 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
 	.emulated		= ATA_SHT_EMULATED,
 	.use_clustering		= AHCI_USE_CLUSTERING,
@@ -225,7 +229,7 @@ static const struct ata_port_operations ahci_ops = {
 
 	.tf_read		= ahci_tf_read,
 
-	.phy_reset		= ahci_phy_reset,
+	.probe_reset		= ahci_probe_reset,
 
 	.qc_prep		= ahci_qc_prep,
 	.qc_issue		= ahci_qc_issue,
@@ -247,8 +251,7 @@ static const struct ata_port_info ahci_port_info[] = {
 	{
 		.sht		= &ahci_sht,
 		.host_flags	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
-				  ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
-				  ATA_FLAG_PIO_DMA,
+				  ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
 		.pio_mask	= 0x1f, /* pio0-4 */
 		.udma_mask	= 0x7f, /* udma0-6 ; FIXME */
 		.port_ops	= &ahci_ops,
@@ -450,17 +453,48 @@ static void ahci_scr_write (struct ata_port *ap, unsigned int sc_reg_in,
 	writel(val, (void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
 }
 
-static void ahci_phy_reset(struct ata_port *ap)
+static int ahci_stop_engine(struct ata_port *ap)
 {
-	void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
-	struct ata_taskfile tf;
-	struct ata_device *dev = &ap->device[0];
-	u32 new_tmp, tmp;
+	void __iomem *mmio = ap->host_set->mmio_base;
+	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
+	int work;
+	u32 tmp;
 
-	__sata_phy_reset(ap);
+	tmp = readl(port_mmio + PORT_CMD);
+	tmp &= ~PORT_CMD_START;
+	writel(tmp, port_mmio + PORT_CMD);
 
-	if (ap->flags & ATA_FLAG_PORT_DISABLED)
-		return;
+	/* wait for engine to stop.  TODO: this could be
+	 * as long as 500 msec
+	 */
+	work = 1000;
+	while (work-- > 0) {
+		tmp = readl(port_mmio + PORT_CMD);
+		if ((tmp & PORT_CMD_LIST_ON) == 0)
+			return 0;
+		udelay(10);
+	}
+
+	return -EIO;
+}
+
+static void ahci_start_engine(struct ata_port *ap)
+{
+	void __iomem *mmio = ap->host_set->mmio_base;
+	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
+	u32 tmp;
+
+	tmp = readl(port_mmio + PORT_CMD);
+	tmp |= PORT_CMD_START;
+	writel(tmp, port_mmio + PORT_CMD);
+	readl(port_mmio + PORT_CMD); /* flush */
+}
+
+static unsigned int ahci_dev_classify(struct ata_port *ap)
+{
+	void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
+	struct ata_taskfile tf;
+	u32 tmp;
 
 	tmp = readl(port_mmio + PORT_SIG);
 	tf.lbah = (tmp >> 24) & 0xff;
@@ -468,15 +502,46 @@ static void ahci_phy_reset(struct ata_port *ap)
 	tf.lbal = (tmp >> 8) & 0xff;
 	tf.nsect = (tmp) & 0xff;
 
-	dev->class = ata_dev_classify(&tf);
-	if (!ata_dev_present(dev)) {
-		ata_port_disable(ap);
-		return;
-	}
+	return ata_dev_classify(&tf);
+}
+
+static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, u32 opts)
+{
+	pp->cmd_slot[0].opts = cpu_to_le32(opts);
+	pp->cmd_slot[0].status = 0;
+	pp->cmd_slot[0].tbl_addr = cpu_to_le32(pp->cmd_tbl_dma & 0xffffffff);
+	pp->cmd_slot[0].tbl_addr_hi = cpu_to_le32((pp->cmd_tbl_dma >> 16) >> 16);
+}
+
+static int ahci_hardreset(struct ata_port *ap, int verbose, unsigned int *class)
+{
+	int rc;
+
+	DPRINTK("ENTER\n");
+
+	ahci_stop_engine(ap);
+	rc = sata_std_hardreset(ap, verbose, class);
+	ahci_start_engine(ap);
+
+	if (rc == 0)
+		*class = ahci_dev_classify(ap);
+	if (*class == ATA_DEV_UNKNOWN)
+		*class = ATA_DEV_NONE;
+
+	DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
+	return rc;
+}
+
+static void ahci_postreset(struct ata_port *ap, unsigned int *class)
+{
+	void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
+	u32 new_tmp, tmp;
+
+	ata_std_postreset(ap, class);
 
 	/* Make sure port's ATAPI bit is set appropriately */
 	new_tmp = tmp = readl(port_mmio + PORT_CMD);
-	if (dev->class == ATA_DEV_ATAPI)
+	if (*class == ATA_DEV_ATAPI)
 		new_tmp |= PORT_CMD_ATAPI;
 	else
 		new_tmp &= ~PORT_CMD_ATAPI;
@@ -486,6 +551,12 @@ static void ahci_phy_reset(struct ata_port *ap)
 	}
 }
 
+static int ahci_probe_reset(struct ata_port *ap, unsigned int *classes)
+{
+	return ata_drive_probe_reset(ap, NULL, NULL, ahci_hardreset,
+				     ahci_postreset, classes);
+}
+
 static u8 ahci_check_status(struct ata_port *ap)
 {
 	void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr;
@@ -533,42 +604,36 @@ static void ahci_qc_prep(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
 	struct ahci_port_priv *pp = ap->private_data;
+	int is_atapi = is_atapi_taskfile(&qc->tf);
 	u32 opts;
 	const u32 cmd_fis_len = 5; /* five dwords */
 	unsigned int n_elem;
 
 	/*
-	 * Fill in command slot information (currently only one slot,
-	 * slot 0, is currently since we don't do queueing)
-	 */
-
-	opts = cmd_fis_len;
-	if (qc->tf.flags & ATA_TFLAG_WRITE)
-		opts |= AHCI_CMD_WRITE;
-	if (is_atapi_taskfile(&qc->tf))
-		opts |= AHCI_CMD_ATAPI;
-
-	pp->cmd_slot[0].opts = cpu_to_le32(opts);
-	pp->cmd_slot[0].status = 0;
-	pp->cmd_slot[0].tbl_addr = cpu_to_le32(pp->cmd_tbl_dma & 0xffffffff);
-	pp->cmd_slot[0].tbl_addr_hi = cpu_to_le32((pp->cmd_tbl_dma >> 16) >> 16);
-
-	/*
 	 * Fill in command table information.  First, the header,
 	 * a SATA Register - Host to Device command FIS.
 	 */
 	ata_tf_to_fis(&qc->tf, pp->cmd_tbl, 0);
-	if (opts & AHCI_CMD_ATAPI) {
+	if (is_atapi) {
 		memset(pp->cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
-		memcpy(pp->cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, ap->cdb_len);
+		memcpy(pp->cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb,
+		       qc->dev->cdb_len);
 	}
 
-	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
-		return;
+	n_elem = 0;
+	if (qc->flags & ATA_QCFLAG_DMAMAP)
+		n_elem = ahci_fill_sg(qc);
 
-	n_elem = ahci_fill_sg(qc);
+	/*
+	 * Fill in command slot information.
+	 */
+	opts = cmd_fis_len | n_elem << 16;
+	if (qc->tf.flags & ATA_TFLAG_WRITE)
+		opts |= AHCI_CMD_WRITE;
+	if (is_atapi)
+		opts |= AHCI_CMD_ATAPI;
 
-	pp->cmd_slot[0].opts |= cpu_to_le32(n_elem << 16);
+	ahci_fill_cmd_slot(pp, opts);
 }
 
 static void ahci_restart_port(struct ata_port *ap, u32 irq_stat)
@@ -576,7 +641,6 @@ static void ahci_restart_port(struct ata_port *ap, u32 irq_stat)
 	void __iomem *mmio = ap->host_set->mmio_base;
 	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
 	u32 tmp;
-	int work;
 
 	if ((ap->device[0].class != ATA_DEV_ATAPI) ||
 	    ((irq_stat & PORT_IRQ_TF_ERR) == 0))
@@ -592,20 +656,7 @@ static void ahci_restart_port(struct ata_port *ap, u32 irq_stat)
 		readl(port_mmio + PORT_SCR_ERR));
 
 	/* stop DMA */
-	tmp = readl(port_mmio + PORT_CMD);
-	tmp &= ~PORT_CMD_START;
-	writel(tmp, port_mmio + PORT_CMD);
-
-	/* wait for engine to stop.  TODO: this could be
-	 * as long as 500 msec
-	 */
-	work = 1000;
-	while (work-- > 0) {
-		tmp = readl(port_mmio + PORT_CMD);
-		if ((tmp & PORT_CMD_LIST_ON) == 0)
-			break;
-		udelay(10);
-	}
+	ahci_stop_engine(ap);
 
 	/* clear SATA phy error, if any */
 	tmp = readl(port_mmio + PORT_SCR_ERR);
@@ -624,10 +675,7 @@ static void ahci_restart_port(struct ata_port *ap, u32 irq_stat)
 	}
 
 	/* re-start DMA */
-	tmp = readl(port_mmio + PORT_CMD);
-	tmp |= PORT_CMD_START;
-	writel(tmp, port_mmio + PORT_CMD);
-	readl(port_mmio + PORT_CMD); /* flush */
+	ahci_start_engine(ap);
 }
 
 static void ahci_eng_timeout(struct ata_port *ap)
@@ -642,25 +690,13 @@ static void ahci_eng_timeout(struct ata_port *ap)
 
 	spin_lock_irqsave(&host_set->lock, flags);
 
+	ahci_restart_port(ap, readl(port_mmio + PORT_IRQ_STAT));
 	qc = ata_qc_from_tag(ap, ap->active_tag);
-	if (!qc) {
-		printk(KERN_ERR "ata%u: BUG: timeout without command\n",
-		       ap->id);
-	} else {
-		ahci_restart_port(ap, readl(port_mmio + PORT_IRQ_STAT));
-
-		/* hack alert!  We cannot use the supplied completion
-		 * function from inside the ->eh_strategy_handler() thread.
-		 * libata is the only user of ->eh_strategy_handler() in
-		 * any kernel, so the default scsi_done() assumes it is
-		 * not being called from the SCSI EH.
-		 */
-		qc->scsidone = scsi_finish_command;
-		qc->err_mask |= AC_ERR_OTHER;
-		ata_qc_complete(qc);
-	}
+	qc->err_mask |= AC_ERR_TIMEOUT;
 
 	spin_unlock_irqrestore(&host_set->lock, flags);
+
+	ata_eh_qc_complete(qc);
 }
 
 static inline int ahci_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
@@ -678,7 +714,7 @@ static inline int ahci_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
 	ci = readl(port_mmio + PORT_CMD_ISSUE);
 	if (likely((ci & 0x1) == 0)) {
 		if (qc) {
-			assert(qc->err_mask == 0);
+			WARN_ON(qc->err_mask);
 			ata_qc_complete(qc);
 			qc = NULL;
 		}
@@ -697,7 +733,7 @@ static inline int ahci_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
 		ahci_restart_port(ap, status);
 
 		if (qc) {
-			qc->err_mask |= AC_ERR_OTHER;
+			qc->err_mask |= err_mask;
 			ata_qc_complete(qc);
 		}
 	}
@@ -776,7 +812,7 @@ static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *
 	return IRQ_RETVAL(handled);
 }
 
-static int ahci_qc_issue(struct ata_queued_cmd *qc)
+static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
 	void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
diff --git a/drivers/scsi/ata_piix.c b/drivers/scsi/ata_piix.c
index fc3ca051ceed..4cc1108f721a 100644
--- a/drivers/scsi/ata_piix.c
+++ b/drivers/scsi/ata_piix.c
@@ -101,9 +101,11 @@ enum {
 	ICH5_PCS		= 0x92,	/* port control and status */
 	PIIX_SCC		= 0x0A, /* sub-class code register */
 
-	PIIX_FLAG_AHCI		= (1 << 28), /* AHCI possible */
-	PIIX_FLAG_CHECKINTR	= (1 << 29), /* make sure PCI INTx enabled */
-	PIIX_FLAG_COMBINED	= (1 << 30), /* combined mode possible */
+	PIIX_FLAG_AHCI		= (1 << 27), /* AHCI possible */
+	PIIX_FLAG_CHECKINTR	= (1 << 28), /* make sure PCI INTx enabled */
+	PIIX_FLAG_COMBINED	= (1 << 29), /* combined mode possible */
+	/* ICH6/7 use different scheme for map value */
+	PIIX_FLAG_COMBINED_ICH6	= PIIX_FLAG_COMBINED | (1 << 30),
 
 	/* combined mode.  if set, PATA is channel 0.
 	 * if clear, PATA is channel 1.
@@ -178,11 +180,11 @@ static struct scsi_host_template piix_sht = {
 	.name			= DRV_NAME,
 	.ioctl			= ata_scsi_ioctl,
 	.queuecommand		= ata_scsi_queuecmd,
+	.eh_timed_out		= ata_scsi_timed_out,
 	.eh_strategy_handler	= ata_scsi_error,
 	.can_queue		= ATA_DEF_QUEUE,
 	.this_id		= ATA_SHT_THIS_ID,
 	.sg_tablesize		= LIBATA_MAX_PRD,
-	.max_sectors		= ATA_MAX_SECTORS,
 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
 	.emulated		= ATA_SHT_EMULATED,
 	.use_clustering		= ATA_SHT_USE_CLUSTERING,
@@ -297,8 +299,8 @@ static struct ata_port_info piix_port_info[] = {
 	{
 		.sht		= &piix_sht,
 		.host_flags	= ATA_FLAG_SATA | ATA_FLAG_SRST |
-				  PIIX_FLAG_COMBINED | PIIX_FLAG_CHECKINTR |
-				  ATA_FLAG_SLAVE_POSS,
+				  PIIX_FLAG_COMBINED_ICH6 |
+				  PIIX_FLAG_CHECKINTR | ATA_FLAG_SLAVE_POSS,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.mwdma_mask	= 0x07, /* mwdma0-2 */
 		.udma_mask	= 0x7f,	/* udma0-6 */
@@ -309,8 +311,9 @@ static struct ata_port_info piix_port_info[] = {
 	{
 		.sht		= &piix_sht,
 		.host_flags	= ATA_FLAG_SATA | ATA_FLAG_SRST |
-				  PIIX_FLAG_COMBINED | PIIX_FLAG_CHECKINTR |
-				  ATA_FLAG_SLAVE_POSS | PIIX_FLAG_AHCI,
+				  PIIX_FLAG_COMBINED_ICH6 |
+				  PIIX_FLAG_CHECKINTR | ATA_FLAG_SLAVE_POSS |
+				  PIIX_FLAG_AHCI,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.mwdma_mask	= 0x07, /* mwdma0-2 */
 		.udma_mask	= 0x7f,	/* udma0-6 */
@@ -411,9 +414,6 @@ static int piix_sata_probe (struct ata_port *ap)
 	int orig_mask, mask, i;
 	u8 pcs;
 
-	mask = (PIIX_PORT_PRESENT << ap->hard_port_no) |
-	       (PIIX_PORT_ENABLED << ap->hard_port_no);
-
 	pci_read_config_byte(pdev, ICH5_PCS, &pcs);
 	orig_mask = (int) pcs & 0xff;
 
@@ -627,6 +627,7 @@ static int piix_disable_ahci(struct pci_dev *pdev)
 
 /**
  *	piix_check_450nx_errata	-	Check for problem 450NX setup
+ *	@ata_dev: the PCI device to check
  *
  *	Check for the present of 450NX errata #19 and errata #25. If
  *	they are found return an error code so we can turn off DMA
@@ -680,6 +681,7 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct ata_port_info *port_info[2];
 	unsigned int combined = 0;
 	unsigned int pata_chan = 0, sata_chan = 0;
+	unsigned long host_flags;
 
 	if (!printed_version++)
 		dev_printk(KERN_DEBUG, &pdev->dev,
@@ -692,7 +694,9 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 	port_info[0] = &piix_port_info[ent->driver_data];
 	port_info[1] = &piix_port_info[ent->driver_data];
 
-	if (port_info[0]->host_flags & PIIX_FLAG_AHCI) {
+	host_flags = port_info[0]->host_flags;
+
+	if (host_flags & PIIX_FLAG_AHCI) {
 		u8 tmp;
 		pci_read_config_byte(pdev, PIIX_SCC, &tmp);
 		if (tmp == PIIX_AHCI_DEVICE) {
@@ -702,16 +706,35 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 		}
 	}
 
-	if (port_info[0]->host_flags & PIIX_FLAG_COMBINED) {
+	if (host_flags & PIIX_FLAG_COMBINED) {
 		u8 tmp;
 		pci_read_config_byte(pdev, ICH5_PMR, &tmp);
 
-		if (tmp & PIIX_COMB) {
-			combined = 1;
-			if (tmp & PIIX_COMB_PATA_P0)
+		if (host_flags & PIIX_FLAG_COMBINED_ICH6) {
+			switch (tmp & 0x3) {
+			case 0:
+				break;
+			case 1:
+				combined = 1;
 				sata_chan = 1;
-			else
+				break;
+			case 2:
+				combined = 1;
 				pata_chan = 1;
+				break;
+			case 3:
+				dev_printk(KERN_WARNING, &pdev->dev,
+					   "invalid MAP value %u\n", tmp);
+				break;
+			}
+		} else {
+			if (tmp & PIIX_COMB) {
+				combined = 1;
+				if (tmp & PIIX_COMB_PATA_P0)
+					sata_chan = 1;
+				else
+					pata_chan = 1;
+			}
 		}
 	}
 
@@ -721,7 +744,7 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 	 * MSI is disabled (and it is disabled, as we don't use
 	 * message-signalled interrupts currently).
 	 */
-	if (port_info[0]->host_flags & PIIX_FLAG_CHECKINTR)
+	if (host_flags & PIIX_FLAG_CHECKINTR)
 		pci_intx(pdev, 1);
 
 	if (combined) {
diff --git a/drivers/scsi/libata-bmdma.c b/drivers/scsi/libata-bmdma.c
new file mode 100644
index 000000000000..a93336adcd23
--- /dev/null
+++ b/drivers/scsi/libata-bmdma.c
@@ -0,0 +1,703 @@
+/*
+ *  libata-bmdma.c - helper library for PCI IDE BMDMA
+ *
+ *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
+ *		    Please ALWAYS copy linux-ide@vger.kernel.org
+ *		    on emails.
+ *
+ *  Copyright 2003-2006 Red Hat, Inc.  All rights reserved.
+ *  Copyright 2003-2006 Jeff Garzik
+ *
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2, or (at your option)
+ *  any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; see the file COPYING.  If not, write to
+ *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ *  libata documentation is available via 'make {ps|pdf}docs',
+ *  as Documentation/DocBook/libata.*
+ *
+ *  Hardware documentation available from http://www.t13.org/ and
+ *  http://www.sata-io.org/
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/libata.h>
+
+#include "libata.h"
+
+/**
+ *	ata_tf_load_pio - send taskfile registers to host controller
+ *	@ap: Port to which output is sent
+ *	@tf: ATA taskfile register set
+ *
+ *	Outputs ATA taskfile to standard ATA host controller.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+
+static void ata_tf_load_pio(struct ata_port *ap, const struct ata_taskfile *tf)
+{
+	struct ata_ioports *ioaddr = &ap->ioaddr;
+	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
+
+	if (tf->ctl != ap->last_ctl) {
+		outb(tf->ctl, ioaddr->ctl_addr);
+		ap->last_ctl = tf->ctl;
+		ata_wait_idle(ap);
+	}
+
+	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
+		outb(tf->hob_feature, ioaddr->feature_addr);
+		outb(tf->hob_nsect, ioaddr->nsect_addr);
+		outb(tf->hob_lbal, ioaddr->lbal_addr);
+		outb(tf->hob_lbam, ioaddr->lbam_addr);
+		outb(tf->hob_lbah, ioaddr->lbah_addr);
+		VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
+			tf->hob_feature,
+			tf->hob_nsect,
+			tf->hob_lbal,
+			tf->hob_lbam,
+			tf->hob_lbah);
+	}
+
+	if (is_addr) {
+		outb(tf->feature, ioaddr->feature_addr);
+		outb(tf->nsect, ioaddr->nsect_addr);
+		outb(tf->lbal, ioaddr->lbal_addr);
+		outb(tf->lbam, ioaddr->lbam_addr);
+		outb(tf->lbah, ioaddr->lbah_addr);
+		VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
+			tf->feature,
+			tf->nsect,
+			tf->lbal,
+			tf->lbam,
+			tf->lbah);
+	}
+
+	if (tf->flags & ATA_TFLAG_DEVICE) {
+		outb(tf->device, ioaddr->device_addr);
+		VPRINTK("device 0x%X\n", tf->device);
+	}
+
+	ata_wait_idle(ap);
+}
+
+/**
+ *	ata_tf_load_mmio - send taskfile registers to host controller
+ *	@ap: Port to which output is sent
+ *	@tf: ATA taskfile register set
+ *
+ *	Outputs ATA taskfile to standard ATA host controller using MMIO.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+
+static void ata_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
+{
+	struct ata_ioports *ioaddr = &ap->ioaddr;
+	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
+
+	if (tf->ctl != ap->last_ctl) {
+		writeb(tf->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
+		ap->last_ctl = tf->ctl;
+		ata_wait_idle(ap);
+	}
+
+	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
+		writeb(tf->hob_feature, (void __iomem *) ioaddr->feature_addr);
+		writeb(tf->hob_nsect, (void __iomem *) ioaddr->nsect_addr);
+		writeb(tf->hob_lbal, (void __iomem *) ioaddr->lbal_addr);
+		writeb(tf->hob_lbam, (void __iomem *) ioaddr->lbam_addr);
+		writeb(tf->hob_lbah, (void __iomem *) ioaddr->lbah_addr);
+		VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
+			tf->hob_feature,
+			tf->hob_nsect,
+			tf->hob_lbal,
+			tf->hob_lbam,
+			tf->hob_lbah);
+	}
+
+	if (is_addr) {
+		writeb(tf->feature, (void __iomem *) ioaddr->feature_addr);
+		writeb(tf->nsect, (void __iomem *) ioaddr->nsect_addr);
+		writeb(tf->lbal, (void __iomem *) ioaddr->lbal_addr);
+		writeb(tf->lbam, (void __iomem *) ioaddr->lbam_addr);
+		writeb(tf->lbah, (void __iomem *) ioaddr->lbah_addr);
+		VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
+			tf->feature,
+			tf->nsect,
+			tf->lbal,
+			tf->lbam,
+			tf->lbah);
+	}
+
+	if (tf->flags & ATA_TFLAG_DEVICE) {
+		writeb(tf->device, (void __iomem *) ioaddr->device_addr);
+		VPRINTK("device 0x%X\n", tf->device);
+	}
+
+	ata_wait_idle(ap);
+}
+
+
+/**
+ *	ata_tf_load - send taskfile registers to host controller
+ *	@ap: Port to which output is sent
+ *	@tf: ATA taskfile register set
+ *
+ *	Outputs ATA taskfile to standard ATA host controller using MMIO
+ *	or PIO as indicated by the ATA_FLAG_MMIO flag.
+ *	Writes the control, feature, nsect, lbal, lbam, and lbah registers.
+ *	Optionally (ATA_TFLAG_LBA48) writes hob_feature, hob_nsect,
+ *	hob_lbal, hob_lbam, and hob_lbah.
+ *
+ *	This function waits for idle (!BUSY and !DRQ) after writing
+ *	registers.  If the control register has a new value, this
+ *	function also waits for idle after writing control and before
+ *	writing the remaining registers.
+ *
+ *	May be used as the tf_load() entry in ata_port_operations.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
+{
+	if (ap->flags & ATA_FLAG_MMIO)
+		ata_tf_load_mmio(ap, tf);
+	else
+		ata_tf_load_pio(ap, tf);
+}
+
+/**
+ *	ata_exec_command_pio - issue ATA command to host controller
+ *	@ap: port to which command is being issued
+ *	@tf: ATA taskfile register set
+ *
+ *	Issues PIO write to ATA command register, with proper
+ *	synchronization with interrupt handler / other threads.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host_set lock)
+ */
+
+static void ata_exec_command_pio(struct ata_port *ap, const struct ata_taskfile *tf)
+{
+	DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
+
+	outb(tf->command, ap->ioaddr.command_addr);
+	ata_pause(ap);
+}
+
+
+/**
+ *	ata_exec_command_mmio - issue ATA command to host controller
+ *	@ap: port to which command is being issued
+ *	@tf: ATA taskfile register set
+ *
+ *	Issues MMIO write to ATA command register, with proper
+ *	synchronization with interrupt handler / other threads.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host_set lock)
+ */
+
+static void ata_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
+{
+	DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
+
+	writeb(tf->command, (void __iomem *) ap->ioaddr.command_addr);
+	ata_pause(ap);
+}
+
+
+/**
+ *	ata_exec_command - issue ATA command to host controller
+ *	@ap: port to which command is being issued
+ *	@tf: ATA taskfile register set
+ *
+ *	Issues PIO/MMIO write to ATA command register, with proper
+ *	synchronization with interrupt handler / other threads.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host_set lock)
+ */
+void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
+{
+	if (ap->flags & ATA_FLAG_MMIO)
+		ata_exec_command_mmio(ap, tf);
+	else
+		ata_exec_command_pio(ap, tf);
+}
+
+/**
+ *	ata_tf_read_pio - input device's ATA taskfile shadow registers
+ *	@ap: Port from which input is read
+ *	@tf: ATA taskfile register set for storing input
+ *
+ *	Reads ATA taskfile registers for currently-selected device
+ *	into @tf.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+
+static void ata_tf_read_pio(struct ata_port *ap, struct ata_taskfile *tf)
+{
+	struct ata_ioports *ioaddr = &ap->ioaddr;
+
+	tf->command = ata_check_status(ap);
+	tf->feature = inb(ioaddr->error_addr);
+	tf->nsect = inb(ioaddr->nsect_addr);
+	tf->lbal = inb(ioaddr->lbal_addr);
+	tf->lbam = inb(ioaddr->lbam_addr);
+	tf->lbah = inb(ioaddr->lbah_addr);
+	tf->device = inb(ioaddr->device_addr);
+
+	if (tf->flags & ATA_TFLAG_LBA48) {
+		outb(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
+		tf->hob_feature = inb(ioaddr->error_addr);
+		tf->hob_nsect = inb(ioaddr->nsect_addr);
+		tf->hob_lbal = inb(ioaddr->lbal_addr);
+		tf->hob_lbam = inb(ioaddr->lbam_addr);
+		tf->hob_lbah = inb(ioaddr->lbah_addr);
+	}
+}
+
+/**
+ *	ata_tf_read_mmio - input device's ATA taskfile shadow registers
+ *	@ap: Port from which input is read
+ *	@tf: ATA taskfile register set for storing input
+ *
+ *	Reads ATA taskfile registers for currently-selected device
+ *	into @tf via MMIO.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+
+static void ata_tf_read_mmio(struct ata_port *ap, struct ata_taskfile *tf)
+{
+	struct ata_ioports *ioaddr = &ap->ioaddr;
+
+	tf->command = ata_check_status(ap);
+	tf->feature = readb((void __iomem *)ioaddr->error_addr);
+	tf->nsect = readb((void __iomem *)ioaddr->nsect_addr);
+	tf->lbal = readb((void __iomem *)ioaddr->lbal_addr);
+	tf->lbam = readb((void __iomem *)ioaddr->lbam_addr);
+	tf->lbah = readb((void __iomem *)ioaddr->lbah_addr);
+	tf->device = readb((void __iomem *)ioaddr->device_addr);
+
+	if (tf->flags & ATA_TFLAG_LBA48) {
+		writeb(tf->ctl | ATA_HOB, (void __iomem *) ap->ioaddr.ctl_addr);
+		tf->hob_feature = readb((void __iomem *)ioaddr->error_addr);
+		tf->hob_nsect = readb((void __iomem *)ioaddr->nsect_addr);
+		tf->hob_lbal = readb((void __iomem *)ioaddr->lbal_addr);
+		tf->hob_lbam = readb((void __iomem *)ioaddr->lbam_addr);
+		tf->hob_lbah = readb((void __iomem *)ioaddr->lbah_addr);
+	}
+}
+
+
+/**
+ *	ata_tf_read - input device's ATA taskfile shadow registers
+ *	@ap: Port from which input is read
+ *	@tf: ATA taskfile register set for storing input
+ *
+ *	Reads ATA taskfile registers for currently-selected device
+ *	into @tf.
+ *
+ *	Reads nsect, lbal, lbam, lbah, and device.  If ATA_TFLAG_LBA48
+ *	is set, also reads the hob registers.
+ *
+ *	May be used as the tf_read() entry in ata_port_operations.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
+{
+	if (ap->flags & ATA_FLAG_MMIO)
+		ata_tf_read_mmio(ap, tf);
+	else
+		ata_tf_read_pio(ap, tf);
+}
+
+/**
+ *	ata_check_status_pio - Read device status reg & clear interrupt
+ *	@ap: port where the device is
+ *
+ *	Reads ATA taskfile status register for currently-selected device
+ *	and return its value. This also clears pending interrupts
+ *	from this device
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+static u8 ata_check_status_pio(struct ata_port *ap)
+{
+	return inb(ap->ioaddr.status_addr);
+}
+
+/**
+ *	ata_check_status_mmio - Read device status reg & clear interrupt
+ *	@ap: port where the device is
+ *
+ *	Reads ATA taskfile status register for currently-selected device
+ *	via MMIO and return its value. This also clears pending interrupts
+ *	from this device
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+static u8 ata_check_status_mmio(struct ata_port *ap)
+{
+	return readb((void __iomem *) ap->ioaddr.status_addr);
+}
+
+
+/**
+ *	ata_check_status - Read device status reg & clear interrupt
+ *	@ap: port where the device is
+ *
+ *	Reads ATA taskfile status register for currently-selected device
+ *	and return its value. This also clears pending interrupts
+ *	from this device
+ *
+ *	May be used as the check_status() entry in ata_port_operations.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+u8 ata_check_status(struct ata_port *ap)
+{
+	if (ap->flags & ATA_FLAG_MMIO)
+		return ata_check_status_mmio(ap);
+	return ata_check_status_pio(ap);
+}
+
+
+/**
+ *	ata_altstatus - Read device alternate status reg
+ *	@ap: port where the device is
+ *
+ *	Reads ATA taskfile alternate status register for
+ *	currently-selected device and return its value.
+ *
+ *	Note: may NOT be used as the check_altstatus() entry in
+ *	ata_port_operations.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+u8 ata_altstatus(struct ata_port *ap)
+{
+	if (ap->ops->check_altstatus)
+		return ap->ops->check_altstatus(ap);
+
+	if (ap->flags & ATA_FLAG_MMIO)
+		return readb((void __iomem *)ap->ioaddr.altstatus_addr);
+	return inb(ap->ioaddr.altstatus_addr);
+}
+
+#ifdef CONFIG_PCI
+static struct ata_probe_ent *
+ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
+{
+	struct ata_probe_ent *probe_ent;
+
+	probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
+	if (!probe_ent) {
+		printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
+		       kobject_name(&(dev->kobj)));
+		return NULL;
+	}
+
+	INIT_LIST_HEAD(&probe_ent->node);
+	probe_ent->dev = dev;
+
+	probe_ent->sht = port->sht;
+	probe_ent->host_flags = port->host_flags;
+	probe_ent->pio_mask = port->pio_mask;
+	probe_ent->mwdma_mask = port->mwdma_mask;
+	probe_ent->udma_mask = port->udma_mask;
+	probe_ent->port_ops = port->port_ops;
+
+	return probe_ent;
+}
+
+
+/**
+ *	ata_pci_init_native_mode - Initialize native-mode driver
+ *	@pdev:  pci device to be initialized
+ *	@port:  array[2] of pointers to port info structures.
+ *	@ports: bitmap of ports present
+ *
+ *	Utility function which allocates and initializes an
+ *	ata_probe_ent structure for a standard dual-port
+ *	PIO-based IDE controller.  The returned ata_probe_ent
+ *	structure can be passed to ata_device_add().  The returned
+ *	ata_probe_ent structure should then be freed with kfree().
+ *
+ *	The caller need only pass the address of the primary port, the
+ *	secondary will be deduced automatically. If the device has non
+ *	standard secondary port mappings this function can be called twice,
+ *	once for each interface.
+ */
+
+struct ata_probe_ent *
+ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int ports)
+{
+	struct ata_probe_ent *probe_ent =
+		ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
+	int p = 0;
+
+	if (!probe_ent)
+		return NULL;
+
+	probe_ent->irq = pdev->irq;
+	probe_ent->irq_flags = SA_SHIRQ;
+	probe_ent->private_data = port[0]->private_data;
+
+	if (ports & ATA_PORT_PRIMARY) {
+		probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 0);
+		probe_ent->port[p].altstatus_addr =
+		probe_ent->port[p].ctl_addr =
+			pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS;
+		probe_ent->port[p].bmdma_addr = pci_resource_start(pdev, 4);
+		ata_std_ports(&probe_ent->port[p]);
+		p++;
+	}
+
+	if (ports & ATA_PORT_SECONDARY) {
+		probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 2);
+		probe_ent->port[p].altstatus_addr =
+		probe_ent->port[p].ctl_addr =
+			pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS;
+		probe_ent->port[p].bmdma_addr = pci_resource_start(pdev, 4) + 8;
+		ata_std_ports(&probe_ent->port[p]);
+		p++;
+	}
+
+	probe_ent->n_ports = p;
+	return probe_ent;
+}
+
+
+static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev,
+				struct ata_port_info *port, int port_num)
+{
+	struct ata_probe_ent *probe_ent;
+
+	probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port);
+	if (!probe_ent)
+		return NULL;
+
+	probe_ent->legacy_mode = 1;
+	probe_ent->n_ports = 1;
+	probe_ent->hard_port_no = port_num;
+	probe_ent->private_data = port->private_data;
+
+	switch(port_num)
+	{
+	case 0:
+		probe_ent->irq = 14;
+		probe_ent->port[0].cmd_addr = 0x1f0;
+		probe_ent->port[0].altstatus_addr =
+		probe_ent->port[0].ctl_addr = 0x3f6;
+		break;
+	case 1:
+		probe_ent->irq = 15;
+		probe_ent->port[0].cmd_addr = 0x170;
+		probe_ent->port[0].altstatus_addr =
+		probe_ent->port[0].ctl_addr = 0x376;
+		break;
+	}
+
+	probe_ent->port[0].bmdma_addr =
+		pci_resource_start(pdev, 4) + 8 * port_num;
+	ata_std_ports(&probe_ent->port[0]);
+
+	return probe_ent;
+}
+
+
+/**
+ *	ata_pci_init_one - Initialize/register PCI IDE host controller
+ *	@pdev: Controller to be initialized
+ *	@port_info: Information from low-level host driver
+ *	@n_ports: Number of ports attached to host controller
+ *
+ *	This is a helper function which can be called from a driver's
+ *	xxx_init_one() probe function if the hardware uses traditional
+ *	IDE taskfile registers.
+ *
+ *	This function calls pci_enable_device(), reserves its register
+ *	regions, sets the dma mask, enables bus master mode, and calls
+ *	ata_device_add()
+ *
+ *	LOCKING:
+ *	Inherited from PCI layer (may sleep).
+ *
+ *	RETURNS:
+ *	Zero on success, negative on errno-based value on error.
+ */
+
+int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
+		      unsigned int n_ports)
+{
+	struct ata_probe_ent *probe_ent = NULL, *probe_ent2 = NULL;
+	struct ata_port_info *port[2];
+	u8 tmp8, mask;
+	unsigned int legacy_mode = 0;
+	int disable_dev_on_err = 1;
+	int rc;
+
+	DPRINTK("ENTER\n");
+
+	port[0] = port_info[0];
+	if (n_ports > 1)
+		port[1] = port_info[1];
+	else
+		port[1] = port[0];
+
+	if ((port[0]->host_flags & ATA_FLAG_NO_LEGACY) == 0
+	    && (pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
+		/* TODO: What if one channel is in native mode ... */
+		pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
+		mask = (1 << 2) | (1 << 0);
+		if ((tmp8 & mask) != mask)
+			legacy_mode = (1 << 3);
+	}
+
+	/* FIXME... */
+	if ((!legacy_mode) && (n_ports > 2)) {
+		printk(KERN_ERR "ata: BUG: native mode, n_ports > 2\n");
+		n_ports = 2;
+		/* For now */
+	}
+
+	/* FIXME: Really for ATA it isn't safe because the device may be
+	   multi-purpose and we want to leave it alone if it was already
+	   enabled. Secondly for shared use as Arjan says we want refcounting
+
+	   Checking dev->is_enabled is insufficient as this is not set at
+	   boot for the primary video which is BIOS enabled
+	 */
+
+	rc = pci_enable_device(pdev);
+	if (rc)
+		return rc;
+
+	rc = pci_request_regions(pdev, DRV_NAME);
+	if (rc) {
+		disable_dev_on_err = 0;
+		goto err_out;
+	}
+
+	/* FIXME: Should use platform specific mappers for legacy port ranges */
+	if (legacy_mode) {
+		if (!request_region(0x1f0, 8, "libata")) {
+			struct resource *conflict, res;
+			res.start = 0x1f0;
+			res.end = 0x1f0 + 8 - 1;
+			conflict = ____request_resource(&ioport_resource, &res);
+			if (!strcmp(conflict->name, "libata"))
+				legacy_mode |= (1 << 0);
+			else {
+				disable_dev_on_err = 0;
+				printk(KERN_WARNING "ata: 0x1f0 IDE port busy\n");
+			}
+		} else
+			legacy_mode |= (1 << 0);
+
+		if (!request_region(0x170, 8, "libata")) {
+			struct resource *conflict, res;
+			res.start = 0x170;
+			res.end = 0x170 + 8 - 1;
+			conflict = ____request_resource(&ioport_resource, &res);
+			if (!strcmp(conflict->name, "libata"))
+				legacy_mode |= (1 << 1);
+			else {
+				disable_dev_on_err = 0;
+				printk(KERN_WARNING "ata: 0x170 IDE port busy\n");
+			}
+		} else
+			legacy_mode |= (1 << 1);
+	}
+
+	/* we have legacy mode, but all ports are unavailable */
+	if (legacy_mode == (1 << 3)) {
+		rc = -EBUSY;
+		goto err_out_regions;
+	}
+
+	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
+	if (rc)
+		goto err_out_regions;
+	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
+	if (rc)
+		goto err_out_regions;
+
+	if (legacy_mode) {
+		if (legacy_mode & (1 << 0))
+			probe_ent = ata_pci_init_legacy_port(pdev, port[0], 0);
+		if (legacy_mode & (1 << 1))
+			probe_ent2 = ata_pci_init_legacy_port(pdev, port[1], 1);
+	} else {
+		if (n_ports == 2)
+			probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
+		else
+			probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY);
+	}
+	if (!probe_ent && !probe_ent2) {
+		rc = -ENOMEM;
+		goto err_out_regions;
+	}
+
+	pci_set_master(pdev);
+
+	/* FIXME: check ata_device_add return */
+	if (legacy_mode) {
+		if (legacy_mode & (1 << 0))
+			ata_device_add(probe_ent);
+		if (legacy_mode & (1 << 1))
+			ata_device_add(probe_ent2);
+	} else
+		ata_device_add(probe_ent);
+
+	kfree(probe_ent);
+	kfree(probe_ent2);
+
+	return 0;
+
+err_out_regions:
+	if (legacy_mode & (1 << 0))
+		release_region(0x1f0, 8);
+	if (legacy_mode & (1 << 1))
+		release_region(0x170, 8);
+	pci_release_regions(pdev);
+err_out:
+	if (disable_dev_on_err)
+		pci_disable_device(pdev);
+	return rc;
+}
+
+#endif /* CONFIG_PCI */
+
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index 7ddd5a69352a..adc5b440c9bc 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -61,9 +61,6 @@
 
 #include "libata.h"
 
-static unsigned int ata_busy_sleep (struct ata_port *ap,
-				    unsigned long tmout_pat,
-				    unsigned long tmout);
 static void ata_dev_reread_id(struct ata_port *ap, struct ata_device *dev);
 static void ata_dev_init_params(struct ata_port *ap, struct ata_device *dev);
 static void ata_set_mode(struct ata_port *ap);
@@ -73,7 +70,6 @@ static int fgb(u32 bitmap);
 static int ata_choose_xfer_mode(const struct ata_port *ap,
 				u8 *xfer_mode_out,
 				unsigned int *xfer_shift_out);
-static void __ata_qc_complete(struct ata_queued_cmd *qc);
 
 static unsigned int ata_unique_id = 1;
 static struct workqueue_struct *ata_wq;
@@ -87,403 +83,6 @@ MODULE_DESCRIPTION("Library module for ATA devices");
87MODULE_LICENSE("GPL"); 83MODULE_LICENSE("GPL");
88MODULE_VERSION(DRV_VERSION); 84MODULE_VERSION(DRV_VERSION);
89 85
90/**
91 * ata_tf_load_pio - send taskfile registers to host controller
92 * @ap: Port to which output is sent
93 * @tf: ATA taskfile register set
94 *
95 * Outputs ATA taskfile to standard ATA host controller.
96 *
97 * LOCKING:
98 * Inherited from caller.
99 */
100
101static void ata_tf_load_pio(struct ata_port *ap, const struct ata_taskfile *tf)
102{
103 struct ata_ioports *ioaddr = &ap->ioaddr;
104 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
105
106 if (tf->ctl != ap->last_ctl) {
107 outb(tf->ctl, ioaddr->ctl_addr);
108 ap->last_ctl = tf->ctl;
109 ata_wait_idle(ap);
110 }
111
112 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
113 outb(tf->hob_feature, ioaddr->feature_addr);
114 outb(tf->hob_nsect, ioaddr->nsect_addr);
115 outb(tf->hob_lbal, ioaddr->lbal_addr);
116 outb(tf->hob_lbam, ioaddr->lbam_addr);
117 outb(tf->hob_lbah, ioaddr->lbah_addr);
118 VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
119 tf->hob_feature,
120 tf->hob_nsect,
121 tf->hob_lbal,
122 tf->hob_lbam,
123 tf->hob_lbah);
124 }
125
126 if (is_addr) {
127 outb(tf->feature, ioaddr->feature_addr);
128 outb(tf->nsect, ioaddr->nsect_addr);
129 outb(tf->lbal, ioaddr->lbal_addr);
130 outb(tf->lbam, ioaddr->lbam_addr);
131 outb(tf->lbah, ioaddr->lbah_addr);
132 VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
133 tf->feature,
134 tf->nsect,
135 tf->lbal,
136 tf->lbam,
137 tf->lbah);
138 }
139
140 if (tf->flags & ATA_TFLAG_DEVICE) {
141 outb(tf->device, ioaddr->device_addr);
142 VPRINTK("device 0x%X\n", tf->device);
143 }
144
145 ata_wait_idle(ap);
146}
147
148/**
149 * ata_tf_load_mmio - send taskfile registers to host controller
150 * @ap: Port to which output is sent
151 * @tf: ATA taskfile register set
152 *
153 * Outputs ATA taskfile to standard ATA host controller using MMIO.
154 *
155 * LOCKING:
156 * Inherited from caller.
157 */
158
159static void ata_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
160{
161 struct ata_ioports *ioaddr = &ap->ioaddr;
162 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
163
164 if (tf->ctl != ap->last_ctl) {
165 writeb(tf->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
166 ap->last_ctl = tf->ctl;
167 ata_wait_idle(ap);
168 }
169
170 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
171 writeb(tf->hob_feature, (void __iomem *) ioaddr->feature_addr);
172 writeb(tf->hob_nsect, (void __iomem *) ioaddr->nsect_addr);
173 writeb(tf->hob_lbal, (void __iomem *) ioaddr->lbal_addr);
174 writeb(tf->hob_lbam, (void __iomem *) ioaddr->lbam_addr);
175 writeb(tf->hob_lbah, (void __iomem *) ioaddr->lbah_addr);
176 VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
177 tf->hob_feature,
178 tf->hob_nsect,
179 tf->hob_lbal,
180 tf->hob_lbam,
181 tf->hob_lbah);
182 }
183
184 if (is_addr) {
185 writeb(tf->feature, (void __iomem *) ioaddr->feature_addr);
186 writeb(tf->nsect, (void __iomem *) ioaddr->nsect_addr);
187 writeb(tf->lbal, (void __iomem *) ioaddr->lbal_addr);
188 writeb(tf->lbam, (void __iomem *) ioaddr->lbam_addr);
189 writeb(tf->lbah, (void __iomem *) ioaddr->lbah_addr);
190 VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
191 tf->feature,
192 tf->nsect,
193 tf->lbal,
194 tf->lbam,
195 tf->lbah);
196 }
197
198 if (tf->flags & ATA_TFLAG_DEVICE) {
199 writeb(tf->device, (void __iomem *) ioaddr->device_addr);
200 VPRINTK("device 0x%X\n", tf->device);
201 }
202
203 ata_wait_idle(ap);
204}
205
206
207/**
208 * ata_tf_load - send taskfile registers to host controller
209 * @ap: Port to which output is sent
210 * @tf: ATA taskfile register set
211 *
212 * Outputs ATA taskfile to standard ATA host controller using MMIO
213 * or PIO as indicated by the ATA_FLAG_MMIO flag.
214 * Writes the control, feature, nsect, lbal, lbam, and lbah registers.
215 * Optionally (ATA_TFLAG_LBA48) writes hob_feature, hob_nsect,
216 * hob_lbal, hob_lbam, and hob_lbah.
217 *
218 * This function waits for idle (!BUSY and !DRQ) after writing
219 * registers. If the control register has a new value, this
220 * function also waits for idle after writing control and before
221 * writing the remaining registers.
222 *
223 * May be used as the tf_load() entry in ata_port_operations.
224 *
225 * LOCKING:
226 * Inherited from caller.
227 */
228void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
229{
230 if (ap->flags & ATA_FLAG_MMIO)
231 ata_tf_load_mmio(ap, tf);
232 else
233 ata_tf_load_pio(ap, tf);
234}
235
236/**
237 * ata_exec_command_pio - issue ATA command to host controller
238 * @ap: port to which command is being issued
239 * @tf: ATA taskfile register set
240 *
241 * Issues PIO write to ATA command register, with proper
242 * synchronization with interrupt handler / other threads.
243 *
244 * LOCKING:
245 * spin_lock_irqsave(host_set lock)
246 */
247
248static void ata_exec_command_pio(struct ata_port *ap, const struct ata_taskfile *tf)
249{
250 DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
251
252 outb(tf->command, ap->ioaddr.command_addr);
253 ata_pause(ap);
254}
255
256
257/**
258 * ata_exec_command_mmio - issue ATA command to host controller
259 * @ap: port to which command is being issued
260 * @tf: ATA taskfile register set
261 *
262 * Issues MMIO write to ATA command register, with proper
263 * synchronization with interrupt handler / other threads.
264 *
265 * LOCKING:
266 * spin_lock_irqsave(host_set lock)
267 */
268
269static void ata_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
270{
271 DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
272
273 writeb(tf->command, (void __iomem *) ap->ioaddr.command_addr);
274 ata_pause(ap);
275}
276
277
278/**
279 * ata_exec_command - issue ATA command to host controller
280 * @ap: port to which command is being issued
281 * @tf: ATA taskfile register set
282 *
283 * Issues PIO/MMIO write to ATA command register, with proper
284 * synchronization with interrupt handler / other threads.
285 *
286 * LOCKING:
287 * spin_lock_irqsave(host_set lock)
288 */
289void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
290{
291 if (ap->flags & ATA_FLAG_MMIO)
292 ata_exec_command_mmio(ap, tf);
293 else
294 ata_exec_command_pio(ap, tf);
295}
296
297/**
298 * ata_tf_to_host - issue ATA taskfile to host controller
299 * @ap: port to which command is being issued
300 * @tf: ATA taskfile register set
301 *
302 * Issues ATA taskfile register set to ATA host controller,
303 * with proper synchronization with interrupt handler and
304 * other threads.
305 *
306 * LOCKING:
307 * spin_lock_irqsave(host_set lock)
308 */
309
310static inline void ata_tf_to_host(struct ata_port *ap,
311 const struct ata_taskfile *tf)
312{
313 ap->ops->tf_load(ap, tf);
314 ap->ops->exec_command(ap, tf);
315}
316
317/**
318 * ata_tf_read_pio - input device's ATA taskfile shadow registers
319 * @ap: Port from which input is read
320 * @tf: ATA taskfile register set for storing input
321 *
322 * Reads ATA taskfile registers for currently-selected device
323 * into @tf.
324 *
325 * LOCKING:
326 * Inherited from caller.
327 */
328
329static void ata_tf_read_pio(struct ata_port *ap, struct ata_taskfile *tf)
330{
331 struct ata_ioports *ioaddr = &ap->ioaddr;
332
333 tf->command = ata_check_status(ap);
334 tf->feature = inb(ioaddr->error_addr);
335 tf->nsect = inb(ioaddr->nsect_addr);
336 tf->lbal = inb(ioaddr->lbal_addr);
337 tf->lbam = inb(ioaddr->lbam_addr);
338 tf->lbah = inb(ioaddr->lbah_addr);
339 tf->device = inb(ioaddr->device_addr);
340
341 if (tf->flags & ATA_TFLAG_LBA48) {
342 outb(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
343 tf->hob_feature = inb(ioaddr->error_addr);
344 tf->hob_nsect = inb(ioaddr->nsect_addr);
345 tf->hob_lbal = inb(ioaddr->lbal_addr);
346 tf->hob_lbam = inb(ioaddr->lbam_addr);
347 tf->hob_lbah = inb(ioaddr->lbah_addr);
348 }
349}
350
351/**
352 * ata_tf_read_mmio - input device's ATA taskfile shadow registers
353 * @ap: Port from which input is read
354 * @tf: ATA taskfile register set for storing input
355 *
356 * Reads ATA taskfile registers for currently-selected device
357 * into @tf via MMIO.
358 *
359 * LOCKING:
360 * Inherited from caller.
361 */
362
363static void ata_tf_read_mmio(struct ata_port *ap, struct ata_taskfile *tf)
364{
365 struct ata_ioports *ioaddr = &ap->ioaddr;
366
367 tf->command = ata_check_status(ap);
368 tf->feature = readb((void __iomem *)ioaddr->error_addr);
369 tf->nsect = readb((void __iomem *)ioaddr->nsect_addr);
370 tf->lbal = readb((void __iomem *)ioaddr->lbal_addr);
371 tf->lbam = readb((void __iomem *)ioaddr->lbam_addr);
372 tf->lbah = readb((void __iomem *)ioaddr->lbah_addr);
373 tf->device = readb((void __iomem *)ioaddr->device_addr);
374
375 if (tf->flags & ATA_TFLAG_LBA48) {
376 writeb(tf->ctl | ATA_HOB, (void __iomem *) ap->ioaddr.ctl_addr);
377 tf->hob_feature = readb((void __iomem *)ioaddr->error_addr);
378 tf->hob_nsect = readb((void __iomem *)ioaddr->nsect_addr);
379 tf->hob_lbal = readb((void __iomem *)ioaddr->lbal_addr);
380 tf->hob_lbam = readb((void __iomem *)ioaddr->lbam_addr);
381 tf->hob_lbah = readb((void __iomem *)ioaddr->lbah_addr);
382 }
383}
384
385
386/**
387 * ata_tf_read - input device's ATA taskfile shadow registers
388 * @ap: Port from which input is read
389 * @tf: ATA taskfile register set for storing input
390 *
391 * Reads ATA taskfile registers for currently-selected device
392 * into @tf.
393 *
394 * Reads nsect, lbal, lbam, lbah, and device. If ATA_TFLAG_LBA48
395 * is set, also reads the hob registers.
396 *
397 * May be used as the tf_read() entry in ata_port_operations.
398 *
399 * LOCKING:
400 * Inherited from caller.
401 */
402void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
403{
404 if (ap->flags & ATA_FLAG_MMIO)
405 ata_tf_read_mmio(ap, tf);
406 else
407 ata_tf_read_pio(ap, tf);
408}
409
410/**
411 * ata_check_status_pio - Read device status reg & clear interrupt
412 * @ap: port where the device is
413 *
414 * Reads ATA taskfile status register for currently-selected device
415 * and return its value. This also clears pending interrupts
416 * from this device
417 *
418 * LOCKING:
419 * Inherited from caller.
420 */
421static u8 ata_check_status_pio(struct ata_port *ap)
422{
423 return inb(ap->ioaddr.status_addr);
424}
425
426/**
427 * ata_check_status_mmio - Read device status reg & clear interrupt
428 * @ap: port where the device is
429 *
430 * Reads ATA taskfile status register for currently-selected device
 431 * via MMIO and returns its value. This also clears pending interrupts
 432 * from this device.
433 *
434 * LOCKING:
435 * Inherited from caller.
436 */
437static u8 ata_check_status_mmio(struct ata_port *ap)
438{
439 return readb((void __iomem *) ap->ioaddr.status_addr);
440}
441
442
443/**
444 * ata_check_status - Read device status reg & clear interrupt
445 * @ap: port where the device is
446 *
447 * Reads ATA taskfile status register for currently-selected device
 448 * and returns its value. This also clears pending interrupts
 449 * from this device.
450 *
451 * May be used as the check_status() entry in ata_port_operations.
452 *
453 * LOCKING:
454 * Inherited from caller.
455 */
456u8 ata_check_status(struct ata_port *ap)
457{
458 if (ap->flags & ATA_FLAG_MMIO)
459 return ata_check_status_mmio(ap);
460 return ata_check_status_pio(ap);
461}
462
463
464/**
465 * ata_altstatus - Read device alternate status reg
466 * @ap: port where the device is
467 *
468 * Reads ATA taskfile alternate status register for
 469 * currently-selected device and returns its value.
470 *
471 * Note: may NOT be used as the check_altstatus() entry in
472 * ata_port_operations.
473 *
474 * LOCKING:
475 * Inherited from caller.
476 */
477u8 ata_altstatus(struct ata_port *ap)
478{
479 if (ap->ops->check_altstatus)
480 return ap->ops->check_altstatus(ap);
481
482 if (ap->flags & ATA_FLAG_MMIO)
483 return readb((void __iomem *)ap->ioaddr.altstatus_addr);
484 return inb(ap->ioaddr.altstatus_addr);
485}
486
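The split between the two status reads matters for interrupt handling: reading the Status register (ata_check_status) acknowledges a pending INTRQ, while reading Alternate Status (ata_altstatus) does not. A hedged polling sketch that leans on that property; wait_not_busy is a hypothetical helper, not in this patch:

static u8 wait_not_busy(struct ata_port *ap, unsigned int tries)
{
	u8 status;

	do {
		status = ata_altstatus(ap);	/* does not clear INTRQ */
		if (!(status & ATA_BUSY))
			break;
		udelay(100);
	} while (--tries);

	return ata_check_status(ap);		/* final read clears INTRQ */
}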
487 86
488/** 87/**
489 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure 88 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
@@ -834,6 +433,7 @@ unsigned int ata_dev_classify(const struct ata_taskfile *tf)
834 * ata_dev_try_classify - Parse returned ATA device signature 433 * ata_dev_try_classify - Parse returned ATA device signature
835 * @ap: ATA channel to examine 434 * @ap: ATA channel to examine
836 * @device: Device to examine (starting at zero) 435 * @device: Device to examine (starting at zero)
436 * @r_err: Value of error register on completion
837 * 437 *
838 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs, 438 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
839 * an ATA/ATAPI-defined set of values is placed in the ATA 439 * an ATA/ATAPI-defined set of values is placed in the ATA
@@ -846,11 +446,14 @@ unsigned int ata_dev_classify(const struct ata_taskfile *tf)
846 * 446 *
847 * LOCKING: 447 * LOCKING:
848 * caller. 448 * caller.
449 *
450 * RETURNS:
451 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
849 */ 452 */
850 453
851static u8 ata_dev_try_classify(struct ata_port *ap, unsigned int device) 454static unsigned int
455ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
852{ 456{
853 struct ata_device *dev = &ap->device[device];
854 struct ata_taskfile tf; 457 struct ata_taskfile tf;
855 unsigned int class; 458 unsigned int class;
856 u8 err; 459 u8 err;
@@ -861,8 +464,8 @@ static u8 ata_dev_try_classify(struct ata_port *ap, unsigned int device)
861 464
862 ap->ops->tf_read(ap, &tf); 465 ap->ops->tf_read(ap, &tf);
863 err = tf.feature; 466 err = tf.feature;
864 467 if (r_err)
865 dev->class = ATA_DEV_NONE; 468 *r_err = err;
866 469
867 /* see if device passed diags */ 470 /* see if device passed diags */
868 if (err == 1) 471 if (err == 1)
@@ -870,18 +473,16 @@ static u8 ata_dev_try_classify(struct ata_port *ap, unsigned int device)
870 else if ((device == 0) && (err == 0x81)) 473 else if ((device == 0) && (err == 0x81))
871 /* do nothing */ ; 474 /* do nothing */ ;
872 else 475 else
873 return err; 476 return ATA_DEV_NONE;
874 477
875 /* determine if device if ATA or ATAPI */ 478 /* determine if device is ATA or ATAPI */
876 class = ata_dev_classify(&tf); 479 class = ata_dev_classify(&tf);
480
877 if (class == ATA_DEV_UNKNOWN) 481 if (class == ATA_DEV_UNKNOWN)
878 return err; 482 return ATA_DEV_NONE;
879 if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0)) 483 if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
880 return err; 484 return ATA_DEV_NONE;
881 485 return class;
882 dev->class = class;
883
884 return err;
885} 486}
886 487
887/** 488/**
@@ -918,6 +519,49 @@ void ata_dev_id_string(const u16 *id, unsigned char *s,
918 } 519 }
919} 520}
920 521
522/**
523 * ata_dev_id_c_string - Convert IDENTIFY DEVICE page into C string
524 * @id: IDENTIFY DEVICE results we will examine
525 * @s: string into which data is output
526 * @ofs: offset into identify device page
 527 * @len: length of string to return. Must be an odd number.
528 *
529 * This function is identical to ata_dev_id_string except that it
530 * trims trailing spaces and terminates the resulting string with
 531 * null. @len must be the actual maximum length (even number) + 1.
532 *
533 * LOCKING:
534 * caller.
535 */
536void ata_dev_id_c_string(const u16 *id, unsigned char *s,
537 unsigned int ofs, unsigned int len)
538{
539 unsigned char *p;
540
541 WARN_ON(!(len & 1));
542
543 ata_dev_id_string(id, s, ofs, len - 1);
544
545 p = s + strnlen(s, len - 1);
546 while (p > s && p[-1] == ' ')
547 p--;
548 *p = '\0';
549}
550
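This helper is what enables the strcmp()-based exact match in ata_dma_blacklisted() further down. A short usage sketch, sized per the rule above (40 bytes of IDENTIFY data + 1 for the terminator, so sizeof() is odd):

	unsigned char model[41];

	ata_dev_id_c_string(dev->id, model, ATA_ID_PROD_OFS, sizeof(model));
	printk(KERN_INFO "ata%u: model '%s'\n", ap->id, model);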
551static u64 ata_id_n_sectors(const u16 *id)
552{
553 if (ata_id_has_lba(id)) {
554 if (ata_id_has_lba48(id))
555 return ata_id_u64(id, 100);
556 else
557 return ata_id_u32(id, 60);
558 } else {
559 if (ata_id_current_chs_valid(id))
560 return ata_id_u32(id, 57);
561 else
562 return id[1] * id[3] * id[6];
563 }
564}
921 565
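ata_id_n_sectors() centralizes the capacity decoding that ata_dev_identify() previously open-coded (see the hunks below). For reference, an informal summary of the IDENTIFY words it consults:

/*
 * LBA48 supported -> words 100-103, 64-bit native sector count
 * LBA only        -> words 60-61,   32-bit sector count
 * current CHS ok  -> words 57-58,   32-bit current capacity
 * default CHS     -> word 1 (cyls) * word 3 (heads) * word 6 (sectors)
 */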
922/** 566/**
923 * ata_noop_dev_select - Select device 0/1 on ATA bus 567 * ata_noop_dev_select - Select device 0/1 on ATA bus
@@ -1007,41 +651,41 @@ void ata_dev_select(struct ata_port *ap, unsigned int device,
1007 651
1008/** 652/**
1009 * ata_dump_id - IDENTIFY DEVICE info debugging output 653 * ata_dump_id - IDENTIFY DEVICE info debugging output
1010 * @dev: Device whose IDENTIFY DEVICE page we will dump 654 * @id: IDENTIFY DEVICE page to dump
1011 * 655 *
1012 * Dump selected 16-bit words from a detected device's 656 * Dump selected 16-bit words from the given IDENTIFY DEVICE
1013 * IDENTIFY PAGE page. 657 * page.
1014 * 658 *
1015 * LOCKING: 659 * LOCKING:
1016 * caller. 660 * caller.
1017 */ 661 */
1018 662
1019static inline void ata_dump_id(const struct ata_device *dev) 663static inline void ata_dump_id(const u16 *id)
1020{ 664{
1021 DPRINTK("49==0x%04x " 665 DPRINTK("49==0x%04x "
1022 "53==0x%04x " 666 "53==0x%04x "
1023 "63==0x%04x " 667 "63==0x%04x "
1024 "64==0x%04x " 668 "64==0x%04x "
1025 "75==0x%04x \n", 669 "75==0x%04x \n",
1026 dev->id[49], 670 id[49],
1027 dev->id[53], 671 id[53],
1028 dev->id[63], 672 id[63],
1029 dev->id[64], 673 id[64],
1030 dev->id[75]); 674 id[75]);
1031 DPRINTK("80==0x%04x " 675 DPRINTK("80==0x%04x "
1032 "81==0x%04x " 676 "81==0x%04x "
1033 "82==0x%04x " 677 "82==0x%04x "
1034 "83==0x%04x " 678 "83==0x%04x "
1035 "84==0x%04x \n", 679 "84==0x%04x \n",
1036 dev->id[80], 680 id[80],
1037 dev->id[81], 681 id[81],
1038 dev->id[82], 682 id[82],
1039 dev->id[83], 683 id[83],
1040 dev->id[84]); 684 id[84]);
1041 DPRINTK("88==0x%04x " 685 DPRINTK("88==0x%04x "
1042 "93==0x%04x\n", 686 "93==0x%04x\n",
1043 dev->id[88], 687 id[88],
1044 dev->id[93]); 688 id[93]);
1045} 689}
1046 690
1047/* 691/*
@@ -1073,24 +717,77 @@ static unsigned int ata_pio_modes(const struct ata_device *adev)
1073 timing API will get this right anyway */ 717 timing API will get this right anyway */
1074} 718}
1075 719
1076struct ata_exec_internal_arg { 720static inline void
1077 unsigned int err_mask; 721ata_queue_packet_task(struct ata_port *ap)
1078 struct ata_taskfile *tf; 722{
1079 struct completion *waiting; 723 if (!(ap->flags & ATA_FLAG_FLUSH_PIO_TASK))
1080}; 724 queue_work(ata_wq, &ap->packet_task);
725}
1081 726
1082int ata_qc_complete_internal(struct ata_queued_cmd *qc) 727static inline void
728ata_queue_pio_task(struct ata_port *ap)
1083{ 729{
1084 struct ata_exec_internal_arg *arg = qc->private_data; 730 if (!(ap->flags & ATA_FLAG_FLUSH_PIO_TASK))
1085 struct completion *waiting = arg->waiting; 731 queue_work(ata_wq, &ap->pio_task);
732}
1086 733
1087 if (!(qc->err_mask & ~AC_ERR_DEV)) 734static inline void
1088 qc->ap->ops->tf_read(qc->ap, arg->tf); 735ata_queue_delayed_pio_task(struct ata_port *ap, unsigned long delay)
1089 arg->err_mask = qc->err_mask; 736{
1090 arg->waiting = NULL; 737 if (!(ap->flags & ATA_FLAG_FLUSH_PIO_TASK))
1091 complete(waiting); 738 queue_delayed_work(ata_wq, &ap->pio_task, delay);
739}
1092 740
1093 return 0; 741/**
742 * ata_flush_pio_tasks - Flush pio_task and packet_task
743 * @ap: the target ata_port
744 *
745 * After this function completes, pio_task and packet_task are
746 * guranteed not to be running or scheduled.
747 *
748 * LOCKING:
749 * Kernel thread context (may sleep)
750 */
751
752static void ata_flush_pio_tasks(struct ata_port *ap)
753{
754 int tmp = 0;
755 unsigned long flags;
756
757 DPRINTK("ENTER\n");
758
759 spin_lock_irqsave(&ap->host_set->lock, flags);
760 ap->flags |= ATA_FLAG_FLUSH_PIO_TASK;
761 spin_unlock_irqrestore(&ap->host_set->lock, flags);
762
763 DPRINTK("flush #1\n");
764 flush_workqueue(ata_wq);
765
766 /*
767 * At this point, if a task is running, it's guaranteed to see
768 * the FLUSH flag; thus, it will never queue pio tasks again.
769 * Cancel and flush.
770 */
771 tmp |= cancel_delayed_work(&ap->pio_task);
772 tmp |= cancel_delayed_work(&ap->packet_task);
773 if (!tmp) {
774 DPRINTK("flush #2\n");
775 flush_workqueue(ata_wq);
776 }
777
778 spin_lock_irqsave(&ap->host_set->lock, flags);
779 ap->flags &= ~ATA_FLAG_FLUSH_PIO_TASK;
780 spin_unlock_irqrestore(&ap->host_set->lock, flags);
781
782 DPRINTK("EXIT\n");
783}
784
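The intended caller is the timeout path (see the ata_qc_timeout hunk below); the usage pattern reduces to a sketch like:

	/* process context only -- flushing the workqueue may sleep */
	ata_flush_pio_tasks(ap);
	ap->hsm_task_state = HSM_ST_IDLE;	/* tasks gone; reset the HSM */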
785void ata_qc_complete_internal(struct ata_queued_cmd *qc)
786{
787 struct completion *waiting = qc->private_data;
788
789 qc->ap->ops->tf_read(qc->ap, &qc->tf);
790 complete(waiting);
1094} 791}
1095 792
1096/** 793/**
@@ -1121,7 +818,7 @@ ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
1121 struct ata_queued_cmd *qc; 818 struct ata_queued_cmd *qc;
1122 DECLARE_COMPLETION(wait); 819 DECLARE_COMPLETION(wait);
1123 unsigned long flags; 820 unsigned long flags;
1124 struct ata_exec_internal_arg arg; 821 unsigned int err_mask;
1125 822
1126 spin_lock_irqsave(&ap->host_set->lock, flags); 823 spin_lock_irqsave(&ap->host_set->lock, flags);
1127 824
@@ -1135,13 +832,12 @@ ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
1135 qc->nsect = buflen / ATA_SECT_SIZE; 832 qc->nsect = buflen / ATA_SECT_SIZE;
1136 } 833 }
1137 834
1138 arg.waiting = &wait; 835 qc->private_data = &wait;
1139 arg.tf = tf;
1140 qc->private_data = &arg;
1141 qc->complete_fn = ata_qc_complete_internal; 836 qc->complete_fn = ata_qc_complete_internal;
1142 837
1143 if (ata_qc_issue(qc)) 838 qc->err_mask = ata_qc_issue(qc);
1144 goto issue_fail; 839 if (qc->err_mask)
840 ata_qc_complete(qc);
1145 841
1146 spin_unlock_irqrestore(&ap->host_set->lock, flags); 842 spin_unlock_irqrestore(&ap->host_set->lock, flags);
1147 843
@@ -1154,8 +850,8 @@ ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
1154 * before the caller cleans up, it will result in a 850 * before the caller cleans up, it will result in a
1155 * spurious interrupt. We can live with that. 851 * spurious interrupt. We can live with that.
1156 */ 852 */
1157 if (arg.waiting) { 853 if (qc->flags & ATA_QCFLAG_ACTIVE) {
1158 qc->err_mask = AC_ERR_OTHER; 854 qc->err_mask = AC_ERR_TIMEOUT;
1159 ata_qc_complete(qc); 855 ata_qc_complete(qc);
1160 printk(KERN_WARNING "ata%u: qc timeout (cmd 0x%x)\n", 856 printk(KERN_WARNING "ata%u: qc timeout (cmd 0x%x)\n",
1161 ap->id, command); 857 ap->id, command);
@@ -1164,12 +860,12 @@ ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
1164 spin_unlock_irqrestore(&ap->host_set->lock, flags); 860 spin_unlock_irqrestore(&ap->host_set->lock, flags);
1165 } 861 }
1166 862
1167 return arg.err_mask; 863 *tf = qc->tf;
864 err_mask = qc->err_mask;
1168 865
1169 issue_fail:
1170 ata_qc_free(qc); 866 ata_qc_free(qc);
1171 spin_unlock_irqrestore(&ap->host_set->lock, flags); 867
1172 return AC_ERR_OTHER; 868 return err_mask;
1173} 869}
1174 870
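With the ata_exec_internal_arg indirection gone, callers read results straight out of the taskfile they passed in. A hedged caller sketch for an IDENTIFY DEVICE, assuming the full signature ata_exec_internal(ap, dev, &tf, dma_dir, buf, buflen) whose tail is cut off in the hunk header above:

	struct ata_taskfile tf;
	unsigned int err_mask;

	ata_tf_init(ap, &tf, dev->devno);
	tf.command = ATA_CMD_ID_ATA;		/* IDENTIFY DEVICE */
	tf.protocol = ATA_PROT_PIO;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

	err_mask = ata_exec_internal(ap, dev, &tf, DMA_FROM_DEVICE,
				     dev->id, sizeof(u16) * ATA_ID_WORDS);
	if (err_mask)
		/* tf was copied back from the qc on completion, so the
		 * error/status registers are available for inspection.
		 */
		printk(KERN_WARNING "ata%u: IDENTIFY failed (err_mask=0x%x)\n",
		       ap->id, err_mask);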
1175/** 871/**
@@ -1231,12 +927,11 @@ static void ata_dev_identify(struct ata_port *ap, unsigned int device)
1231{ 927{
1232 struct ata_device *dev = &ap->device[device]; 928 struct ata_device *dev = &ap->device[device];
1233 unsigned int major_version; 929 unsigned int major_version;
1234 u16 tmp;
1235 unsigned long xfer_modes; 930 unsigned long xfer_modes;
1236 unsigned int using_edd; 931 unsigned int using_edd;
1237 struct ata_taskfile tf; 932 struct ata_taskfile tf;
1238 unsigned int err_mask; 933 unsigned int err_mask;
1239 int rc; 934 int i, rc;
1240 935
1241 if (!ata_dev_present(dev)) { 936 if (!ata_dev_present(dev)) {
1242 DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n", 937 DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n",
@@ -1244,15 +939,16 @@ static void ata_dev_identify(struct ata_port *ap, unsigned int device)
1244 return; 939 return;
1245 } 940 }
1246 941
1247 if (ap->flags & (ATA_FLAG_SRST | ATA_FLAG_SATA_RESET)) 942 if (ap->ops->probe_reset ||
943 ap->flags & (ATA_FLAG_SRST | ATA_FLAG_SATA_RESET))
1248 using_edd = 0; 944 using_edd = 0;
1249 else 945 else
1250 using_edd = 1; 946 using_edd = 1;
1251 947
1252 DPRINTK("ENTER, host %u, dev %u\n", ap->id, device); 948 DPRINTK("ENTER, host %u, dev %u\n", ap->id, device);
1253 949
1254 assert (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ATAPI || 950 WARN_ON(dev->class != ATA_DEV_ATA && dev->class != ATA_DEV_ATAPI &&
1255 dev->class == ATA_DEV_NONE); 951 dev->class != ATA_DEV_NONE);
1256 952
1257 ata_dev_select(ap, device, 1, 1); /* select device 0/1 */ 953 ata_dev_select(ap, device, 1, 1); /* select device 0/1 */
1258 954
@@ -1324,18 +1020,17 @@ retry:
1324 if (!xfer_modes) 1020 if (!xfer_modes)
1325 xfer_modes = ata_pio_modes(dev); 1021 xfer_modes = ata_pio_modes(dev);
1326 1022
1327 ata_dump_id(dev); 1023 ata_dump_id(dev->id);
1328 1024
1329 /* ATA-specific feature tests */ 1025 /* ATA-specific feature tests */
1330 if (dev->class == ATA_DEV_ATA) { 1026 if (dev->class == ATA_DEV_ATA) {
1027 dev->n_sectors = ata_id_n_sectors(dev->id);
1028
1331 if (!ata_id_is_ata(dev->id)) /* sanity check */ 1029 if (!ata_id_is_ata(dev->id)) /* sanity check */
1332 goto err_out_nosup; 1030 goto err_out_nosup;
1333 1031
1334 /* get major version */ 1032 /* get major version */
1335 tmp = dev->id[ATA_ID_MAJOR_VER]; 1033 major_version = ata_id_major_version(dev->id);
1336 for (major_version = 14; major_version >= 1; major_version--)
1337 if (tmp & (1 << major_version))
1338 break;
1339 1034
1340 /* 1035 /*
1341 * The exact sequence expected by certain pre-ATA4 drives is: 1036 * The exact sequence expected by certain pre-ATA4 drives is:
@@ -1357,12 +1052,8 @@ retry:
1357 if (ata_id_has_lba(dev->id)) { 1052 if (ata_id_has_lba(dev->id)) {
1358 dev->flags |= ATA_DFLAG_LBA; 1053 dev->flags |= ATA_DFLAG_LBA;
1359 1054
1360 if (ata_id_has_lba48(dev->id)) { 1055 if (ata_id_has_lba48(dev->id))
1361 dev->flags |= ATA_DFLAG_LBA48; 1056 dev->flags |= ATA_DFLAG_LBA48;
1362 dev->n_sectors = ata_id_u64(dev->id, 100);
1363 } else {
1364 dev->n_sectors = ata_id_u32(dev->id, 60);
1365 }
1366 1057
1367 /* print device info to dmesg */ 1058 /* print device info to dmesg */
1368 printk(KERN_INFO "ata%u: dev %u ATA-%d, max %s, %Lu sectors:%s\n", 1059 printk(KERN_INFO "ata%u: dev %u ATA-%d, max %s, %Lu sectors:%s\n",
@@ -1378,15 +1069,12 @@ retry:
1378 dev->cylinders = dev->id[1]; 1069 dev->cylinders = dev->id[1];
1379 dev->heads = dev->id[3]; 1070 dev->heads = dev->id[3];
1380 dev->sectors = dev->id[6]; 1071 dev->sectors = dev->id[6];
1381 dev->n_sectors = dev->cylinders * dev->heads * dev->sectors;
1382 1072
1383 if (ata_id_current_chs_valid(dev->id)) { 1073 if (ata_id_current_chs_valid(dev->id)) {
1384 /* Current CHS translation is valid. */ 1074 /* Current CHS translation is valid. */
1385 dev->cylinders = dev->id[54]; 1075 dev->cylinders = dev->id[54];
1386 dev->heads = dev->id[55]; 1076 dev->heads = dev->id[55];
1387 dev->sectors = dev->id[56]; 1077 dev->sectors = dev->id[56];
1388
1389 dev->n_sectors = ata_id_u32(dev->id, 57);
1390 } 1078 }
1391 1079
1392 /* print device info to dmesg */ 1080 /* print device info to dmesg */
@@ -1399,7 +1087,7 @@ retry:
1399 1087
1400 } 1088 }
1401 1089
1402 ap->host->max_cmd_len = 16; 1090 dev->cdb_len = 16;
1403 } 1091 }
1404 1092
1405 /* ATAPI-specific feature tests */ 1093 /* ATAPI-specific feature tests */
@@ -1412,8 +1100,7 @@ retry:
1412 printk(KERN_WARNING "ata%u: unsupported CDB len\n", ap->id); 1100 printk(KERN_WARNING "ata%u: unsupported CDB len\n", ap->id);
1413 goto err_out_nosup; 1101 goto err_out_nosup;
1414 } 1102 }
1415 ap->cdb_len = (unsigned int) rc; 1103 dev->cdb_len = (unsigned int) rc;
1416 ap->host->max_cmd_len = (unsigned char) ap->cdb_len;
1417 1104
1418 /* print device info to dmesg */ 1105 /* print device info to dmesg */
1419 printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n", 1106 printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n",
@@ -1421,6 +1108,12 @@ retry:
1421 ata_mode_string(xfer_modes)); 1108 ata_mode_string(xfer_modes));
1422 } 1109 }
1423 1110
1111 ap->host->max_cmd_len = 0;
1112 for (i = 0; i < ATA_MAX_DEVICES; i++)
1113 ap->host->max_cmd_len = max_t(unsigned int,
1114 ap->host->max_cmd_len,
1115 ap->device[i].cdb_len);
1116
1424 DPRINTK("EXIT, drv_stat = 0x%x\n", ata_chk_status(ap)); 1117 DPRINTK("EXIT, drv_stat = 0x%x\n", ata_chk_status(ap));
1425 return; 1118 return;
1426 1119
@@ -1433,30 +1126,28 @@ err_out:
1433} 1126}
1434 1127
1435 1128
1436static inline u8 ata_dev_knobble(const struct ata_port *ap) 1129static inline u8 ata_dev_knobble(const struct ata_port *ap,
1130 struct ata_device *dev)
1437{ 1131{
1438 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(ap->device->id))); 1132 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
1439} 1133}
1440 1134
1441/** 1135/**
1442 * ata_dev_config - Run device specific handlers and check for 1136 * ata_dev_config - Run device specific handlers & check for SATA->PATA bridges
1443 * SATA->PATA bridges 1137 * @ap: Bus
1444 * @ap: Bus 1138 * @i: Device
1445 * @i: Device
1446 * 1139 *
1447 * LOCKING: 1140 * LOCKING:
1448 */ 1141 */
1449 1142
1450void ata_dev_config(struct ata_port *ap, unsigned int i) 1143void ata_dev_config(struct ata_port *ap, unsigned int i)
1451{ 1144{
1452 /* limit bridge transfers to udma5, 200 sectors */ 1145 /* limit bridge transfers to udma5, 200 sectors */
1453 if (ata_dev_knobble(ap)) { 1146 if (ata_dev_knobble(ap, &ap->device[i])) {
1454 printk(KERN_INFO "ata%u(%u): applying bridge limits\n", 1147 printk(KERN_INFO "ata%u(%u): applying bridge limits\n",
1455 ap->id, ap->device->devno); 1148 ap->id, i);
1456 ap->udma_mask &= ATA_UDMA5; 1149 ap->udma_mask &= ATA_UDMA5;
1457 ap->host->max_sectors = ATA_MAX_SECTORS; 1150 ap->device[i].max_sectors = ATA_MAX_SECTORS;
1458 ap->host->hostt->max_sectors = ATA_MAX_SECTORS;
1459 ap->device[i].flags |= ATA_DFLAG_LOCK_SECTORS;
1460 } 1151 }
1461 1152
1462 if (ap->ops->dev_config) 1153 if (ap->ops->dev_config)
@@ -1482,7 +1173,27 @@ static int ata_bus_probe(struct ata_port *ap)
1482{ 1173{
1483 unsigned int i, found = 0; 1174 unsigned int i, found = 0;
1484 1175
1485 ap->ops->phy_reset(ap); 1176 if (ap->ops->probe_reset) {
1177 unsigned int classes[ATA_MAX_DEVICES];
1178 int rc;
1179
1180 ata_port_probe(ap);
1181
1182 rc = ap->ops->probe_reset(ap, classes);
1183 if (rc == 0) {
1184 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1185 if (classes[i] == ATA_DEV_UNKNOWN)
1186 classes[i] = ATA_DEV_NONE;
1187 ap->device[i].class = classes[i];
1188 }
1189 } else {
1190 printk(KERN_ERR "ata%u: probe reset failed, "
1191 "disabling port\n", ap->id);
1192 ata_port_disable(ap);
1193 }
1194 } else
1195 ap->ops->phy_reset(ap);
1196
1486 if (ap->flags & ATA_FLAG_PORT_DISABLED) 1197 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1487 goto err_out; 1198 goto err_out;
1488 1199
@@ -1526,6 +1237,41 @@ void ata_port_probe(struct ata_port *ap)
1526} 1237}
1527 1238
1528/** 1239/**
1240 * sata_print_link_status - Print SATA link status
1241 * @ap: SATA port to printk link status about
1242 *
1243 * This function prints link speed and status of a SATA link.
1244 *
1245 * LOCKING:
1246 * None.
1247 */
1248static void sata_print_link_status(struct ata_port *ap)
1249{
1250 u32 sstatus, tmp;
1251 const char *speed;
1252
1253 if (!ap->ops->scr_read)
1254 return;
1255
1256 sstatus = scr_read(ap, SCR_STATUS);
1257
1258 if (sata_dev_present(ap)) {
1259 tmp = (sstatus >> 4) & 0xf;
1260 if (tmp & (1 << 0))
1261 speed = "1.5";
1262 else if (tmp & (1 << 1))
1263 speed = "3.0";
1264 else
1265 speed = "<unknown>";
1266 printk(KERN_INFO "ata%u: SATA link up %s Gbps (SStatus %X)\n",
1267 ap->id, speed, sstatus);
1268 } else {
1269 printk(KERN_INFO "ata%u: SATA link down (SStatus %X)\n",
1270 ap->id, sstatus);
1271 }
1272}
1273
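For orientation, the SStatus fields the function decodes, per the SATA SCR definitions (background, not new code in this patch):

/*
 * SStatus (SCR0) layout as read above:
 *   bits 3:0  DET - 0x3 = device present, phy communication established
 *                   (what sata_dev_present() tests)
 *   bits 7:4  SPD - 0x1 = 1.5 Gbps, 0x2 = 3.0 Gbps
 */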
1274/**
1529 * __sata_phy_reset - Wake/reset a low-level SATA PHY 1275 * __sata_phy_reset - Wake/reset a low-level SATA PHY
1530 * @ap: SATA port associated with target SATA PHY. 1276 * @ap: SATA port associated with target SATA PHY.
1531 * 1277 *
@@ -1559,27 +1305,14 @@ void __sata_phy_reset(struct ata_port *ap)
1559 break; 1305 break;
1560 } while (time_before(jiffies, timeout)); 1306 } while (time_before(jiffies, timeout));
1561 1307
1562 /* TODO: phy layer with polling, timeouts, etc. */ 1308 /* print link status */
1563 sstatus = scr_read(ap, SCR_STATUS); 1309 sata_print_link_status(ap);
1564 if (sata_dev_present(ap)) {
1565 const char *speed;
1566 u32 tmp;
1567 1310
1568 tmp = (sstatus >> 4) & 0xf; 1311 /* TODO: phy layer with polling, timeouts, etc. */
1569 if (tmp & (1 << 0)) 1312 if (sata_dev_present(ap))
1570 speed = "1.5";
1571 else if (tmp & (1 << 1))
1572 speed = "3.0";
1573 else
1574 speed = "<unknown>";
1575 printk(KERN_INFO "ata%u: SATA link up %s Gbps (SStatus %X)\n",
1576 ap->id, speed, sstatus);
1577 ata_port_probe(ap); 1313 ata_port_probe(ap);
1578 } else { 1314 else
1579 printk(KERN_INFO "ata%u: SATA link down (SStatus %X)\n",
1580 ap->id, sstatus);
1581 ata_port_disable(ap); 1315 ata_port_disable(ap);
1582 }
1583 1316
1584 if (ap->flags & ATA_FLAG_PORT_DISABLED) 1317 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1585 return; 1318 return;
@@ -1752,9 +1485,9 @@ int ata_timing_compute(struct ata_device *adev, unsigned short speed,
1752 ata_timing_quantize(t, t, T, UT); 1485 ata_timing_quantize(t, t, T, UT);
1753 1486
1754 /* 1487 /*
1755 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY, S.M.A.R.T 1488 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
 1756 * and some other commands. We have to ensure that the DMA cycle timing is 1489 * S.M.A.R.T. and some other commands. We have to ensure that the
1757 * slower/equal than the fastest PIO timing. 1490 * DMA cycle timing is slower/equal than the fastest PIO timing.
1758 */ 1491 */
1759 1492
1760 if (speed > XFER_PIO_4) { 1493 if (speed > XFER_PIO_4) {
@@ -1763,7 +1496,7 @@ int ata_timing_compute(struct ata_device *adev, unsigned short speed,
1763 } 1496 }
1764 1497
1765 /* 1498 /*
1766 * Lenghten active & recovery time so that cycle time is correct. 1499 * Lengthen active & recovery time so that cycle time is correct.
1767 */ 1500 */
1768 1501
1769 if (t->act8b + t->rec8b < t->cyc8b) { 1502 if (t->act8b + t->rec8b < t->cyc8b) {
@@ -1882,7 +1615,6 @@ static void ata_host_set_dma(struct ata_port *ap, u8 xfer_mode,
1882 * 1615 *
1883 * LOCKING: 1616 * LOCKING:
1884 * PCI/etc. bus probe sem. 1617 * PCI/etc. bus probe sem.
1885 *
1886 */ 1618 */
1887static void ata_set_mode(struct ata_port *ap) 1619static void ata_set_mode(struct ata_port *ap)
1888{ 1620{
@@ -1922,6 +1654,26 @@ err_out:
1922} 1654}
1923 1655
1924/** 1656/**
1657 * ata_tf_to_host - issue ATA taskfile to host controller
1658 * @ap: port to which command is being issued
1659 * @tf: ATA taskfile register set
1660 *
1661 * Issues ATA taskfile register set to ATA host controller,
1662 * with proper synchronization with interrupt handler and
1663 * other threads.
1664 *
1665 * LOCKING:
1666 * spin_lock_irqsave(host_set lock)
1667 */
1668
1669static inline void ata_tf_to_host(struct ata_port *ap,
1670 const struct ata_taskfile *tf)
1671{
1672 ap->ops->tf_load(ap, tf);
1673 ap->ops->exec_command(ap, tf);
1674}
1675
1676/**
1925 * ata_busy_sleep - sleep until BSY clears, or timeout 1677 * ata_busy_sleep - sleep until BSY clears, or timeout
1926 * @ap: port containing status register to be polled 1678 * @ap: port containing status register to be polled
1927 * @tmout_pat: impatience timeout 1679 * @tmout_pat: impatience timeout
@@ -1931,12 +1683,10 @@ err_out:
1931 * or a timeout occurs. 1683 * or a timeout occurs.
1932 * 1684 *
1933 * LOCKING: None. 1685 * LOCKING: None.
1934 *
1935 */ 1686 */
1936 1687
1937static unsigned int ata_busy_sleep (struct ata_port *ap, 1688unsigned int ata_busy_sleep (struct ata_port *ap,
1938 unsigned long tmout_pat, 1689 unsigned long tmout_pat, unsigned long tmout)
1939 unsigned long tmout)
1940{ 1690{
1941 unsigned long timer_start, timeout; 1691 unsigned long timer_start, timeout;
1942 u8 status; 1692 u8 status;
@@ -2155,9 +1905,9 @@ void ata_bus_reset(struct ata_port *ap)
2155 /* 1905 /*
2156 * determine by signature whether we have ATA or ATAPI devices 1906 * determine by signature whether we have ATA or ATAPI devices
2157 */ 1907 */
2158 err = ata_dev_try_classify(ap, 0); 1908 ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
2159 if ((slave_possible) && (err != 0x81)) 1909 if ((slave_possible) && (err != 0x81))
2160 ata_dev_try_classify(ap, 1); 1910 ap->device[1].class = ata_dev_try_classify(ap, 1, &err);
2161 1911
2162 /* re-enable interrupts */ 1912 /* re-enable interrupts */
2163 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */ 1913 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
@@ -2192,6 +1942,335 @@ err_out:
2192 DPRINTK("EXIT\n"); 1942 DPRINTK("EXIT\n");
2193} 1943}
2194 1944
1945static int sata_phy_resume(struct ata_port *ap)
1946{
1947 unsigned long timeout = jiffies + (HZ * 5);
1948 u32 sstatus;
1949
1950 scr_write_flush(ap, SCR_CONTROL, 0x300);
1951
1952 /* Wait for phy to become ready, if necessary. */
1953 do {
1954 msleep(200);
1955 sstatus = scr_read(ap, SCR_STATUS);
1956 if ((sstatus & 0xf) != 1)
1957 return 0;
1958 } while (time_before(jiffies, timeout));
1959
1960 return -1;
1961}
1962
1963/**
1964 * ata_std_probeinit - initialize probing
1965 * @ap: port to be probed
1966 *
1967 * @ap is about to be probed. Initialize it. This function is
1968 * to be used as standard callback for ata_drive_probe_reset().
1969 *
1970 * NOTE!!! Do not use this function as probeinit if a low level
1971 * driver implements only hardreset. Just pass NULL as probeinit
1972 * in that case. Using this function is probably okay but doing
1973 * so makes reset sequence different from the original
1974 * ->phy_reset implementation and Jeff nervous. :-P
1975 */
 1976void ata_std_probeinit(struct ata_port *ap)
1977{
1978 if (ap->flags & ATA_FLAG_SATA && ap->ops->scr_read) {
1979 sata_phy_resume(ap);
1980 if (sata_dev_present(ap))
1981 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
1982 }
1983}
1984
1985/**
1986 * ata_std_softreset - reset host port via ATA SRST
1987 * @ap: port to reset
1988 * @verbose: fail verbosely
1989 * @classes: resulting classes of attached devices
1990 *
1991 * Reset host port using ATA SRST. This function is to be used
1992 * as standard callback for ata_drive_*_reset() functions.
1993 *
1994 * LOCKING:
1995 * Kernel thread context (may sleep)
1996 *
1997 * RETURNS:
1998 * 0 on success, -errno otherwise.
1999 */
2000int ata_std_softreset(struct ata_port *ap, int verbose, unsigned int *classes)
2001{
2002 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2003 unsigned int devmask = 0, err_mask;
2004 u8 err;
2005
2006 DPRINTK("ENTER\n");
2007
2008 if (ap->ops->scr_read && !sata_dev_present(ap)) {
2009 classes[0] = ATA_DEV_NONE;
2010 goto out;
2011 }
2012
2013 /* determine if device 0/1 are present */
2014 if (ata_devchk(ap, 0))
2015 devmask |= (1 << 0);
2016 if (slave_possible && ata_devchk(ap, 1))
2017 devmask |= (1 << 1);
2018
2019 /* select device 0 again */
2020 ap->ops->dev_select(ap, 0);
2021
2022 /* issue bus reset */
2023 DPRINTK("about to softreset, devmask=%x\n", devmask);
2024 err_mask = ata_bus_softreset(ap, devmask);
2025 if (err_mask) {
2026 if (verbose)
2027 printk(KERN_ERR "ata%u: SRST failed (err_mask=0x%x)\n",
2028 ap->id, err_mask);
2029 else
2030 DPRINTK("EXIT, softreset failed (err_mask=0x%x)\n",
2031 err_mask);
2032 return -EIO;
2033 }
2034
2035 /* determine by signature whether we have ATA or ATAPI devices */
2036 classes[0] = ata_dev_try_classify(ap, 0, &err);
2037 if (slave_possible && err != 0x81)
2038 classes[1] = ata_dev_try_classify(ap, 1, &err);
2039
2040 out:
2041 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
2042 return 0;
2043}
2044
2045/**
2046 * sata_std_hardreset - reset host port via SATA phy reset
2047 * @ap: port to reset
2048 * @verbose: fail verbosely
2049 * @class: resulting class of attached device
2050 *
2051 * SATA phy-reset host port using DET bits of SControl register.
2052 * This function is to be used as standard callback for
2053 * ata_drive_*_reset().
2054 *
2055 * LOCKING:
2056 * Kernel thread context (may sleep)
2057 *
2058 * RETURNS:
2059 * 0 on success, -errno otherwise.
2060 */
2061int sata_std_hardreset(struct ata_port *ap, int verbose, unsigned int *class)
2062{
2063 DPRINTK("ENTER\n");
2064
2065 /* Issue phy wake/reset */
2066 scr_write_flush(ap, SCR_CONTROL, 0x301);
2067
2068 /*
2069 * Couldn't find anything in SATA I/II specs, but AHCI-1.1
2070 * 10.4.2 says at least 1 ms.
2071 */
2072 msleep(1);
2073
2074 /* Bring phy back */
2075 sata_phy_resume(ap);
2076
2077 /* TODO: phy layer with polling, timeouts, etc. */
2078 if (!sata_dev_present(ap)) {
2079 *class = ATA_DEV_NONE;
2080 DPRINTK("EXIT, link offline\n");
2081 return 0;
2082 }
2083
2084 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
2085 if (verbose)
2086 printk(KERN_ERR "ata%u: COMRESET failed "
2087 "(device not ready)\n", ap->id);
2088 else
2089 DPRINTK("EXIT, device not ready\n");
2090 return -EIO;
2091 }
2092
2093 ap->ops->dev_select(ap, 0); /* probably unnecessary */
2094
2095 *class = ata_dev_try_classify(ap, 0, NULL);
2096
2097 DPRINTK("EXIT, class=%u\n", *class);
2098 return 0;
2099}
2100
2101/**
2102 * ata_std_postreset - standard postreset callback
2103 * @ap: the target ata_port
2104 * @classes: classes of attached devices
2105 *
2106 * This function is invoked after a successful reset. Note that
2107 * the device might have been reset more than once using
2108 * different reset methods before postreset is invoked.
 2109 * postreset is also responsible for setting cable type.
2110 *
2111 * This function is to be used as standard callback for
2112 * ata_drive_*_reset().
2113 *
2114 * LOCKING:
2115 * Kernel thread context (may sleep)
2116 */
2117void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
2118{
2119 DPRINTK("ENTER\n");
2120
2121 /* set cable type */
2122 if (ap->cbl == ATA_CBL_NONE && ap->flags & ATA_FLAG_SATA)
2123 ap->cbl = ATA_CBL_SATA;
2124
2125 /* print link status */
2126 if (ap->cbl == ATA_CBL_SATA)
2127 sata_print_link_status(ap);
2128
2129 /* re-enable interrupts */
2130 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
2131 ata_irq_on(ap);
2132
2133 /* is double-select really necessary? */
2134 if (classes[0] != ATA_DEV_NONE)
2135 ap->ops->dev_select(ap, 1);
2136 if (classes[1] != ATA_DEV_NONE)
2137 ap->ops->dev_select(ap, 0);
2138
2139 /* bail out if no device is present */
2140 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2141 DPRINTK("EXIT, no device\n");
2142 return;
2143 }
2144
2145 /* set up device control */
2146 if (ap->ioaddr.ctl_addr) {
2147 if (ap->flags & ATA_FLAG_MMIO)
2148 writeb(ap->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
2149 else
2150 outb(ap->ctl, ap->ioaddr.ctl_addr);
2151 }
2152
2153 DPRINTK("EXIT\n");
2154}
2155
2156/**
2157 * ata_std_probe_reset - standard probe reset method
 2158 * @ap: port to perform probe-reset
2159 * @classes: resulting classes of attached devices
2160 *
2161 * The stock off-the-shelf ->probe_reset method.
2162 *
2163 * LOCKING:
2164 * Kernel thread context (may sleep)
2165 *
2166 * RETURNS:
2167 * 0 on success, -errno otherwise.
2168 */
2169int ata_std_probe_reset(struct ata_port *ap, unsigned int *classes)
2170{
2171 ata_reset_fn_t hardreset;
2172
2173 hardreset = NULL;
2174 if (ap->flags & ATA_FLAG_SATA && ap->ops->scr_read)
2175 hardreset = sata_std_hardreset;
2176
2177 return ata_drive_probe_reset(ap, ata_std_probeinit,
2178 ata_std_softreset, hardreset,
2179 ata_std_postreset, classes);
2180}
2181
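A low-level driver opts into the new probe path simply by pointing ->probe_reset at this helper; ata_bus_probe() (hunk above) then prefers it over ->phy_reset. A hedged fragment of the relevant ata_port_operations wiring; example_scr_read/example_scr_write are hypothetical:

static const struct ata_port_operations example_ops = {
	/* illustrative fragment -- a real driver fills many more hooks */
	.probe_reset	= ata_std_probe_reset,
	.dev_select	= ata_std_dev_select,
	.scr_read	= example_scr_read,	/* lets probe_reset use hardreset */
	.scr_write	= example_scr_write,
};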
2182static int do_probe_reset(struct ata_port *ap, ata_reset_fn_t reset,
2183 ata_postreset_fn_t postreset,
2184 unsigned int *classes)
2185{
2186 int i, rc;
2187
2188 for (i = 0; i < ATA_MAX_DEVICES; i++)
2189 classes[i] = ATA_DEV_UNKNOWN;
2190
2191 rc = reset(ap, 0, classes);
2192 if (rc)
2193 return rc;
2194
 2195	/* If any class isn't ATA_DEV_UNKNOWN, classification is
 2196	 * complete; convert all remaining ATA_DEV_UNKNOWN entries
 2197	 * to ATA_DEV_NONE.
2198 */
2199 for (i = 0; i < ATA_MAX_DEVICES; i++)
2200 if (classes[i] != ATA_DEV_UNKNOWN)
2201 break;
2202
2203 if (i < ATA_MAX_DEVICES)
2204 for (i = 0; i < ATA_MAX_DEVICES; i++)
2205 if (classes[i] == ATA_DEV_UNKNOWN)
2206 classes[i] = ATA_DEV_NONE;
2207
2208 if (postreset)
2209 postreset(ap, classes);
2210
2211 return classes[0] != ATA_DEV_UNKNOWN ? 0 : -ENODEV;
2212}
2213
2214/**
2215 * ata_drive_probe_reset - Perform probe reset with given methods
2216 * @ap: port to reset
2217 * @probeinit: probeinit method (can be NULL)
2218 * @softreset: softreset method (can be NULL)
2219 * @hardreset: hardreset method (can be NULL)
2220 * @postreset: postreset method (can be NULL)
2221 * @classes: resulting classes of attached devices
2222 *
 2223 * Reset the specified port and classify attached devices using
 2224 * the given methods. This function prefers softreset but tries
 2225 * all possible reset sequences to reset and classify devices.
 2226 * This function is intended to be used by low-level drivers for
 2227 * constructing their ->probe_reset callback.
2228 *
2229 * Reset methods should follow the following rules.
2230 *
 2231 * - Return 0 on success, -errno on failure.
2232 * - If classification is supported, fill classes[] with
2233 * recognized class codes.
2234 * - If classification is not supported, leave classes[] alone.
2235 * - If verbose is non-zero, print error message on failure;
2236 * otherwise, shut up.
2237 *
2238 * LOCKING:
2239 * Kernel thread context (may sleep)
2240 *
2241 * RETURNS:
 2242 * 0 on success, -EINVAL if no reset method is available, -ENODEV
2243 * if classification fails, and any error code from reset
2244 * methods.
2245 */
2246int ata_drive_probe_reset(struct ata_port *ap, ata_probeinit_fn_t probeinit,
2247 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
2248 ata_postreset_fn_t postreset, unsigned int *classes)
2249{
2250 int rc = -EINVAL;
2251
2252 if (probeinit)
2253 probeinit(ap);
2254
2255 if (softreset) {
2256 rc = do_probe_reset(ap, softreset, postreset, classes);
2257 if (rc == 0)
2258 return 0;
2259 }
2260
2261 if (!hardreset)
2262 return rc;
2263
2264 rc = do_probe_reset(ap, hardreset, postreset, classes);
2265 if (rc == 0 || rc != -ENODEV)
2266 return rc;
2267
2268 if (softreset)
2269 rc = do_probe_reset(ap, softreset, postreset, classes);
2270
2271 return rc;
2272}
2273
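The rules listed above shape what a driver-private reset method looks like. A minimal conforming sketch; example_comreset() is a hypothetical helper, and ata_dev_try_classify() is shown only to illustrate the classification rule (it is static to this file):

static int example_hardreset(struct ata_port *ap, int verbose,
			     unsigned int *classes)
{
	if (example_comreset(ap)) {
		if (verbose)		/* rule: complain only when verbose */
			printk(KERN_ERR "ata%u: reset failed\n", ap->id);
		return -EIO;		/* rule: -errno on failure */
	}

	/* rule: fill classes[] only if classification is supported;
	 * otherwise leave it alone and let do_probe_reset() convert
	 * the remaining ATA_DEV_UNKNOWN entries.
	 */
	classes[0] = ata_dev_try_classify(ap, 0, NULL);
	return 0;			/* rule: 0 on success */
}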
2195static void ata_pr_blacklisted(const struct ata_port *ap, 2274static void ata_pr_blacklisted(const struct ata_port *ap,
2196 const struct ata_device *dev) 2275 const struct ata_device *dev)
2197{ 2276{
@@ -2233,24 +2312,14 @@ static const char * const ata_dma_blacklist [] = {
2233 2312
2234static int ata_dma_blacklisted(const struct ata_device *dev) 2313static int ata_dma_blacklisted(const struct ata_device *dev)
2235{ 2314{
2236 unsigned char model_num[40]; 2315 unsigned char model_num[41];
2237 char *s;
2238 unsigned int len;
2239 int i; 2316 int i;
2240 2317
2241 ata_dev_id_string(dev->id, model_num, ATA_ID_PROD_OFS, 2318 ata_dev_id_c_string(dev->id, model_num, ATA_ID_PROD_OFS,
2242 sizeof(model_num)); 2319 sizeof(model_num));
2243 s = &model_num[0];
2244 len = strnlen(s, sizeof(model_num));
2245
2246 /* ATAPI specifies that empty space is blank-filled; remove blanks */
2247 while ((len > 0) && (s[len - 1] == ' ')) {
2248 len--;
2249 s[len] = 0;
2250 }
2251 2320
2252 for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i++) 2321 for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i++)
2253 if (!strncmp(ata_dma_blacklist[i], s, len)) 2322 if (!strcmp(ata_dma_blacklist[i], model_num))
2254 return 1; 2323 return 1;
2255 2324
2256 return 0; 2325 return 0;
@@ -2264,7 +2333,7 @@ static unsigned int ata_get_mode_mask(const struct ata_port *ap, int shift)
2264 master = &ap->device[0]; 2333 master = &ap->device[0];
2265 slave = &ap->device[1]; 2334 slave = &ap->device[1];
2266 2335
2267 assert (ata_dev_present(master) || ata_dev_present(slave)); 2336 WARN_ON(!ata_dev_present(master) && !ata_dev_present(slave));
2268 2337
2269 if (shift == ATA_SHIFT_UDMA) { 2338 if (shift == ATA_SHIFT_UDMA) {
2270 mask = ap->udma_mask; 2339 mask = ap->udma_mask;
@@ -2446,7 +2515,7 @@ static void ata_dev_reread_id(struct ata_port *ap, struct ata_device *dev)
2446 2515
2447 swap_buf_le16(dev->id, ATA_ID_WORDS); 2516 swap_buf_le16(dev->id, ATA_ID_WORDS);
2448 2517
2449 ata_dump_id(dev); 2518 ata_dump_id(dev->id);
2450 2519
2451 DPRINTK("EXIT\n"); 2520 DPRINTK("EXIT\n");
2452 2521
@@ -2510,11 +2579,11 @@ static void ata_sg_clean(struct ata_queued_cmd *qc)
2510 int dir = qc->dma_dir; 2579 int dir = qc->dma_dir;
2511 void *pad_buf = NULL; 2580 void *pad_buf = NULL;
2512 2581
2513 assert(qc->flags & ATA_QCFLAG_DMAMAP); 2582 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
2514 assert(sg != NULL); 2583 WARN_ON(sg == NULL);
2515 2584
2516 if (qc->flags & ATA_QCFLAG_SINGLE) 2585 if (qc->flags & ATA_QCFLAG_SINGLE)
2517 assert(qc->n_elem == 1); 2586 WARN_ON(qc->n_elem != 1);
2518 2587
2519 VPRINTK("unmapping %u sg elements\n", qc->n_elem); 2588 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
2520 2589
@@ -2569,8 +2638,8 @@ static void ata_fill_sg(struct ata_queued_cmd *qc)
2569 struct scatterlist *sg; 2638 struct scatterlist *sg;
2570 unsigned int idx; 2639 unsigned int idx;
2571 2640
2572 assert(qc->__sg != NULL); 2641 WARN_ON(qc->__sg == NULL);
2573 assert(qc->n_elem > 0); 2642 WARN_ON(qc->n_elem == 0);
2574 2643
2575 idx = 0; 2644 idx = 0;
2576 ata_for_each_sg(sg, qc) { 2645 ata_for_each_sg(sg, qc) {
@@ -2722,7 +2791,7 @@ static int ata_sg_setup_one(struct ata_queued_cmd *qc)
2722 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ); 2791 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
2723 struct scatterlist *psg = &qc->pad_sgent; 2792 struct scatterlist *psg = &qc->pad_sgent;
2724 2793
2725 assert(qc->dev->class == ATA_DEV_ATAPI); 2794 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
2726 2795
2727 memset(pad_buf, 0, ATA_DMA_PAD_SZ); 2796 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
2728 2797
@@ -2784,7 +2853,7 @@ static int ata_sg_setup(struct ata_queued_cmd *qc)
2784 int n_elem, pre_n_elem, dir, trim_sg = 0; 2853 int n_elem, pre_n_elem, dir, trim_sg = 0;
2785 2854
2786 VPRINTK("ENTER, ata%u\n", ap->id); 2855 VPRINTK("ENTER, ata%u\n", ap->id);
2787 assert(qc->flags & ATA_QCFLAG_SG); 2856 WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
2788 2857
2789 /* we must lengthen transfers to end on a 32-bit boundary */ 2858 /* we must lengthen transfers to end on a 32-bit boundary */
2790 qc->pad_len = lsg->length & 3; 2859 qc->pad_len = lsg->length & 3;
@@ -2793,7 +2862,7 @@ static int ata_sg_setup(struct ata_queued_cmd *qc)
2793 struct scatterlist *psg = &qc->pad_sgent; 2862 struct scatterlist *psg = &qc->pad_sgent;
2794 unsigned int offset; 2863 unsigned int offset;
2795 2864
2796 assert(qc->dev->class == ATA_DEV_ATAPI); 2865 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
2797 2866
2798 memset(pad_buf, 0, ATA_DMA_PAD_SZ); 2867 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
2799 2868
@@ -2869,7 +2938,7 @@ void ata_poll_qc_complete(struct ata_queued_cmd *qc)
2869} 2938}
2870 2939
2871/** 2940/**
2872 * ata_pio_poll - 2941 * ata_pio_poll - poll using PIO, depending on current state
2873 * @ap: the target ata_port 2942 * @ap: the target ata_port
2874 * 2943 *
2875 * LOCKING: 2944 * LOCKING:
@@ -2887,7 +2956,7 @@ static unsigned long ata_pio_poll(struct ata_port *ap)
2887 unsigned int reg_state = HSM_ST_UNKNOWN; 2956 unsigned int reg_state = HSM_ST_UNKNOWN;
2888 2957
2889 qc = ata_qc_from_tag(ap, ap->active_tag); 2958 qc = ata_qc_from_tag(ap, ap->active_tag);
2890 assert(qc != NULL); 2959 WARN_ON(qc == NULL);
2891 2960
2892 switch (ap->hsm_task_state) { 2961 switch (ap->hsm_task_state) {
2893 case HSM_ST: 2962 case HSM_ST:
@@ -2908,7 +2977,7 @@ static unsigned long ata_pio_poll(struct ata_port *ap)
2908 status = ata_chk_status(ap); 2977 status = ata_chk_status(ap);
2909 if (status & ATA_BUSY) { 2978 if (status & ATA_BUSY) {
2910 if (time_after(jiffies, ap->pio_task_timeout)) { 2979 if (time_after(jiffies, ap->pio_task_timeout)) {
2911 qc->err_mask |= AC_ERR_ATA_BUS; 2980 qc->err_mask |= AC_ERR_TIMEOUT;
2912 ap->hsm_task_state = HSM_ST_TMOUT; 2981 ap->hsm_task_state = HSM_ST_TMOUT;
2913 return 0; 2982 return 0;
2914 } 2983 }
@@ -2955,7 +3024,7 @@ static int ata_pio_complete (struct ata_port *ap)
2955 } 3024 }
2956 3025
2957 qc = ata_qc_from_tag(ap, ap->active_tag); 3026 qc = ata_qc_from_tag(ap, ap->active_tag);
2958 assert(qc != NULL); 3027 WARN_ON(qc == NULL);
2959 3028
2960 drv_stat = ata_wait_idle(ap); 3029 drv_stat = ata_wait_idle(ap);
2961 if (!ata_ok(drv_stat)) { 3030 if (!ata_ok(drv_stat)) {
@@ -2966,7 +3035,7 @@ static int ata_pio_complete (struct ata_port *ap)
2966 3035
2967 ap->hsm_task_state = HSM_ST_IDLE; 3036 ap->hsm_task_state = HSM_ST_IDLE;
2968 3037
2969 assert(qc->err_mask == 0); 3038 WARN_ON(qc->err_mask);
2970 ata_poll_qc_complete(qc); 3039 ata_poll_qc_complete(qc);
2971 3040
2972 /* another command may start at this point */ 3041 /* another command may start at this point */
@@ -2976,7 +3045,7 @@ static int ata_pio_complete (struct ata_port *ap)
2976 3045
2977 3046
2978/** 3047/**
2979 * swap_buf_le16 - swap halves of 16-words in place 3048 * swap_buf_le16 - swap halves of 16-bit words in place
2980 * @buf: Buffer to swap 3049 * @buf: Buffer to swap
2981 * @buf_words: Number of 16-bit words in buffer. 3050 * @buf_words: Number of 16-bit words in buffer.
2982 * 3051 *
@@ -3286,7 +3355,7 @@ static void atapi_pio_bytes(struct ata_queued_cmd *qc)
3286err_out: 3355err_out:
3287 printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n", 3356 printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n",
3288 ap->id, dev->devno); 3357 ap->id, dev->devno);
3289 qc->err_mask |= AC_ERR_ATA_BUS; 3358 qc->err_mask |= AC_ERR_HSM;
3290 ap->hsm_task_state = HSM_ST_ERR; 3359 ap->hsm_task_state = HSM_ST_ERR;
3291} 3360}
3292 3361
@@ -3323,7 +3392,7 @@ static void ata_pio_block(struct ata_port *ap)
3323 } 3392 }
3324 3393
3325 qc = ata_qc_from_tag(ap, ap->active_tag); 3394 qc = ata_qc_from_tag(ap, ap->active_tag);
3326 assert(qc != NULL); 3395 WARN_ON(qc == NULL);
3327 3396
3328 /* check error */ 3397 /* check error */
3329 if (status & (ATA_ERR | ATA_DF)) { 3398 if (status & (ATA_ERR | ATA_DF)) {
@@ -3344,7 +3413,7 @@ static void ata_pio_block(struct ata_port *ap)
3344 } else { 3413 } else {
3345 /* handle BSY=0, DRQ=0 as error */ 3414 /* handle BSY=0, DRQ=0 as error */
3346 if ((status & ATA_DRQ) == 0) { 3415 if ((status & ATA_DRQ) == 0) {
3347 qc->err_mask |= AC_ERR_ATA_BUS; 3416 qc->err_mask |= AC_ERR_HSM;
3348 ap->hsm_task_state = HSM_ST_ERR; 3417 ap->hsm_task_state = HSM_ST_ERR;
3349 return; 3418 return;
3350 } 3419 }
@@ -3358,7 +3427,7 @@ static void ata_pio_error(struct ata_port *ap)
3358 struct ata_queued_cmd *qc; 3427 struct ata_queued_cmd *qc;
3359 3428
3360 qc = ata_qc_from_tag(ap, ap->active_tag); 3429 qc = ata_qc_from_tag(ap, ap->active_tag);
3361 assert(qc != NULL); 3430 WARN_ON(qc == NULL);
3362 3431
3363 if (qc->tf.command != ATA_CMD_PACKET) 3432 if (qc->tf.command != ATA_CMD_PACKET)
3364 printk(KERN_WARNING "ata%u: PIO error\n", ap->id); 3433 printk(KERN_WARNING "ata%u: PIO error\n", ap->id);
@@ -3366,7 +3435,7 @@ static void ata_pio_error(struct ata_port *ap)
3366 /* make sure qc->err_mask is available to 3435 /* make sure qc->err_mask is available to
3367 * know what's wrong and recover 3436 * know what's wrong and recover
3368 */ 3437 */
3369 assert(qc->err_mask); 3438 WARN_ON(qc->err_mask == 0);
3370 3439
3371 ap->hsm_task_state = HSM_ST_IDLE; 3440 ap->hsm_task_state = HSM_ST_IDLE;
3372 3441
@@ -3407,7 +3476,7 @@ fsm_start:
3407 } 3476 }
3408 3477
3409 if (timeout) 3478 if (timeout)
3410 queue_delayed_work(ata_wq, &ap->pio_task, timeout); 3479 ata_queue_delayed_pio_task(ap, timeout);
3411 else if (!qc_completed) 3480 else if (!qc_completed)
3412 goto fsm_start; 3481 goto fsm_start;
3413} 3482}
@@ -3440,15 +3509,10 @@ static void ata_qc_timeout(struct ata_queued_cmd *qc)
3440 3509
3441 DPRINTK("ENTER\n"); 3510 DPRINTK("ENTER\n");
3442 3511
3443 spin_lock_irqsave(&host_set->lock, flags); 3512 ata_flush_pio_tasks(ap);
3513 ap->hsm_task_state = HSM_ST_IDLE;
3444 3514
3445 /* hack alert! We cannot use the supplied completion 3515 spin_lock_irqsave(&host_set->lock, flags);
3446 * function from inside the ->eh_strategy_handler() thread.
3447 * libata is the only user of ->eh_strategy_handler() in
3448 * any kernel, so the default scsi_done() assumes it is
3449 * not being called from the SCSI EH.
3450 */
3451 qc->scsidone = scsi_finish_command;
3452 3516
3453 switch (qc->tf.protocol) { 3517 switch (qc->tf.protocol) {
3454 3518
@@ -3473,12 +3537,13 @@ static void ata_qc_timeout(struct ata_queued_cmd *qc)
3473 3537
3474 /* complete taskfile transaction */ 3538 /* complete taskfile transaction */
3475 qc->err_mask |= ac_err_mask(drv_stat); 3539 qc->err_mask |= ac_err_mask(drv_stat);
3476 ata_qc_complete(qc);
3477 break; 3540 break;
3478 } 3541 }
3479 3542
3480 spin_unlock_irqrestore(&host_set->lock, flags); 3543 spin_unlock_irqrestore(&host_set->lock, flags);
3481 3544
3545 ata_eh_qc_complete(qc);
3546
3482 DPRINTK("EXIT\n"); 3547 DPRINTK("EXIT\n");
3483} 3548}
3484 3549
@@ -3503,20 +3568,10 @@ static void ata_qc_timeout(struct ata_queued_cmd *qc)
3503 3568
3504void ata_eng_timeout(struct ata_port *ap) 3569void ata_eng_timeout(struct ata_port *ap)
3505{ 3570{
3506 struct ata_queued_cmd *qc;
3507
3508 DPRINTK("ENTER\n"); 3571 DPRINTK("ENTER\n");
3509 3572
3510 qc = ata_qc_from_tag(ap, ap->active_tag); 3573 ata_qc_timeout(ata_qc_from_tag(ap, ap->active_tag));
3511 if (qc)
3512 ata_qc_timeout(qc);
3513 else {
3514 printk(KERN_ERR "ata%u: BUG: timeout without command\n",
3515 ap->id);
3516 goto out;
3517 }
3518 3574
3519out:
3520 DPRINTK("EXIT\n"); 3575 DPRINTK("EXIT\n");
3521} 3576}
3522 3577
@@ -3572,21 +3627,6 @@ struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
3572 return qc; 3627 return qc;
3573} 3628}
3574 3629
3575static void __ata_qc_complete(struct ata_queued_cmd *qc)
3576{
3577 struct ata_port *ap = qc->ap;
3578 unsigned int tag;
3579
3580 qc->flags = 0;
3581 tag = qc->tag;
3582 if (likely(ata_tag_valid(tag))) {
3583 if (tag == ap->active_tag)
3584 ap->active_tag = ATA_TAG_POISON;
3585 qc->tag = ATA_TAG_POISON;
3586 clear_bit(tag, &ap->qactive);
3587 }
3588}
3589
3590/** 3630/**
3591 * ata_qc_free - free unused ata_queued_cmd 3631 * ata_qc_free - free unused ata_queued_cmd
3592 * @qc: Command to complete 3632 * @qc: Command to complete
@@ -3599,29 +3639,25 @@ static void __ata_qc_complete(struct ata_queued_cmd *qc)
3599 */ 3639 */
3600void ata_qc_free(struct ata_queued_cmd *qc) 3640void ata_qc_free(struct ata_queued_cmd *qc)
3601{ 3641{
3602 assert(qc != NULL); /* ata_qc_from_tag _might_ return NULL */ 3642 struct ata_port *ap = qc->ap;
3643 unsigned int tag;
3603 3644
3604 __ata_qc_complete(qc); 3645 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
3605}
3606 3646
3607/** 3647 qc->flags = 0;
3608 * ata_qc_complete - Complete an active ATA command 3648 tag = qc->tag;
3609 * @qc: Command to complete 3649 if (likely(ata_tag_valid(tag))) {
3610 * @err_mask: ATA Status register contents 3650 if (tag == ap->active_tag)
3611 * 3651 ap->active_tag = ATA_TAG_POISON;
3612 * Indicate to the mid and upper layers that an ATA 3652 qc->tag = ATA_TAG_POISON;
3613 * command has completed, with either an ok or not-ok status. 3653 clear_bit(tag, &ap->qactive);
3614 * 3654 }
3615 * LOCKING: 3655}
3616 * spin_lock_irqsave(host_set lock)
3617 */
3618 3656
3619void ata_qc_complete(struct ata_queued_cmd *qc) 3657void __ata_qc_complete(struct ata_queued_cmd *qc)
3620{ 3658{
3621 int rc; 3659 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
3622 3660 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
3623 assert(qc != NULL); /* ata_qc_from_tag _might_ return NULL */
3624 assert(qc->flags & ATA_QCFLAG_ACTIVE);
3625 3661
3626 if (likely(qc->flags & ATA_QCFLAG_DMAMAP)) 3662 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
3627 ata_sg_clean(qc); 3663 ata_sg_clean(qc);
@@ -3633,17 +3669,7 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
3633 qc->flags &= ~ATA_QCFLAG_ACTIVE; 3669 qc->flags &= ~ATA_QCFLAG_ACTIVE;
3634 3670
3635 /* call completion callback */ 3671 /* call completion callback */
3636 rc = qc->complete_fn(qc); 3672 qc->complete_fn(qc);
3637
3638 /* if callback indicates not to complete command (non-zero),
3639 * return immediately
3640 */
3641 if (rc != 0)
3642 return;
3643
3644 __ata_qc_complete(qc);
3645
3646 VPRINTK("EXIT\n");
3647} 3673}
3648 3674
3649static inline int ata_should_dma_map(struct ata_queued_cmd *qc) 3675static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
@@ -3683,20 +3709,20 @@ static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
3683 * spin_lock_irqsave(host_set lock) 3709 * spin_lock_irqsave(host_set lock)
3684 * 3710 *
3685 * RETURNS: 3711 * RETURNS:
3686 * Zero on success, negative on error. 3712 * Zero on success, AC_ERR_* mask on failure
3687 */ 3713 */
3688 3714
3689int ata_qc_issue(struct ata_queued_cmd *qc) 3715unsigned int ata_qc_issue(struct ata_queued_cmd *qc)
3690{ 3716{
3691 struct ata_port *ap = qc->ap; 3717 struct ata_port *ap = qc->ap;
3692 3718
3693 if (ata_should_dma_map(qc)) { 3719 if (ata_should_dma_map(qc)) {
3694 if (qc->flags & ATA_QCFLAG_SG) { 3720 if (qc->flags & ATA_QCFLAG_SG) {
3695 if (ata_sg_setup(qc)) 3721 if (ata_sg_setup(qc))
3696 goto err_out; 3722 goto sg_err;
3697 } else if (qc->flags & ATA_QCFLAG_SINGLE) { 3723 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
3698 if (ata_sg_setup_one(qc)) 3724 if (ata_sg_setup_one(qc))
3699 goto err_out; 3725 goto sg_err;
3700 } 3726 }
3701 } else { 3727 } else {
3702 qc->flags &= ~ATA_QCFLAG_DMAMAP; 3728 qc->flags &= ~ATA_QCFLAG_DMAMAP;
@@ -3709,8 +3735,9 @@ int ata_qc_issue(struct ata_queued_cmd *qc)
3709 3735
3710 return ap->ops->qc_issue(qc); 3736 return ap->ops->qc_issue(qc);
3711 3737
3712err_out: 3738sg_err:
3713 return -1; 3739 qc->flags &= ~ATA_QCFLAG_DMAMAP;
3740 return AC_ERR_SYSTEM;
3714} 3741}
3715 3742
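Since ata_qc_issue() now hands back an AC_ERR_* mask rather than a negative errno, issuing callers complete the qc themselves on failure instead of jumping to a free label. The pattern, as already visible in the ata_exec_internal hunk above:

	qc->err_mask = ata_qc_issue(qc);
	if (qc->err_mask)
		ata_qc_complete(qc);	/* runs ->complete_fn with the error set */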
3716 3743
@@ -3729,10 +3756,10 @@ err_out:
3729 * spin_lock_irqsave(host_set lock) 3756 * spin_lock_irqsave(host_set lock)
3730 * 3757 *
3731 * RETURNS: 3758 * RETURNS:
3732 * Zero on success, negative on error. 3759 * Zero on success, AC_ERR_* mask on failure
3733 */ 3760 */
3734 3761
3735int ata_qc_issue_prot(struct ata_queued_cmd *qc) 3762unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
3736{ 3763{
3737 struct ata_port *ap = qc->ap; 3764 struct ata_port *ap = qc->ap;
3738 3765
@@ -3753,31 +3780,31 @@ int ata_qc_issue_prot(struct ata_queued_cmd *qc)
3753 ata_qc_set_polling(qc); 3780 ata_qc_set_polling(qc);
3754 ata_tf_to_host(ap, &qc->tf); 3781 ata_tf_to_host(ap, &qc->tf);
3755 ap->hsm_task_state = HSM_ST; 3782 ap->hsm_task_state = HSM_ST;
3756 queue_work(ata_wq, &ap->pio_task); 3783 ata_queue_pio_task(ap);
3757 break; 3784 break;
3758 3785
3759 case ATA_PROT_ATAPI: 3786 case ATA_PROT_ATAPI:
3760 ata_qc_set_polling(qc); 3787 ata_qc_set_polling(qc);
3761 ata_tf_to_host(ap, &qc->tf); 3788 ata_tf_to_host(ap, &qc->tf);
3762 queue_work(ata_wq, &ap->packet_task); 3789 ata_queue_packet_task(ap);
3763 break; 3790 break;
3764 3791
3765 case ATA_PROT_ATAPI_NODATA: 3792 case ATA_PROT_ATAPI_NODATA:
3766 ap->flags |= ATA_FLAG_NOINTR; 3793 ap->flags |= ATA_FLAG_NOINTR;
3767 ata_tf_to_host(ap, &qc->tf); 3794 ata_tf_to_host(ap, &qc->tf);
3768 queue_work(ata_wq, &ap->packet_task); 3795 ata_queue_packet_task(ap);
3769 break; 3796 break;
3770 3797
3771 case ATA_PROT_ATAPI_DMA: 3798 case ATA_PROT_ATAPI_DMA:
3772 ap->flags |= ATA_FLAG_NOINTR; 3799 ap->flags |= ATA_FLAG_NOINTR;
3773 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */ 3800 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
3774 ap->ops->bmdma_setup(qc); /* set up bmdma */ 3801 ap->ops->bmdma_setup(qc); /* set up bmdma */
3775 queue_work(ata_wq, &ap->packet_task); 3802 ata_queue_packet_task(ap);
3776 break; 3803 break;
3777 3804
3778 default: 3805 default:
3779 WARN_ON(1); 3806 WARN_ON(1);
3780 return -1; 3807 return AC_ERR_SYSTEM;
3781 } 3808 }
3782 3809
3783 return 0; 3810 return 0;
@@ -4161,26 +4188,26 @@ static void atapi_packet_task(void *_data)
4161 u8 status; 4188 u8 status;
4162 4189
4163 qc = ata_qc_from_tag(ap, ap->active_tag); 4190 qc = ata_qc_from_tag(ap, ap->active_tag);
4164 assert(qc != NULL); 4191 WARN_ON(qc == NULL);
4165 assert(qc->flags & ATA_QCFLAG_ACTIVE); 4192 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
4166 4193
4167 /* sleep-wait for BSY to clear */ 4194 /* sleep-wait for BSY to clear */
4168 DPRINTK("busy wait\n"); 4195 DPRINTK("busy wait\n");
4169 if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB)) { 4196 if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB)) {
4170 qc->err_mask |= AC_ERR_ATA_BUS; 4197 qc->err_mask |= AC_ERR_TIMEOUT;
4171 goto err_out; 4198 goto err_out;
4172 } 4199 }
4173 4200
4174 /* make sure DRQ is set */ 4201 /* make sure DRQ is set */
4175 status = ata_chk_status(ap); 4202 status = ata_chk_status(ap);
4176 if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ) { 4203 if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ) {
4177 qc->err_mask |= AC_ERR_ATA_BUS; 4204 qc->err_mask |= AC_ERR_HSM;
4178 goto err_out; 4205 goto err_out;
4179 } 4206 }
4180 4207
4181 /* send SCSI cdb */ 4208 /* send SCSI cdb */
4182 DPRINTK("send cdb\n"); 4209 DPRINTK("send cdb\n");
4183 assert(ap->cdb_len >= 12); 4210 WARN_ON(qc->dev->cdb_len < 12);
4184 4211
4185 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA || 4212 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA ||
4186 qc->tf.protocol == ATA_PROT_ATAPI_NODATA) { 4213 qc->tf.protocol == ATA_PROT_ATAPI_NODATA) {
@@ -4194,16 +4221,16 @@ static void atapi_packet_task(void *_data)
4194 */ 4221 */
4195 spin_lock_irqsave(&ap->host_set->lock, flags); 4222 spin_lock_irqsave(&ap->host_set->lock, flags);
4196 ap->flags &= ~ATA_FLAG_NOINTR; 4223 ap->flags &= ~ATA_FLAG_NOINTR;
4197 ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1); 4224 ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);
4198 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA) 4225 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA)
4199 ap->ops->bmdma_start(qc); /* initiate bmdma */ 4226 ap->ops->bmdma_start(qc); /* initiate bmdma */
4200 spin_unlock_irqrestore(&ap->host_set->lock, flags); 4227 spin_unlock_irqrestore(&ap->host_set->lock, flags);
4201 } else { 4228 } else {
4202 ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1); 4229 ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);
4203 4230
4204 /* PIO commands are handled by polling */ 4231 /* PIO commands are handled by polling */
4205 ap->hsm_task_state = HSM_ST; 4232 ap->hsm_task_state = HSM_ST;
4206 queue_work(ata_wq, &ap->pio_task); 4233 ata_queue_pio_task(ap);
4207 } 4234 }
4208 4235
4209 return; 4236 return;
@@ -4213,19 +4240,6 @@ err_out:
4213} 4240}
4214 4241
4215 4242
4216/**
4217 * ata_port_start - Set port up for dma.
4218 * @ap: Port to initialize
4219 *
4220 * Called just after data structures for each port are
4221 * initialized. Allocates space for PRD table.
4222 *
4223 * May be used as the port_start() entry in ata_port_operations.
4224 *
4225 * LOCKING:
4226 * Inherited from caller.
4227 */
4228
4229/* 4243/*
4230 * Execute a 'simple' command, that only consists of the opcode 'cmd' itself, 4244 * Execute a 'simple' command, that only consists of the opcode 'cmd' itself,
4231 * without filling any other registers 4245 * without filling any other registers
@@ -4277,6 +4291,8 @@ static int ata_start_drive(struct ata_port *ap, struct ata_device *dev)
4277 4291
4278/** 4292/**
4279 * ata_device_resume - wake up a previously suspended device 4293
4294 * @ap: port the device is connected to
4295 * @dev: the device to resume
4280 * 4296 *
4281 * Kick the drive back into action, by sending it an idle immediate 4297 * Kick the drive back into action, by sending it an idle immediate
4282 * command and making sure its transfer mode matches between drive 4298 * command and making sure its transfer mode matches between drive
@@ -4299,10 +4315,11 @@ int ata_device_resume(struct ata_port *ap, struct ata_device *dev)
4299 4315
4300/** 4316/**
4301 * ata_device_suspend - prepare a device for suspend 4317 * ata_device_suspend - prepare a device for suspend
4318 * @ap: port the device is connected to
4319 * @dev: the device to suspend
4302 * 4320 *
4303 * Flush the cache on the drive, if appropriate, then issue a 4321 * Flush the cache on the drive, if appropriate, then issue a
4304 * standbynow command. 4322 * standbynow command.
4305 *
4306 */ 4323 */
4307int ata_device_suspend(struct ata_port *ap, struct ata_device *dev) 4324int ata_device_suspend(struct ata_port *ap, struct ata_device *dev)
4308{ 4325{
@@ -4316,6 +4333,19 @@ int ata_device_suspend(struct ata_port *ap, struct ata_device *dev)
4316 return 0; 4333 return 0;
4317} 4334}
4318 4335
4336/**
4337 * ata_port_start - Set port up for dma.
4338 * @ap: Port to initialize
4339 *
4340 * Called just after data structures for each port are
4341 * initialized. Allocates space for PRD table.
4342 *
4343 * May be used as the port_start() entry in ata_port_operations.
4344 *
4345 * LOCKING:
4346 * Inherited from caller.
4347 */
4348
4319int ata_port_start (struct ata_port *ap) 4349int ata_port_start (struct ata_port *ap)
4320{ 4350{
4321 struct device *dev = ap->host_set->dev; 4351 struct device *dev = ap->host_set->dev;
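
As the relocated kernel-doc above notes, ata_port_start() is written so a driver can plug it straight into its ops table. A sketch of that wiring; the ops name is hypothetical and all other hooks are omitted for brevity:

/* Sketch: reuse the stock PRD-table allocator/teardown pair. */
static const struct ata_port_operations my_sata_ops = {
	.port_start	= ata_port_start,	/* allocates the PRD table */
	.port_stop	= ata_port_stop,	/* releases it on teardown */
};
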
@@ -4431,6 +4461,7 @@ static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
4431 4461
4432 INIT_WORK(&ap->packet_task, atapi_packet_task, ap); 4462 INIT_WORK(&ap->packet_task, atapi_packet_task, ap);
4433 INIT_WORK(&ap->pio_task, ata_pio_task, ap); 4463 INIT_WORK(&ap->pio_task, ata_pio_task, ap);
4464 INIT_LIST_HEAD(&ap->eh_done_q);
4434 4465
4435 for (i = 0; i < ATA_MAX_DEVICES; i++) 4466 for (i = 0; i < ATA_MAX_DEVICES; i++)
4436 ap->device[i].devno = i; 4467 ap->device[i].devno = i;
@@ -4572,9 +4603,9 @@ int ata_device_add(const struct ata_probe_ent *ent)
4572 4603
4573 ap = host_set->ports[i]; 4604 ap = host_set->ports[i];
4574 4605
4575 DPRINTK("ata%u: probe begin\n", ap->id); 4606 DPRINTK("ata%u: bus probe begin\n", ap->id);
4576 rc = ata_bus_probe(ap); 4607 rc = ata_bus_probe(ap);
4577 DPRINTK("ata%u: probe end\n", ap->id); 4608 DPRINTK("ata%u: bus probe end\n", ap->id);
4578 4609
4579 if (rc) { 4610 if (rc) {
4580 /* FIXME: do something useful here? 4611 /* FIXME: do something useful here?
@@ -4598,7 +4629,7 @@ int ata_device_add(const struct ata_probe_ent *ent)
4598 } 4629 }
4599 4630
4600 /* probes are done, now scan each port's disk(s) */ 4631 /* probes are done, now scan each port's disk(s) */
4601 DPRINTK("probe begin\n"); 4632 DPRINTK("host probe begin\n");
4602 for (i = 0; i < count; i++) { 4633 for (i = 0; i < count; i++) {
4603 struct ata_port *ap = host_set->ports[i]; 4634 struct ata_port *ap = host_set->ports[i];
4604 4635
@@ -4720,32 +4751,6 @@ void ata_std_ports(struct ata_ioports *ioaddr)
4720 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD; 4751 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
4721} 4752}
4722 4753
4723static struct ata_probe_ent *
4724ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
4725{
4726 struct ata_probe_ent *probe_ent;
4727
4728 probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
4729 if (!probe_ent) {
4730 printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
4731 kobject_name(&(dev->kobj)));
4732 return NULL;
4733 }
4734
4735 INIT_LIST_HEAD(&probe_ent->node);
4736 probe_ent->dev = dev;
4737
4738 probe_ent->sht = port->sht;
4739 probe_ent->host_flags = port->host_flags;
4740 probe_ent->pio_mask = port->pio_mask;
4741 probe_ent->mwdma_mask = port->mwdma_mask;
4742 probe_ent->udma_mask = port->udma_mask;
4743 probe_ent->port_ops = port->port_ops;
4744
4745 return probe_ent;
4746}
4747
4748
4749 4754
4750#ifdef CONFIG_PCI 4755#ifdef CONFIG_PCI
4751 4756
@@ -4757,256 +4762,6 @@ void ata_pci_host_stop (struct ata_host_set *host_set)
4757} 4762}
4758 4763
4759/** 4764/**
4760 * ata_pci_init_native_mode - Initialize native-mode driver
4761 * @pdev: pci device to be initialized
4762 * @port: array[2] of pointers to port info structures.
4763 * @ports: bitmap of ports present
4764 *
4765 * Utility function which allocates and initializes an
4766 * ata_probe_ent structure for a standard dual-port
4767 * PIO-based IDE controller. The returned ata_probe_ent
4768 * structure can be passed to ata_device_add(); it should
4769 * then be freed with kfree().
4770 *
4771 * The caller need only pass the address of the primary port; the
4772 * secondary will be deduced automatically. If the device has
4773 * non-standard secondary port mappings, this function can be
4774 * called twice, once for each interface.
4775 */
4776
4777struct ata_probe_ent *
4778ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int ports)
4779{
4780 struct ata_probe_ent *probe_ent =
4781 ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
4782 int p = 0;
4783
4784 if (!probe_ent)
4785 return NULL;
4786
4787 probe_ent->irq = pdev->irq;
4788 probe_ent->irq_flags = SA_SHIRQ;
4789 probe_ent->private_data = port[0]->private_data;
4790
4791 if (ports & ATA_PORT_PRIMARY) {
4792 probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 0);
4793 probe_ent->port[p].altstatus_addr =
4794 probe_ent->port[p].ctl_addr =
4795 pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS;
4796 probe_ent->port[p].bmdma_addr = pci_resource_start(pdev, 4);
4797 ata_std_ports(&probe_ent->port[p]);
4798 p++;
4799 }
4800
4801 if (ports & ATA_PORT_SECONDARY) {
4802 probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 2);
4803 probe_ent->port[p].altstatus_addr =
4804 probe_ent->port[p].ctl_addr =
4805 pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS;
4806 probe_ent->port[p].bmdma_addr = pci_resource_start(pdev, 4) + 8;
4807 ata_std_ports(&probe_ent->port[p]);
4808 p++;
4809 }
4810
4811 probe_ent->n_ports = p;
4812 return probe_ent;
4813}
4814
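
A sketch of the calling convention the kernel-doc above describes for the helper being removed from libata-core.c here; the caller name is hypothetical and <linux/libata.h> plus <linux/slab.h> are assumed:

/* Hypothetical caller: allocate the probe_ent, register it, free it. */
static int my_attach_native(struct pci_dev *pdev,
			    struct ata_port_info **port_info)
{
	struct ata_probe_ent *probe_ent;

	probe_ent = ata_pci_init_native_mode(pdev, port_info,
				ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
	if (!probe_ent)
		return -ENOMEM;

	ata_device_add(probe_ent);	/* libata copies what it needs */
	kfree(probe_ent);		/* probe_ent itself is now done */
	return 0;
}
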
4815static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev, struct ata_port_info *port, int port_num)
4816{
4817 struct ata_probe_ent *probe_ent;
4818
4819 probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port);
4820 if (!probe_ent)
4821 return NULL;
4822
4823 probe_ent->legacy_mode = 1;
4824 probe_ent->n_ports = 1;
4825 probe_ent->hard_port_no = port_num;
4826 probe_ent->private_data = port->private_data;
4827
4828 switch(port_num)
4829 {
4830 case 0:
4831 probe_ent->irq = 14;
4832 probe_ent->port[0].cmd_addr = 0x1f0;
4833 probe_ent->port[0].altstatus_addr =
4834 probe_ent->port[0].ctl_addr = 0x3f6;
4835 break;
4836 case 1:
4837 probe_ent->irq = 15;
4838 probe_ent->port[0].cmd_addr = 0x170;
4839 probe_ent->port[0].altstatus_addr =
4840 probe_ent->port[0].ctl_addr = 0x376;
4841 break;
4842 }
4843 probe_ent->port[0].bmdma_addr = pci_resource_start(pdev, 4) + 8 * port_num;
4844 ata_std_ports(&probe_ent->port[0]);
4845 return probe_ent;
4846}
4847
4848/**
4849 * ata_pci_init_one - Initialize/register PCI IDE host controller
4850 * @pdev: Controller to be initialized
4851 * @port_info: Information from low-level host driver
4852 * @n_ports: Number of ports attached to host controller
4853 *
4854 * This is a helper function which can be called from a driver's
4855 * xxx_init_one() probe function if the hardware uses traditional
4856 * IDE taskfile registers.
4857 *
4858 * This function calls pci_enable_device(), reserves its register
4859 * regions, sets the dma mask, enables bus master mode, and calls
4860 * ata_device_add().
4861 *
4862 * LOCKING:
4863 * Inherited from PCI layer (may sleep).
4864 *
4865 * RETURNS:
4866 * Zero on success, negative errno-based value on error.
4867 */
4868
4869int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
4870 unsigned int n_ports)
4871{
4872 struct ata_probe_ent *probe_ent = NULL, *probe_ent2 = NULL;
4873 struct ata_port_info *port[2];
4874 u8 tmp8, mask;
4875 unsigned int legacy_mode = 0;
4876 int disable_dev_on_err = 1;
4877 int rc;
4878
4879 DPRINTK("ENTER\n");
4880
4881 port[0] = port_info[0];
4882 if (n_ports > 1)
4883 port[1] = port_info[1];
4884 else
4885 port[1] = port[0];
4886
4887 if ((port[0]->host_flags & ATA_FLAG_NO_LEGACY) == 0
4888 && (pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
4889 /* TODO: What if one channel is in native mode ... */
4890 pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
4891 mask = (1 << 2) | (1 << 0);
4892 if ((tmp8 & mask) != mask)
4893 legacy_mode = (1 << 3);
4894 }
4895
4896 /* FIXME... */
4897 if ((!legacy_mode) && (n_ports > 2)) {
4898 printk(KERN_ERR "ata: BUG: native mode, n_ports > 2\n");
4899 n_ports = 2;
4900 /* For now */
4901 }
4902
4903 /* FIXME: Really for ATA it isn't safe because the device may be
4904 multi-purpose and we want to leave it alone if it was already
4905 enabled. Secondly, for shared use, as Arjan says, we want refcounting.
4906
4907 Checking dev->is_enabled is insufficient, as it is not set at
4908 boot for the primary video device, which is BIOS-enabled.
4909 */
4910
4911 rc = pci_enable_device(pdev);
4912 if (rc)
4913 return rc;
4914
4915 rc = pci_request_regions(pdev, DRV_NAME);
4916 if (rc) {
4917 disable_dev_on_err = 0;
4918 goto err_out;
4919 }
4920
4921 /* FIXME: Should use platform specific mappers for legacy port ranges */
4922 if (legacy_mode) {
4923 if (!request_region(0x1f0, 8, "libata")) {
4924 struct resource *conflict, res;
4925 res.start = 0x1f0;
4926 res.end = 0x1f0 + 8 - 1;
4927 conflict = ____request_resource(&ioport_resource, &res);
4928 if (!strcmp(conflict->name, "libata"))
4929 legacy_mode |= (1 << 0);
4930 else {
4931 disable_dev_on_err = 0;
4932 printk(KERN_WARNING "ata: 0x1f0 IDE port busy\n");
4933 }
4934 } else
4935 legacy_mode |= (1 << 0);
4936
4937 if (!request_region(0x170, 8, "libata")) {
4938 struct resource *conflict, res;
4939 res.start = 0x170;
4940 res.end = 0x170 + 8 - 1;
4941 conflict = ____request_resource(&ioport_resource, &res);
4942 if (!strcmp(conflict->name, "libata"))
4943 legacy_mode |= (1 << 1);
4944 else {
4945 disable_dev_on_err = 0;
4946 printk(KERN_WARNING "ata: 0x170 IDE port busy\n");
4947 }
4948 } else
4949 legacy_mode |= (1 << 1);
4950 }
4951
4952 /* we have legacy mode, but all ports are unavailable */
4953 if (legacy_mode == (1 << 3)) {
4954 rc = -EBUSY;
4955 goto err_out_regions;
4956 }
4957
4958 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
4959 if (rc)
4960 goto err_out_regions;
4961 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
4962 if (rc)
4963 goto err_out_regions;
4964
4965 if (legacy_mode) {
4966 if (legacy_mode & (1 << 0))
4967 probe_ent = ata_pci_init_legacy_port(pdev, port[0], 0);
4968 if (legacy_mode & (1 << 1))
4969 probe_ent2 = ata_pci_init_legacy_port(pdev, port[1], 1);
4970 } else {
4971 if (n_ports == 2)
4972 probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
4973 else
4974 probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY);
4975 }
4976 if (!probe_ent && !probe_ent2) {
4977 rc = -ENOMEM;
4978 goto err_out_regions;
4979 }
4980
4981 pci_set_master(pdev);
4982
4983 /* FIXME: check ata_device_add return */
4984 if (legacy_mode) {
4985 if (legacy_mode & (1 << 0))
4986 ata_device_add(probe_ent);
4987 if (legacy_mode & (1 << 1))
4988 ata_device_add(probe_ent2);
4989 } else
4990 ata_device_add(probe_ent);
4991
4992 kfree(probe_ent);
4993 kfree(probe_ent2);
4994
4995 return 0;
4996
4997err_out_regions:
4998 if (legacy_mode & (1 << 0))
4999 release_region(0x1f0, 8);
5000 if (legacy_mode & (1 << 1))
5001 release_region(0x170, 8);
5002 pci_release_regions(pdev);
5003err_out:
5004 if (disable_dev_on_err)
5005 pci_disable_device(pdev);
5006 return rc;
5007}
5008
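
For context, a sketch of how a low-level driver sits on top of ata_pci_init_one(); my_sht, my_ops, and the flag/mask values are illustrative only, not taken from any real driver:

/* Hypothetical LLD probe routine built on the helper above. */
extern struct scsi_host_template my_sht;	/* assumed defined elsewhere */
extern const struct ata_port_operations my_ops;	/* likewise */

static struct ata_port_info my_port_info = {
	.sht		= &my_sht,
	.host_flags	= ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
	.pio_mask	= 0x1f,		/* pio0-4 */
	.udma_mask	= 0x7f,		/* udma0-6 */
	.port_ops	= &my_ops,
};

static int my_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct ata_port_info *port_info[2] = { &my_port_info, &my_port_info };

	return ata_pci_init_one(pdev, port_info, 2);
}
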
5009/**
5010 * ata_pci_remove_one - PCI layer callback for device removal 4765 * ata_pci_remove_one - PCI layer callback for device removal
5011 * @pdev: PCI device that was removed 4766 * @pdev: PCI device that was removed
5012 * 4767 *
@@ -5136,7 +4891,7 @@ EXPORT_SYMBOL_GPL(ata_device_add);
5136EXPORT_SYMBOL_GPL(ata_host_set_remove); 4891EXPORT_SYMBOL_GPL(ata_host_set_remove);
5137EXPORT_SYMBOL_GPL(ata_sg_init); 4892EXPORT_SYMBOL_GPL(ata_sg_init);
5138EXPORT_SYMBOL_GPL(ata_sg_init_one); 4893EXPORT_SYMBOL_GPL(ata_sg_init_one);
5139EXPORT_SYMBOL_GPL(ata_qc_complete); 4894EXPORT_SYMBOL_GPL(__ata_qc_complete);
5140EXPORT_SYMBOL_GPL(ata_qc_issue_prot); 4895EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
5141EXPORT_SYMBOL_GPL(ata_eng_timeout); 4896EXPORT_SYMBOL_GPL(ata_eng_timeout);
5142EXPORT_SYMBOL_GPL(ata_tf_load); 4897EXPORT_SYMBOL_GPL(ata_tf_load);
@@ -5162,18 +4917,29 @@ EXPORT_SYMBOL_GPL(ata_port_probe);
5162EXPORT_SYMBOL_GPL(sata_phy_reset); 4917EXPORT_SYMBOL_GPL(sata_phy_reset);
5163EXPORT_SYMBOL_GPL(__sata_phy_reset); 4918EXPORT_SYMBOL_GPL(__sata_phy_reset);
5164EXPORT_SYMBOL_GPL(ata_bus_reset); 4919EXPORT_SYMBOL_GPL(ata_bus_reset);
4920EXPORT_SYMBOL_GPL(ata_std_probeinit);
4921EXPORT_SYMBOL_GPL(ata_std_softreset);
4922EXPORT_SYMBOL_GPL(sata_std_hardreset);
4923EXPORT_SYMBOL_GPL(ata_std_postreset);
4924EXPORT_SYMBOL_GPL(ata_std_probe_reset);
4925EXPORT_SYMBOL_GPL(ata_drive_probe_reset);
5165EXPORT_SYMBOL_GPL(ata_port_disable); 4926EXPORT_SYMBOL_GPL(ata_port_disable);
5166EXPORT_SYMBOL_GPL(ata_ratelimit); 4927EXPORT_SYMBOL_GPL(ata_ratelimit);
4928EXPORT_SYMBOL_GPL(ata_busy_sleep);
5167EXPORT_SYMBOL_GPL(ata_scsi_ioctl); 4929EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
5168EXPORT_SYMBOL_GPL(ata_scsi_queuecmd); 4930EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
4931EXPORT_SYMBOL_GPL(ata_scsi_timed_out);
5169EXPORT_SYMBOL_GPL(ata_scsi_error); 4932EXPORT_SYMBOL_GPL(ata_scsi_error);
5170EXPORT_SYMBOL_GPL(ata_scsi_slave_config); 4933EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
5171EXPORT_SYMBOL_GPL(ata_scsi_release); 4934EXPORT_SYMBOL_GPL(ata_scsi_release);
5172EXPORT_SYMBOL_GPL(ata_host_intr); 4935EXPORT_SYMBOL_GPL(ata_host_intr);
5173EXPORT_SYMBOL_GPL(ata_dev_classify); 4936EXPORT_SYMBOL_GPL(ata_dev_classify);
5174EXPORT_SYMBOL_GPL(ata_dev_id_string); 4937EXPORT_SYMBOL_GPL(ata_dev_id_string);
4938EXPORT_SYMBOL_GPL(ata_dev_id_c_string);
5175EXPORT_SYMBOL_GPL(ata_dev_config); 4939EXPORT_SYMBOL_GPL(ata_dev_config);
5176EXPORT_SYMBOL_GPL(ata_scsi_simulate); 4940EXPORT_SYMBOL_GPL(ata_scsi_simulate);
4941EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
4942EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
5177 4943
5178EXPORT_SYMBOL_GPL(ata_pio_need_iordy); 4944EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
5179EXPORT_SYMBOL_GPL(ata_timing_compute); 4945EXPORT_SYMBOL_GPL(ata_timing_compute);
diff --git a/drivers/scsi/libata-scsi.c b/drivers/scsi/libata-scsi.c
index 07b1e7cc61df..86da46502b3e 100644
--- a/drivers/scsi/libata-scsi.c
+++ b/drivers/scsi/libata-scsi.c
@@ -151,7 +151,7 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
151 struct scsi_sense_hdr sshdr; 151 struct scsi_sense_hdr sshdr;
152 enum dma_data_direction data_dir; 152 enum dma_data_direction data_dir;
153 153
154 if (NULL == (void *)arg) 154 if (arg == NULL)
155 return -EINVAL; 155 return -EINVAL;
156 156
157 if (copy_from_user(args, arg, sizeof(args))) 157 if (copy_from_user(args, arg, sizeof(args)))
@@ -201,7 +201,7 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
201 /* Need code to retrieve data from check condition? */ 201 /* Need code to retrieve data from check condition? */
202 202
203 if ((argbuf) 203 if ((argbuf)
204 && copy_to_user((void *)(arg + sizeof(args)), argbuf, argsize)) 204 && copy_to_user(arg + sizeof(args), argbuf, argsize))
205 rc = -EFAULT; 205 rc = -EFAULT;
206error: 206error:
207 if (argbuf) 207 if (argbuf)
@@ -228,7 +228,7 @@ int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg)
228 u8 args[7]; 228 u8 args[7];
229 struct scsi_sense_hdr sshdr; 229 struct scsi_sense_hdr sshdr;
230 230
231 if (NULL == (void *)arg) 231 if (arg == NULL)
232 return -EINVAL; 232 return -EINVAL;
233 233
234 if (copy_from_user(args, arg, sizeof(args))) 234 if (copy_from_user(args, arg, sizeof(args)))
@@ -553,7 +553,7 @@ void ata_gen_ata_desc_sense(struct ata_queued_cmd *qc)
553 /* 553 /*
554 * Read the controller registers. 554 * Read the controller registers.
555 */ 555 */
556 assert(NULL != qc->ap->ops->tf_read); 556 WARN_ON(qc->ap->ops->tf_read == NULL);
557 qc->ap->ops->tf_read(qc->ap, tf); 557 qc->ap->ops->tf_read(qc->ap, tf);
558 558
559 /* 559 /*
@@ -628,7 +628,7 @@ void ata_gen_fixed_sense(struct ata_queued_cmd *qc)
628 /* 628 /*
629 * Read the controller registers. 629 * Read the controller registers.
630 */ 630 */
631 assert(NULL != qc->ap->ops->tf_read); 631 WARN_ON(qc->ap->ops->tf_read == NULL);
632 qc->ap->ops->tf_read(qc->ap, tf); 632 qc->ap->ops->tf_read(qc->ap, tf);
633 633
634 /* 634 /*
@@ -684,23 +684,23 @@ int ata_scsi_slave_config(struct scsi_device *sdev)
684 if (sdev->id < ATA_MAX_DEVICES) { 684 if (sdev->id < ATA_MAX_DEVICES) {
685 struct ata_port *ap; 685 struct ata_port *ap;
686 struct ata_device *dev; 686 struct ata_device *dev;
687 unsigned int max_sectors;
687 688
688 ap = (struct ata_port *) &sdev->host->hostdata[0]; 689 ap = (struct ata_port *) &sdev->host->hostdata[0];
689 dev = &ap->device[sdev->id]; 690 dev = &ap->device[sdev->id];
690 691
691 /* TODO: 1024 is an arbitrary number, not the 692 /* TODO: 2048 is an arbitrary number, not the
692 * hardware maximum. This should be increased to 693 * hardware maximum. This should be increased to
693 * 65534 when Jens Axboe's patch for dynamically 694 * 65534 when Jens Axboe's patch for dynamically
694 * determining max_sectors is merged. 695 * determining max_sectors is merged.
695 */ 696 */
696 if ((dev->flags & ATA_DFLAG_LBA48) && 697 max_sectors = ATA_MAX_SECTORS;
697 ((dev->flags & ATA_DFLAG_LOCK_SECTORS) == 0)) { 698 if (dev->flags & ATA_DFLAG_LBA48)
698 /* 699 max_sectors = 2048;
699 * do not overwrite sdev->host->max_sectors, since 700 if (dev->max_sectors)
700 * other drives on this host may not support LBA48 701 max_sectors = dev->max_sectors;
701 */ 702
702 blk_queue_max_sectors(sdev->request_queue, 2048); 703 blk_queue_max_sectors(sdev->request_queue, max_sectors);
703 }
704 704
705 /* 705 /*
706 * SATA DMA transfers must be multiples of 4 byte, so 706 * SATA DMA transfers must be multiples of 4 byte, so
@@ -717,6 +717,47 @@ int ata_scsi_slave_config(struct scsi_device *sdev)
717} 717}
718 718
719/** 719/**
720 * ata_scsi_timed_out - SCSI layer time out callback
721 * @cmd: timed out SCSI command
722 *
723 * Handles SCSI layer timeout. We race with normal completion of
724 * the qc for @cmd. If the qc is already gone, we lose and let
725 * the scsi command finish (EH_HANDLED). Otherwise, the qc has
726 * timed out and EH should be invoked. Prevent ata_qc_complete()
727 * from finishing it by setting EH_SCHEDULED and return
728 * EH_NOT_HANDLED.
729 *
730 * LOCKING:
731 * Called from timer context
732 *
733 * RETURNS:
734 * EH_HANDLED or EH_NOT_HANDLED
735 */
736enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
737{
738 struct Scsi_Host *host = cmd->device->host;
739 struct ata_port *ap = (struct ata_port *) &host->hostdata[0];
740 unsigned long flags;
741 struct ata_queued_cmd *qc;
742 enum scsi_eh_timer_return ret = EH_HANDLED;
743
744 DPRINTK("ENTER\n");
745
746 spin_lock_irqsave(&ap->host_set->lock, flags);
747 qc = ata_qc_from_tag(ap, ap->active_tag);
748 if (qc) {
749 WARN_ON(qc->scsicmd != cmd);
750 qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
751 qc->err_mask |= AC_ERR_TIMEOUT;
752 ret = EH_NOT_HANDLED;
753 }
754 spin_unlock_irqrestore(&ap->host_set->lock, flags);
755
756 DPRINTK("EXIT, ret=%d\n", ret);
757 return ret;
758}
759
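
Each driver hunk later in this patch (pdc_adma, sata_mv, sata_nv, ...) wires the new callback into its scsi_host_template next to the existing EH entry. A sketch of the recurring fragment; the template name and omitted fields are hypothetical:

/* Host template adopting the new SCSI-layer timeout hook. */
static struct scsi_host_template my_sht = {
	.module			= THIS_MODULE,
	.name			= "my_sata",
	.queuecommand		= ata_scsi_queuecmd,
	.eh_timed_out		= ata_scsi_timed_out,	/* added by this patch */
	.eh_strategy_handler	= ata_scsi_error,
};
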
760/**
720 * ata_scsi_error - SCSI layer error handler callback 761 * ata_scsi_error - SCSI layer error handler callback
721 * @host: SCSI host on which error occurred 762 * @host: SCSI host on which error occurred
722 * 763 *
@@ -732,23 +773,82 @@ int ata_scsi_slave_config(struct scsi_device *sdev)
732int ata_scsi_error(struct Scsi_Host *host) 773int ata_scsi_error(struct Scsi_Host *host)
733{ 774{
734 struct ata_port *ap; 775 struct ata_port *ap;
776 unsigned long flags;
735 777
736 DPRINTK("ENTER\n"); 778 DPRINTK("ENTER\n");
737 779
738 ap = (struct ata_port *) &host->hostdata[0]; 780 ap = (struct ata_port *) &host->hostdata[0];
781
782 spin_lock_irqsave(&ap->host_set->lock, flags);
783 WARN_ON(ap->flags & ATA_FLAG_IN_EH);
784 ap->flags |= ATA_FLAG_IN_EH;
785 WARN_ON(ata_qc_from_tag(ap, ap->active_tag) == NULL);
786 spin_unlock_irqrestore(&ap->host_set->lock, flags);
787
739 ap->ops->eng_timeout(ap); 788 ap->ops->eng_timeout(ap);
740 789
741 /* TODO: this is per-command; when queueing is supported 790 WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q));
742 * this code will either change or move to a more 791
743 * appropriate place 792 scsi_eh_flush_done_q(&ap->eh_done_q);
744 */ 793
745 host->host_failed--; 794 spin_lock_irqsave(&ap->host_set->lock, flags);
746 INIT_LIST_HEAD(&host->eh_cmd_q); 795 ap->flags &= ~ATA_FLAG_IN_EH;
796 spin_unlock_irqrestore(&ap->host_set->lock, flags);
747 797
748 DPRINTK("EXIT\n"); 798 DPRINTK("EXIT\n");
749 return 0; 799 return 0;
750} 800}
751 801
802static void ata_eh_scsidone(struct scsi_cmnd *scmd)
803{
804 /* nada */
805}
806
807static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
808{
809 struct ata_port *ap = qc->ap;
810 struct scsi_cmnd *scmd = qc->scsicmd;
811 unsigned long flags;
812
813 spin_lock_irqsave(&ap->host_set->lock, flags);
814 qc->scsidone = ata_eh_scsidone;
815 __ata_qc_complete(qc);
816 WARN_ON(ata_tag_valid(qc->tag));
817 spin_unlock_irqrestore(&ap->host_set->lock, flags);
818
819 scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
820}
821
822/**
823 * ata_eh_qc_complete - Complete an active ATA command from EH
824 * @qc: Command to complete
825 *
826 * Indicate to the mid and upper layers that an ATA command has
827 * completed. To be used from EH.
828 */
829void ata_eh_qc_complete(struct ata_queued_cmd *qc)
830{
831 struct scsi_cmnd *scmd = qc->scsicmd;
832 scmd->retries = scmd->allowed;
833 __ata_eh_qc_complete(qc);
834}
835
836/**
837 * ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
838 * @qc: Command to retry
839 *
840 * Indicate to the mid and upper layers that an ATA command
841 * should be retried. To be used from EH.
842 *
843 * SCSI midlayer limits the number of retries to scmd->allowed.
844 * This function might need to adjust scmd->retries for commands
845 * which get retried due to unrelated NCQ failures.
846 */
847void ata_eh_qc_retry(struct ata_queued_cmd *qc)
848{
849 __ata_eh_qc_complete(qc);
850}
851
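
The intended call pattern from a driver's eng_timeout hook mirrors the sata_mv conversion further down in this patch:

/* Sketch: fail the active qc and complete it through the EH path. */
static void my_eng_timeout(struct ata_port *ap)
{
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);

	/* driver-specific recovery (reset, register dumps) would go here */

	qc->err_mask |= AC_ERR_TIMEOUT;
	ata_eh_qc_complete(qc);	/* finishes the scmd via ap->eh_done_q */
}
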
752/** 852/**
753 * ata_scsi_start_stop_xlat - Translate SCSI START STOP UNIT command 853 * ata_scsi_start_stop_xlat - Translate SCSI START STOP UNIT command
754 * @qc: Storage for translated ATA taskfile 854 * @qc: Storage for translated ATA taskfile
@@ -985,9 +1085,13 @@ static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc, const u8 *sc
985 if (dev->flags & ATA_DFLAG_LBA) { 1085 if (dev->flags & ATA_DFLAG_LBA) {
986 tf->flags |= ATA_TFLAG_LBA; 1086 tf->flags |= ATA_TFLAG_LBA;
987 1087
988 if (dev->flags & ATA_DFLAG_LBA48) { 1088 if (lba_28_ok(block, n_block)) {
989 if (n_block > (64 * 1024)) 1089 /* use LBA28 */
990 goto invalid_fld; 1090 tf->command = ATA_CMD_VERIFY;
1091 tf->device |= (block >> 24) & 0xf;
1092 } else if (lba_48_ok(block, n_block)) {
1093 if (!(dev->flags & ATA_DFLAG_LBA48))
1094 goto out_of_range;
991 1095
992 /* use LBA48 */ 1096 /* use LBA48 */
993 tf->flags |= ATA_TFLAG_LBA48; 1097 tf->flags |= ATA_TFLAG_LBA48;
@@ -998,15 +1102,9 @@ static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc, const u8 *sc
998 tf->hob_lbah = (block >> 40) & 0xff; 1102 tf->hob_lbah = (block >> 40) & 0xff;
999 tf->hob_lbam = (block >> 32) & 0xff; 1103 tf->hob_lbam = (block >> 32) & 0xff;
1000 tf->hob_lbal = (block >> 24) & 0xff; 1104 tf->hob_lbal = (block >> 24) & 0xff;
1001 } else { 1105 } else
1002 if (n_block > 256) 1106 /* request too large even for LBA48 */
1003 goto invalid_fld; 1107 goto out_of_range;
1004
1005 /* use LBA28 */
1006 tf->command = ATA_CMD_VERIFY;
1007
1008 tf->device |= (block >> 24) & 0xf;
1009 }
1010 1108
1011 tf->nsect = n_block & 0xff; 1109 tf->nsect = n_block & 0xff;
1012 1110
@@ -1019,8 +1117,8 @@ static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc, const u8 *sc
1019 /* CHS */ 1117 /* CHS */
1020 u32 sect, head, cyl, track; 1118 u32 sect, head, cyl, track;
1021 1119
1022 if (n_block > 256) 1120 if (!lba_28_ok(block, n_block))
1023 goto invalid_fld; 1121 goto out_of_range;
1024 1122
1025 /* Convert LBA to CHS */ 1123 /* Convert LBA to CHS */
1026 track = (u32)block / dev->sectors; 1124 track = (u32)block / dev->sectors;
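
The lba_28_ok()/lba_48_ok() helpers used above come from the ATA headers; in rough terms they check that the last requested sector is still addressable and that the transfer length fits the command's sector-count field. A sketch of those checks (exact boundary handling in the real headers may differ by a sector):

static inline int sketch_lba_28_ok(u64 block, u32 n_block)
{
	/* 28-bit LBA: 2^28 addressable sectors, count field carries 1-256 */
	return ((block + n_block) <= ((u64)1 << 28)) && (n_block <= 256);
}

static inline int sketch_lba_48_ok(u64 block, u32 n_block)
{
	/* 48-bit LBA: 2^48 sectors, 16-bit count field carries 1-65536 */
	return ((block + n_block) <= ((u64)1 << 48)) && (n_block <= 65536);
}
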
@@ -1139,9 +1237,11 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, const u8 *scsicm
1139 if (dev->flags & ATA_DFLAG_LBA) { 1237 if (dev->flags & ATA_DFLAG_LBA) {
1140 tf->flags |= ATA_TFLAG_LBA; 1238 tf->flags |= ATA_TFLAG_LBA;
1141 1239
1142 if (dev->flags & ATA_DFLAG_LBA48) { 1240 if (lba_28_ok(block, n_block)) {
1143 /* The request -may- be too large for LBA48. */ 1241 /* use LBA28 */
1144 if ((block >> 48) || (n_block > 65536)) 1242 tf->device |= (block >> 24) & 0xf;
1243 } else if (lba_48_ok(block, n_block)) {
1244 if (!(dev->flags & ATA_DFLAG_LBA48))
1145 goto out_of_range; 1245 goto out_of_range;
1146 1246
1147 /* use LBA48 */ 1247 /* use LBA48 */
@@ -1152,15 +1252,9 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, const u8 *scsicm
1152 tf->hob_lbah = (block >> 40) & 0xff; 1252 tf->hob_lbah = (block >> 40) & 0xff;
1153 tf->hob_lbam = (block >> 32) & 0xff; 1253 tf->hob_lbam = (block >> 32) & 0xff;
1154 tf->hob_lbal = (block >> 24) & 0xff; 1254 tf->hob_lbal = (block >> 24) & 0xff;
1155 } else { 1255 } else
1156 /* use LBA28 */ 1256 /* request too large even for LBA48 */
1157 1257 goto out_of_range;
1158 /* The request -may- be too large for LBA28. */
1159 if ((block >> 28) || (n_block > 256))
1160 goto out_of_range;
1161
1162 tf->device |= (block >> 24) & 0xf;
1163 }
1164 1258
1165 if (unlikely(ata_rwcmd_protocol(qc) < 0)) 1259 if (unlikely(ata_rwcmd_protocol(qc) < 0))
1166 goto invalid_fld; 1260 goto invalid_fld;
@@ -1178,7 +1272,7 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, const u8 *scsicm
1178 u32 sect, head, cyl, track; 1272 u32 sect, head, cyl, track;
1179 1273
1180 /* The request -may- be too large for CHS addressing. */ 1274 /* The request -may- be too large for CHS addressing. */
1181 if ((block >> 28) || (n_block > 256)) 1275 if (!lba_28_ok(block, n_block))
1182 goto out_of_range; 1276 goto out_of_range;
1183 1277
1184 if (unlikely(ata_rwcmd_protocol(qc) < 0)) 1278 if (unlikely(ata_rwcmd_protocol(qc) < 0))
@@ -1225,7 +1319,7 @@ nothing_to_do:
1225 return 1; 1319 return 1;
1226} 1320}
1227 1321
1228static int ata_scsi_qc_complete(struct ata_queued_cmd *qc) 1322static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
1229{ 1323{
1230 struct scsi_cmnd *cmd = qc->scsicmd; 1324 struct scsi_cmnd *cmd = qc->scsicmd;
1231 u8 *cdb = cmd->cmnd; 1325 u8 *cdb = cmd->cmnd;
@@ -1262,7 +1356,7 @@ static int ata_scsi_qc_complete(struct ata_queued_cmd *qc)
1262 1356
1263 qc->scsidone(cmd); 1357 qc->scsidone(cmd);
1264 1358
1265 return 0; 1359 ata_qc_free(qc);
1266} 1360}
1267 1361
1268/** 1362/**
@@ -1328,8 +1422,9 @@ static void ata_scsi_translate(struct ata_port *ap, struct ata_device *dev,
1328 goto early_finish; 1422 goto early_finish;
1329 1423
1330 /* select device, send command to hardware */ 1424 /* select device, send command to hardware */
1331 if (ata_qc_issue(qc)) 1425 qc->err_mask = ata_qc_issue(qc);
1332 goto err_did; 1426 if (qc->err_mask)
1427 ata_qc_complete(qc);
1333 1428
1334 VPRINTK("EXIT\n"); 1429 VPRINTK("EXIT\n");
1335 return; 1430 return;
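
With ata_qc_issue() now returning an err_mask (see the libata.h hunk below) rather than a plain int, every issue site collapses to the same shape; a sketch:

/* Issue a prepared qc; a nonzero err_mask drives immediate completion. */
static void issue_or_complete(struct ata_queued_cmd *qc)
{
	qc->err_mask = ata_qc_issue(qc);	/* zero means "in flight" */
	if (qc->err_mask)
		ata_qc_complete(qc);
}
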
@@ -1711,15 +1806,12 @@ static int ata_dev_supports_fua(u16 *id)
1711 if (!ata_id_has_fua(id)) 1806 if (!ata_id_has_fua(id))
1712 return 0; 1807 return 0;
1713 1808
1714 model[40] = '\0'; 1809 ata_dev_id_c_string(id, model, ATA_ID_PROD_OFS, sizeof(model));
1715 fw[8] = '\0'; 1810 ata_dev_id_c_string(id, fw, ATA_ID_FW_REV_OFS, sizeof(fw));
1716
1717 ata_dev_id_string(id, model, ATA_ID_PROD_OFS, sizeof(model) - 1);
1718 ata_dev_id_string(id, fw, ATA_ID_FW_REV_OFS, sizeof(fw) - 1);
1719 1811
1720 if (strncmp(model, "Maxtor", 6)) 1812 if (strcmp(model, "Maxtor"))
1721 return 1; 1813 return 1;
1722 if (strncmp(fw, "BANC1G10", 8)) 1814 if (strcmp(fw, "BANC1G10"))
1723 return 1; 1815 return 1;
1724 1816
1725 return 0; /* blacklisted */ 1817 return 0; /* blacklisted */
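
The strncmp()-to-strcmp() switch works because ata_dev_id_c_string() (exported at the end of the libata-core hunk) hands back a NUL-terminated string with the ATA-mandated space padding stripped. A sketch of those semantics, assuming ata_dev_id_string() performs the raw byte-swapped copy:

/* Sketch: build a trimmed C string from an IDENTIFY DEVICE field. */
static void sketch_id_c_string(u16 *id, unsigned char *buf,
			       unsigned int ofs, unsigned int len)
{
	unsigned char *p;

	ata_dev_id_string(id, buf, ofs, len - 1);
	buf[len - 1] = '\0';

	p = buf + strlen((char *)buf);
	while (p > buf && p[-1] == ' ')	/* strip trailing blanks */
		*--p = '\0';
}
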
@@ -2013,7 +2105,7 @@ void ata_scsi_badcmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *), u8
2013 done(cmd); 2105 done(cmd);
2014} 2106}
2015 2107
2016static int atapi_sense_complete(struct ata_queued_cmd *qc) 2108static void atapi_sense_complete(struct ata_queued_cmd *qc)
2017{ 2109{
2018 if (qc->err_mask && ((qc->err_mask & AC_ERR_DEV) == 0)) 2110 if (qc->err_mask && ((qc->err_mask & AC_ERR_DEV) == 0))
2019 /* FIXME: not quite right; we don't want the 2111 /* FIXME: not quite right; we don't want the
@@ -2024,7 +2116,7 @@ static int atapi_sense_complete(struct ata_queued_cmd *qc)
2024 ata_gen_ata_desc_sense(qc); 2116 ata_gen_ata_desc_sense(qc);
2025 2117
2026 qc->scsidone(qc->scsicmd); 2118 qc->scsidone(qc->scsicmd);
2027 return 0; 2119 ata_qc_free(qc);
2028} 2120}
2029 2121
2030/* is it pointless to prefer PIO for "safety reasons"? */ 2122/* is it pointless to prefer PIO for "safety reasons"? */
@@ -2054,7 +2146,7 @@ static void atapi_request_sense(struct ata_queued_cmd *qc)
2054 ata_sg_init_one(qc, cmd->sense_buffer, sizeof(cmd->sense_buffer)); 2146 ata_sg_init_one(qc, cmd->sense_buffer, sizeof(cmd->sense_buffer));
2055 qc->dma_dir = DMA_FROM_DEVICE; 2147 qc->dma_dir = DMA_FROM_DEVICE;
2056 2148
2057 memset(&qc->cdb, 0, ap->cdb_len); 2149 memset(&qc->cdb, 0, qc->dev->cdb_len);
2058 qc->cdb[0] = REQUEST_SENSE; 2150 qc->cdb[0] = REQUEST_SENSE;
2059 qc->cdb[4] = SCSI_SENSE_BUFFERSIZE; 2151 qc->cdb[4] = SCSI_SENSE_BUFFERSIZE;
2060 2152
@@ -2073,15 +2165,14 @@ static void atapi_request_sense(struct ata_queued_cmd *qc)
2073 2165
2074 qc->complete_fn = atapi_sense_complete; 2166 qc->complete_fn = atapi_sense_complete;
2075 2167
2076 if (ata_qc_issue(qc)) { 2168 qc->err_mask = ata_qc_issue(qc);
2077 qc->err_mask |= AC_ERR_OTHER; 2169 if (qc->err_mask)
2078 ata_qc_complete(qc); 2170 ata_qc_complete(qc);
2079 }
2080 2171
2081 DPRINTK("EXIT\n"); 2172 DPRINTK("EXIT\n");
2082} 2173}
2083 2174
2084static int atapi_qc_complete(struct ata_queued_cmd *qc) 2175static void atapi_qc_complete(struct ata_queued_cmd *qc)
2085{ 2176{
2086 struct scsi_cmnd *cmd = qc->scsicmd; 2177 struct scsi_cmnd *cmd = qc->scsicmd;
2087 unsigned int err_mask = qc->err_mask; 2178 unsigned int err_mask = qc->err_mask;
@@ -2091,7 +2182,7 @@ static int atapi_qc_complete(struct ata_queued_cmd *qc)
2091 if (unlikely(err_mask & AC_ERR_DEV)) { 2182 if (unlikely(err_mask & AC_ERR_DEV)) {
2092 cmd->result = SAM_STAT_CHECK_CONDITION; 2183 cmd->result = SAM_STAT_CHECK_CONDITION;
2093 atapi_request_sense(qc); 2184 atapi_request_sense(qc);
2094 return 1; 2185 return;
2095 } 2186 }
2096 2187
2097 else if (unlikely(err_mask)) 2188 else if (unlikely(err_mask))
@@ -2131,7 +2222,7 @@ static int atapi_qc_complete(struct ata_queued_cmd *qc)
2131 } 2222 }
2132 2223
2133 qc->scsidone(cmd); 2224 qc->scsidone(cmd);
2134 return 0; 2225 ata_qc_free(qc);
2135} 2226}
2136/** 2227/**
2137 * atapi_xlat - Initialize PACKET taskfile 2228 * atapi_xlat - Initialize PACKET taskfile
@@ -2157,7 +2248,7 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc, const u8 *scsicmd)
2157 if (ata_check_atapi_dma(qc)) 2248 if (ata_check_atapi_dma(qc))
2158 using_pio = 1; 2249 using_pio = 1;
2159 2250
2160 memcpy(&qc->cdb, scsicmd, qc->ap->cdb_len); 2251 memcpy(&qc->cdb, scsicmd, dev->cdb_len);
2161 2252
2162 qc->complete_fn = atapi_qc_complete; 2253 qc->complete_fn = atapi_qc_complete;
2163 2254
@@ -2517,7 +2608,8 @@ out_unlock:
2517 2608
2518/** 2609/**
2519 * ata_scsi_simulate - simulate SCSI command on ATA device 2610 * ata_scsi_simulate - simulate SCSI command on ATA device
2520 * @id: current IDENTIFY data for target device. 2611 * @ap: port the device is connected to
2612 * @dev: the target device
2521 * @cmd: SCSI command being sent to device. 2613 * @cmd: SCSI command being sent to device.
2522 * @done: SCSI command completion function. 2614 * @done: SCSI command completion function.
2523 * 2615 *
diff --git a/drivers/scsi/libata.h b/drivers/scsi/libata.h
index e03ce48b7b4b..9d76923a2253 100644
--- a/drivers/scsi/libata.h
+++ b/drivers/scsi/libata.h
@@ -45,7 +45,7 @@ extern struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
45 struct ata_device *dev); 45 struct ata_device *dev);
46extern int ata_rwcmd_protocol(struct ata_queued_cmd *qc); 46extern int ata_rwcmd_protocol(struct ata_queued_cmd *qc);
47extern void ata_qc_free(struct ata_queued_cmd *qc); 47extern void ata_qc_free(struct ata_queued_cmd *qc);
48extern int ata_qc_issue(struct ata_queued_cmd *qc); 48extern unsigned int ata_qc_issue(struct ata_queued_cmd *qc);
49extern int ata_check_atapi_dma(struct ata_queued_cmd *qc); 49extern int ata_check_atapi_dma(struct ata_queued_cmd *qc);
50extern void ata_dev_select(struct ata_port *ap, unsigned int device, 50extern void ata_dev_select(struct ata_port *ap, unsigned int device,
51 unsigned int wait, unsigned int can_sleep); 51 unsigned int wait, unsigned int can_sleep);
diff --git a/drivers/scsi/pdc_adma.c b/drivers/scsi/pdc_adma.c
index e8df0c9ec1e6..5f33cc932e70 100644
--- a/drivers/scsi/pdc_adma.c
+++ b/drivers/scsi/pdc_adma.c
@@ -131,7 +131,7 @@ static void adma_host_stop(struct ata_host_set *host_set);
131static void adma_port_stop(struct ata_port *ap); 131static void adma_port_stop(struct ata_port *ap);
132static void adma_phy_reset(struct ata_port *ap); 132static void adma_phy_reset(struct ata_port *ap);
133static void adma_qc_prep(struct ata_queued_cmd *qc); 133static void adma_qc_prep(struct ata_queued_cmd *qc);
134static int adma_qc_issue(struct ata_queued_cmd *qc); 134static unsigned int adma_qc_issue(struct ata_queued_cmd *qc);
135static int adma_check_atapi_dma(struct ata_queued_cmd *qc); 135static int adma_check_atapi_dma(struct ata_queued_cmd *qc);
136static void adma_bmdma_stop(struct ata_queued_cmd *qc); 136static void adma_bmdma_stop(struct ata_queued_cmd *qc);
137static u8 adma_bmdma_status(struct ata_port *ap); 137static u8 adma_bmdma_status(struct ata_port *ap);
@@ -143,11 +143,11 @@ static struct scsi_host_template adma_ata_sht = {
143 .name = DRV_NAME, 143 .name = DRV_NAME,
144 .ioctl = ata_scsi_ioctl, 144 .ioctl = ata_scsi_ioctl,
145 .queuecommand = ata_scsi_queuecmd, 145 .queuecommand = ata_scsi_queuecmd,
146 .eh_timed_out = ata_scsi_timed_out,
146 .eh_strategy_handler = ata_scsi_error, 147 .eh_strategy_handler = ata_scsi_error,
147 .can_queue = ATA_DEF_QUEUE, 148 .can_queue = ATA_DEF_QUEUE,
148 .this_id = ATA_SHT_THIS_ID, 149 .this_id = ATA_SHT_THIS_ID,
149 .sg_tablesize = LIBATA_MAX_PRD, 150 .sg_tablesize = LIBATA_MAX_PRD,
150 .max_sectors = ATA_MAX_SECTORS,
151 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 151 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
152 .emulated = ATA_SHT_EMULATED, 152 .emulated = ATA_SHT_EMULATED,
153 .use_clustering = ENABLE_CLUSTERING, 153 .use_clustering = ENABLE_CLUSTERING,
@@ -419,7 +419,7 @@ static inline void adma_packet_start(struct ata_queued_cmd *qc)
419 writew(aPIOMD4 | aGO, chan + ADMA_CONTROL); 419 writew(aPIOMD4 | aGO, chan + ADMA_CONTROL);
420} 420}
421 421
422static int adma_qc_issue(struct ata_queued_cmd *qc) 422static unsigned int adma_qc_issue(struct ata_queued_cmd *qc)
423{ 423{
424 struct adma_port_priv *pp = qc->ap->private_data; 424 struct adma_port_priv *pp = qc->ap->private_data;
425 425
diff --git a/drivers/scsi/sata_mv.c b/drivers/scsi/sata_mv.c
index 2770005324b4..aceaf56999a5 100644
--- a/drivers/scsi/sata_mv.c
+++ b/drivers/scsi/sata_mv.c
@@ -37,7 +37,7 @@
37#include <asm/io.h> 37#include <asm/io.h>
38 38
39#define DRV_NAME "sata_mv" 39#define DRV_NAME "sata_mv"
40#define DRV_VERSION "0.5" 40#define DRV_VERSION "0.6"
41 41
42enum { 42enum {
43 /* BAR's are enumerated in terms of pci_resource_start() terms */ 43 /* BAR's are enumerated in terms of pci_resource_start() terms */
@@ -228,7 +228,9 @@ enum {
228 MV_HP_ERRATA_50XXB2 = (1 << 2), 228 MV_HP_ERRATA_50XXB2 = (1 << 2),
229 MV_HP_ERRATA_60X1B2 = (1 << 3), 229 MV_HP_ERRATA_60X1B2 = (1 << 3),
230 MV_HP_ERRATA_60X1C0 = (1 << 4), 230 MV_HP_ERRATA_60X1C0 = (1 << 4),
231 MV_HP_50XX = (1 << 5), 231 MV_HP_ERRATA_XX42A0 = (1 << 5),
232 MV_HP_50XX = (1 << 6),
233 MV_HP_GEN_IIE = (1 << 7),
232 234
233 /* Port private flags (pp_flags) */ 235 /* Port private flags (pp_flags) */
234 MV_PP_FLAG_EDMA_EN = (1 << 0), 236 MV_PP_FLAG_EDMA_EN = (1 << 0),
@@ -237,6 +239,9 @@ enum {
237 239
238#define IS_50XX(hpriv) ((hpriv)->hp_flags & MV_HP_50XX) 240#define IS_50XX(hpriv) ((hpriv)->hp_flags & MV_HP_50XX)
239#define IS_60XX(hpriv) (((hpriv)->hp_flags & MV_HP_50XX) == 0) 241#define IS_60XX(hpriv) (((hpriv)->hp_flags & MV_HP_50XX) == 0)
242#define IS_GEN_I(hpriv) IS_50XX(hpriv)
243#define IS_GEN_II(hpriv) IS_60XX(hpriv)
244#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
240 245
241enum { 246enum {
242 /* Our DMA boundary is determined by an ePRD being unable to handle 247 /* Our DMA boundary is determined by an ePRD being unable to handle
@@ -255,6 +260,8 @@ enum chip_type {
255 chip_5080, 260 chip_5080,
256 chip_604x, 261 chip_604x,
257 chip_608x, 262 chip_608x,
263 chip_6042,
264 chip_7042,
258}; 265};
259 266
260/* Command ReQuest Block: 32B */ 267/* Command ReQuest Block: 32B */
@@ -265,6 +272,14 @@ struct mv_crqb {
265 u16 ata_cmd[11]; 272 u16 ata_cmd[11];
266}; 273};
267 274
275struct mv_crqb_iie {
276 u32 addr;
277 u32 addr_hi;
278 u32 flags;
279 u32 len;
280 u32 ata_cmd[4];
281};
282
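
Both CRQB layouts are expected to stay 32 bytes to match the hardware request ring (four u32s plus four command words here, versus two u32s, a u16, and eleven u16 command words in the original mv_crqb). A compile-time check could document that, assuming BUILD_BUG_ON from <linux/kernel.h> and no implicit padding:

static inline void mv_crqb_layout_check(void)
{
	BUILD_BUG_ON(sizeof(struct mv_crqb) != 32);
	BUILD_BUG_ON(sizeof(struct mv_crqb_iie) != 32);
}
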
268/* Command ResPonse Block: 8B */ 283/* Command ResPonse Block: 8B */
269struct mv_crpb { 284struct mv_crpb {
270 u16 id; 285 u16 id;
@@ -328,7 +343,8 @@ static void mv_host_stop(struct ata_host_set *host_set);
328static int mv_port_start(struct ata_port *ap); 343static int mv_port_start(struct ata_port *ap);
329static void mv_port_stop(struct ata_port *ap); 344static void mv_port_stop(struct ata_port *ap);
330static void mv_qc_prep(struct ata_queued_cmd *qc); 345static void mv_qc_prep(struct ata_queued_cmd *qc);
331static int mv_qc_issue(struct ata_queued_cmd *qc); 346static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
347static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
332static irqreturn_t mv_interrupt(int irq, void *dev_instance, 348static irqreturn_t mv_interrupt(int irq, void *dev_instance,
333 struct pt_regs *regs); 349 struct pt_regs *regs);
334static void mv_eng_timeout(struct ata_port *ap); 350static void mv_eng_timeout(struct ata_port *ap);
@@ -362,11 +378,11 @@ static struct scsi_host_template mv_sht = {
362 .name = DRV_NAME, 378 .name = DRV_NAME,
363 .ioctl = ata_scsi_ioctl, 379 .ioctl = ata_scsi_ioctl,
364 .queuecommand = ata_scsi_queuecmd, 380 .queuecommand = ata_scsi_queuecmd,
381 .eh_timed_out = ata_scsi_timed_out,
365 .eh_strategy_handler = ata_scsi_error, 382 .eh_strategy_handler = ata_scsi_error,
366 .can_queue = MV_USE_Q_DEPTH, 383 .can_queue = MV_USE_Q_DEPTH,
367 .this_id = ATA_SHT_THIS_ID, 384 .this_id = ATA_SHT_THIS_ID,
368 .sg_tablesize = MV_MAX_SG_CT / 2, 385 .sg_tablesize = MV_MAX_SG_CT / 2,
369 .max_sectors = ATA_MAX_SECTORS,
370 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 386 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
371 .emulated = ATA_SHT_EMULATED, 387 .emulated = ATA_SHT_EMULATED,
372 .use_clustering = ATA_SHT_USE_CLUSTERING, 388 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -430,6 +446,33 @@ static const struct ata_port_operations mv6_ops = {
430 .host_stop = mv_host_stop, 446 .host_stop = mv_host_stop,
431}; 447};
432 448
449static const struct ata_port_operations mv_iie_ops = {
450 .port_disable = ata_port_disable,
451
452 .tf_load = ata_tf_load,
453 .tf_read = ata_tf_read,
454 .check_status = ata_check_status,
455 .exec_command = ata_exec_command,
456 .dev_select = ata_std_dev_select,
457
458 .phy_reset = mv_phy_reset,
459
460 .qc_prep = mv_qc_prep_iie,
461 .qc_issue = mv_qc_issue,
462
463 .eng_timeout = mv_eng_timeout,
464
465 .irq_handler = mv_interrupt,
466 .irq_clear = mv_irq_clear,
467
468 .scr_read = mv_scr_read,
469 .scr_write = mv_scr_write,
470
471 .port_start = mv_port_start,
472 .port_stop = mv_port_stop,
473 .host_stop = mv_host_stop,
474};
475
433static const struct ata_port_info mv_port_info[] = { 476static const struct ata_port_info mv_port_info[] = {
434 { /* chip_504x */ 477 { /* chip_504x */
435 .sht = &mv_sht, 478 .sht = &mv_sht,
@@ -467,6 +510,21 @@ static const struct ata_port_info mv_port_info[] = {
467 .udma_mask = 0x7f, /* udma0-6 */ 510 .udma_mask = 0x7f, /* udma0-6 */
468 .port_ops = &mv6_ops, 511 .port_ops = &mv6_ops,
469 }, 512 },
513 { /* chip_6042 */
514 .sht = &mv_sht,
515 .host_flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
516 .pio_mask = 0x1f, /* pio0-4 */
517 .udma_mask = 0x7f, /* udma0-6 */
518 .port_ops = &mv_iie_ops,
519 },
520 { /* chip_7042 */
521 .sht = &mv_sht,
522 .host_flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
523 MV_FLAG_DUAL_HC),
524 .pio_mask = 0x1f, /* pio0-4 */
525 .udma_mask = 0x7f, /* udma0-6 */
526 .port_ops = &mv_iie_ops,
527 },
470}; 528};
471 529
472static const struct pci_device_id mv_pci_tbl[] = { 530static const struct pci_device_id mv_pci_tbl[] = {
@@ -477,6 +535,7 @@ static const struct pci_device_id mv_pci_tbl[] = {
477 535
478 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6040), 0, 0, chip_604x}, 536 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6040), 0, 0, chip_604x},
479 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6041), 0, 0, chip_604x}, 537 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6041), 0, 0, chip_604x},
538 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6042), 0, 0, chip_6042},
480 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6080), 0, 0, chip_608x}, 539 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6080), 0, 0, chip_608x},
481 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6081), 0, 0, chip_608x}, 540 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6081), 0, 0, chip_608x},
482 541
@@ -572,8 +631,8 @@ static void mv_irq_clear(struct ata_port *ap)
572 * @base: port base address 631 * @base: port base address
573 * @pp: port private data 632 * @pp: port private data
574 * 633 *
575 * Verify the local cache of the eDMA state is accurate with an 634 * Verify the local cache of the eDMA state is accurate with a
576 * assert. 635 * WARN_ON.
577 * 636 *
578 * LOCKING: 637 * LOCKING:
579 * Inherited from caller. 638 * Inherited from caller.
@@ -584,15 +643,15 @@ static void mv_start_dma(void __iomem *base, struct mv_port_priv *pp)
584 writelfl(EDMA_EN, base + EDMA_CMD_OFS); 643 writelfl(EDMA_EN, base + EDMA_CMD_OFS);
585 pp->pp_flags |= MV_PP_FLAG_EDMA_EN; 644 pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
586 } 645 }
587 assert(EDMA_EN & readl(base + EDMA_CMD_OFS)); 646 WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));
588} 647}
589 648
590/** 649/**
591 * mv_stop_dma - Disable eDMA engine 650 * mv_stop_dma - Disable eDMA engine
592 * @ap: ATA channel to manipulate 651 * @ap: ATA channel to manipulate
593 * 652 *
594 * Verify the local cache of the eDMA state is accurate with an 653 * Verify the local cache of the eDMA state is accurate with a
595 * assert. 654 * WARN_ON.
596 * 655 *
597 * LOCKING: 656 * LOCKING:
598 * Inherited from caller. 657 * Inherited from caller.
@@ -610,7 +669,7 @@ static void mv_stop_dma(struct ata_port *ap)
610 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS); 669 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
611 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; 670 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
612 } else { 671 } else {
613 assert(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS))); 672 WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
614 } 673 }
615 674
616 /* now properly wait for the eDMA to stop */ 675 /* now properly wait for the eDMA to stop */
@@ -773,6 +832,33 @@ static inline void mv_priv_free(struct mv_port_priv *pp, struct device *dev)
773 dma_free_coherent(dev, MV_PORT_PRIV_DMA_SZ, pp->crpb, pp->crpb_dma); 832 dma_free_coherent(dev, MV_PORT_PRIV_DMA_SZ, pp->crpb, pp->crpb_dma);
774} 833}
775 834
835static void mv_edma_cfg(struct mv_host_priv *hpriv, void __iomem *port_mmio)
836{
837 u32 cfg = readl(port_mmio + EDMA_CFG_OFS);
838
839 /* set up non-NCQ EDMA configuration */
840 cfg &= ~0x1f; /* clear queue depth */
841 cfg &= ~EDMA_CFG_NCQ; /* clear NCQ mode */
842 cfg &= ~(1 << 9); /* disable equeue */
843
844 if (IS_GEN_I(hpriv))
845 cfg |= (1 << 8); /* enab config burst size mask */
846
847 else if (IS_GEN_II(hpriv))
848 cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
849
850 else if (IS_GEN_IIE(hpriv)) {
851 cfg |= (1 << 23); /* dis RX PM port mask */
852 cfg &= ~(1 << 16); /* dis FIS-based switching (for now) */
853 cfg &= ~(1 << 19); /* dis 128-entry queue (for now?) */
854 cfg |= (1 << 18); /* enab early completion */
855 cfg |= (1 << 17); /* enab host q cache */
856 cfg |= (1 << 22); /* enab cutthrough */
857 }
858
859 writelfl(cfg, port_mmio + EDMA_CFG_OFS);
860}
861
776/** 862/**
777 * mv_port_start - Port specific init/start routine. 863 * mv_port_start - Port specific init/start routine.
778 * @ap: ATA channel to manipulate 864 * @ap: ATA channel to manipulate
@@ -786,6 +872,7 @@ static inline void mv_priv_free(struct mv_port_priv *pp, struct device *dev)
786static int mv_port_start(struct ata_port *ap) 872static int mv_port_start(struct ata_port *ap)
787{ 873{
788 struct device *dev = ap->host_set->dev; 874 struct device *dev = ap->host_set->dev;
875 struct mv_host_priv *hpriv = ap->host_set->private_data;
789 struct mv_port_priv *pp; 876 struct mv_port_priv *pp;
790 void __iomem *port_mmio = mv_ap_base(ap); 877 void __iomem *port_mmio = mv_ap_base(ap);
791 void *mem; 878 void *mem;
@@ -829,17 +916,26 @@ static int mv_port_start(struct ata_port *ap)
829 pp->sg_tbl = mem; 916 pp->sg_tbl = mem;
830 pp->sg_tbl_dma = mem_dma; 917 pp->sg_tbl_dma = mem_dma;
831 918
832 writelfl(EDMA_CFG_Q_DEPTH | EDMA_CFG_RD_BRST_EXT | 919 mv_edma_cfg(hpriv, port_mmio);
833 EDMA_CFG_WR_BUFF_LEN, port_mmio + EDMA_CFG_OFS);
834 920
835 writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS); 921 writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
836 writelfl(pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK, 922 writelfl(pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK,
837 port_mmio + EDMA_REQ_Q_IN_PTR_OFS); 923 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
838 924
839 writelfl(0, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS); 925 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
840 writelfl(0, port_mmio + EDMA_RSP_Q_IN_PTR_OFS); 926 writelfl(pp->crqb_dma & 0xffffffff,
927 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
928 else
929 writelfl(0, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
841 930
842 writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS); 931 writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
932
933 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
934 writelfl(pp->crpb_dma & 0xffffffff,
935 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
936 else
937 writelfl(0, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
938
843 writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK, 939 writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK,
844 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); 940 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
845 941
@@ -960,21 +1056,19 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
960 struct ata_taskfile *tf; 1056 struct ata_taskfile *tf;
961 u16 flags = 0; 1057 u16 flags = 0;
962 1058
963 if (ATA_PROT_DMA != qc->tf.protocol) { 1059 if (ATA_PROT_DMA != qc->tf.protocol)
964 return; 1060 return;
965 }
966 1061
967 /* the req producer index should be the same as we remember it */ 1062 /* the req producer index should be the same as we remember it */
968 assert(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >> 1063 WARN_ON(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >>
969 EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) == 1064 EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
970 pp->req_producer); 1065 pp->req_producer);
971 1066
972 /* Fill in command request block 1067 /* Fill in command request block
973 */ 1068 */
974 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) { 1069 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
975 flags |= CRQB_FLAG_READ; 1070 flags |= CRQB_FLAG_READ;
976 } 1071 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
977 assert(MV_MAX_Q_DEPTH > qc->tag);
978 flags |= qc->tag << CRQB_TAG_SHIFT; 1072 flags |= qc->tag << CRQB_TAG_SHIFT;
979 1073
980 pp->crqb[pp->req_producer].sg_addr = 1074 pp->crqb[pp->req_producer].sg_addr =
@@ -1029,9 +1123,76 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
1029 mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0); 1123 mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
1030 mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */ 1124 mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */
1031 1125
1032 if (!(qc->flags & ATA_QCFLAG_DMAMAP)) { 1126 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1127 return;
1128 mv_fill_sg(qc);
1129}
1130
1131/**
1132 * mv_qc_prep_iie - Host specific command preparation.
1133 * @qc: queued command to prepare
1134 *
1135 * This routine simply redirects to the general purpose routine
1136 * if the command is not DMA. Otherwise, it handles prep of the CRQB
1137 * (command request block), does some sanity checking, and calls
1138 * the SG load routine.
1139 *
1140 * LOCKING:
1141 * Inherited from caller.
1142 */
1143static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
1144{
1145 struct ata_port *ap = qc->ap;
1146 struct mv_port_priv *pp = ap->private_data;
1147 struct mv_crqb_iie *crqb;
1148 struct ata_taskfile *tf;
1149 u32 flags = 0;
1150
1151 if (ATA_PROT_DMA != qc->tf.protocol)
1152 return;
1153
1154 /* the req producer index should be the same as we remember it */
1155 WARN_ON(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >>
1156 EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
1157 pp->req_producer);
1158
1159 /* Fill in Gen IIE command request block
1160 */
1161 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1162 flags |= CRQB_FLAG_READ;
1163
1164 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1165 flags |= qc->tag << CRQB_TAG_SHIFT;
1166
1167 crqb = (struct mv_crqb_iie *) &pp->crqb[pp->req_producer];
1168 crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
1169 crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
1170 crqb->flags = cpu_to_le32(flags);
1171
1172 tf = &qc->tf;
1173 crqb->ata_cmd[0] = cpu_to_le32(
1174 (tf->command << 16) |
1175 (tf->feature << 24)
1176 );
1177 crqb->ata_cmd[1] = cpu_to_le32(
1178 (tf->lbal << 0) |
1179 (tf->lbam << 8) |
1180 (tf->lbah << 16) |
1181 (tf->device << 24)
1182 );
1183 crqb->ata_cmd[2] = cpu_to_le32(
1184 (tf->hob_lbal << 0) |
1185 (tf->hob_lbam << 8) |
1186 (tf->hob_lbah << 16) |
1187 (tf->hob_feature << 24)
1188 );
1189 crqb->ata_cmd[3] = cpu_to_le32(
1190 (tf->nsect << 0) |
1191 (tf->hob_nsect << 8)
1192 );
1193
1194 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1033 return; 1195 return;
1034 }
1035 mv_fill_sg(qc); 1196 mv_fill_sg(qc);
1036} 1197}
1037 1198
@@ -1047,7 +1208,7 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
1047 * LOCKING: 1208 * LOCKING:
1048 * Inherited from caller. 1209 * Inherited from caller.
1049 */ 1210 */
1050static int mv_qc_issue(struct ata_queued_cmd *qc) 1211static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
1051{ 1212{
1052 void __iomem *port_mmio = mv_ap_base(qc->ap); 1213 void __iomem *port_mmio = mv_ap_base(qc->ap);
1053 struct mv_port_priv *pp = qc->ap->private_data; 1214 struct mv_port_priv *pp = qc->ap->private_data;
@@ -1065,12 +1226,12 @@ static int mv_qc_issue(struct ata_queued_cmd *qc)
1065 in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS); 1226 in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
1066 1227
1067 /* the req producer index should be the same as we remember it */ 1228 /* the req producer index should be the same as we remember it */
1068 assert(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) == 1229 WARN_ON(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
1069 pp->req_producer); 1230 pp->req_producer);
1070 /* until we do queuing, the queue should be empty at this point */ 1231 /* until we do queuing, the queue should be empty at this point */
1071 assert(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) == 1232 WARN_ON(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
1072 ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS) >> 1233 ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS) >>
1073 EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK)); 1234 EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
1074 1235
1075 mv_inc_q_index(&pp->req_producer); /* now incr producer index */ 1236 mv_inc_q_index(&pp->req_producer); /* now incr producer index */
1076 1237
@@ -1090,7 +1251,7 @@ static int mv_qc_issue(struct ata_queued_cmd *qc)
1090 * 1251 *
1091 * This routine is for use when the port is in DMA mode, when it 1252 * This routine is for use when the port is in DMA mode, when it
1092 * will be using the CRPB (command response block) method of 1253 * will be using the CRPB (command response block) method of
1093 * returning command completion information. We assert indices 1254 * returning command completion information. We check indices
1094 * are good, grab status, and bump the response consumer index to 1255 * are good, grab status, and bump the response consumer index to
1095 * prove that we're up to date. 1256 * prove that we're up to date.
1096 * 1257 *
@@ -1106,16 +1267,16 @@ static u8 mv_get_crpb_status(struct ata_port *ap)
1106 out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); 1267 out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
1107 1268
1108 /* the response consumer index should be the same as we remember it */ 1269 /* the response consumer index should be the same as we remember it */
1109 assert(((out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) == 1270 WARN_ON(((out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
1110 pp->rsp_consumer); 1271 pp->rsp_consumer);
1111 1272
1112 /* increment our consumer index... */ 1273 /* increment our consumer index... */
1113 pp->rsp_consumer = mv_inc_q_index(&pp->rsp_consumer); 1274 pp->rsp_consumer = mv_inc_q_index(&pp->rsp_consumer);
1114 1275
1115 /* and, until we do NCQ, there should only be 1 CRPB waiting */ 1276 /* and, until we do NCQ, there should only be 1 CRPB waiting */
1116 assert(((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS) >> 1277 WARN_ON(((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS) >>
1117 EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) == 1278 EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
1118 pp->rsp_consumer); 1279 pp->rsp_consumer);
1119 1280
1120 /* write out our inc'd consumer index so EDMA knows we're caught up */ 1281 /* write out our inc'd consumer index so EDMA knows we're caught up */
1121 out_ptr &= EDMA_RSP_Q_BASE_LO_MASK; 1282 out_ptr &= EDMA_RSP_Q_BASE_LO_MASK;
@@ -1681,6 +1842,12 @@ static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1681 m2 |= hpriv->signal[port].pre; 1842 m2 |= hpriv->signal[port].pre;
1682 m2 &= ~(1 << 16); 1843 m2 &= ~(1 << 16);
1683 1844
1845 /* according to mvSata 3.6.1, some IIE values are fixed */
1846 if (IS_GEN_IIE(hpriv)) {
1847 m2 &= ~0xC30FF01F;
1848 m2 |= 0x0000900F;
1849 }
1850
1684 writel(m2, port_mmio + PHY_MODE2); 1851 writel(m2, port_mmio + PHY_MODE2);
1685} 1852}
1686 1853
@@ -1846,7 +2013,6 @@ static void mv_phy_reset(struct ata_port *ap)
1846static void mv_eng_timeout(struct ata_port *ap) 2013static void mv_eng_timeout(struct ata_port *ap)
1847{ 2014{
1848 struct ata_queued_cmd *qc; 2015 struct ata_queued_cmd *qc;
1849 unsigned long flags;
1850 2016
1851 printk(KERN_ERR "ata%u: Entering mv_eng_timeout\n",ap->id); 2017 printk(KERN_ERR "ata%u: Entering mv_eng_timeout\n",ap->id);
1852 DPRINTK("All regs @ start of eng_timeout\n"); 2018 DPRINTK("All regs @ start of eng_timeout\n");
@@ -1861,22 +2027,8 @@ static void mv_eng_timeout(struct ata_port *ap)
1861 mv_err_intr(ap); 2027 mv_err_intr(ap);
1862 mv_stop_and_reset(ap); 2028 mv_stop_and_reset(ap);
1863 2029
1864 if (!qc) { 2030 qc->err_mask |= AC_ERR_TIMEOUT;
1865 printk(KERN_ERR "ata%u: BUG: timeout without command\n", 2031 ata_eh_qc_complete(qc);
1866 ap->id);
1867 } else {
1868 /* hack alert! We cannot use the supplied completion
1869 * function from inside the ->eh_strategy_handler() thread.
1870 * libata is the only user of ->eh_strategy_handler() in
1871 * any kernel, so the default scsi_done() assumes it is
1872 * not being called from the SCSI EH.
1873 */
1874 spin_lock_irqsave(&ap->host_set->lock, flags);
1875 qc->scsidone = scsi_finish_command;
1876 qc->err_mask |= AC_ERR_OTHER;
1877 ata_qc_complete(qc);
1878 spin_unlock_irqrestore(&ap->host_set->lock, flags);
1879 }
1880} 2032}
1881 2033
1882/** 2034/**
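With ->eh_timed_out in place, timeout handling no longer runs through the old scsi_done() path, so the scsidone-override hack and its locking disappear; the handler just marks the qc with AC_ERR_TIMEOUT and hands it to ata_eh_qc_complete(), which is safe to call from the EH thread. The resulting shape, common to all the eng_timeout conversions in this patch (condensed, not standalone code):

static void my_eng_timeout(struct ata_port *ap)
{
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);

	/* ...driver-specific error handling and reset... */

	qc->err_mask |= AC_ERR_TIMEOUT;
	ata_eh_qc_complete(qc);		/* EH-aware completion */
}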
@@ -1995,6 +2147,27 @@ static int mv_chip_id(struct pci_dev *pdev, struct mv_host_priv *hpriv,
1995 } 2147 }
1996 break; 2148 break;
1997 2149
2150 case chip_7042:
2151 case chip_6042:
2152 hpriv->ops = &mv6xxx_ops;
2153
2154 hp_flags |= MV_HP_GEN_IIE;
2155
2156 switch (rev_id) {
2157 case 0x0:
2158 hp_flags |= MV_HP_ERRATA_XX42A0;
2159 break;
2160 case 0x1:
2161 hp_flags |= MV_HP_ERRATA_60X1C0;
2162 break;
2163 default:
2164 dev_printk(KERN_WARNING, &pdev->dev,
2165 "Applying 60X1C0 workarounds to unknown rev\n");
2166 hp_flags |= MV_HP_ERRATA_60X1C0;
2167 break;
2168 }
2169 break;
2170
1998 default: 2171 default:
1999 printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n", board_idx); 2172 printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n", board_idx);
2000 return 1; 2173 return 1;
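The new chip_7042/chip_6042 case follows the file's usual errata-dispatch pattern: pick a workaround set by silicon revision, and fall back to the newest known set, with a warning, for revisions the driver has not seen. A self-contained model of that dispatch (flag values invented for the example):

#include <stdio.h>

enum { ERRATA_XX42A0 = 0x1, ERRATA_60X1C0 = 0x2 };	/* stand-ins */

static unsigned int pick_errata(unsigned int rev_id)
{
	switch (rev_id) {
	case 0x0:
		return ERRATA_XX42A0;
	case 0x1:
		return ERRATA_60X1C0;
	default:
		/* unknown silicon: warn and apply the newest known fixes */
		fprintf(stderr, "unknown rev 0x%x, applying 60X1C0 fixes\n",
			rev_id);
		return ERRATA_60X1C0;
	}
}

int main(void)
{
	printf("flags=0x%x\n", pick_errata(0x7));
	return 0;
}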
diff --git a/drivers/scsi/sata_nv.c b/drivers/scsi/sata_nv.c
index bbbb55eeb73a..caffadc2e0ae 100644
--- a/drivers/scsi/sata_nv.c
+++ b/drivers/scsi/sata_nv.c
@@ -229,11 +229,11 @@ static struct scsi_host_template nv_sht = {
229 .name = DRV_NAME, 229 .name = DRV_NAME,
230 .ioctl = ata_scsi_ioctl, 230 .ioctl = ata_scsi_ioctl,
231 .queuecommand = ata_scsi_queuecmd, 231 .queuecommand = ata_scsi_queuecmd,
232 .eh_timed_out = ata_scsi_timed_out,
232 .eh_strategy_handler = ata_scsi_error, 233 .eh_strategy_handler = ata_scsi_error,
233 .can_queue = ATA_DEF_QUEUE, 234 .can_queue = ATA_DEF_QUEUE,
234 .this_id = ATA_SHT_THIS_ID, 235 .this_id = ATA_SHT_THIS_ID,
235 .sg_tablesize = LIBATA_MAX_PRD, 236 .sg_tablesize = LIBATA_MAX_PRD,
236 .max_sectors = ATA_MAX_SECTORS,
237 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 237 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
238 .emulated = ATA_SHT_EMULATED, 238 .emulated = ATA_SHT_EMULATED,
239 .use_clustering = ATA_SHT_USE_CLUSTERING, 239 .use_clustering = ATA_SHT_USE_CLUSTERING,
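Every scsi_host_template in this series gains the same two edits: an ->eh_timed_out hook (ata_scsi_timed_out) so libata sees command timeouts before the SCSI midlayer escalates, and removal of the static .max_sectors field, since the transfer-size cap now lives per device (see the dev->max_sectors change in sata_sil.c below). Shape of a converted template (fields condensed):

static struct scsi_host_template my_sht = {
	/* ... */
	.queuecommand		= ata_scsi_queuecmd,
	.eh_timed_out		= ata_scsi_timed_out,	/* new hook */
	.eh_strategy_handler	= ata_scsi_error,
	/* .max_sectors dropped: the limit is now per ata_device */
	/* ... */
};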
diff --git a/drivers/scsi/sata_promise.c b/drivers/scsi/sata_promise.c
index b0b0a69b3563..ba2b7a0983db 100644
--- a/drivers/scsi/sata_promise.c
+++ b/drivers/scsi/sata_promise.c
@@ -46,7 +46,7 @@
46#include "sata_promise.h" 46#include "sata_promise.h"
47 47
48#define DRV_NAME "sata_promise" 48#define DRV_NAME "sata_promise"
49#define DRV_VERSION "1.03" 49#define DRV_VERSION "1.04"
50 50
51 51
52enum { 52enum {
@@ -58,6 +58,7 @@ enum {
58 PDC_GLOBAL_CTL = 0x48, /* Global control/status (per port) */ 58 PDC_GLOBAL_CTL = 0x48, /* Global control/status (per port) */
59 PDC_CTLSTAT = 0x60, /* IDE control and status (per port) */ 59 PDC_CTLSTAT = 0x60, /* IDE control and status (per port) */
60 PDC_SATA_PLUG_CSR = 0x6C, /* SATA Plug control/status reg */ 60 PDC_SATA_PLUG_CSR = 0x6C, /* SATA Plug control/status reg */
61 PDC2_SATA_PLUG_CSR = 0x60, /* SATAII Plug control/status reg */
61 PDC_SLEW_CTL = 0x470, /* slew rate control reg */ 62 PDC_SLEW_CTL = 0x470, /* slew rate control reg */
62 63
63 PDC_ERR_MASK = (1<<19) | (1<<20) | (1<<21) | (1<<22) | 64 PDC_ERR_MASK = (1<<19) | (1<<20) | (1<<21) | (1<<22) |
@@ -67,8 +68,10 @@ enum {
67 board_20319 = 1, /* FastTrak S150 TX4 */ 68 board_20319 = 1, /* FastTrak S150 TX4 */
68 board_20619 = 2, /* FastTrak TX4000 */ 69 board_20619 = 2, /* FastTrak TX4000 */
69 board_20771 = 3, /* FastTrak TX2300 */ 70 board_20771 = 3, /* FastTrak TX2300 */
71 board_2057x = 4, /* SATAII150 Tx2plus */
72 board_40518 = 5, /* SATAII150 Tx4 */
70 73
71 PDC_HAS_PATA = (1 << 1), /* PDC20375 has PATA */ 74 PDC_HAS_PATA = (1 << 1), /* PDC20375/20575 has PATA */
72 75
73 PDC_RESET = (1 << 11), /* HDMA reset */ 76 PDC_RESET = (1 << 11), /* HDMA reset */
74 77
@@ -82,6 +85,10 @@ struct pdc_port_priv {
82 dma_addr_t pkt_dma; 85 dma_addr_t pkt_dma;
83}; 86};
84 87
88struct pdc_host_priv {
89 int hotplug_offset;
90};
91
85static u32 pdc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg); 92static u32 pdc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg);
86static void pdc_sata_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); 93static void pdc_sata_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
87static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); 94static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
@@ -95,7 +102,8 @@ static void pdc_qc_prep(struct ata_queued_cmd *qc);
95static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf); 102static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
96static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf); 103static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
97static void pdc_irq_clear(struct ata_port *ap); 104static void pdc_irq_clear(struct ata_port *ap);
98static int pdc_qc_issue_prot(struct ata_queued_cmd *qc); 105static unsigned int pdc_qc_issue_prot(struct ata_queued_cmd *qc);
106static void pdc_host_stop(struct ata_host_set *host_set);
99 107
100 108
101static struct scsi_host_template pdc_ata_sht = { 109static struct scsi_host_template pdc_ata_sht = {
@@ -103,11 +111,11 @@ static struct scsi_host_template pdc_ata_sht = {
103 .name = DRV_NAME, 111 .name = DRV_NAME,
104 .ioctl = ata_scsi_ioctl, 112 .ioctl = ata_scsi_ioctl,
105 .queuecommand = ata_scsi_queuecmd, 113 .queuecommand = ata_scsi_queuecmd,
114 .eh_timed_out = ata_scsi_timed_out,
106 .eh_strategy_handler = ata_scsi_error, 115 .eh_strategy_handler = ata_scsi_error,
107 .can_queue = ATA_DEF_QUEUE, 116 .can_queue = ATA_DEF_QUEUE,
108 .this_id = ATA_SHT_THIS_ID, 117 .this_id = ATA_SHT_THIS_ID,
109 .sg_tablesize = LIBATA_MAX_PRD, 118 .sg_tablesize = LIBATA_MAX_PRD,
110 .max_sectors = ATA_MAX_SECTORS,
111 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 119 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
112 .emulated = ATA_SHT_EMULATED, 120 .emulated = ATA_SHT_EMULATED,
113 .use_clustering = ATA_SHT_USE_CLUSTERING, 121 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -137,7 +145,7 @@ static const struct ata_port_operations pdc_sata_ops = {
137 .scr_write = pdc_sata_scr_write, 145 .scr_write = pdc_sata_scr_write,
138 .port_start = pdc_port_start, 146 .port_start = pdc_port_start,
139 .port_stop = pdc_port_stop, 147 .port_stop = pdc_port_stop,
140 .host_stop = ata_pci_host_stop, 148 .host_stop = pdc_host_stop,
141}; 149};
142 150
143static const struct ata_port_operations pdc_pata_ops = { 151static const struct ata_port_operations pdc_pata_ops = {
@@ -158,7 +166,7 @@ static const struct ata_port_operations pdc_pata_ops = {
158 166
159 .port_start = pdc_port_start, 167 .port_start = pdc_port_start,
160 .port_stop = pdc_port_stop, 168 .port_stop = pdc_port_stop,
161 .host_stop = ata_pci_host_stop, 169 .host_stop = pdc_host_stop,
162}; 170};
163 171
164static const struct ata_port_info pdc_port_info[] = { 172static const struct ata_port_info pdc_port_info[] = {
@@ -201,6 +209,26 @@ static const struct ata_port_info pdc_port_info[] = {
201 .udma_mask = 0x7f, /* udma0-6 ; FIXME */ 209 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
202 .port_ops = &pdc_sata_ops, 210 .port_ops = &pdc_sata_ops,
203 }, 211 },
212
213 /* board_2057x */
214 {
215 .sht = &pdc_ata_sht,
216 .host_flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA,
217 .pio_mask = 0x1f, /* pio0-4 */
218 .mwdma_mask = 0x07, /* mwdma0-2 */
219 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
220 .port_ops = &pdc_sata_ops,
221 },
222
223 /* board_40518 */
224 {
225 .sht = &pdc_ata_sht,
226 .host_flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA,
227 .pio_mask = 0x1f, /* pio0-4 */
228 .mwdma_mask = 0x07, /* mwdma0-2 */
229 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
230 .port_ops = &pdc_sata_ops,
231 },
204}; 232};
205 233
206static const struct pci_device_id pdc_ata_pci_tbl[] = { 234static const struct pci_device_id pdc_ata_pci_tbl[] = {
@@ -217,9 +245,9 @@ static const struct pci_device_id pdc_ata_pci_tbl[] = {
217 { PCI_VENDOR_ID_PROMISE, 0x3376, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 245 { PCI_VENDOR_ID_PROMISE, 0x3376, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
218 board_2037x }, 246 board_2037x },
219 { PCI_VENDOR_ID_PROMISE, 0x3574, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 247 { PCI_VENDOR_ID_PROMISE, 0x3574, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
220 board_2037x }, 248 board_2057x },
221 { PCI_VENDOR_ID_PROMISE, 0x3d75, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 249 { PCI_VENDOR_ID_PROMISE, 0x3d75, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
222 board_2037x }, 250 board_2057x },
223 { PCI_VENDOR_ID_PROMISE, 0x3d73, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 251 { PCI_VENDOR_ID_PROMISE, 0x3d73, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
224 board_2037x }, 252 board_2037x },
225 253
@@ -232,7 +260,7 @@ static const struct pci_device_id pdc_ata_pci_tbl[] = {
232 { PCI_VENDOR_ID_PROMISE, 0x3d17, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 260 { PCI_VENDOR_ID_PROMISE, 0x3d17, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
233 board_20319 }, 261 board_20319 },
234 { PCI_VENDOR_ID_PROMISE, 0x3d18, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 262 { PCI_VENDOR_ID_PROMISE, 0x3d18, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
235 board_20319 }, 263 board_40518 },
236 264
237 { PCI_VENDOR_ID_PROMISE, 0x6629, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 265 { PCI_VENDOR_ID_PROMISE, 0x6629, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
238 board_20619 }, 266 board_20619 },
@@ -261,12 +289,11 @@ static int pdc_port_start(struct ata_port *ap)
261 if (rc) 289 if (rc)
262 return rc; 290 return rc;
263 291
264 pp = kmalloc(sizeof(*pp), GFP_KERNEL); 292 pp = kzalloc(sizeof(*pp), GFP_KERNEL);
265 if (!pp) { 293 if (!pp) {
266 rc = -ENOMEM; 294 rc = -ENOMEM;
267 goto err_out; 295 goto err_out;
268 } 296 }
269 memset(pp, 0, sizeof(*pp));
270 297
271 pp->pkt = dma_alloc_coherent(dev, 128, &pp->pkt_dma, GFP_KERNEL); 298 pp->pkt = dma_alloc_coherent(dev, 128, &pp->pkt_dma, GFP_KERNEL);
272 if (!pp->pkt) { 299 if (!pp->pkt) {
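kzalloc() is kmalloc() plus a guaranteed zero fill, so the explicit memset() after the allocation can simply be dropped; the same substitution is made for probe_ent and hp further down. A user-space analogue using calloc():

#include <stdlib.h>
#include <string.h>

struct port_priv { void *pkt; unsigned long pkt_dma; };	/* stand-in */

int main(void)
{
	/* before: allocate, then zero by hand */
	struct port_priv *a = malloc(sizeof(*a));
	if (a)
		memset(a, 0, sizeof(*a));

	/* after: one call that hands back zeroed memory */
	struct port_priv *b = calloc(1, sizeof(*b));

	free(a);
	free(b);
	return 0;
}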
@@ -298,6 +325,16 @@ static void pdc_port_stop(struct ata_port *ap)
298} 325}
299 326
300 327
328static void pdc_host_stop(struct ata_host_set *host_set)
329{
330 struct pdc_host_priv *hp = host_set->private_data;
331
332 ata_pci_host_stop(host_set);
333
334 kfree(hp);
335}
336
337
301static void pdc_reset_port(struct ata_port *ap) 338static void pdc_reset_port(struct ata_port *ap)
302{ 339{
303 void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr + PDC_CTLSTAT; 340 void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr + PDC_CTLSTAT;
@@ -394,19 +431,6 @@ static void pdc_eng_timeout(struct ata_port *ap)
394 spin_lock_irqsave(&host_set->lock, flags); 431 spin_lock_irqsave(&host_set->lock, flags);
395 432
396 qc = ata_qc_from_tag(ap, ap->active_tag); 433 qc = ata_qc_from_tag(ap, ap->active_tag);
397 if (!qc) {
398 printk(KERN_ERR "ata%u: BUG: timeout without command\n",
399 ap->id);
400 goto out;
401 }
402
403 /* hack alert! We cannot use the supplied completion
404 * function from inside the ->eh_strategy_handler() thread.
405 * libata is the only user of ->eh_strategy_handler() in
406 * any kernel, so the default scsi_done() assumes it is
407 * not being called from the SCSI EH.
408 */
409 qc->scsidone = scsi_finish_command;
410 434
411 switch (qc->tf.protocol) { 435 switch (qc->tf.protocol) {
412 case ATA_PROT_DMA: 436 case ATA_PROT_DMA:
@@ -414,7 +438,6 @@ static void pdc_eng_timeout(struct ata_port *ap)
414 printk(KERN_ERR "ata%u: command timeout\n", ap->id); 438 printk(KERN_ERR "ata%u: command timeout\n", ap->id);
415 drv_stat = ata_wait_idle(ap); 439 drv_stat = ata_wait_idle(ap);
416 qc->err_mask |= __ac_err_mask(drv_stat); 440 qc->err_mask |= __ac_err_mask(drv_stat);
417 ata_qc_complete(qc);
418 break; 441 break;
419 442
420 default: 443 default:
@@ -424,12 +447,11 @@ static void pdc_eng_timeout(struct ata_port *ap)
424 ap->id, qc->tf.command, drv_stat); 447 ap->id, qc->tf.command, drv_stat);
425 448
426 qc->err_mask |= ac_err_mask(drv_stat); 449 qc->err_mask |= ac_err_mask(drv_stat);
427 ata_qc_complete(qc);
428 break; 450 break;
429 } 451 }
430 452
431out:
432 spin_unlock_irqrestore(&host_set->lock, flags); 453 spin_unlock_irqrestore(&host_set->lock, flags);
454 ata_eh_qc_complete(qc);
433 DPRINTK("EXIT\n"); 455 DPRINTK("EXIT\n");
434} 456}
435 457
@@ -495,14 +517,15 @@ static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *r
495 VPRINTK("QUICK EXIT 2\n"); 517 VPRINTK("QUICK EXIT 2\n");
496 return IRQ_NONE; 518 return IRQ_NONE;
497 } 519 }
520
521 spin_lock(&host_set->lock);
522
498 mask &= 0xffff; /* only 16 tags possible */ 523 mask &= 0xffff; /* only 16 tags possible */
499 if (!mask) { 524 if (!mask) {
500 VPRINTK("QUICK EXIT 3\n"); 525 VPRINTK("QUICK EXIT 3\n");
501 return IRQ_NONE; 526 goto done_irq;
502 } 527 }
503 528
504 spin_lock(&host_set->lock);
505
506 writel(mask, mmio_base + PDC_INT_SEQMASK); 529 writel(mask, mmio_base + PDC_INT_SEQMASK);
507 530
508 for (i = 0; i < host_set->n_ports; i++) { 531 for (i = 0; i < host_set->n_ports; i++) {
@@ -519,10 +542,10 @@ static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *r
519 } 542 }
520 } 543 }
521 544
522 spin_unlock(&host_set->lock);
523
524 VPRINTK("EXIT\n"); 545 VPRINTK("EXIT\n");
525 546
547done_irq:
548 spin_unlock(&host_set->lock);
526 return IRQ_RETVAL(handled); 549 return IRQ_RETVAL(handled);
527} 550}
528 551
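Because host_set->lock is now taken before the tag-mask test, the empty-mask path can no longer simply return IRQ_NONE; both exits funnel through the done_irq label so the lock is dropped exactly once on every path. The same lock-early/single-unlock shape in portable C:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int handle_irq(unsigned int mask)
{
	int handled = 0;

	pthread_mutex_lock(&lock);

	mask &= 0xffff;		/* only 16 tags possible */
	if (!mask)
		goto done_irq;	/* early exit still releases the lock */

	handled = 1;		/* ...per-port processing would go here... */

done_irq:
	pthread_mutex_unlock(&lock);
	return handled;
}

int main(void)
{
	printf("%d %d\n", handle_irq(0x0), handle_irq(0x3));
	return 0;
}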
@@ -544,7 +567,7 @@ static inline void pdc_packet_start(struct ata_queued_cmd *qc)
544 readl((void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); /* flush */ 567 readl((void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); /* flush */
545} 568}
546 569
547static int pdc_qc_issue_prot(struct ata_queued_cmd *qc) 570static unsigned int pdc_qc_issue_prot(struct ata_queued_cmd *qc)
548{ 571{
549 switch (qc->tf.protocol) { 572 switch (qc->tf.protocol) {
550 case ATA_PROT_DMA: 573 case ATA_PROT_DMA:
@@ -600,6 +623,8 @@ static void pdc_ata_setup_port(struct ata_ioports *port, unsigned long base)
600static void pdc_host_init(unsigned int chip_id, struct ata_probe_ent *pe) 623static void pdc_host_init(unsigned int chip_id, struct ata_probe_ent *pe)
601{ 624{
602 void __iomem *mmio = pe->mmio_base; 625 void __iomem *mmio = pe->mmio_base;
626 struct pdc_host_priv *hp = pe->private_data;
627 int hotplug_offset = hp->hotplug_offset;
603 u32 tmp; 628 u32 tmp;
604 629
605 /* 630 /*
@@ -614,12 +639,12 @@ static void pdc_host_init(unsigned int chip_id, struct ata_probe_ent *pe)
614 writel(tmp, mmio + PDC_FLASH_CTL); 639 writel(tmp, mmio + PDC_FLASH_CTL);
615 640
616 /* clear plug/unplug flags for all ports */ 641 /* clear plug/unplug flags for all ports */
617 tmp = readl(mmio + PDC_SATA_PLUG_CSR); 642 tmp = readl(mmio + hotplug_offset);
618 writel(tmp | 0xff, mmio + PDC_SATA_PLUG_CSR); 643 writel(tmp | 0xff, mmio + hotplug_offset);
619 644
620 /* mask plug/unplug ints */ 645 /* mask plug/unplug ints */
621 tmp = readl(mmio + PDC_SATA_PLUG_CSR); 646 tmp = readl(mmio + hotplug_offset);
622 writel(tmp | 0xff0000, mmio + PDC_SATA_PLUG_CSR); 647 writel(tmp | 0xff0000, mmio + hotplug_offset);
623 648
624 /* reduce TBG clock to 133 MHz. */ 649
625 tmp = readl(mmio + PDC_TBG_MODE); 650 tmp = readl(mmio + PDC_TBG_MODE);
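Replacing the hard-coded PDC_SATA_PLUG_CSR with hp->hotplug_offset lets this one init routine serve both register layouts; only the probe code decides which offset a given chip uses (0x6C on first-generation parts, 0x60 on the SATAII150 parts). A small model of the parameterization:

#include <stdio.h>

enum { PLUG_CSR_GEN1 = 0x6C, PLUG_CSR_GEN2 = 0x60 };	/* per the enums above */

struct host_priv { int hotplug_offset; };

static void host_init(const struct host_priv *hp)
{
	/* every plug/unplug CSR access goes through the per-chip offset */
	printf("hotplug CSR at 0x%02X\n", hp->hotplug_offset);
}

int main(void)
{
	struct host_priv gen1 = { .hotplug_offset = PLUG_CSR_GEN1 };
	struct host_priv gen2 = { .hotplug_offset = PLUG_CSR_GEN2 };

	host_init(&gen1);	/* first-generation boards */
	host_init(&gen2);	/* board_2057x / board_40518 */
	return 0;
}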
@@ -641,6 +666,7 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
641{ 666{
642 static int printed_version; 667 static int printed_version;
643 struct ata_probe_ent *probe_ent = NULL; 668 struct ata_probe_ent *probe_ent = NULL;
669 struct pdc_host_priv *hp;
644 unsigned long base; 670 unsigned long base;
645 void __iomem *mmio_base; 671 void __iomem *mmio_base;
646 unsigned int board_idx = (unsigned int) ent->driver_data; 672 unsigned int board_idx = (unsigned int) ent->driver_data;
@@ -671,13 +697,12 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
671 if (rc) 697 if (rc)
672 goto err_out_regions; 698 goto err_out_regions;
673 699
674 probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL); 700 probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
675 if (probe_ent == NULL) { 701 if (probe_ent == NULL) {
676 rc = -ENOMEM; 702 rc = -ENOMEM;
677 goto err_out_regions; 703 goto err_out_regions;
678 } 704 }
679 705
680 memset(probe_ent, 0, sizeof(*probe_ent));
681 probe_ent->dev = pci_dev_to_dev(pdev); 706 probe_ent->dev = pci_dev_to_dev(pdev);
682 INIT_LIST_HEAD(&probe_ent->node); 707 INIT_LIST_HEAD(&probe_ent->node);
683 708
@@ -688,6 +713,16 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
688 } 713 }
689 base = (unsigned long) mmio_base; 714 base = (unsigned long) mmio_base;
690 715
716 hp = kzalloc(sizeof(*hp), GFP_KERNEL);
717 if (hp == NULL) {
718 rc = -ENOMEM;
719 goto err_out_free_ent;
720 }
721
722 /* Set default hotplug offset */
723 hp->hotplug_offset = PDC_SATA_PLUG_CSR;
724 probe_ent->private_data = hp;
725
691 probe_ent->sht = pdc_port_info[board_idx].sht; 726 probe_ent->sht = pdc_port_info[board_idx].sht;
692 probe_ent->host_flags = pdc_port_info[board_idx].host_flags; 727 probe_ent->host_flags = pdc_port_info[board_idx].host_flags;
693 probe_ent->pio_mask = pdc_port_info[board_idx].pio_mask; 728 probe_ent->pio_mask = pdc_port_info[board_idx].pio_mask;
@@ -707,6 +742,10 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
707 742
708 /* notice 4-port boards */ 743 /* notice 4-port boards */
709 switch (board_idx) { 744 switch (board_idx) {
745 case board_40518:
746 /* Override hotplug offset for SATAII150 */
747 hp->hotplug_offset = PDC2_SATA_PLUG_CSR;
748 /* Fall through */
710 case board_20319: 749 case board_20319:
711 probe_ent->n_ports = 4; 750 probe_ent->n_ports = 4;
712 751
@@ -716,6 +755,10 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
716 probe_ent->port[2].scr_addr = base + 0x600; 755 probe_ent->port[2].scr_addr = base + 0x600;
717 probe_ent->port[3].scr_addr = base + 0x700; 756 probe_ent->port[3].scr_addr = base + 0x700;
718 break; 757 break;
758 case board_2057x:
759 /* Override hotplug offset for SATAII150 */
760 hp->hotplug_offset = PDC2_SATA_PLUG_CSR;
761 /* Fall through */
719 case board_2037x: 762 case board_2037x:
720 probe_ent->n_ports = 2; 763 probe_ent->n_ports = 2;
721 break; 764 break;
@@ -741,8 +784,10 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
741 /* initialize adapter */ 784 /* initialize adapter */
742 pdc_host_init(board_idx, probe_ent); 785 pdc_host_init(board_idx, probe_ent);
743 786
744 /* FIXME: check ata_device_add return value */ 787 /* FIXME: does anything other than hp need freeing here? */
745 ata_device_add(probe_ent); 788 if (!ata_device_add(probe_ent))
789 kfree(hp);
790
746 kfree(probe_ent); 791 kfree(probe_ent);
747 792
748 return 0; 793 return 0;
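ata_device_add() returns the number of ports it registered, so a zero return means nothing was attached and the per-host private data would otherwise leak; the new check frees hp in that case, while probe_ent is freed unconditionally as before. Sketch of the error-handling shape (condensed, not standalone code):

	if (!ata_device_add(probe_ent))	/* 0 ports registered */
		kfree(hp);		/* nothing took ownership of hp */

	kfree(probe_ent);		/* always freed once probing is done */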
diff --git a/drivers/scsi/sata_qstor.c b/drivers/scsi/sata_qstor.c
index de05e2883f9c..286482630be3 100644
--- a/drivers/scsi/sata_qstor.c
+++ b/drivers/scsi/sata_qstor.c
@@ -120,7 +120,7 @@ static void qs_host_stop(struct ata_host_set *host_set);
120static void qs_port_stop(struct ata_port *ap); 120static void qs_port_stop(struct ata_port *ap);
121static void qs_phy_reset(struct ata_port *ap); 121static void qs_phy_reset(struct ata_port *ap);
122static void qs_qc_prep(struct ata_queued_cmd *qc); 122static void qs_qc_prep(struct ata_queued_cmd *qc);
123static int qs_qc_issue(struct ata_queued_cmd *qc); 123static unsigned int qs_qc_issue(struct ata_queued_cmd *qc);
124static int qs_check_atapi_dma(struct ata_queued_cmd *qc); 124static int qs_check_atapi_dma(struct ata_queued_cmd *qc);
125static void qs_bmdma_stop(struct ata_queued_cmd *qc); 125static void qs_bmdma_stop(struct ata_queued_cmd *qc);
126static u8 qs_bmdma_status(struct ata_port *ap); 126static u8 qs_bmdma_status(struct ata_port *ap);
@@ -132,11 +132,11 @@ static struct scsi_host_template qs_ata_sht = {
132 .name = DRV_NAME, 132 .name = DRV_NAME,
133 .ioctl = ata_scsi_ioctl, 133 .ioctl = ata_scsi_ioctl,
134 .queuecommand = ata_scsi_queuecmd, 134 .queuecommand = ata_scsi_queuecmd,
135 .eh_timed_out = ata_scsi_timed_out,
135 .eh_strategy_handler = ata_scsi_error, 136 .eh_strategy_handler = ata_scsi_error,
136 .can_queue = ATA_DEF_QUEUE, 137 .can_queue = ATA_DEF_QUEUE,
137 .this_id = ATA_SHT_THIS_ID, 138 .this_id = ATA_SHT_THIS_ID,
138 .sg_tablesize = QS_MAX_PRD, 139 .sg_tablesize = QS_MAX_PRD,
139 .max_sectors = ATA_MAX_SECTORS,
140 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 140 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
141 .emulated = ATA_SHT_EMULATED, 141 .emulated = ATA_SHT_EMULATED,
142 //FIXME .use_clustering = ATA_SHT_USE_CLUSTERING, 142 //FIXME .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -276,8 +276,8 @@ static unsigned int qs_fill_sg(struct ata_queued_cmd *qc)
276 unsigned int nelem; 276 unsigned int nelem;
277 u8 *prd = pp->pkt + QS_CPB_BYTES; 277 u8 *prd = pp->pkt + QS_CPB_BYTES;
278 278
279 assert(qc->__sg != NULL); 279 WARN_ON(qc->__sg == NULL);
280 assert(qc->n_elem > 0); 280 WARN_ON(qc->n_elem == 0);
281 281
282 nelem = 0; 282 nelem = 0;
283 ata_for_each_sg(sg, qc) { 283 ata_for_each_sg(sg, qc) {
@@ -352,7 +352,7 @@ static inline void qs_packet_start(struct ata_queued_cmd *qc)
352 readl(chan + QS_CCT_CFF); /* flush */ 352 readl(chan + QS_CCT_CFF); /* flush */
353} 353}
354 354
355static int qs_qc_issue(struct ata_queued_cmd *qc) 355static unsigned int qs_qc_issue(struct ata_queued_cmd *qc)
356{ 356{
357 struct qs_port_priv *pp = qc->ap->private_data; 357 struct qs_port_priv *pp = qc->ap->private_data;
358 358
diff --git a/drivers/scsi/sata_sil.c b/drivers/scsi/sata_sil.c
index 17f74d3c10e7..15346888faf2 100644
--- a/drivers/scsi/sata_sil.c
+++ b/drivers/scsi/sata_sil.c
@@ -135,11 +135,11 @@ static struct scsi_host_template sil_sht = {
135 .name = DRV_NAME, 135 .name = DRV_NAME,
136 .ioctl = ata_scsi_ioctl, 136 .ioctl = ata_scsi_ioctl,
137 .queuecommand = ata_scsi_queuecmd, 137 .queuecommand = ata_scsi_queuecmd,
138 .eh_timed_out = ata_scsi_timed_out,
138 .eh_strategy_handler = ata_scsi_error, 139 .eh_strategy_handler = ata_scsi_error,
139 .can_queue = ATA_DEF_QUEUE, 140 .can_queue = ATA_DEF_QUEUE,
140 .this_id = ATA_SHT_THIS_ID, 141 .this_id = ATA_SHT_THIS_ID,
141 .sg_tablesize = LIBATA_MAX_PRD, 142 .sg_tablesize = LIBATA_MAX_PRD,
142 .max_sectors = ATA_MAX_SECTORS,
143 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 143 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
144 .emulated = ATA_SHT_EMULATED, 144 .emulated = ATA_SHT_EMULATED,
145 .use_clustering = ATA_SHT_USE_CLUSTERING, 145 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -157,7 +157,7 @@ static const struct ata_port_operations sil_ops = {
157 .check_status = ata_check_status, 157 .check_status = ata_check_status,
158 .exec_command = ata_exec_command, 158 .exec_command = ata_exec_command,
159 .dev_select = ata_std_dev_select, 159 .dev_select = ata_std_dev_select,
160 .phy_reset = sata_phy_reset, 160 .probe_reset = ata_std_probe_reset,
161 .post_set_mode = sil_post_set_mode, 161 .post_set_mode = sil_post_set_mode,
162 .bmdma_setup = ata_bmdma_setup, 162 .bmdma_setup = ata_bmdma_setup,
163 .bmdma_start = ata_bmdma_start, 163 .bmdma_start = ata_bmdma_start,
@@ -180,7 +180,7 @@ static const struct ata_port_info sil_port_info[] = {
180 { 180 {
181 .sht = &sil_sht, 181 .sht = &sil_sht,
182 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 182 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
183 ATA_FLAG_SRST | ATA_FLAG_MMIO, 183 ATA_FLAG_MMIO,
184 .pio_mask = 0x1f, /* pio0-4 */ 184 .pio_mask = 0x1f, /* pio0-4 */
185 .mwdma_mask = 0x07, /* mwdma0-2 */ 185 .mwdma_mask = 0x07, /* mwdma0-2 */
186 .udma_mask = 0x3f, /* udma0-5 */ 186 .udma_mask = 0x3f, /* udma0-5 */
@@ -189,8 +189,7 @@ static const struct ata_port_info sil_port_info[] = {
189 { 189 {
190 .sht = &sil_sht, 190 .sht = &sil_sht,
191 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 191 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
192 ATA_FLAG_SRST | ATA_FLAG_MMIO | 192 ATA_FLAG_MMIO | SIL_FLAG_MOD15WRITE,
193 SIL_FLAG_MOD15WRITE,
194 .pio_mask = 0x1f, /* pio0-4 */ 193 .pio_mask = 0x1f, /* pio0-4 */
195 .mwdma_mask = 0x07, /* mwdma0-2 */ 194 .mwdma_mask = 0x07, /* mwdma0-2 */
196 .udma_mask = 0x3f, /* udma0-5 */ 195 .udma_mask = 0x3f, /* udma0-5 */
@@ -199,7 +198,7 @@ static const struct ata_port_info sil_port_info[] = {
199 { 198 {
200 .sht = &sil_sht, 199 .sht = &sil_sht,
201 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 200 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
202 ATA_FLAG_SRST | ATA_FLAG_MMIO, 201 ATA_FLAG_MMIO,
203 .pio_mask = 0x1f, /* pio0-4 */ 202 .pio_mask = 0x1f, /* pio0-4 */
204 .mwdma_mask = 0x07, /* mwdma0-2 */ 203 .mwdma_mask = 0x07, /* mwdma0-2 */
205 .udma_mask = 0x3f, /* udma0-5 */ 204 .udma_mask = 0x3f, /* udma0-5 */
@@ -337,22 +336,13 @@ static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
337static void sil_dev_config(struct ata_port *ap, struct ata_device *dev) 336static void sil_dev_config(struct ata_port *ap, struct ata_device *dev)
338{ 337{
339 unsigned int n, quirks = 0; 338 unsigned int n, quirks = 0;
340 unsigned char model_num[40]; 339 unsigned char model_num[41];
341 const char *s;
342 unsigned int len;
343 340
344 ata_dev_id_string(dev->id, model_num, ATA_ID_PROD_OFS, 341 ata_dev_id_c_string(dev->id, model_num, ATA_ID_PROD_OFS,
345 sizeof(model_num)); 342 sizeof(model_num));
346 s = &model_num[0];
347 len = strnlen(s, sizeof(model_num));
348
349 /* ATAPI specifies that empty space is blank-filled; remove blanks */
350 while ((len > 0) && (s[len - 1] == ' '))
351 len--;
352 343
353 for (n = 0; sil_blacklist[n].product; n++) 344 for (n = 0; sil_blacklist[n].product; n++)
354 if (!memcmp(sil_blacklist[n].product, s, 345 if (!strcmp(sil_blacklist[n].product, model_num)) {
355 strlen(sil_blacklist[n].product))) {
356 quirks = sil_blacklist[n].quirk; 346 quirks = sil_blacklist[n].quirk;
357 break; 347 break;
358 } 348 }
@@ -363,16 +353,14 @@ static void sil_dev_config(struct ata_port *ap, struct ata_device *dev)
363 (quirks & SIL_QUIRK_MOD15WRITE))) { 353 (quirks & SIL_QUIRK_MOD15WRITE))) {
364 printk(KERN_INFO "ata%u(%u): applying Seagate errata fix (mod15write workaround)\n", 354 printk(KERN_INFO "ata%u(%u): applying Seagate errata fix (mod15write workaround)\n",
365 ap->id, dev->devno); 355 ap->id, dev->devno);
366 ap->host->max_sectors = 15; 356 dev->max_sectors = 15;
367 ap->host->hostt->max_sectors = 15;
368 dev->flags |= ATA_DFLAG_LOCK_SECTORS;
369 return; 357 return;
370 } 358 }
371 359
372 /* limit to udma5 */ 360 /* limit to udma5 */
373 if (quirks & SIL_QUIRK_UDMA5MAX) { 361 if (quirks & SIL_QUIRK_UDMA5MAX) {
374 printk(KERN_INFO "ata%u(%u): applying Maxtor errata fix %s\n", 362 printk(KERN_INFO "ata%u(%u): applying Maxtor errata fix %s\n",
375 ap->id, dev->devno, s); 363 ap->id, dev->devno, model_num);
376 ap->udma_mask &= ATA_UDMA5; 364 ap->udma_mask &= ATA_UDMA5;
377 return; 365 return;
378 } 366 }
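The switch to ata_dev_id_c_string() means the helper now hands back a blank-trimmed, NUL-terminated product string, so the open-coded strnlen/trim loop and memcmp prefix matching collapse into a plain strcmp(); note the buffer grows to 41 bytes to leave room for the terminator. A user-space model of the trimming contract:

#include <stdio.h>
#include <string.h>

/* model of the helper's contract: copy, trim trailing blanks, terminate */
static void id_c_string(const char *raw, char *dst, size_t len)
{
	size_t n;

	snprintf(dst, len, "%s", raw);
	n = strlen(dst);
	while (n > 0 && dst[n - 1] == ' ')
		dst[--n] = '\0';
}

int main(void)
{
	char model[41];	/* 40 ID bytes + NUL */

	id_c_string("ST320012AS              ", model, sizeof(model));
	printf("match=%d\n", strcmp(model, "ST320012AS") == 0);
	return 0;
}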
diff --git a/drivers/scsi/sata_sil24.c b/drivers/scsi/sata_sil24.c
index 923130185a9e..a0e35a262156 100644
--- a/drivers/scsi/sata_sil24.c
+++ b/drivers/scsi/sata_sil24.c
@@ -249,9 +249,9 @@ static u8 sil24_check_status(struct ata_port *ap);
249static u32 sil24_scr_read(struct ata_port *ap, unsigned sc_reg); 249static u32 sil24_scr_read(struct ata_port *ap, unsigned sc_reg);
250static void sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val); 250static void sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val);
251static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf); 251static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
252static void sil24_phy_reset(struct ata_port *ap); 252static int sil24_probe_reset(struct ata_port *ap, unsigned int *classes);
253static void sil24_qc_prep(struct ata_queued_cmd *qc); 253static void sil24_qc_prep(struct ata_queued_cmd *qc);
254static int sil24_qc_issue(struct ata_queued_cmd *qc); 254static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc);
255static void sil24_irq_clear(struct ata_port *ap); 255static void sil24_irq_clear(struct ata_port *ap);
256static void sil24_eng_timeout(struct ata_port *ap); 256static void sil24_eng_timeout(struct ata_port *ap);
257static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *regs); 257static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
@@ -280,11 +280,11 @@ static struct scsi_host_template sil24_sht = {
280 .name = DRV_NAME, 280 .name = DRV_NAME,
281 .ioctl = ata_scsi_ioctl, 281 .ioctl = ata_scsi_ioctl,
282 .queuecommand = ata_scsi_queuecmd, 282 .queuecommand = ata_scsi_queuecmd,
283 .eh_timed_out = ata_scsi_timed_out,
283 .eh_strategy_handler = ata_scsi_error, 284 .eh_strategy_handler = ata_scsi_error,
284 .can_queue = ATA_DEF_QUEUE, 285 .can_queue = ATA_DEF_QUEUE,
285 .this_id = ATA_SHT_THIS_ID, 286 .this_id = ATA_SHT_THIS_ID,
286 .sg_tablesize = LIBATA_MAX_PRD, 287 .sg_tablesize = LIBATA_MAX_PRD,
287 .max_sectors = ATA_MAX_SECTORS,
288 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 288 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
289 .emulated = ATA_SHT_EMULATED, 289 .emulated = ATA_SHT_EMULATED,
290 .use_clustering = ATA_SHT_USE_CLUSTERING, 290 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -305,7 +305,7 @@ static const struct ata_port_operations sil24_ops = {
305 305
306 .tf_read = sil24_tf_read, 306 .tf_read = sil24_tf_read,
307 307
308 .phy_reset = sil24_phy_reset, 308 .probe_reset = sil24_probe_reset,
309 309
310 .qc_prep = sil24_qc_prep, 310 .qc_prep = sil24_qc_prep,
311 .qc_issue = sil24_qc_issue, 311 .qc_issue = sil24_qc_issue,
@@ -335,8 +335,8 @@ static struct ata_port_info sil24_port_info[] = {
335 { 335 {
336 .sht = &sil24_sht, 336 .sht = &sil24_sht,
337 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 337 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
338 ATA_FLAG_SRST | ATA_FLAG_MMIO | 338 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
339 ATA_FLAG_PIO_DMA | SIL24_NPORTS2FLAG(4), 339 SIL24_NPORTS2FLAG(4),
340 .pio_mask = 0x1f, /* pio0-4 */ 340 .pio_mask = 0x1f, /* pio0-4 */
341 .mwdma_mask = 0x07, /* mwdma0-2 */ 341 .mwdma_mask = 0x07, /* mwdma0-2 */
342 .udma_mask = 0x3f, /* udma0-5 */ 342 .udma_mask = 0x3f, /* udma0-5 */
@@ -346,8 +346,8 @@ static struct ata_port_info sil24_port_info[] = {
346 { 346 {
347 .sht = &sil24_sht, 347 .sht = &sil24_sht,
348 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 348 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
349 ATA_FLAG_SRST | ATA_FLAG_MMIO | 349 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
350 ATA_FLAG_PIO_DMA | SIL24_NPORTS2FLAG(2), 350 SIL24_NPORTS2FLAG(2),
351 .pio_mask = 0x1f, /* pio0-4 */ 351 .pio_mask = 0x1f, /* pio0-4 */
352 .mwdma_mask = 0x07, /* mwdma0-2 */ 352 .mwdma_mask = 0x07, /* mwdma0-2 */
353 .udma_mask = 0x3f, /* udma0-5 */ 353 .udma_mask = 0x3f, /* udma0-5 */
@@ -357,8 +357,8 @@ static struct ata_port_info sil24_port_info[] = {
357 { 357 {
358 .sht = &sil24_sht, 358 .sht = &sil24_sht,
359 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 359 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
360 ATA_FLAG_SRST | ATA_FLAG_MMIO | 360 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
361 ATA_FLAG_PIO_DMA | SIL24_NPORTS2FLAG(1), 361 SIL24_NPORTS2FLAG(1),
362 .pio_mask = 0x1f, /* pio0-4 */ 362 .pio_mask = 0x1f, /* pio0-4 */
363 .mwdma_mask = 0x07, /* mwdma0-2 */ 363 .mwdma_mask = 0x07, /* mwdma0-2 */
364 .udma_mask = 0x3f, /* udma0-5 */ 364 .udma_mask = 0x3f, /* udma0-5 */
@@ -370,7 +370,7 @@ static void sil24_dev_config(struct ata_port *ap, struct ata_device *dev)
370{ 370{
371 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr; 371 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
372 372
373 if (ap->cdb_len == 16) 373 if (dev->cdb_len == 16)
374 writel(PORT_CS_CDB16, port + PORT_CTRL_STAT); 374 writel(PORT_CS_CDB16, port + PORT_CTRL_STAT);
375 else 375 else
376 writel(PORT_CS_CDB16, port + PORT_CTRL_CLR); 376 writel(PORT_CS_CDB16, port + PORT_CTRL_CLR);
@@ -427,7 +427,8 @@ static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
427 *tf = pp->tf; 427 *tf = pp->tf;
428} 428}
429 429
430static int sil24_issue_SRST(struct ata_port *ap) 430static int sil24_softreset(struct ata_port *ap, int verbose,
431 unsigned int *class)
431{ 432{
432 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr; 433 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
433 struct sil24_port_priv *pp = ap->private_data; 434 struct sil24_port_priv *pp = ap->private_data;
@@ -436,6 +437,8 @@ static int sil24_issue_SRST(struct ata_port *ap)
436 u32 irq_enable, irq_stat; 437 u32 irq_enable, irq_stat;
437 int cnt; 438 int cnt;
438 439
440 DPRINTK("ENTER\n");
441
439 /* temporarily turn off IRQs during SRST */ 442 /* temporarily turn off IRQs during SRST */
440 irq_enable = readl(port + PORT_IRQ_ENABLE_SET); 443 irq_enable = readl(port + PORT_IRQ_ENABLE_SET);
441 writel(irq_enable, port + PORT_IRQ_ENABLE_CLR); 444 writel(irq_enable, port + PORT_IRQ_ENABLE_CLR);
@@ -465,30 +468,36 @@ static int sil24_issue_SRST(struct ata_port *ap)
465 /* restore IRQs */ 468 /* restore IRQs */
466 writel(irq_enable, port + PORT_IRQ_ENABLE_SET); 469 writel(irq_enable, port + PORT_IRQ_ENABLE_SET);
467 470
468 if (!(irq_stat & PORT_IRQ_COMPLETE)) 471 if (sata_dev_present(ap)) {
469 return -1; 472 if (!(irq_stat & PORT_IRQ_COMPLETE)) {
473 DPRINTK("EXIT, srst failed\n");
474 return -EIO;
475 }
470 476
471 /* update TF */ 477 sil24_update_tf(ap);
472 sil24_update_tf(ap); 478 *class = ata_dev_classify(&pp->tf);
479 }
480 if (*class == ATA_DEV_UNKNOWN)
481 *class = ATA_DEV_NONE;
482
483 DPRINTK("EXIT, class=%u\n", *class);
473 return 0; 484 return 0;
474} 485}
475 486
476static void sil24_phy_reset(struct ata_port *ap) 487static int sil24_hardreset(struct ata_port *ap, int verbose,
488 unsigned int *class)
477{ 489{
478 struct sil24_port_priv *pp = ap->private_data; 490 unsigned int dummy_class;
479
480 __sata_phy_reset(ap);
481 if (ap->flags & ATA_FLAG_PORT_DISABLED)
482 return;
483 491
484 if (sil24_issue_SRST(ap) < 0) { 492 /* sil24 doesn't report device signature after hard reset */
485 printk(KERN_ERR DRV_NAME 493 return sata_std_hardreset(ap, verbose, &dummy_class);
486 " ata%u: SRST failed, disabling port\n", ap->id); 494}
487 ap->ops->port_disable(ap);
488 return;
489 }
490 495
491 ap->device->class = ata_dev_classify(&pp->tf); 496static int sil24_probe_reset(struct ata_port *ap, unsigned int *classes)
497{
498 return ata_drive_probe_reset(ap, ata_std_probeinit,
499 sil24_softreset, sil24_hardreset,
500 ata_std_postreset, classes);
492} 501}
493 502
494static inline void sil24_fill_sg(struct ata_queued_cmd *qc, 503static inline void sil24_fill_sg(struct ata_queued_cmd *qc,
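The monolithic ->phy_reset gives way to the new probe_reset framework: the driver supplies soft- and hard-reset steps and ata_drive_probe_reset() sequences them with the standard probeinit/postreset hooks, storing the resulting device classes. The wiring, condensed from the hunk above (not standalone code):

static int my_probe_reset(struct ata_port *ap, unsigned int *classes)
{
	return ata_drive_probe_reset(ap, ata_std_probeinit,
				     my_softreset,	/* controller SRST */
				     my_hardreset,	/* wraps sata_std_hardreset */
				     ata_std_postreset, classes);
}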
@@ -533,7 +542,7 @@ static void sil24_qc_prep(struct ata_queued_cmd *qc)
533 prb = &cb->atapi.prb; 542 prb = &cb->atapi.prb;
534 sge = cb->atapi.sge; 543 sge = cb->atapi.sge;
535 memset(cb->atapi.cdb, 0, 32); 544 memset(cb->atapi.cdb, 0, 32);
536 memcpy(cb->atapi.cdb, qc->cdb, ap->cdb_len); 545 memcpy(cb->atapi.cdb, qc->cdb, qc->dev->cdb_len);
537 546
538 if (qc->tf.protocol != ATA_PROT_ATAPI_NODATA) { 547 if (qc->tf.protocol != ATA_PROT_ATAPI_NODATA) {
539 if (qc->tf.flags & ATA_TFLAG_WRITE) 548 if (qc->tf.flags & ATA_TFLAG_WRITE)
@@ -557,7 +566,7 @@ static void sil24_qc_prep(struct ata_queued_cmd *qc)
557 sil24_fill_sg(qc, sge); 566 sil24_fill_sg(qc, sge);
558} 567}
559 568
560static int sil24_qc_issue(struct ata_queued_cmd *qc) 569static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc)
561{ 570{
562 struct ata_port *ap = qc->ap; 571 struct ata_port *ap = qc->ap;
563 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr; 572 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
@@ -638,23 +647,10 @@ static void sil24_eng_timeout(struct ata_port *ap)
638 struct ata_queued_cmd *qc; 647 struct ata_queued_cmd *qc;
639 648
640 qc = ata_qc_from_tag(ap, ap->active_tag); 649 qc = ata_qc_from_tag(ap, ap->active_tag);
641 if (!qc) {
642 printk(KERN_ERR "ata%u: BUG: timeout without command\n",
643 ap->id);
644 return;
645 }
646 650
647 /*
648 * hack alert! We cannot use the supplied completion
649 * function from inside the ->eh_strategy_handler() thread.
650 * libata is the only user of ->eh_strategy_handler() in
651 * any kernel, so the default scsi_done() assumes it is
652 * not being called from the SCSI EH.
653 */
654 printk(KERN_ERR "ata%u: command timeout\n", ap->id); 651 printk(KERN_ERR "ata%u: command timeout\n", ap->id);
655 qc->scsidone = scsi_finish_command; 652 qc->err_mask |= AC_ERR_TIMEOUT;
656 qc->err_mask |= AC_ERR_OTHER; 653 ata_eh_qc_complete(qc);
657 ata_qc_complete(qc);
658 654
659 sil24_reset_controller(ap); 655 sil24_reset_controller(ap);
660} 656}
diff --git a/drivers/scsi/sata_sis.c b/drivers/scsi/sata_sis.c
index 2df8c5632ac3..7fd45f86de99 100644
--- a/drivers/scsi/sata_sis.c
+++ b/drivers/scsi/sata_sis.c
@@ -87,11 +87,11 @@ static struct scsi_host_template sis_sht = {
87 .name = DRV_NAME, 87 .name = DRV_NAME,
88 .ioctl = ata_scsi_ioctl, 88 .ioctl = ata_scsi_ioctl,
89 .queuecommand = ata_scsi_queuecmd, 89 .queuecommand = ata_scsi_queuecmd,
90 .eh_timed_out = ata_scsi_timed_out,
90 .eh_strategy_handler = ata_scsi_error, 91 .eh_strategy_handler = ata_scsi_error,
91 .can_queue = ATA_DEF_QUEUE, 92 .can_queue = ATA_DEF_QUEUE,
92 .this_id = ATA_SHT_THIS_ID, 93 .this_id = ATA_SHT_THIS_ID,
93 .sg_tablesize = ATA_MAX_PRD, 94 .sg_tablesize = ATA_MAX_PRD,
94 .max_sectors = ATA_MAX_SECTORS,
95 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 95 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
96 .emulated = ATA_SHT_EMULATED, 96 .emulated = ATA_SHT_EMULATED,
97 .use_clustering = ATA_SHT_USE_CLUSTERING, 97 .use_clustering = ATA_SHT_USE_CLUSTERING,
diff --git a/drivers/scsi/sata_svw.c b/drivers/scsi/sata_svw.c
index d8472563fde8..4aaccd53e736 100644
--- a/drivers/scsi/sata_svw.c
+++ b/drivers/scsi/sata_svw.c
@@ -288,11 +288,11 @@ static struct scsi_host_template k2_sata_sht = {
288 .name = DRV_NAME, 288 .name = DRV_NAME,
289 .ioctl = ata_scsi_ioctl, 289 .ioctl = ata_scsi_ioctl,
290 .queuecommand = ata_scsi_queuecmd, 290 .queuecommand = ata_scsi_queuecmd,
291 .eh_timed_out = ata_scsi_timed_out,
291 .eh_strategy_handler = ata_scsi_error, 292 .eh_strategy_handler = ata_scsi_error,
292 .can_queue = ATA_DEF_QUEUE, 293 .can_queue = ATA_DEF_QUEUE,
293 .this_id = ATA_SHT_THIS_ID, 294 .this_id = ATA_SHT_THIS_ID,
294 .sg_tablesize = LIBATA_MAX_PRD, 295 .sg_tablesize = LIBATA_MAX_PRD,
295 .max_sectors = ATA_MAX_SECTORS,
296 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 296 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
297 .emulated = ATA_SHT_EMULATED, 297 .emulated = ATA_SHT_EMULATED,
298 .use_clustering = ATA_SHT_USE_CLUSTERING, 298 .use_clustering = ATA_SHT_USE_CLUSTERING,
diff --git a/drivers/scsi/sata_sx4.c b/drivers/scsi/sata_sx4.c
index bc87c16c80d2..9f8a76815402 100644
--- a/drivers/scsi/sata_sx4.c
+++ b/drivers/scsi/sata_sx4.c
@@ -174,7 +174,7 @@ static void pdc20621_get_from_dimm(struct ata_probe_ent *pe,
174static void pdc20621_put_to_dimm(struct ata_probe_ent *pe, 174static void pdc20621_put_to_dimm(struct ata_probe_ent *pe,
175 void *psource, u32 offset, u32 size); 175 void *psource, u32 offset, u32 size);
176static void pdc20621_irq_clear(struct ata_port *ap); 176static void pdc20621_irq_clear(struct ata_port *ap);
177static int pdc20621_qc_issue_prot(struct ata_queued_cmd *qc); 177static unsigned int pdc20621_qc_issue_prot(struct ata_queued_cmd *qc);
178 178
179 179
180static struct scsi_host_template pdc_sata_sht = { 180static struct scsi_host_template pdc_sata_sht = {
@@ -182,11 +182,11 @@ static struct scsi_host_template pdc_sata_sht = {
182 .name = DRV_NAME, 182 .name = DRV_NAME,
183 .ioctl = ata_scsi_ioctl, 183 .ioctl = ata_scsi_ioctl,
184 .queuecommand = ata_scsi_queuecmd, 184 .queuecommand = ata_scsi_queuecmd,
185 .eh_timed_out = ata_scsi_timed_out,
185 .eh_strategy_handler = ata_scsi_error, 186 .eh_strategy_handler = ata_scsi_error,
186 .can_queue = ATA_DEF_QUEUE, 187 .can_queue = ATA_DEF_QUEUE,
187 .this_id = ATA_SHT_THIS_ID, 188 .this_id = ATA_SHT_THIS_ID,
188 .sg_tablesize = LIBATA_MAX_PRD, 189 .sg_tablesize = LIBATA_MAX_PRD,
189 .max_sectors = ATA_MAX_SECTORS,
190 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 190 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
191 .emulated = ATA_SHT_EMULATED, 191 .emulated = ATA_SHT_EMULATED,
192 .use_clustering = ATA_SHT_USE_CLUSTERING, 192 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -460,7 +460,7 @@ static void pdc20621_dma_prep(struct ata_queued_cmd *qc)
460 unsigned int i, idx, total_len = 0, sgt_len; 460 unsigned int i, idx, total_len = 0, sgt_len;
461 u32 *buf = (u32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ]; 461 u32 *buf = (u32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ];
462 462
463 assert(qc->flags & ATA_QCFLAG_DMAMAP); 463 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
464 464
465 VPRINTK("ata%u: ENTER\n", ap->id); 465 VPRINTK("ata%u: ENTER\n", ap->id);
466 466
@@ -678,7 +678,7 @@ static void pdc20621_packet_start(struct ata_queued_cmd *qc)
678 } 678 }
679} 679}
680 680
681static int pdc20621_qc_issue_prot(struct ata_queued_cmd *qc) 681static unsigned int pdc20621_qc_issue_prot(struct ata_queued_cmd *qc)
682{ 682{
683 switch (qc->tf.protocol) { 683 switch (qc->tf.protocol) {
684 case ATA_PROT_DMA: 684 case ATA_PROT_DMA:
@@ -866,26 +866,12 @@ static void pdc_eng_timeout(struct ata_port *ap)
866 spin_lock_irqsave(&host_set->lock, flags); 866 spin_lock_irqsave(&host_set->lock, flags);
867 867
868 qc = ata_qc_from_tag(ap, ap->active_tag); 868 qc = ata_qc_from_tag(ap, ap->active_tag);
869 if (!qc) {
870 printk(KERN_ERR "ata%u: BUG: timeout without command\n",
871 ap->id);
872 goto out;
873 }
874
875 /* hack alert! We cannot use the supplied completion
876 * function from inside the ->eh_strategy_handler() thread.
877 * libata is the only user of ->eh_strategy_handler() in
878 * any kernel, so the default scsi_done() assumes it is
879 * not being called from the SCSI EH.
880 */
881 qc->scsidone = scsi_finish_command;
882 869
883 switch (qc->tf.protocol) { 870 switch (qc->tf.protocol) {
884 case ATA_PROT_DMA: 871 case ATA_PROT_DMA:
885 case ATA_PROT_NODATA: 872 case ATA_PROT_NODATA:
886 printk(KERN_ERR "ata%u: command timeout\n", ap->id); 873 printk(KERN_ERR "ata%u: command timeout\n", ap->id);
887 qc->err_mask |= __ac_err_mask(ata_wait_idle(ap)); 874 qc->err_mask |= __ac_err_mask(ata_wait_idle(ap));
888 ata_qc_complete(qc);
889 break; 875 break;
890 876
891 default: 877 default:
@@ -895,12 +881,11 @@ static void pdc_eng_timeout(struct ata_port *ap)
895 ap->id, qc->tf.command, drv_stat); 881 ap->id, qc->tf.command, drv_stat);
896 882
897 qc->err_mask |= ac_err_mask(drv_stat); 883 qc->err_mask |= ac_err_mask(drv_stat);
898 ata_qc_complete(qc);
899 break; 884 break;
900 } 885 }
901 886
902out:
903 spin_unlock_irqrestore(&host_set->lock, flags); 887 spin_unlock_irqrestore(&host_set->lock, flags);
888 ata_eh_qc_complete(qc);
904 DPRINTK("EXIT\n"); 889 DPRINTK("EXIT\n");
905} 890}
906 891
diff --git a/drivers/scsi/sata_uli.c b/drivers/scsi/sata_uli.c
index 9635ca700977..37a487b7d655 100644
--- a/drivers/scsi/sata_uli.c
+++ b/drivers/scsi/sata_uli.c
@@ -75,11 +75,11 @@ static struct scsi_host_template uli_sht = {
75 .name = DRV_NAME, 75 .name = DRV_NAME,
76 .ioctl = ata_scsi_ioctl, 76 .ioctl = ata_scsi_ioctl,
77 .queuecommand = ata_scsi_queuecmd, 77 .queuecommand = ata_scsi_queuecmd,
78 .eh_timed_out = ata_scsi_timed_out,
78 .eh_strategy_handler = ata_scsi_error, 79 .eh_strategy_handler = ata_scsi_error,
79 .can_queue = ATA_DEF_QUEUE, 80 .can_queue = ATA_DEF_QUEUE,
80 .this_id = ATA_SHT_THIS_ID, 81 .this_id = ATA_SHT_THIS_ID,
81 .sg_tablesize = LIBATA_MAX_PRD, 82 .sg_tablesize = LIBATA_MAX_PRD,
82 .max_sectors = ATA_MAX_SECTORS,
83 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 83 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
84 .emulated = ATA_SHT_EMULATED, 84 .emulated = ATA_SHT_EMULATED,
85 .use_clustering = ATA_SHT_USE_CLUSTERING, 85 .use_clustering = ATA_SHT_USE_CLUSTERING,
diff --git a/drivers/scsi/sata_via.c b/drivers/scsi/sata_via.c
index 6d5b0a794cfd..ff65a0b0457f 100644
--- a/drivers/scsi/sata_via.c
+++ b/drivers/scsi/sata_via.c
@@ -94,11 +94,11 @@ static struct scsi_host_template svia_sht = {
94 .name = DRV_NAME, 94 .name = DRV_NAME,
95 .ioctl = ata_scsi_ioctl, 95 .ioctl = ata_scsi_ioctl,
96 .queuecommand = ata_scsi_queuecmd, 96 .queuecommand = ata_scsi_queuecmd,
97 .eh_timed_out = ata_scsi_timed_out,
97 .eh_strategy_handler = ata_scsi_error, 98 .eh_strategy_handler = ata_scsi_error,
98 .can_queue = ATA_DEF_QUEUE, 99 .can_queue = ATA_DEF_QUEUE,
99 .this_id = ATA_SHT_THIS_ID, 100 .this_id = ATA_SHT_THIS_ID,
100 .sg_tablesize = LIBATA_MAX_PRD, 101 .sg_tablesize = LIBATA_MAX_PRD,
101 .max_sectors = ATA_MAX_SECTORS,
102 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 102 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
103 .emulated = ATA_SHT_EMULATED, 103 .emulated = ATA_SHT_EMULATED,
104 .use_clustering = ATA_SHT_USE_CLUSTERING, 104 .use_clustering = ATA_SHT_USE_CLUSTERING,
diff --git a/drivers/scsi/sata_vsc.c b/drivers/scsi/sata_vsc.c
index e484e8db6810..b574379a7a82 100644
--- a/drivers/scsi/sata_vsc.c
+++ b/drivers/scsi/sata_vsc.c
@@ -251,11 +251,11 @@ static struct scsi_host_template vsc_sata_sht = {
251 .name = DRV_NAME, 251 .name = DRV_NAME,
252 .ioctl = ata_scsi_ioctl, 252 .ioctl = ata_scsi_ioctl,
253 .queuecommand = ata_scsi_queuecmd, 253 .queuecommand = ata_scsi_queuecmd,
254 .eh_timed_out = ata_scsi_timed_out,
254 .eh_strategy_handler = ata_scsi_error, 255 .eh_strategy_handler = ata_scsi_error,
255 .can_queue = ATA_DEF_QUEUE, 256 .can_queue = ATA_DEF_QUEUE,
256 .this_id = ATA_SHT_THIS_ID, 257 .this_id = ATA_SHT_THIS_ID,
257 .sg_tablesize = LIBATA_MAX_PRD, 258 .sg_tablesize = LIBATA_MAX_PRD,
258 .max_sectors = ATA_MAX_SECTORS,
259 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 259 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
260 .emulated = ATA_SHT_EMULATED, 260 .emulated = ATA_SHT_EMULATED,
261 .use_clustering = ATA_SHT_USE_CLUSTERING, 261 .use_clustering = ATA_SHT_USE_CLUSTERING,
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 5cc97b721661..50f8057be75d 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -584,8 +584,7 @@ static int scsi_request_sense(struct scsi_cmnd *scmd)
584 * keep a list of pending commands for final completion, and once we 584 * keep a list of pending commands for final completion, and once we
585 * are ready to leave error handling we handle completion for real. 585 * are ready to leave error handling we handle completion for real.
586 **/ 586 **/
587static void scsi_eh_finish_cmd(struct scsi_cmnd *scmd, 587void scsi_eh_finish_cmd(struct scsi_cmnd *scmd, struct list_head *done_q)
588 struct list_head *done_q)
589{ 588{
590 scmd->device->host->host_failed--; 589 scmd->device->host->host_failed--;
591 scmd->eh_eflags = 0; 590 scmd->eh_eflags = 0;
@@ -597,6 +596,7 @@ static void scsi_eh_finish_cmd(struct scsi_cmnd *scmd,
597 scsi_setup_cmd_retry(scmd); 596 scsi_setup_cmd_retry(scmd);
598 list_move_tail(&scmd->eh_entry, done_q); 597 list_move_tail(&scmd->eh_entry, done_q);
599} 598}
599EXPORT_SYMBOL(scsi_eh_finish_cmd);
600 600
601/** 601/**
602 * scsi_eh_get_sense - Get device sense data. 602 * scsi_eh_get_sense - Get device sense data.
@@ -1425,7 +1425,7 @@ static void scsi_eh_ready_devs(struct Scsi_Host *shost,
1425 * @done_q: list_head of processed commands. 1425 * @done_q: list_head of processed commands.
1426 * 1426 *
1427 **/ 1427 **/
1428static void scsi_eh_flush_done_q(struct list_head *done_q) 1428void scsi_eh_flush_done_q(struct list_head *done_q)
1429{ 1429{
1430 struct scsi_cmnd *scmd, *next; 1430 struct scsi_cmnd *scmd, *next;
1431 1431
@@ -1454,6 +1454,7 @@ static void scsi_eh_flush_done_q(struct list_head *done_q)
1454 } 1454 }
1455 } 1455 }
1456} 1456}
1457EXPORT_SYMBOL(scsi_eh_flush_done_q);
1457 1458
1458/** 1459/**
1459 * scsi_unjam_host - Attempt to fix a host which has a cmd that failed. 1460 * scsi_unjam_host - Attempt to fix a host which has a cmd that failed.
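Un-staticing and exporting scsi_eh_finish_cmd()/scsi_eh_flush_done_q() lets libata's error handler reuse the midlayer's completion bookkeeping instead of reimplementing it. Typical consumer shape (condensed; only the two exported calls are the point here):

	LIST_HEAD(done_q);

	/* park each recovered command for final completion... */
	scsi_eh_finish_cmd(scmd, &done_q);

	/* ...then retry or complete everything in one pass */
	scsi_eh_flush_done_q(&done_q);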