aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--drivers/scsi/Makefile2
-rw-r--r--drivers/scsi/ahci.c196
-rw-r--r--drivers/scsi/ata_piix.c133
-rw-r--r--drivers/scsi/libata-bmdma.c703
-rw-r--r--drivers/scsi/libata-core.c1669
-rw-r--r--drivers/scsi/libata-scsi.c238
-rw-r--r--drivers/scsi/libata.h2
-rw-r--r--drivers/scsi/pdc_adma.c6
-rw-r--r--drivers/scsi/sata_mv.c279
-rw-r--r--drivers/scsi/sata_nv.c2
-rw-r--r--drivers/scsi/sata_promise.c127
-rw-r--r--drivers/scsi/sata_qstor.c10
-rw-r--r--drivers/scsi/sata_sil.c33
-rw-r--r--drivers/scsi/sata_sil24.c88
-rw-r--r--drivers/scsi/sata_sis.c2
-rw-r--r--drivers/scsi/sata_svw.c2
-rw-r--r--drivers/scsi/sata_sx4.c25
-rw-r--r--drivers/scsi/sata_uli.c2
-rw-r--r--drivers/scsi/sata_via.c2
-rw-r--r--drivers/scsi/sata_vsc.c2
-rw-r--r--drivers/scsi/scsi_error.c7
-rw-r--r--include/linux/ata.h22
-rw-r--r--include/linux/libata.h146
-rw-r--r--include/scsi/scsi_eh.h3
24 files changed, 2313 insertions, 1388 deletions
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 320e765fa0cd..15dc2e00e1b2 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -163,7 +163,7 @@ ncr53c8xx-flags-$(CONFIG_SCSI_ZALON) \
163CFLAGS_ncr53c8xx.o := $(ncr53c8xx-flags-y) $(ncr53c8xx-flags-m) 163CFLAGS_ncr53c8xx.o := $(ncr53c8xx-flags-y) $(ncr53c8xx-flags-m)
164zalon7xx-objs := zalon.o ncr53c8xx.o 164zalon7xx-objs := zalon.o ncr53c8xx.o
165NCR_Q720_mod-objs := NCR_Q720.o ncr53c8xx.o 165NCR_Q720_mod-objs := NCR_Q720.o ncr53c8xx.o
166libata-objs := libata-core.o libata-scsi.o 166libata-objs := libata-core.o libata-scsi.o libata-bmdma.o
167oktagon_esp_mod-objs := oktagon_esp.o oktagon_io.o 167oktagon_esp_mod-objs := oktagon_esp.o oktagon_io.o
168 168
169# Files generated that shall be removed upon make clean 169# Files generated that shall be removed upon make clean
diff --git a/drivers/scsi/ahci.c b/drivers/scsi/ahci.c
index a800fb51168b..1c2ab3dede71 100644
--- a/drivers/scsi/ahci.c
+++ b/drivers/scsi/ahci.c
@@ -66,6 +66,8 @@ enum {
66 AHCI_IRQ_ON_SG = (1 << 31), 66 AHCI_IRQ_ON_SG = (1 << 31),
67 AHCI_CMD_ATAPI = (1 << 5), 67 AHCI_CMD_ATAPI = (1 << 5),
68 AHCI_CMD_WRITE = (1 << 6), 68 AHCI_CMD_WRITE = (1 << 6),
69 AHCI_CMD_RESET = (1 << 8),
70 AHCI_CMD_CLR_BUSY = (1 << 10),
69 71
70 RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */ 72 RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */
71 73
@@ -85,6 +87,7 @@ enum {
85 87
86 /* HOST_CAP bits */ 88 /* HOST_CAP bits */
87 HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */ 89 HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */
90 HOST_CAP_CLO = (1 << 24), /* Command List Override support */
88 91
89 /* registers for each SATA port */ 92 /* registers for each SATA port */
90 PORT_LST_ADDR = 0x00, /* command list DMA addr */ 93 PORT_LST_ADDR = 0x00, /* command list DMA addr */
@@ -138,6 +141,7 @@ enum {
138 PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */ 141 PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */
139 PORT_CMD_FIS_ON = (1 << 14), /* FIS DMA engine running */ 142 PORT_CMD_FIS_ON = (1 << 14), /* FIS DMA engine running */
140 PORT_CMD_FIS_RX = (1 << 4), /* Enable FIS receive DMA engine */ 143 PORT_CMD_FIS_RX = (1 << 4), /* Enable FIS receive DMA engine */
144 PORT_CMD_CLO = (1 << 3), /* Command list override */
141 PORT_CMD_POWER_ON = (1 << 2), /* Power up device */ 145 PORT_CMD_POWER_ON = (1 << 2), /* Power up device */
142 PORT_CMD_SPIN_UP = (1 << 1), /* Spin up device */ 146 PORT_CMD_SPIN_UP = (1 << 1), /* Spin up device */
143 PORT_CMD_START = (1 << 0), /* Enable port DMA engine */ 147 PORT_CMD_START = (1 << 0), /* Enable port DMA engine */
@@ -184,9 +188,9 @@ struct ahci_port_priv {
184static u32 ahci_scr_read (struct ata_port *ap, unsigned int sc_reg); 188static u32 ahci_scr_read (struct ata_port *ap, unsigned int sc_reg);
185static void ahci_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); 189static void ahci_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
186static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); 190static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
187static int ahci_qc_issue(struct ata_queued_cmd *qc); 191static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
188static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *regs); 192static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *regs);
189static void ahci_phy_reset(struct ata_port *ap); 193static int ahci_probe_reset(struct ata_port *ap, unsigned int *classes);
190static void ahci_irq_clear(struct ata_port *ap); 194static void ahci_irq_clear(struct ata_port *ap);
191static void ahci_eng_timeout(struct ata_port *ap); 195static void ahci_eng_timeout(struct ata_port *ap);
192static int ahci_port_start(struct ata_port *ap); 196static int ahci_port_start(struct ata_port *ap);
@@ -202,11 +206,11 @@ static struct scsi_host_template ahci_sht = {
202 .name = DRV_NAME, 206 .name = DRV_NAME,
203 .ioctl = ata_scsi_ioctl, 207 .ioctl = ata_scsi_ioctl,
204 .queuecommand = ata_scsi_queuecmd, 208 .queuecommand = ata_scsi_queuecmd,
209 .eh_timed_out = ata_scsi_timed_out,
205 .eh_strategy_handler = ata_scsi_error, 210 .eh_strategy_handler = ata_scsi_error,
206 .can_queue = ATA_DEF_QUEUE, 211 .can_queue = ATA_DEF_QUEUE,
207 .this_id = ATA_SHT_THIS_ID, 212 .this_id = ATA_SHT_THIS_ID,
208 .sg_tablesize = AHCI_MAX_SG, 213 .sg_tablesize = AHCI_MAX_SG,
209 .max_sectors = ATA_MAX_SECTORS,
210 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 214 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
211 .emulated = ATA_SHT_EMULATED, 215 .emulated = ATA_SHT_EMULATED,
212 .use_clustering = AHCI_USE_CLUSTERING, 216 .use_clustering = AHCI_USE_CLUSTERING,
@@ -225,7 +229,7 @@ static const struct ata_port_operations ahci_ops = {
225 229
226 .tf_read = ahci_tf_read, 230 .tf_read = ahci_tf_read,
227 231
228 .phy_reset = ahci_phy_reset, 232 .probe_reset = ahci_probe_reset,
229 233
230 .qc_prep = ahci_qc_prep, 234 .qc_prep = ahci_qc_prep,
231 .qc_issue = ahci_qc_issue, 235 .qc_issue = ahci_qc_issue,
@@ -247,8 +251,7 @@ static const struct ata_port_info ahci_port_info[] = {
247 { 251 {
248 .sht = &ahci_sht, 252 .sht = &ahci_sht,
249 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 253 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
250 ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO | 254 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
251 ATA_FLAG_PIO_DMA,
252 .pio_mask = 0x1f, /* pio0-4 */ 255 .pio_mask = 0x1f, /* pio0-4 */
253 .udma_mask = 0x7f, /* udma0-6 ; FIXME */ 256 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
254 .port_ops = &ahci_ops, 257 .port_ops = &ahci_ops,
@@ -450,17 +453,48 @@ static void ahci_scr_write (struct ata_port *ap, unsigned int sc_reg_in,
450 writel(val, (void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4)); 453 writel(val, (void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
451} 454}
452 455
453static void ahci_phy_reset(struct ata_port *ap) 456static int ahci_stop_engine(struct ata_port *ap)
454{ 457{
455 void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr; 458 void __iomem *mmio = ap->host_set->mmio_base;
456 struct ata_taskfile tf; 459 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
457 struct ata_device *dev = &ap->device[0]; 460 int work;
458 u32 new_tmp, tmp; 461 u32 tmp;
459 462
460 __sata_phy_reset(ap); 463 tmp = readl(port_mmio + PORT_CMD);
464 tmp &= ~PORT_CMD_START;
465 writel(tmp, port_mmio + PORT_CMD);
461 466
462 if (ap->flags & ATA_FLAG_PORT_DISABLED) 467 /* wait for engine to stop. TODO: this could be
463 return; 468 * as long as 500 msec
469 */
470 work = 1000;
471 while (work-- > 0) {
472 tmp = readl(port_mmio + PORT_CMD);
473 if ((tmp & PORT_CMD_LIST_ON) == 0)
474 return 0;
475 udelay(10);
476 }
477
478 return -EIO;
479}
480
481static void ahci_start_engine(struct ata_port *ap)
482{
483 void __iomem *mmio = ap->host_set->mmio_base;
484 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
485 u32 tmp;
486
487 tmp = readl(port_mmio + PORT_CMD);
488 tmp |= PORT_CMD_START;
489 writel(tmp, port_mmio + PORT_CMD);
490 readl(port_mmio + PORT_CMD); /* flush */
491}
492
493static unsigned int ahci_dev_classify(struct ata_port *ap)
494{
495 void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
496 struct ata_taskfile tf;
497 u32 tmp;
464 498
465 tmp = readl(port_mmio + PORT_SIG); 499 tmp = readl(port_mmio + PORT_SIG);
466 tf.lbah = (tmp >> 24) & 0xff; 500 tf.lbah = (tmp >> 24) & 0xff;
@@ -468,15 +502,46 @@ static void ahci_phy_reset(struct ata_port *ap)
468 tf.lbal = (tmp >> 8) & 0xff; 502 tf.lbal = (tmp >> 8) & 0xff;
469 tf.nsect = (tmp) & 0xff; 503 tf.nsect = (tmp) & 0xff;
470 504
471 dev->class = ata_dev_classify(&tf); 505 return ata_dev_classify(&tf);
472 if (!ata_dev_present(dev)) { 506}
473 ata_port_disable(ap); 507
474 return; 508static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, u32 opts)
475 } 509{
510 pp->cmd_slot[0].opts = cpu_to_le32(opts);
511 pp->cmd_slot[0].status = 0;
512 pp->cmd_slot[0].tbl_addr = cpu_to_le32(pp->cmd_tbl_dma & 0xffffffff);
513 pp->cmd_slot[0].tbl_addr_hi = cpu_to_le32((pp->cmd_tbl_dma >> 16) >> 16);
514}
515
516static int ahci_hardreset(struct ata_port *ap, int verbose, unsigned int *class)
517{
518 int rc;
519
520 DPRINTK("ENTER\n");
521
522 ahci_stop_engine(ap);
523 rc = sata_std_hardreset(ap, verbose, class);
524 ahci_start_engine(ap);
525
526 if (rc == 0)
527 *class = ahci_dev_classify(ap);
528 if (*class == ATA_DEV_UNKNOWN)
529 *class = ATA_DEV_NONE;
530
531 DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
532 return rc;
533}
534
535static void ahci_postreset(struct ata_port *ap, unsigned int *class)
536{
537 void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
538 u32 new_tmp, tmp;
539
540 ata_std_postreset(ap, class);
476 541
477 /* Make sure port's ATAPI bit is set appropriately */ 542 /* Make sure port's ATAPI bit is set appropriately */
478 new_tmp = tmp = readl(port_mmio + PORT_CMD); 543 new_tmp = tmp = readl(port_mmio + PORT_CMD);
479 if (dev->class == ATA_DEV_ATAPI) 544 if (*class == ATA_DEV_ATAPI)
480 new_tmp |= PORT_CMD_ATAPI; 545 new_tmp |= PORT_CMD_ATAPI;
481 else 546 else
482 new_tmp &= ~PORT_CMD_ATAPI; 547 new_tmp &= ~PORT_CMD_ATAPI;
@@ -486,6 +551,12 @@ static void ahci_phy_reset(struct ata_port *ap)
486 } 551 }
487} 552}
488 553
554static int ahci_probe_reset(struct ata_port *ap, unsigned int *classes)
555{
556 return ata_drive_probe_reset(ap, NULL, NULL, ahci_hardreset,
557 ahci_postreset, classes);
558}
559
489static u8 ahci_check_status(struct ata_port *ap) 560static u8 ahci_check_status(struct ata_port *ap)
490{ 561{
491 void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr; 562 void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr;
@@ -533,42 +604,36 @@ static void ahci_qc_prep(struct ata_queued_cmd *qc)
533{ 604{
534 struct ata_port *ap = qc->ap; 605 struct ata_port *ap = qc->ap;
535 struct ahci_port_priv *pp = ap->private_data; 606 struct ahci_port_priv *pp = ap->private_data;
607 int is_atapi = is_atapi_taskfile(&qc->tf);
536 u32 opts; 608 u32 opts;
537 const u32 cmd_fis_len = 5; /* five dwords */ 609 const u32 cmd_fis_len = 5; /* five dwords */
538 unsigned int n_elem; 610 unsigned int n_elem;
539 611
540 /* 612 /*
541 * Fill in command slot information (currently only one slot,
542 * slot 0, is currently since we don't do queueing)
543 */
544
545 opts = cmd_fis_len;
546 if (qc->tf.flags & ATA_TFLAG_WRITE)
547 opts |= AHCI_CMD_WRITE;
548 if (is_atapi_taskfile(&qc->tf))
549 opts |= AHCI_CMD_ATAPI;
550
551 pp->cmd_slot[0].opts = cpu_to_le32(opts);
552 pp->cmd_slot[0].status = 0;
553 pp->cmd_slot[0].tbl_addr = cpu_to_le32(pp->cmd_tbl_dma & 0xffffffff);
554 pp->cmd_slot[0].tbl_addr_hi = cpu_to_le32((pp->cmd_tbl_dma >> 16) >> 16);
555
556 /*
557 * Fill in command table information. First, the header, 613 * Fill in command table information. First, the header,
558 * a SATA Register - Host to Device command FIS. 614 * a SATA Register - Host to Device command FIS.
559 */ 615 */
560 ata_tf_to_fis(&qc->tf, pp->cmd_tbl, 0); 616 ata_tf_to_fis(&qc->tf, pp->cmd_tbl, 0);
561 if (opts & AHCI_CMD_ATAPI) { 617 if (is_atapi) {
562 memset(pp->cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32); 618 memset(pp->cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
563 memcpy(pp->cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, ap->cdb_len); 619 memcpy(pp->cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb,
620 qc->dev->cdb_len);
564 } 621 }
565 622
566 if (!(qc->flags & ATA_QCFLAG_DMAMAP)) 623 n_elem = 0;
567 return; 624 if (qc->flags & ATA_QCFLAG_DMAMAP)
625 n_elem = ahci_fill_sg(qc);
568 626
569 n_elem = ahci_fill_sg(qc); 627 /*
628 * Fill in command slot information.
629 */
630 opts = cmd_fis_len | n_elem << 16;
631 if (qc->tf.flags & ATA_TFLAG_WRITE)
632 opts |= AHCI_CMD_WRITE;
633 if (is_atapi)
634 opts |= AHCI_CMD_ATAPI;
570 635
571 pp->cmd_slot[0].opts |= cpu_to_le32(n_elem << 16); 636 ahci_fill_cmd_slot(pp, opts);
572} 637}
573 638
574static void ahci_restart_port(struct ata_port *ap, u32 irq_stat) 639static void ahci_restart_port(struct ata_port *ap, u32 irq_stat)
@@ -576,7 +641,6 @@ static void ahci_restart_port(struct ata_port *ap, u32 irq_stat)
576 void __iomem *mmio = ap->host_set->mmio_base; 641 void __iomem *mmio = ap->host_set->mmio_base;
577 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no); 642 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
578 u32 tmp; 643 u32 tmp;
579 int work;
580 644
581 if ((ap->device[0].class != ATA_DEV_ATAPI) || 645 if ((ap->device[0].class != ATA_DEV_ATAPI) ||
582 ((irq_stat & PORT_IRQ_TF_ERR) == 0)) 646 ((irq_stat & PORT_IRQ_TF_ERR) == 0))
@@ -592,20 +656,7 @@ static void ahci_restart_port(struct ata_port *ap, u32 irq_stat)
592 readl(port_mmio + PORT_SCR_ERR)); 656 readl(port_mmio + PORT_SCR_ERR));
593 657
594 /* stop DMA */ 658 /* stop DMA */
595 tmp = readl(port_mmio + PORT_CMD); 659 ahci_stop_engine(ap);
596 tmp &= ~PORT_CMD_START;
597 writel(tmp, port_mmio + PORT_CMD);
598
599 /* wait for engine to stop. TODO: this could be
600 * as long as 500 msec
601 */
602 work = 1000;
603 while (work-- > 0) {
604 tmp = readl(port_mmio + PORT_CMD);
605 if ((tmp & PORT_CMD_LIST_ON) == 0)
606 break;
607 udelay(10);
608 }
609 660
610 /* clear SATA phy error, if any */ 661 /* clear SATA phy error, if any */
611 tmp = readl(port_mmio + PORT_SCR_ERR); 662 tmp = readl(port_mmio + PORT_SCR_ERR);
@@ -624,10 +675,7 @@ static void ahci_restart_port(struct ata_port *ap, u32 irq_stat)
624 } 675 }
625 676
626 /* re-start DMA */ 677 /* re-start DMA */
627 tmp = readl(port_mmio + PORT_CMD); 678 ahci_start_engine(ap);
628 tmp |= PORT_CMD_START;
629 writel(tmp, port_mmio + PORT_CMD);
630 readl(port_mmio + PORT_CMD); /* flush */
631} 679}
632 680
633static void ahci_eng_timeout(struct ata_port *ap) 681static void ahci_eng_timeout(struct ata_port *ap)
@@ -642,25 +690,13 @@ static void ahci_eng_timeout(struct ata_port *ap)
642 690
643 spin_lock_irqsave(&host_set->lock, flags); 691 spin_lock_irqsave(&host_set->lock, flags);
644 692
693 ahci_restart_port(ap, readl(port_mmio + PORT_IRQ_STAT));
645 qc = ata_qc_from_tag(ap, ap->active_tag); 694 qc = ata_qc_from_tag(ap, ap->active_tag);
646 if (!qc) { 695 qc->err_mask |= AC_ERR_TIMEOUT;
647 printk(KERN_ERR "ata%u: BUG: timeout without command\n",
648 ap->id);
649 } else {
650 ahci_restart_port(ap, readl(port_mmio + PORT_IRQ_STAT));
651
652 /* hack alert! We cannot use the supplied completion
653 * function from inside the ->eh_strategy_handler() thread.
654 * libata is the only user of ->eh_strategy_handler() in
655 * any kernel, so the default scsi_done() assumes it is
656 * not being called from the SCSI EH.
657 */
658 qc->scsidone = scsi_finish_command;
659 qc->err_mask |= AC_ERR_OTHER;
660 ata_qc_complete(qc);
661 }
662 696
663 spin_unlock_irqrestore(&host_set->lock, flags); 697 spin_unlock_irqrestore(&host_set->lock, flags);
698
699 ata_eh_qc_complete(qc);
664} 700}
665 701
666static inline int ahci_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc) 702static inline int ahci_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
@@ -678,7 +714,7 @@ static inline int ahci_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
678 ci = readl(port_mmio + PORT_CMD_ISSUE); 714 ci = readl(port_mmio + PORT_CMD_ISSUE);
679 if (likely((ci & 0x1) == 0)) { 715 if (likely((ci & 0x1) == 0)) {
680 if (qc) { 716 if (qc) {
681 assert(qc->err_mask == 0); 717 WARN_ON(qc->err_mask);
682 ata_qc_complete(qc); 718 ata_qc_complete(qc);
683 qc = NULL; 719 qc = NULL;
684 } 720 }
@@ -697,7 +733,7 @@ static inline int ahci_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
697 ahci_restart_port(ap, status); 733 ahci_restart_port(ap, status);
698 734
699 if (qc) { 735 if (qc) {
700 qc->err_mask |= AC_ERR_OTHER; 736 qc->err_mask |= err_mask;
701 ata_qc_complete(qc); 737 ata_qc_complete(qc);
702 } 738 }
703 } 739 }
@@ -776,7 +812,7 @@ static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *
776 return IRQ_RETVAL(handled); 812 return IRQ_RETVAL(handled);
777} 813}
778 814
779static int ahci_qc_issue(struct ata_queued_cmd *qc) 815static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
780{ 816{
781 struct ata_port *ap = qc->ap; 817 struct ata_port *ap = qc->ap;
782 void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr; 818 void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
diff --git a/drivers/scsi/ata_piix.c b/drivers/scsi/ata_piix.c
index fc3ca051ceed..c662bf531514 100644
--- a/drivers/scsi/ata_piix.c
+++ b/drivers/scsi/ata_piix.c
@@ -101,9 +101,11 @@ enum {
101 ICH5_PCS = 0x92, /* port control and status */ 101 ICH5_PCS = 0x92, /* port control and status */
102 PIIX_SCC = 0x0A, /* sub-class code register */ 102 PIIX_SCC = 0x0A, /* sub-class code register */
103 103
104 PIIX_FLAG_AHCI = (1 << 28), /* AHCI possible */ 104 PIIX_FLAG_AHCI = (1 << 27), /* AHCI possible */
105 PIIX_FLAG_CHECKINTR = (1 << 29), /* make sure PCI INTx enabled */ 105 PIIX_FLAG_CHECKINTR = (1 << 28), /* make sure PCI INTx enabled */
106 PIIX_FLAG_COMBINED = (1 << 30), /* combined mode possible */ 106 PIIX_FLAG_COMBINED = (1 << 29), /* combined mode possible */
107 /* ICH6/7 use different scheme for map value */
108 PIIX_FLAG_COMBINED_ICH6 = PIIX_FLAG_COMBINED | (1 << 30),
107 109
108 /* combined mode. if set, PATA is channel 0. 110 /* combined mode. if set, PATA is channel 0.
109 * if clear, PATA is channel 1. 111 * if clear, PATA is channel 1.
@@ -129,8 +131,8 @@ enum {
129static int piix_init_one (struct pci_dev *pdev, 131static int piix_init_one (struct pci_dev *pdev,
130 const struct pci_device_id *ent); 132 const struct pci_device_id *ent);
131 133
132static void piix_pata_phy_reset(struct ata_port *ap); 134static int piix_pata_probe_reset(struct ata_port *ap, unsigned int *classes);
133static void piix_sata_phy_reset(struct ata_port *ap); 135static int piix_sata_probe_reset(struct ata_port *ap, unsigned int *classes);
134static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev); 136static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev);
135static void piix_set_dmamode (struct ata_port *ap, struct ata_device *adev); 137static void piix_set_dmamode (struct ata_port *ap, struct ata_device *adev);
136 138
@@ -178,11 +180,11 @@ static struct scsi_host_template piix_sht = {
178 .name = DRV_NAME, 180 .name = DRV_NAME,
179 .ioctl = ata_scsi_ioctl, 181 .ioctl = ata_scsi_ioctl,
180 .queuecommand = ata_scsi_queuecmd, 182 .queuecommand = ata_scsi_queuecmd,
183 .eh_timed_out = ata_scsi_timed_out,
181 .eh_strategy_handler = ata_scsi_error, 184 .eh_strategy_handler = ata_scsi_error,
182 .can_queue = ATA_DEF_QUEUE, 185 .can_queue = ATA_DEF_QUEUE,
183 .this_id = ATA_SHT_THIS_ID, 186 .this_id = ATA_SHT_THIS_ID,
184 .sg_tablesize = LIBATA_MAX_PRD, 187 .sg_tablesize = LIBATA_MAX_PRD,
185 .max_sectors = ATA_MAX_SECTORS,
186 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 188 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
187 .emulated = ATA_SHT_EMULATED, 189 .emulated = ATA_SHT_EMULATED,
188 .use_clustering = ATA_SHT_USE_CLUSTERING, 190 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -205,7 +207,7 @@ static const struct ata_port_operations piix_pata_ops = {
205 .exec_command = ata_exec_command, 207 .exec_command = ata_exec_command,
206 .dev_select = ata_std_dev_select, 208 .dev_select = ata_std_dev_select,
207 209
208 .phy_reset = piix_pata_phy_reset, 210 .probe_reset = piix_pata_probe_reset,
209 211
210 .bmdma_setup = ata_bmdma_setup, 212 .bmdma_setup = ata_bmdma_setup,
211 .bmdma_start = ata_bmdma_start, 213 .bmdma_start = ata_bmdma_start,
@@ -233,7 +235,7 @@ static const struct ata_port_operations piix_sata_ops = {
233 .exec_command = ata_exec_command, 235 .exec_command = ata_exec_command,
234 .dev_select = ata_std_dev_select, 236 .dev_select = ata_std_dev_select,
235 237
236 .phy_reset = piix_sata_phy_reset, 238 .probe_reset = piix_sata_probe_reset,
237 239
238 .bmdma_setup = ata_bmdma_setup, 240 .bmdma_setup = ata_bmdma_setup,
239 .bmdma_start = ata_bmdma_start, 241 .bmdma_start = ata_bmdma_start,
@@ -256,8 +258,7 @@ static struct ata_port_info piix_port_info[] = {
256 /* ich5_pata */ 258 /* ich5_pata */
257 { 259 {
258 .sht = &piix_sht, 260 .sht = &piix_sht,
259 .host_flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST | 261 .host_flags = ATA_FLAG_SLAVE_POSS | PIIX_FLAG_CHECKINTR,
260 PIIX_FLAG_CHECKINTR,
261 .pio_mask = 0x1f, /* pio0-4 */ 262 .pio_mask = 0x1f, /* pio0-4 */
262#if 0 263#if 0
263 .mwdma_mask = 0x06, /* mwdma1-2 */ 264 .mwdma_mask = 0x06, /* mwdma1-2 */
@@ -271,8 +272,8 @@ static struct ata_port_info piix_port_info[] = {
271 /* ich5_sata */ 272 /* ich5_sata */
272 { 273 {
273 .sht = &piix_sht, 274 .sht = &piix_sht,
274 .host_flags = ATA_FLAG_SATA | ATA_FLAG_SRST | 275 .host_flags = ATA_FLAG_SATA | PIIX_FLAG_COMBINED |
275 PIIX_FLAG_COMBINED | PIIX_FLAG_CHECKINTR, 276 PIIX_FLAG_CHECKINTR,
276 .pio_mask = 0x1f, /* pio0-4 */ 277 .pio_mask = 0x1f, /* pio0-4 */
277 .mwdma_mask = 0x07, /* mwdma0-2 */ 278 .mwdma_mask = 0x07, /* mwdma0-2 */
278 .udma_mask = 0x7f, /* udma0-6 */ 279 .udma_mask = 0x7f, /* udma0-6 */
@@ -282,7 +283,7 @@ static struct ata_port_info piix_port_info[] = {
282 /* piix4_pata */ 283 /* piix4_pata */
283 { 284 {
284 .sht = &piix_sht, 285 .sht = &piix_sht,
285 .host_flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, 286 .host_flags = ATA_FLAG_SLAVE_POSS,
286 .pio_mask = 0x1f, /* pio0-4 */ 287 .pio_mask = 0x1f, /* pio0-4 */
287#if 0 288#if 0
288 .mwdma_mask = 0x06, /* mwdma1-2 */ 289 .mwdma_mask = 0x06, /* mwdma1-2 */
@@ -296,9 +297,8 @@ static struct ata_port_info piix_port_info[] = {
296 /* ich6_sata */ 297 /* ich6_sata */
297 { 298 {
298 .sht = &piix_sht, 299 .sht = &piix_sht,
299 .host_flags = ATA_FLAG_SATA | ATA_FLAG_SRST | 300 .host_flags = ATA_FLAG_SATA | PIIX_FLAG_COMBINED_ICH6 |
300 PIIX_FLAG_COMBINED | PIIX_FLAG_CHECKINTR | 301 PIIX_FLAG_CHECKINTR | ATA_FLAG_SLAVE_POSS,
301 ATA_FLAG_SLAVE_POSS,
302 .pio_mask = 0x1f, /* pio0-4 */ 302 .pio_mask = 0x1f, /* pio0-4 */
303 .mwdma_mask = 0x07, /* mwdma0-2 */ 303 .mwdma_mask = 0x07, /* mwdma0-2 */
304 .udma_mask = 0x7f, /* udma0-6 */ 304 .udma_mask = 0x7f, /* udma0-6 */
@@ -308,9 +308,9 @@ static struct ata_port_info piix_port_info[] = {
308 /* ich6_sata_ahci */ 308 /* ich6_sata_ahci */
309 { 309 {
310 .sht = &piix_sht, 310 .sht = &piix_sht,
311 .host_flags = ATA_FLAG_SATA | ATA_FLAG_SRST | 311 .host_flags = ATA_FLAG_SATA | PIIX_FLAG_COMBINED_ICH6 |
312 PIIX_FLAG_COMBINED | PIIX_FLAG_CHECKINTR | 312 PIIX_FLAG_CHECKINTR | ATA_FLAG_SLAVE_POSS |
313 ATA_FLAG_SLAVE_POSS | PIIX_FLAG_AHCI, 313 PIIX_FLAG_AHCI,
314 .pio_mask = 0x1f, /* pio0-4 */ 314 .pio_mask = 0x1f, /* pio0-4 */
315 .mwdma_mask = 0x07, /* mwdma0-2 */ 315 .mwdma_mask = 0x07, /* mwdma0-2 */
316 .udma_mask = 0x7f, /* udma0-6 */ 316 .udma_mask = 0x7f, /* udma0-6 */
@@ -363,30 +363,42 @@ cbl40:
363} 363}
364 364
365/** 365/**
366 * piix_pata_phy_reset - Probe specified port on PATA host controller 366 * piix_pata_probeinit - probeinit for PATA host controller
367 * @ap: Port to probe 367 * @ap: Target port
368 * 368 *
369 * Probe PATA phy. 369 * Probeinit including cable detection.
370 * 370 *
371 * LOCKING: 371 * LOCKING:
372 * None (inherited from caller). 372 * None (inherited from caller).
373 */ 373 */
374static void piix_pata_probeinit(struct ata_port *ap)
375{
376 piix_pata_cbl_detect(ap);
377 ata_std_probeinit(ap);
378}
374 379
375static void piix_pata_phy_reset(struct ata_port *ap) 380/**
381 * piix_pata_probe_reset - Perform reset on PATA port and classify
382 * @ap: Port to reset
383 * @classes: Resulting classes of attached devices
384 *
385 * Reset PATA phy and classify attached devices.
386 *
387 * LOCKING:
388 * None (inherited from caller).
389 */
390static int piix_pata_probe_reset(struct ata_port *ap, unsigned int *classes)
376{ 391{
377 struct pci_dev *pdev = to_pci_dev(ap->host_set->dev); 392 struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
378 393
379 if (!pci_test_config_bits(pdev, &piix_enable_bits[ap->hard_port_no])) { 394 if (!pci_test_config_bits(pdev, &piix_enable_bits[ap->hard_port_no])) {
380 ata_port_disable(ap);
381 printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id); 395 printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id);
382 return; 396 return 0;
383 } 397 }
384 398
385 piix_pata_cbl_detect(ap); 399 return ata_drive_probe_reset(ap, piix_pata_probeinit,
386 400 ata_std_softreset, NULL,
387 ata_port_probe(ap); 401 ata_std_postreset, classes);
388
389 ata_bus_reset(ap);
390} 402}
391 403
392/** 404/**
@@ -411,9 +423,6 @@ static int piix_sata_probe (struct ata_port *ap)
411 int orig_mask, mask, i; 423 int orig_mask, mask, i;
412 u8 pcs; 424 u8 pcs;
413 425
414 mask = (PIIX_PORT_PRESENT << ap->hard_port_no) |
415 (PIIX_PORT_ENABLED << ap->hard_port_no);
416
417 pci_read_config_byte(pdev, ICH5_PCS, &pcs); 426 pci_read_config_byte(pdev, ICH5_PCS, &pcs);
418 orig_mask = (int) pcs & 0xff; 427 orig_mask = (int) pcs & 0xff;
419 428
@@ -437,28 +446,25 @@ static int piix_sata_probe (struct ata_port *ap)
437} 446}
438 447
439/** 448/**
440 * piix_sata_phy_reset - Probe specified port on SATA host controller 449 * piix_sata_probe_reset - Perform reset on SATA port and classify
441 * @ap: Port to probe 450 * @ap: Port to reset
451 * @classes: Resulting classes of attached devices
442 * 452 *
443 * Probe SATA phy. 453 * Reset SATA phy and classify attached devices.
444 * 454 *
445 * LOCKING: 455 * LOCKING:
446 * None (inherited from caller). 456 * None (inherited from caller).
447 */ 457 */
448 458static int piix_sata_probe_reset(struct ata_port *ap, unsigned int *classes)
449static void piix_sata_phy_reset(struct ata_port *ap)
450{ 459{
451 if (!piix_sata_probe(ap)) { 460 if (!piix_sata_probe(ap)) {
452 ata_port_disable(ap);
453 printk(KERN_INFO "ata%u: SATA port has no device.\n", ap->id); 461 printk(KERN_INFO "ata%u: SATA port has no device.\n", ap->id);
454 return; 462 return 0;
455 } 463 }
456 464
457 ap->cbl = ATA_CBL_SATA; 465 return ata_drive_probe_reset(ap, ata_std_probeinit,
458 466 ata_std_softreset, NULL,
459 ata_port_probe(ap); 467 ata_std_postreset, classes);
460
461 ata_bus_reset(ap);
462} 468}
463 469
464/** 470/**
@@ -627,6 +633,7 @@ static int piix_disable_ahci(struct pci_dev *pdev)
627 633
628/** 634/**
629 * piix_check_450nx_errata - Check for problem 450NX setup 635 * piix_check_450nx_errata - Check for problem 450NX setup
636 * @ata_dev: the PCI device to check
630 * 637 *
631 * Check for the present of 450NX errata #19 and errata #25. If 638 * Check for the present of 450NX errata #19 and errata #25. If
632 * they are found return an error code so we can turn off DMA 639 * they are found return an error code so we can turn off DMA
@@ -680,6 +687,7 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
680 struct ata_port_info *port_info[2]; 687 struct ata_port_info *port_info[2];
681 unsigned int combined = 0; 688 unsigned int combined = 0;
682 unsigned int pata_chan = 0, sata_chan = 0; 689 unsigned int pata_chan = 0, sata_chan = 0;
690 unsigned long host_flags;
683 691
684 if (!printed_version++) 692 if (!printed_version++)
685 dev_printk(KERN_DEBUG, &pdev->dev, 693 dev_printk(KERN_DEBUG, &pdev->dev,
@@ -692,7 +700,9 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
692 port_info[0] = &piix_port_info[ent->driver_data]; 700 port_info[0] = &piix_port_info[ent->driver_data];
693 port_info[1] = &piix_port_info[ent->driver_data]; 701 port_info[1] = &piix_port_info[ent->driver_data];
694 702
695 if (port_info[0]->host_flags & PIIX_FLAG_AHCI) { 703 host_flags = port_info[0]->host_flags;
704
705 if (host_flags & PIIX_FLAG_AHCI) {
696 u8 tmp; 706 u8 tmp;
697 pci_read_config_byte(pdev, PIIX_SCC, &tmp); 707 pci_read_config_byte(pdev, PIIX_SCC, &tmp);
698 if (tmp == PIIX_AHCI_DEVICE) { 708 if (tmp == PIIX_AHCI_DEVICE) {
@@ -702,16 +712,35 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
702 } 712 }
703 } 713 }
704 714
705 if (port_info[0]->host_flags & PIIX_FLAG_COMBINED) { 715 if (host_flags & PIIX_FLAG_COMBINED) {
706 u8 tmp; 716 u8 tmp;
707 pci_read_config_byte(pdev, ICH5_PMR, &tmp); 717 pci_read_config_byte(pdev, ICH5_PMR, &tmp);
708 718
709 if (tmp & PIIX_COMB) { 719 if (host_flags & PIIX_FLAG_COMBINED_ICH6) {
710 combined = 1; 720 switch (tmp & 0x3) {
711 if (tmp & PIIX_COMB_PATA_P0) 721 case 0:
722 break;
723 case 1:
724 combined = 1;
712 sata_chan = 1; 725 sata_chan = 1;
713 else 726 break;
727 case 2:
728 combined = 1;
714 pata_chan = 1; 729 pata_chan = 1;
730 break;
731 case 3:
732 dev_printk(KERN_WARNING, &pdev->dev,
733 "invalid MAP value %u\n", tmp);
734 break;
735 }
736 } else {
737 if (tmp & PIIX_COMB) {
738 combined = 1;
739 if (tmp & PIIX_COMB_PATA_P0)
740 sata_chan = 1;
741 else
742 pata_chan = 1;
743 }
715 } 744 }
716 } 745 }
717 746
@@ -721,7 +750,7 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
721 * MSI is disabled (and it is disabled, as we don't use 750 * MSI is disabled (and it is disabled, as we don't use
722 * message-signalled interrupts currently). 751 * message-signalled interrupts currently).
723 */ 752 */
724 if (port_info[0]->host_flags & PIIX_FLAG_CHECKINTR) 753 if (host_flags & PIIX_FLAG_CHECKINTR)
725 pci_intx(pdev, 1); 754 pci_intx(pdev, 1);
726 755
727 if (combined) { 756 if (combined) {
diff --git a/drivers/scsi/libata-bmdma.c b/drivers/scsi/libata-bmdma.c
new file mode 100644
index 000000000000..a93336adcd23
--- /dev/null
+++ b/drivers/scsi/libata-bmdma.c
@@ -0,0 +1,703 @@
1/*
2 * libata-bmdma.c - helper library for PCI IDE BMDMA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2006 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2006 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
33 */
34
35#include <linux/config.h>
36#include <linux/kernel.h>
37#include <linux/pci.h>
38#include <linux/libata.h>
39
40#include "libata.h"
41
42/**
43 * ata_tf_load_pio - send taskfile registers to host controller
44 * @ap: Port to which output is sent
45 * @tf: ATA taskfile register set
46 *
47 * Outputs ATA taskfile to standard ATA host controller.
48 *
49 * LOCKING:
50 * Inherited from caller.
51 */
52
53static void ata_tf_load_pio(struct ata_port *ap, const struct ata_taskfile *tf)
54{
55 struct ata_ioports *ioaddr = &ap->ioaddr;
56 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
57
58 if (tf->ctl != ap->last_ctl) {
59 outb(tf->ctl, ioaddr->ctl_addr);
60 ap->last_ctl = tf->ctl;
61 ata_wait_idle(ap);
62 }
63
64 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
65 outb(tf->hob_feature, ioaddr->feature_addr);
66 outb(tf->hob_nsect, ioaddr->nsect_addr);
67 outb(tf->hob_lbal, ioaddr->lbal_addr);
68 outb(tf->hob_lbam, ioaddr->lbam_addr);
69 outb(tf->hob_lbah, ioaddr->lbah_addr);
70 VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
71 tf->hob_feature,
72 tf->hob_nsect,
73 tf->hob_lbal,
74 tf->hob_lbam,
75 tf->hob_lbah);
76 }
77
78 if (is_addr) {
79 outb(tf->feature, ioaddr->feature_addr);
80 outb(tf->nsect, ioaddr->nsect_addr);
81 outb(tf->lbal, ioaddr->lbal_addr);
82 outb(tf->lbam, ioaddr->lbam_addr);
83 outb(tf->lbah, ioaddr->lbah_addr);
84 VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
85 tf->feature,
86 tf->nsect,
87 tf->lbal,
88 tf->lbam,
89 tf->lbah);
90 }
91
92 if (tf->flags & ATA_TFLAG_DEVICE) {
93 outb(tf->device, ioaddr->device_addr);
94 VPRINTK("device 0x%X\n", tf->device);
95 }
96
97 ata_wait_idle(ap);
98}
99
100/**
101 * ata_tf_load_mmio - send taskfile registers to host controller
102 * @ap: Port to which output is sent
103 * @tf: ATA taskfile register set
104 *
105 * Outputs ATA taskfile to standard ATA host controller using MMIO.
106 *
107 * LOCKING:
108 * Inherited from caller.
109 */
110
111static void ata_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
112{
113 struct ata_ioports *ioaddr = &ap->ioaddr;
114 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
115
116 if (tf->ctl != ap->last_ctl) {
117 writeb(tf->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
118 ap->last_ctl = tf->ctl;
119 ata_wait_idle(ap);
120 }
121
122 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
123 writeb(tf->hob_feature, (void __iomem *) ioaddr->feature_addr);
124 writeb(tf->hob_nsect, (void __iomem *) ioaddr->nsect_addr);
125 writeb(tf->hob_lbal, (void __iomem *) ioaddr->lbal_addr);
126 writeb(tf->hob_lbam, (void __iomem *) ioaddr->lbam_addr);
127 writeb(tf->hob_lbah, (void __iomem *) ioaddr->lbah_addr);
128 VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
129 tf->hob_feature,
130 tf->hob_nsect,
131 tf->hob_lbal,
132 tf->hob_lbam,
133 tf->hob_lbah);
134 }
135
136 if (is_addr) {
137 writeb(tf->feature, (void __iomem *) ioaddr->feature_addr);
138 writeb(tf->nsect, (void __iomem *) ioaddr->nsect_addr);
139 writeb(tf->lbal, (void __iomem *) ioaddr->lbal_addr);
140 writeb(tf->lbam, (void __iomem *) ioaddr->lbam_addr);
141 writeb(tf->lbah, (void __iomem *) ioaddr->lbah_addr);
142 VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
143 tf->feature,
144 tf->nsect,
145 tf->lbal,
146 tf->lbam,
147 tf->lbah);
148 }
149
150 if (tf->flags & ATA_TFLAG_DEVICE) {
151 writeb(tf->device, (void __iomem *) ioaddr->device_addr);
152 VPRINTK("device 0x%X\n", tf->device);
153 }
154
155 ata_wait_idle(ap);
156}
157
158
159/**
160 * ata_tf_load - send taskfile registers to host controller
161 * @ap: Port to which output is sent
162 * @tf: ATA taskfile register set
163 *
164 * Outputs ATA taskfile to standard ATA host controller using MMIO
165 * or PIO as indicated by the ATA_FLAG_MMIO flag.
166 * Writes the control, feature, nsect, lbal, lbam, and lbah registers.
167 * Optionally (ATA_TFLAG_LBA48) writes hob_feature, hob_nsect,
168 * hob_lbal, hob_lbam, and hob_lbah.
169 *
170 * This function waits for idle (!BUSY and !DRQ) after writing
171 * registers. If the control register has a new value, this
172 * function also waits for idle after writing control and before
173 * writing the remaining registers.
174 *
175 * May be used as the tf_load() entry in ata_port_operations.
176 *
177 * LOCKING:
178 * Inherited from caller.
179 */
180void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
181{
182 if (ap->flags & ATA_FLAG_MMIO)
183 ata_tf_load_mmio(ap, tf);
184 else
185 ata_tf_load_pio(ap, tf);
186}
187
188/**
189 * ata_exec_command_pio - issue ATA command to host controller
190 * @ap: port to which command is being issued
191 * @tf: ATA taskfile register set
192 *
193 * Issues PIO write to ATA command register, with proper
194 * synchronization with interrupt handler / other threads.
195 *
196 * LOCKING:
197 * spin_lock_irqsave(host_set lock)
198 */
199
200static void ata_exec_command_pio(struct ata_port *ap, const struct ata_taskfile *tf)
201{
202 DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
203
204 outb(tf->command, ap->ioaddr.command_addr);
205 ata_pause(ap);
206}
207
208
209/**
210 * ata_exec_command_mmio - issue ATA command to host controller
211 * @ap: port to which command is being issued
212 * @tf: ATA taskfile register set
213 *
214 * Issues MMIO write to ATA command register, with proper
215 * synchronization with interrupt handler / other threads.
216 *
217 * LOCKING:
218 * spin_lock_irqsave(host_set lock)
219 */
220
221static void ata_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
222{
223 DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
224
225 writeb(tf->command, (void __iomem *) ap->ioaddr.command_addr);
226 ata_pause(ap);
227}
228
229
230/**
231 * ata_exec_command - issue ATA command to host controller
232 * @ap: port to which command is being issued
233 * @tf: ATA taskfile register set
234 *
235 * Issues PIO/MMIO write to ATA command register, with proper
236 * synchronization with interrupt handler / other threads.
237 *
238 * LOCKING:
239 * spin_lock_irqsave(host_set lock)
240 */
241void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
242{
243 if (ap->flags & ATA_FLAG_MMIO)
244 ata_exec_command_mmio(ap, tf);
245 else
246 ata_exec_command_pio(ap, tf);
247}
248
249/**
250 * ata_tf_read_pio - input device's ATA taskfile shadow registers
251 * @ap: Port from which input is read
252 * @tf: ATA taskfile register set for storing input
253 *
254 * Reads ATA taskfile registers for currently-selected device
255 * into @tf.
256 *
257 * LOCKING:
258 * Inherited from caller.
259 */
260
261static void ata_tf_read_pio(struct ata_port *ap, struct ata_taskfile *tf)
262{
263 struct ata_ioports *ioaddr = &ap->ioaddr;
264
265 tf->command = ata_check_status(ap);
266 tf->feature = inb(ioaddr->error_addr);
267 tf->nsect = inb(ioaddr->nsect_addr);
268 tf->lbal = inb(ioaddr->lbal_addr);
269 tf->lbam = inb(ioaddr->lbam_addr);
270 tf->lbah = inb(ioaddr->lbah_addr);
271 tf->device = inb(ioaddr->device_addr);
272
273 if (tf->flags & ATA_TFLAG_LBA48) {
274 outb(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
275 tf->hob_feature = inb(ioaddr->error_addr);
276 tf->hob_nsect = inb(ioaddr->nsect_addr);
277 tf->hob_lbal = inb(ioaddr->lbal_addr);
278 tf->hob_lbam = inb(ioaddr->lbam_addr);
279 tf->hob_lbah = inb(ioaddr->lbah_addr);
280 }
281}
282
283/**
284 * ata_tf_read_mmio - input device's ATA taskfile shadow registers
285 * @ap: Port from which input is read
286 * @tf: ATA taskfile register set for storing input
287 *
288 * Reads ATA taskfile registers for currently-selected device
289 * into @tf via MMIO.
290 *
291 * LOCKING:
292 * Inherited from caller.
293 */
294
295static void ata_tf_read_mmio(struct ata_port *ap, struct ata_taskfile *tf)
296{
297 struct ata_ioports *ioaddr = &ap->ioaddr;
298
299 tf->command = ata_check_status(ap);
300 tf->feature = readb((void __iomem *)ioaddr->error_addr);
301 tf->nsect = readb((void __iomem *)ioaddr->nsect_addr);
302 tf->lbal = readb((void __iomem *)ioaddr->lbal_addr);
303 tf->lbam = readb((void __iomem *)ioaddr->lbam_addr);
304 tf->lbah = readb((void __iomem *)ioaddr->lbah_addr);
305 tf->device = readb((void __iomem *)ioaddr->device_addr);
306
307 if (tf->flags & ATA_TFLAG_LBA48) {
308 writeb(tf->ctl | ATA_HOB, (void __iomem *) ap->ioaddr.ctl_addr);
309 tf->hob_feature = readb((void __iomem *)ioaddr->error_addr);
310 tf->hob_nsect = readb((void __iomem *)ioaddr->nsect_addr);
311 tf->hob_lbal = readb((void __iomem *)ioaddr->lbal_addr);
312 tf->hob_lbam = readb((void __iomem *)ioaddr->lbam_addr);
313 tf->hob_lbah = readb((void __iomem *)ioaddr->lbah_addr);
314 }
315}
316
317
318/**
319 * ata_tf_read - input device's ATA taskfile shadow registers
320 * @ap: Port from which input is read
321 * @tf: ATA taskfile register set for storing input
322 *
323 * Reads ATA taskfile registers for currently-selected device
324 * into @tf.
325 *
326 * Reads nsect, lbal, lbam, lbah, and device. If ATA_TFLAG_LBA48
327 * is set, also reads the hob registers.
328 *
329 * May be used as the tf_read() entry in ata_port_operations.
330 *
331 * LOCKING:
332 * Inherited from caller.
333 */
334void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
335{
336 if (ap->flags & ATA_FLAG_MMIO)
337 ata_tf_read_mmio(ap, tf);
338 else
339 ata_tf_read_pio(ap, tf);
340}
341
342/**
343 * ata_check_status_pio - Read device status reg & clear interrupt
344 * @ap: port where the device is
345 *
346 * Reads ATA taskfile status register for currently-selected device
347 * and return its value. This also clears pending interrupts
348 * from this device
349 *
350 * LOCKING:
351 * Inherited from caller.
352 */
353static u8 ata_check_status_pio(struct ata_port *ap)
354{
355 return inb(ap->ioaddr.status_addr);
356}
357
358/**
359 * ata_check_status_mmio - Read device status reg & clear interrupt
360 * @ap: port where the device is
361 *
362 * Reads ATA taskfile status register for currently-selected device
363 * via MMIO and return its value. This also clears pending interrupts
364 * from this device
365 *
366 * LOCKING:
367 * Inherited from caller.
368 */
369static u8 ata_check_status_mmio(struct ata_port *ap)
370{
371 return readb((void __iomem *) ap->ioaddr.status_addr);
372}
373
374
375/**
376 * ata_check_status - Read device status reg & clear interrupt
377 * @ap: port where the device is
378 *
379 * Reads ATA taskfile status register for currently-selected device
380 * and return its value. This also clears pending interrupts
381 * from this device
382 *
383 * May be used as the check_status() entry in ata_port_operations.
384 *
385 * LOCKING:
386 * Inherited from caller.
387 */
388u8 ata_check_status(struct ata_port *ap)
389{
390 if (ap->flags & ATA_FLAG_MMIO)
391 return ata_check_status_mmio(ap);
392 return ata_check_status_pio(ap);
393}
394
395
396/**
397 * ata_altstatus - Read device alternate status reg
398 * @ap: port where the device is
399 *
400 * Reads ATA taskfile alternate status register for
401 * currently-selected device and return its value.
402 *
403 * Note: may NOT be used as the check_altstatus() entry in
404 * ata_port_operations.
405 *
406 * LOCKING:
407 * Inherited from caller.
408 */
409u8 ata_altstatus(struct ata_port *ap)
410{
411 if (ap->ops->check_altstatus)
412 return ap->ops->check_altstatus(ap);
413
414 if (ap->flags & ATA_FLAG_MMIO)
415 return readb((void __iomem *)ap->ioaddr.altstatus_addr);
416 return inb(ap->ioaddr.altstatus_addr);
417}
418
419#ifdef CONFIG_PCI
420static struct ata_probe_ent *
421ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
422{
423 struct ata_probe_ent *probe_ent;
424
425 probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
426 if (!probe_ent) {
427 printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
428 kobject_name(&(dev->kobj)));
429 return NULL;
430 }
431
432 INIT_LIST_HEAD(&probe_ent->node);
433 probe_ent->dev = dev;
434
435 probe_ent->sht = port->sht;
436 probe_ent->host_flags = port->host_flags;
437 probe_ent->pio_mask = port->pio_mask;
438 probe_ent->mwdma_mask = port->mwdma_mask;
439 probe_ent->udma_mask = port->udma_mask;
440 probe_ent->port_ops = port->port_ops;
441
442 return probe_ent;
443}
444
445
446/**
447 * ata_pci_init_native_mode - Initialize native-mode driver
448 * @pdev: pci device to be initialized
449 * @port: array[2] of pointers to port info structures.
450 * @ports: bitmap of ports present
451 *
452 * Utility function which allocates and initializes an
453 * ata_probe_ent structure for a standard dual-port
454 * PIO-based IDE controller. The returned ata_probe_ent
455 * structure can be passed to ata_device_add(). The returned
456 * ata_probe_ent structure should then be freed with kfree().
457 *
458 * The caller need only pass the address of the primary port, the
459 * secondary will be deduced automatically. If the device has non
460 * standard secondary port mappings this function can be called twice,
461 * once for each interface.
462 */
463
464struct ata_probe_ent *
465ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int ports)
466{
467 struct ata_probe_ent *probe_ent =
468 ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
469 int p = 0;
470
471 if (!probe_ent)
472 return NULL;
473
474 probe_ent->irq = pdev->irq;
475 probe_ent->irq_flags = SA_SHIRQ;
476 probe_ent->private_data = port[0]->private_data;
477
478 if (ports & ATA_PORT_PRIMARY) {
479 probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 0);
480 probe_ent->port[p].altstatus_addr =
481 probe_ent->port[p].ctl_addr =
482 pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS;
483 probe_ent->port[p].bmdma_addr = pci_resource_start(pdev, 4);
484 ata_std_ports(&probe_ent->port[p]);
485 p++;
486 }
487
488 if (ports & ATA_PORT_SECONDARY) {
489 probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 2);
490 probe_ent->port[p].altstatus_addr =
491 probe_ent->port[p].ctl_addr =
492 pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS;
493 probe_ent->port[p].bmdma_addr = pci_resource_start(pdev, 4) + 8;
494 ata_std_ports(&probe_ent->port[p]);
495 p++;
496 }
497
498 probe_ent->n_ports = p;
499 return probe_ent;
500}
501
502
503static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev,
504 struct ata_port_info *port, int port_num)
505{
506 struct ata_probe_ent *probe_ent;
507
508 probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port);
509 if (!probe_ent)
510 return NULL;
511
512 probe_ent->legacy_mode = 1;
513 probe_ent->n_ports = 1;
514 probe_ent->hard_port_no = port_num;
515 probe_ent->private_data = port->private_data;
516
517 switch(port_num)
518 {
519 case 0:
520 probe_ent->irq = 14;
521 probe_ent->port[0].cmd_addr = 0x1f0;
522 probe_ent->port[0].altstatus_addr =
523 probe_ent->port[0].ctl_addr = 0x3f6;
524 break;
525 case 1:
526 probe_ent->irq = 15;
527 probe_ent->port[0].cmd_addr = 0x170;
528 probe_ent->port[0].altstatus_addr =
529 probe_ent->port[0].ctl_addr = 0x376;
530 break;
531 }
532
533 probe_ent->port[0].bmdma_addr =
534 pci_resource_start(pdev, 4) + 8 * port_num;
535 ata_std_ports(&probe_ent->port[0]);
536
537 return probe_ent;
538}
539
540
541/**
542 * ata_pci_init_one - Initialize/register PCI IDE host controller
543 * @pdev: Controller to be initialized
544 * @port_info: Information from low-level host driver
545 * @n_ports: Number of ports attached to host controller
546 *
547 * This is a helper function which can be called from a driver's
548 * xxx_init_one() probe function if the hardware uses traditional
549 * IDE taskfile registers.
550 *
551 * This function calls pci_enable_device(), reserves its register
552 * regions, sets the dma mask, enables bus master mode, and calls
553 * ata_device_add()
554 *
555 * LOCKING:
556 * Inherited from PCI layer (may sleep).
557 *
558 * RETURNS:
559 * Zero on success, negative on errno-based value on error.
560 */
561
562int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
563 unsigned int n_ports)
564{
565 struct ata_probe_ent *probe_ent = NULL, *probe_ent2 = NULL;
566 struct ata_port_info *port[2];
567 u8 tmp8, mask;
568 unsigned int legacy_mode = 0;
569 int disable_dev_on_err = 1;
570 int rc;
571
572 DPRINTK("ENTER\n");
573
574 port[0] = port_info[0];
575 if (n_ports > 1)
576 port[1] = port_info[1];
577 else
578 port[1] = port[0];
579
580 if ((port[0]->host_flags & ATA_FLAG_NO_LEGACY) == 0
581 && (pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
582 /* TODO: What if one channel is in native mode ... */
583 pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
584 mask = (1 << 2) | (1 << 0);
585 if ((tmp8 & mask) != mask)
586 legacy_mode = (1 << 3);
587 }
588
589 /* FIXME... */
590 if ((!legacy_mode) && (n_ports > 2)) {
591 printk(KERN_ERR "ata: BUG: native mode, n_ports > 2\n");
592 n_ports = 2;
593 /* For now */
594 }
595
596 /* FIXME: Really for ATA it isn't safe because the device may be
597 multi-purpose and we want to leave it alone if it was already
598 enabled. Secondly for shared use as Arjan says we want refcounting
599
600 Checking dev->is_enabled is insufficient as this is not set at
601 boot for the primary video which is BIOS enabled
602 */
603
604 rc = pci_enable_device(pdev);
605 if (rc)
606 return rc;
607
608 rc = pci_request_regions(pdev, DRV_NAME);
609 if (rc) {
610 disable_dev_on_err = 0;
611 goto err_out;
612 }
613
614 /* FIXME: Should use platform specific mappers for legacy port ranges */
615 if (legacy_mode) {
616 if (!request_region(0x1f0, 8, "libata")) {
617 struct resource *conflict, res;
618 res.start = 0x1f0;
619 res.end = 0x1f0 + 8 - 1;
620 conflict = ____request_resource(&ioport_resource, &res);
621 if (!strcmp(conflict->name, "libata"))
622 legacy_mode |= (1 << 0);
623 else {
624 disable_dev_on_err = 0;
625 printk(KERN_WARNING "ata: 0x1f0 IDE port busy\n");
626 }
627 } else
628 legacy_mode |= (1 << 0);
629
630 if (!request_region(0x170, 8, "libata")) {
631 struct resource *conflict, res;
632 res.start = 0x170;
633 res.end = 0x170 + 8 - 1;
634 conflict = ____request_resource(&ioport_resource, &res);
635 if (!strcmp(conflict->name, "libata"))
636 legacy_mode |= (1 << 1);
637 else {
638 disable_dev_on_err = 0;
639 printk(KERN_WARNING "ata: 0x170 IDE port busy\n");
640 }
641 } else
642 legacy_mode |= (1 << 1);
643 }
644
645 /* we have legacy mode, but all ports are unavailable */
646 if (legacy_mode == (1 << 3)) {
647 rc = -EBUSY;
648 goto err_out_regions;
649 }
650
651 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
652 if (rc)
653 goto err_out_regions;
654 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
655 if (rc)
656 goto err_out_regions;
657
658 if (legacy_mode) {
659 if (legacy_mode & (1 << 0))
660 probe_ent = ata_pci_init_legacy_port(pdev, port[0], 0);
661 if (legacy_mode & (1 << 1))
662 probe_ent2 = ata_pci_init_legacy_port(pdev, port[1], 1);
663 } else {
664 if (n_ports == 2)
665 probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
666 else
667 probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY);
668 }
669 if (!probe_ent && !probe_ent2) {
670 rc = -ENOMEM;
671 goto err_out_regions;
672 }
673
674 pci_set_master(pdev);
675
676 /* FIXME: check ata_device_add return */
677 if (legacy_mode) {
678 if (legacy_mode & (1 << 0))
679 ata_device_add(probe_ent);
680 if (legacy_mode & (1 << 1))
681 ata_device_add(probe_ent2);
682 } else
683 ata_device_add(probe_ent);
684
685 kfree(probe_ent);
686 kfree(probe_ent2);
687
688 return 0;
689
690err_out_regions:
691 if (legacy_mode & (1 << 0))
692 release_region(0x1f0, 8);
693 if (legacy_mode & (1 << 1))
694 release_region(0x170, 8);
695 pci_release_regions(pdev);
696err_out:
697 if (disable_dev_on_err)
698 pci_disable_device(pdev);
699 return rc;
700}
701
702#endif /* CONFIG_PCI */
703
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index 5f1d7580218d..70efde99f652 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -61,11 +61,9 @@
61 61
62#include "libata.h" 62#include "libata.h"
63 63
64static unsigned int ata_busy_sleep (struct ata_port *ap,
65 unsigned long tmout_pat,
66 unsigned long tmout);
67static void ata_dev_reread_id(struct ata_port *ap, struct ata_device *dev); 64static void ata_dev_reread_id(struct ata_port *ap, struct ata_device *dev);
68static void ata_dev_init_params(struct ata_port *ap, struct ata_device *dev); 65static unsigned int ata_dev_init_params(struct ata_port *ap,
66 struct ata_device *dev);
69static void ata_set_mode(struct ata_port *ap); 67static void ata_set_mode(struct ata_port *ap);
70static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev); 68static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev);
71static unsigned int ata_get_mode_mask(const struct ata_port *ap, int shift); 69static unsigned int ata_get_mode_mask(const struct ata_port *ap, int shift);
@@ -73,7 +71,6 @@ static int fgb(u32 bitmap);
73static int ata_choose_xfer_mode(const struct ata_port *ap, 71static int ata_choose_xfer_mode(const struct ata_port *ap,
74 u8 *xfer_mode_out, 72 u8 *xfer_mode_out,
75 unsigned int *xfer_shift_out); 73 unsigned int *xfer_shift_out);
76static void __ata_qc_complete(struct ata_queued_cmd *qc);
77 74
78static unsigned int ata_unique_id = 1; 75static unsigned int ata_unique_id = 1;
79static struct workqueue_struct *ata_wq; 76static struct workqueue_struct *ata_wq;
@@ -87,403 +84,6 @@ MODULE_DESCRIPTION("Library module for ATA devices");
87MODULE_LICENSE("GPL"); 84MODULE_LICENSE("GPL");
88MODULE_VERSION(DRV_VERSION); 85MODULE_VERSION(DRV_VERSION);
89 86
90/**
91 * ata_tf_load_pio - send taskfile registers to host controller
92 * @ap: Port to which output is sent
93 * @tf: ATA taskfile register set
94 *
95 * Outputs ATA taskfile to standard ATA host controller.
96 *
97 * LOCKING:
98 * Inherited from caller.
99 */
100
101static void ata_tf_load_pio(struct ata_port *ap, const struct ata_taskfile *tf)
102{
103 struct ata_ioports *ioaddr = &ap->ioaddr;
104 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
105
106 if (tf->ctl != ap->last_ctl) {
107 outb(tf->ctl, ioaddr->ctl_addr);
108 ap->last_ctl = tf->ctl;
109 ata_wait_idle(ap);
110 }
111
112 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
113 outb(tf->hob_feature, ioaddr->feature_addr);
114 outb(tf->hob_nsect, ioaddr->nsect_addr);
115 outb(tf->hob_lbal, ioaddr->lbal_addr);
116 outb(tf->hob_lbam, ioaddr->lbam_addr);
117 outb(tf->hob_lbah, ioaddr->lbah_addr);
118 VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
119 tf->hob_feature,
120 tf->hob_nsect,
121 tf->hob_lbal,
122 tf->hob_lbam,
123 tf->hob_lbah);
124 }
125
126 if (is_addr) {
127 outb(tf->feature, ioaddr->feature_addr);
128 outb(tf->nsect, ioaddr->nsect_addr);
129 outb(tf->lbal, ioaddr->lbal_addr);
130 outb(tf->lbam, ioaddr->lbam_addr);
131 outb(tf->lbah, ioaddr->lbah_addr);
132 VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
133 tf->feature,
134 tf->nsect,
135 tf->lbal,
136 tf->lbam,
137 tf->lbah);
138 }
139
140 if (tf->flags & ATA_TFLAG_DEVICE) {
141 outb(tf->device, ioaddr->device_addr);
142 VPRINTK("device 0x%X\n", tf->device);
143 }
144
145 ata_wait_idle(ap);
146}
147
148/**
149 * ata_tf_load_mmio - send taskfile registers to host controller
150 * @ap: Port to which output is sent
151 * @tf: ATA taskfile register set
152 *
153 * Outputs ATA taskfile to standard ATA host controller using MMIO.
154 *
155 * LOCKING:
156 * Inherited from caller.
157 */
158
159static void ata_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
160{
161 struct ata_ioports *ioaddr = &ap->ioaddr;
162 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
163
164 if (tf->ctl != ap->last_ctl) {
165 writeb(tf->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
166 ap->last_ctl = tf->ctl;
167 ata_wait_idle(ap);
168 }
169
170 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
171 writeb(tf->hob_feature, (void __iomem *) ioaddr->feature_addr);
172 writeb(tf->hob_nsect, (void __iomem *) ioaddr->nsect_addr);
173 writeb(tf->hob_lbal, (void __iomem *) ioaddr->lbal_addr);
174 writeb(tf->hob_lbam, (void __iomem *) ioaddr->lbam_addr);
175 writeb(tf->hob_lbah, (void __iomem *) ioaddr->lbah_addr);
176 VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
177 tf->hob_feature,
178 tf->hob_nsect,
179 tf->hob_lbal,
180 tf->hob_lbam,
181 tf->hob_lbah);
182 }
183
184 if (is_addr) {
185 writeb(tf->feature, (void __iomem *) ioaddr->feature_addr);
186 writeb(tf->nsect, (void __iomem *) ioaddr->nsect_addr);
187 writeb(tf->lbal, (void __iomem *) ioaddr->lbal_addr);
188 writeb(tf->lbam, (void __iomem *) ioaddr->lbam_addr);
189 writeb(tf->lbah, (void __iomem *) ioaddr->lbah_addr);
190 VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
191 tf->feature,
192 tf->nsect,
193 tf->lbal,
194 tf->lbam,
195 tf->lbah);
196 }
197
198 if (tf->flags & ATA_TFLAG_DEVICE) {
199 writeb(tf->device, (void __iomem *) ioaddr->device_addr);
200 VPRINTK("device 0x%X\n", tf->device);
201 }
202
203 ata_wait_idle(ap);
204}
205
206
207/**
208 * ata_tf_load - send taskfile registers to host controller
209 * @ap: Port to which output is sent
210 * @tf: ATA taskfile register set
211 *
212 * Outputs ATA taskfile to standard ATA host controller using MMIO
213 * or PIO as indicated by the ATA_FLAG_MMIO flag.
214 * Writes the control, feature, nsect, lbal, lbam, and lbah registers.
215 * Optionally (ATA_TFLAG_LBA48) writes hob_feature, hob_nsect,
216 * hob_lbal, hob_lbam, and hob_lbah.
217 *
218 * This function waits for idle (!BUSY and !DRQ) after writing
219 * registers. If the control register has a new value, this
220 * function also waits for idle after writing control and before
221 * writing the remaining registers.
222 *
223 * May be used as the tf_load() entry in ata_port_operations.
224 *
225 * LOCKING:
226 * Inherited from caller.
227 */
228void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
229{
230 if (ap->flags & ATA_FLAG_MMIO)
231 ata_tf_load_mmio(ap, tf);
232 else
233 ata_tf_load_pio(ap, tf);
234}
235
236/**
237 * ata_exec_command_pio - issue ATA command to host controller
238 * @ap: port to which command is being issued
239 * @tf: ATA taskfile register set
240 *
241 * Issues PIO write to ATA command register, with proper
242 * synchronization with interrupt handler / other threads.
243 *
244 * LOCKING:
245 * spin_lock_irqsave(host_set lock)
246 */
247
248static void ata_exec_command_pio(struct ata_port *ap, const struct ata_taskfile *tf)
249{
250 DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
251
252 outb(tf->command, ap->ioaddr.command_addr);
253 ata_pause(ap);
254}
255
256
257/**
258 * ata_exec_command_mmio - issue ATA command to host controller
259 * @ap: port to which command is being issued
260 * @tf: ATA taskfile register set
261 *
262 * Issues MMIO write to ATA command register, with proper
263 * synchronization with interrupt handler / other threads.
264 *
265 * LOCKING:
266 * spin_lock_irqsave(host_set lock)
267 */
268
269static void ata_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
270{
271 DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
272
273 writeb(tf->command, (void __iomem *) ap->ioaddr.command_addr);
274 ata_pause(ap);
275}
276
277
278/**
279 * ata_exec_command - issue ATA command to host controller
280 * @ap: port to which command is being issued
281 * @tf: ATA taskfile register set
282 *
283 * Issues PIO/MMIO write to ATA command register, with proper
284 * synchronization with interrupt handler / other threads.
285 *
286 * LOCKING:
287 * spin_lock_irqsave(host_set lock)
288 */
289void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
290{
291 if (ap->flags & ATA_FLAG_MMIO)
292 ata_exec_command_mmio(ap, tf);
293 else
294 ata_exec_command_pio(ap, tf);
295}
296
297/**
298 * ata_tf_to_host - issue ATA taskfile to host controller
299 * @ap: port to which command is being issued
300 * @tf: ATA taskfile register set
301 *
302 * Issues ATA taskfile register set to ATA host controller,
303 * with proper synchronization with interrupt handler and
304 * other threads.
305 *
306 * LOCKING:
307 * spin_lock_irqsave(host_set lock)
308 */
309
310static inline void ata_tf_to_host(struct ata_port *ap,
311 const struct ata_taskfile *tf)
312{
313 ap->ops->tf_load(ap, tf);
314 ap->ops->exec_command(ap, tf);
315}
316
317/**
318 * ata_tf_read_pio - input device's ATA taskfile shadow registers
319 * @ap: Port from which input is read
320 * @tf: ATA taskfile register set for storing input
321 *
322 * Reads ATA taskfile registers for currently-selected device
323 * into @tf.
324 *
325 * LOCKING:
326 * Inherited from caller.
327 */
328
329static void ata_tf_read_pio(struct ata_port *ap, struct ata_taskfile *tf)
330{
331 struct ata_ioports *ioaddr = &ap->ioaddr;
332
333 tf->command = ata_check_status(ap);
334 tf->feature = inb(ioaddr->error_addr);
335 tf->nsect = inb(ioaddr->nsect_addr);
336 tf->lbal = inb(ioaddr->lbal_addr);
337 tf->lbam = inb(ioaddr->lbam_addr);
338 tf->lbah = inb(ioaddr->lbah_addr);
339 tf->device = inb(ioaddr->device_addr);
340
341 if (tf->flags & ATA_TFLAG_LBA48) {
342 outb(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
343 tf->hob_feature = inb(ioaddr->error_addr);
344 tf->hob_nsect = inb(ioaddr->nsect_addr);
345 tf->hob_lbal = inb(ioaddr->lbal_addr);
346 tf->hob_lbam = inb(ioaddr->lbam_addr);
347 tf->hob_lbah = inb(ioaddr->lbah_addr);
348 }
349}
350
351/**
352 * ata_tf_read_mmio - input device's ATA taskfile shadow registers
353 * @ap: Port from which input is read
354 * @tf: ATA taskfile register set for storing input
355 *
356 * Reads ATA taskfile registers for currently-selected device
357 * into @tf via MMIO.
358 *
359 * LOCKING:
360 * Inherited from caller.
361 */
362
363static void ata_tf_read_mmio(struct ata_port *ap, struct ata_taskfile *tf)
364{
365 struct ata_ioports *ioaddr = &ap->ioaddr;
366
367 tf->command = ata_check_status(ap);
368 tf->feature = readb((void __iomem *)ioaddr->error_addr);
369 tf->nsect = readb((void __iomem *)ioaddr->nsect_addr);
370 tf->lbal = readb((void __iomem *)ioaddr->lbal_addr);
371 tf->lbam = readb((void __iomem *)ioaddr->lbam_addr);
372 tf->lbah = readb((void __iomem *)ioaddr->lbah_addr);
373 tf->device = readb((void __iomem *)ioaddr->device_addr);
374
375 if (tf->flags & ATA_TFLAG_LBA48) {
376 writeb(tf->ctl | ATA_HOB, (void __iomem *) ap->ioaddr.ctl_addr);
377 tf->hob_feature = readb((void __iomem *)ioaddr->error_addr);
378 tf->hob_nsect = readb((void __iomem *)ioaddr->nsect_addr);
379 tf->hob_lbal = readb((void __iomem *)ioaddr->lbal_addr);
380 tf->hob_lbam = readb((void __iomem *)ioaddr->lbam_addr);
381 tf->hob_lbah = readb((void __iomem *)ioaddr->lbah_addr);
382 }
383}
384
385
386/**
387 * ata_tf_read - input device's ATA taskfile shadow registers
388 * @ap: Port from which input is read
389 * @tf: ATA taskfile register set for storing input
390 *
391 * Reads ATA taskfile registers for currently-selected device
392 * into @tf.
393 *
394 * Reads nsect, lbal, lbam, lbah, and device. If ATA_TFLAG_LBA48
395 * is set, also reads the hob registers.
396 *
397 * May be used as the tf_read() entry in ata_port_operations.
398 *
399 * LOCKING:
400 * Inherited from caller.
401 */
402void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
403{
404 if (ap->flags & ATA_FLAG_MMIO)
405 ata_tf_read_mmio(ap, tf);
406 else
407 ata_tf_read_pio(ap, tf);
408}
409
410/**
411 * ata_check_status_pio - Read device status reg & clear interrupt
412 * @ap: port where the device is
413 *
414 * Reads ATA taskfile status register for currently-selected device
415 * and return its value. This also clears pending interrupts
416 * from this device
417 *
418 * LOCKING:
419 * Inherited from caller.
420 */
421static u8 ata_check_status_pio(struct ata_port *ap)
422{
423 return inb(ap->ioaddr.status_addr);
424}
425
426/**
427 * ata_check_status_mmio - Read device status reg & clear interrupt
428 * @ap: port where the device is
429 *
430 * Reads ATA taskfile status register for currently-selected device
431 * via MMIO and return its value. This also clears pending interrupts
432 * from this device
433 *
434 * LOCKING:
435 * Inherited from caller.
436 */
437static u8 ata_check_status_mmio(struct ata_port *ap)
438{
439 return readb((void __iomem *) ap->ioaddr.status_addr);
440}
441
442
443/**
444 * ata_check_status - Read device status reg & clear interrupt
445 * @ap: port where the device is
446 *
447 * Reads ATA taskfile status register for currently-selected device
448 * and return its value. This also clears pending interrupts
449 * from this device
450 *
451 * May be used as the check_status() entry in ata_port_operations.
452 *
453 * LOCKING:
454 * Inherited from caller.
455 */
456u8 ata_check_status(struct ata_port *ap)
457{
458 if (ap->flags & ATA_FLAG_MMIO)
459 return ata_check_status_mmio(ap);
460 return ata_check_status_pio(ap);
461}
462
463
464/**
465 * ata_altstatus - Read device alternate status reg
466 * @ap: port where the device is
467 *
468 * Reads ATA taskfile alternate status register for
469 * currently-selected device and return its value.
470 *
471 * Note: may NOT be used as the check_altstatus() entry in
472 * ata_port_operations.
473 *
474 * LOCKING:
475 * Inherited from caller.
476 */
477u8 ata_altstatus(struct ata_port *ap)
478{
479 if (ap->ops->check_altstatus)
480 return ap->ops->check_altstatus(ap);
481
482 if (ap->flags & ATA_FLAG_MMIO)
483 return readb((void __iomem *)ap->ioaddr.altstatus_addr);
484 return inb(ap->ioaddr.altstatus_addr);
485}
486
487 87
488/** 88/**
489 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure 89 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
@@ -834,6 +434,7 @@ unsigned int ata_dev_classify(const struct ata_taskfile *tf)
834 * ata_dev_try_classify - Parse returned ATA device signature 434 * ata_dev_try_classify - Parse returned ATA device signature
835 * @ap: ATA channel to examine 435 * @ap: ATA channel to examine
836 * @device: Device to examine (starting at zero) 436 * @device: Device to examine (starting at zero)
437 * @r_err: Value of error register on completion
837 * 438 *
838 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs, 439 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
839 * an ATA/ATAPI-defined set of values is placed in the ATA 440 * an ATA/ATAPI-defined set of values is placed in the ATA
@@ -846,11 +447,14 @@ unsigned int ata_dev_classify(const struct ata_taskfile *tf)
846 * 447 *
847 * LOCKING: 448 * LOCKING:
848 * caller. 449 * caller.
450 *
451 * RETURNS:
452 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
849 */ 453 */
850 454
851static u8 ata_dev_try_classify(struct ata_port *ap, unsigned int device) 455static unsigned int
456ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
852{ 457{
853 struct ata_device *dev = &ap->device[device];
854 struct ata_taskfile tf; 458 struct ata_taskfile tf;
855 unsigned int class; 459 unsigned int class;
856 u8 err; 460 u8 err;
@@ -861,8 +465,8 @@ static u8 ata_dev_try_classify(struct ata_port *ap, unsigned int device)
861 465
862 ap->ops->tf_read(ap, &tf); 466 ap->ops->tf_read(ap, &tf);
863 err = tf.feature; 467 err = tf.feature;
864 468 if (r_err)
865 dev->class = ATA_DEV_NONE; 469 *r_err = err;
866 470
867 /* see if device passed diags */ 471 /* see if device passed diags */
868 if (err == 1) 472 if (err == 1)
@@ -870,22 +474,20 @@ static u8 ata_dev_try_classify(struct ata_port *ap, unsigned int device)
870 else if ((device == 0) && (err == 0x81)) 474 else if ((device == 0) && (err == 0x81))
871 /* do nothing */ ; 475 /* do nothing */ ;
872 else 476 else
873 return err; 477 return ATA_DEV_NONE;
874 478
875 /* determine if device if ATA or ATAPI */ 479 /* determine if device is ATA or ATAPI */
876 class = ata_dev_classify(&tf); 480 class = ata_dev_classify(&tf);
481
877 if (class == ATA_DEV_UNKNOWN) 482 if (class == ATA_DEV_UNKNOWN)
878 return err; 483 return ATA_DEV_NONE;
879 if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0)) 484 if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
880 return err; 485 return ATA_DEV_NONE;
881 486 return class;
882 dev->class = class;
883
884 return err;
885} 487}
886 488
887/** 489/**
888 * ata_dev_id_string - Convert IDENTIFY DEVICE page into string 490 * ata_id_string - Convert IDENTIFY DEVICE page into string
889 * @id: IDENTIFY DEVICE results we will examine 491 * @id: IDENTIFY DEVICE results we will examine
890 * @s: string into which data is output 492 * @s: string into which data is output
891 * @ofs: offset into identify device page 493 * @ofs: offset into identify device page
@@ -899,8 +501,8 @@ static u8 ata_dev_try_classify(struct ata_port *ap, unsigned int device)
899 * caller. 501 * caller.
900 */ 502 */
901 503
902void ata_dev_id_string(const u16 *id, unsigned char *s, 504void ata_id_string(const u16 *id, unsigned char *s,
903 unsigned int ofs, unsigned int len) 505 unsigned int ofs, unsigned int len)
904{ 506{
905 unsigned int c; 507 unsigned int c;
906 508
@@ -918,6 +520,49 @@ void ata_dev_id_string(const u16 *id, unsigned char *s,
918 } 520 }
919} 521}
920 522
523/**
524 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
525 * @id: IDENTIFY DEVICE results we will examine
526 * @s: string into which data is output
527 * @ofs: offset into identify device page
528 * @len: length of string to return. must be an odd number.
529 *
530 * This function is identical to ata_id_string except that it
531 * trims trailing spaces and terminates the resulting string with
532 * null. @len must be actual maximum length (even number) + 1.
533 *
534 * LOCKING:
535 * caller.
536 */
537void ata_id_c_string(const u16 *id, unsigned char *s,
538 unsigned int ofs, unsigned int len)
539{
540 unsigned char *p;
541
542 WARN_ON(!(len & 1));
543
544 ata_id_string(id, s, ofs, len - 1);
545
546 p = s + strnlen(s, len - 1);
547 while (p > s && p[-1] == ' ')
548 p--;
549 *p = '\0';
550}
551
552static u64 ata_id_n_sectors(const u16 *id)
553{
554 if (ata_id_has_lba(id)) {
555 if (ata_id_has_lba48(id))
556 return ata_id_u64(id, 100);
557 else
558 return ata_id_u32(id, 60);
559 } else {
560 if (ata_id_current_chs_valid(id))
561 return ata_id_u32(id, 57);
562 else
563 return id[1] * id[3] * id[6];
564 }
565}
921 566
922/** 567/**
923 * ata_noop_dev_select - Select device 0/1 on ATA bus 568 * ata_noop_dev_select - Select device 0/1 on ATA bus
@@ -1007,41 +652,41 @@ void ata_dev_select(struct ata_port *ap, unsigned int device,
1007 652
1008/** 653/**
1009 * ata_dump_id - IDENTIFY DEVICE info debugging output 654 * ata_dump_id - IDENTIFY DEVICE info debugging output
1010 * @dev: Device whose IDENTIFY DEVICE page we will dump 655 * @id: IDENTIFY DEVICE page to dump
1011 * 656 *
1012 * Dump selected 16-bit words from a detected device's 657 * Dump selected 16-bit words from the given IDENTIFY DEVICE
1013 * IDENTIFY PAGE page. 658 * page.
1014 * 659 *
1015 * LOCKING: 660 * LOCKING:
1016 * caller. 661 * caller.
1017 */ 662 */
1018 663
1019static inline void ata_dump_id(const struct ata_device *dev) 664static inline void ata_dump_id(const u16 *id)
1020{ 665{
1021 DPRINTK("49==0x%04x " 666 DPRINTK("49==0x%04x "
1022 "53==0x%04x " 667 "53==0x%04x "
1023 "63==0x%04x " 668 "63==0x%04x "
1024 "64==0x%04x " 669 "64==0x%04x "
1025 "75==0x%04x \n", 670 "75==0x%04x \n",
1026 dev->id[49], 671 id[49],
1027 dev->id[53], 672 id[53],
1028 dev->id[63], 673 id[63],
1029 dev->id[64], 674 id[64],
1030 dev->id[75]); 675 id[75]);
1031 DPRINTK("80==0x%04x " 676 DPRINTK("80==0x%04x "
1032 "81==0x%04x " 677 "81==0x%04x "
1033 "82==0x%04x " 678 "82==0x%04x "
1034 "83==0x%04x " 679 "83==0x%04x "
1035 "84==0x%04x \n", 680 "84==0x%04x \n",
1036 dev->id[80], 681 id[80],
1037 dev->id[81], 682 id[81],
1038 dev->id[82], 683 id[82],
1039 dev->id[83], 684 id[83],
1040 dev->id[84]); 685 id[84]);
1041 DPRINTK("88==0x%04x " 686 DPRINTK("88==0x%04x "
1042 "93==0x%04x\n", 687 "93==0x%04x\n",
1043 dev->id[88], 688 id[88],
1044 dev->id[93]); 689 id[93]);
1045} 690}
1046 691
1047/* 692/*
@@ -1073,24 +718,77 @@ static unsigned int ata_pio_modes(const struct ata_device *adev)
1073 timing API will get this right anyway */ 718 timing API will get this right anyway */
1074} 719}
1075 720
1076struct ata_exec_internal_arg { 721static inline void
1077 unsigned int err_mask; 722ata_queue_packet_task(struct ata_port *ap)
1078 struct ata_taskfile *tf; 723{
1079 struct completion *waiting; 724 if (!(ap->flags & ATA_FLAG_FLUSH_PIO_TASK))
1080}; 725 queue_work(ata_wq, &ap->packet_task);
726}
1081 727
1082int ata_qc_complete_internal(struct ata_queued_cmd *qc) 728static inline void
729ata_queue_pio_task(struct ata_port *ap)
1083{ 730{
1084 struct ata_exec_internal_arg *arg = qc->private_data; 731 if (!(ap->flags & ATA_FLAG_FLUSH_PIO_TASK))
1085 struct completion *waiting = arg->waiting; 732 queue_work(ata_wq, &ap->pio_task);
733}
1086 734
1087 if (!(qc->err_mask & ~AC_ERR_DEV)) 735static inline void
1088 qc->ap->ops->tf_read(qc->ap, arg->tf); 736ata_queue_delayed_pio_task(struct ata_port *ap, unsigned long delay)
1089 arg->err_mask = qc->err_mask; 737{
1090 arg->waiting = NULL; 738 if (!(ap->flags & ATA_FLAG_FLUSH_PIO_TASK))
1091 complete(waiting); 739 queue_delayed_work(ata_wq, &ap->pio_task, delay);
740}
1092 741
1093 return 0; 742/**
743 * ata_flush_pio_tasks - Flush pio_task and packet_task
744 * @ap: the target ata_port
745 *
746 * After this function completes, pio_task and packet_task are
 747 * guaranteed not to be running or scheduled.
748 *
749 * LOCKING:
750 * Kernel thread context (may sleep)
751 */
752
753static void ata_flush_pio_tasks(struct ata_port *ap)
754{
755 int tmp = 0;
756 unsigned long flags;
757
758 DPRINTK("ENTER\n");
759
760 spin_lock_irqsave(&ap->host_set->lock, flags);
761 ap->flags |= ATA_FLAG_FLUSH_PIO_TASK;
762 spin_unlock_irqrestore(&ap->host_set->lock, flags);
763
764 DPRINTK("flush #1\n");
765 flush_workqueue(ata_wq);
766
767 /*
768 * At this point, if a task is running, it's guaranteed to see
769 * the FLUSH flag; thus, it will never queue pio tasks again.
770 * Cancel and flush.
771 */
772 tmp |= cancel_delayed_work(&ap->pio_task);
773 tmp |= cancel_delayed_work(&ap->packet_task);
774 if (!tmp) {
775 DPRINTK("flush #2\n");
776 flush_workqueue(ata_wq);
777 }
778
779 spin_lock_irqsave(&ap->host_set->lock, flags);
780 ap->flags &= ~ATA_FLAG_FLUSH_PIO_TASK;
781 spin_unlock_irqrestore(&ap->host_set->lock, flags);
782
783 DPRINTK("EXIT\n");
784}
785
786void ata_qc_complete_internal(struct ata_queued_cmd *qc)
787{
788 struct completion *waiting = qc->private_data;
789
790 qc->ap->ops->tf_read(qc->ap, &qc->tf);
791 complete(waiting);
1094} 792}
1095 793
1096/** 794/**
@@ -1121,7 +819,7 @@ ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
1121 struct ata_queued_cmd *qc; 819 struct ata_queued_cmd *qc;
1122 DECLARE_COMPLETION(wait); 820 DECLARE_COMPLETION(wait);
1123 unsigned long flags; 821 unsigned long flags;
1124 struct ata_exec_internal_arg arg; 822 unsigned int err_mask;
1125 823
1126 spin_lock_irqsave(&ap->host_set->lock, flags); 824 spin_lock_irqsave(&ap->host_set->lock, flags);
1127 825
@@ -1135,13 +833,12 @@ ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
1135 qc->nsect = buflen / ATA_SECT_SIZE; 833 qc->nsect = buflen / ATA_SECT_SIZE;
1136 } 834 }
1137 835
1138 arg.waiting = &wait; 836 qc->private_data = &wait;
1139 arg.tf = tf;
1140 qc->private_data = &arg;
1141 qc->complete_fn = ata_qc_complete_internal; 837 qc->complete_fn = ata_qc_complete_internal;
1142 838
1143 if (ata_qc_issue(qc)) 839 qc->err_mask = ata_qc_issue(qc);
1144 goto issue_fail; 840 if (qc->err_mask)
841 ata_qc_complete(qc);
1145 842
1146 spin_unlock_irqrestore(&ap->host_set->lock, flags); 843 spin_unlock_irqrestore(&ap->host_set->lock, flags);
1147 844
@@ -1154,8 +851,8 @@ ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
1154 * before the caller cleans up, it will result in a 851 * before the caller cleans up, it will result in a
1155 * spurious interrupt. We can live with that. 852 * spurious interrupt. We can live with that.
1156 */ 853 */
1157 if (arg.waiting) { 854 if (qc->flags & ATA_QCFLAG_ACTIVE) {
1158 qc->err_mask = AC_ERR_OTHER; 855 qc->err_mask = AC_ERR_TIMEOUT;
1159 ata_qc_complete(qc); 856 ata_qc_complete(qc);
1160 printk(KERN_WARNING "ata%u: qc timeout (cmd 0x%x)\n", 857 printk(KERN_WARNING "ata%u: qc timeout (cmd 0x%x)\n",
1161 ap->id, command); 858 ap->id, command);
@@ -1164,12 +861,12 @@ ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
1164 spin_unlock_irqrestore(&ap->host_set->lock, flags); 861 spin_unlock_irqrestore(&ap->host_set->lock, flags);
1165 } 862 }
1166 863
1167 return arg.err_mask; 864 *tf = qc->tf;
865 err_mask = qc->err_mask;
1168 866
1169 issue_fail:
1170 ata_qc_free(qc); 867 ata_qc_free(qc);
1171 spin_unlock_irqrestore(&ap->host_set->lock, flags); 868
1172 return AC_ERR_OTHER; 869 return err_mask;
1173} 870}
1174 871
1175/** 872/**
@@ -1231,12 +928,11 @@ static void ata_dev_identify(struct ata_port *ap, unsigned int device)
1231{ 928{
1232 struct ata_device *dev = &ap->device[device]; 929 struct ata_device *dev = &ap->device[device];
1233 unsigned int major_version; 930 unsigned int major_version;
1234 u16 tmp;
1235 unsigned long xfer_modes; 931 unsigned long xfer_modes;
1236 unsigned int using_edd; 932 unsigned int using_edd;
1237 struct ata_taskfile tf; 933 struct ata_taskfile tf;
1238 unsigned int err_mask; 934 unsigned int err_mask;
1239 int rc; 935 int i, rc;
1240 936
1241 if (!ata_dev_present(dev)) { 937 if (!ata_dev_present(dev)) {
1242 DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n", 938 DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n",
@@ -1244,15 +940,16 @@ static void ata_dev_identify(struct ata_port *ap, unsigned int device)
1244 return; 940 return;
1245 } 941 }
1246 942
1247 if (ap->flags & (ATA_FLAG_SRST | ATA_FLAG_SATA_RESET)) 943 if (ap->ops->probe_reset ||
944 ap->flags & (ATA_FLAG_SRST | ATA_FLAG_SATA_RESET))
1248 using_edd = 0; 945 using_edd = 0;
1249 else 946 else
1250 using_edd = 1; 947 using_edd = 1;
1251 948
1252 DPRINTK("ENTER, host %u, dev %u\n", ap->id, device); 949 DPRINTK("ENTER, host %u, dev %u\n", ap->id, device);
1253 950
1254 assert (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ATAPI || 951 WARN_ON(dev->class != ATA_DEV_ATA && dev->class != ATA_DEV_ATAPI &&
1255 dev->class == ATA_DEV_NONE); 952 dev->class != ATA_DEV_NONE);
1256 953
1257 ata_dev_select(ap, device, 1, 1); /* select device 0/1 */ 954 ata_dev_select(ap, device, 1, 1); /* select device 0/1 */
1258 955
@@ -1324,18 +1021,17 @@ retry:
1324 if (!xfer_modes) 1021 if (!xfer_modes)
1325 xfer_modes = ata_pio_modes(dev); 1022 xfer_modes = ata_pio_modes(dev);
1326 1023
1327 ata_dump_id(dev); 1024 ata_dump_id(dev->id);
1328 1025
1329 /* ATA-specific feature tests */ 1026 /* ATA-specific feature tests */
1330 if (dev->class == ATA_DEV_ATA) { 1027 if (dev->class == ATA_DEV_ATA) {
1028 dev->n_sectors = ata_id_n_sectors(dev->id);
1029
1331 if (!ata_id_is_ata(dev->id)) /* sanity check */ 1030 if (!ata_id_is_ata(dev->id)) /* sanity check */
1332 goto err_out_nosup; 1031 goto err_out_nosup;
1333 1032
1334 /* get major version */ 1033 /* get major version */
1335 tmp = dev->id[ATA_ID_MAJOR_VER]; 1034 major_version = ata_id_major_version(dev->id);
1336 for (major_version = 14; major_version >= 1; major_version--)
1337 if (tmp & (1 << major_version))
1338 break;
1339 1035
1340 /* 1036 /*
1341 * The exact sequence expected by certain pre-ATA4 drives is: 1037 * The exact sequence expected by certain pre-ATA4 drives is:
@@ -1346,7 +1042,12 @@ retry:
1346 * Some drives were very specific about that exact sequence. 1042 * Some drives were very specific about that exact sequence.
1347 */ 1043 */
1348 if (major_version < 4 || (!ata_id_has_lba(dev->id))) { 1044 if (major_version < 4 || (!ata_id_has_lba(dev->id))) {
1349 ata_dev_init_params(ap, dev); 1045 err_mask = ata_dev_init_params(ap, dev);
1046 if (err_mask) {
1047 printk(KERN_ERR "ata%u: failed to init "
1048 "parameters, disabled\n", ap->id);
1049 goto err_out;
1050 }
1350 1051
1351 /* current CHS translation info (id[53-58]) might be 1052 /* current CHS translation info (id[53-58]) might be
1352 * changed. reread the identify device info. 1053 * changed. reread the identify device info.
@@ -1357,12 +1058,8 @@ retry:
1357 if (ata_id_has_lba(dev->id)) { 1058 if (ata_id_has_lba(dev->id)) {
1358 dev->flags |= ATA_DFLAG_LBA; 1059 dev->flags |= ATA_DFLAG_LBA;
1359 1060
1360 if (ata_id_has_lba48(dev->id)) { 1061 if (ata_id_has_lba48(dev->id))
1361 dev->flags |= ATA_DFLAG_LBA48; 1062 dev->flags |= ATA_DFLAG_LBA48;
1362 dev->n_sectors = ata_id_u64(dev->id, 100);
1363 } else {
1364 dev->n_sectors = ata_id_u32(dev->id, 60);
1365 }
1366 1063
1367 /* print device info to dmesg */ 1064 /* print device info to dmesg */
1368 printk(KERN_INFO "ata%u: dev %u ATA-%d, max %s, %Lu sectors:%s\n", 1065 printk(KERN_INFO "ata%u: dev %u ATA-%d, max %s, %Lu sectors:%s\n",
@@ -1378,15 +1075,12 @@ retry:
1378 dev->cylinders = dev->id[1]; 1075 dev->cylinders = dev->id[1];
1379 dev->heads = dev->id[3]; 1076 dev->heads = dev->id[3];
1380 dev->sectors = dev->id[6]; 1077 dev->sectors = dev->id[6];
1381 dev->n_sectors = dev->cylinders * dev->heads * dev->sectors;
1382 1078
1383 if (ata_id_current_chs_valid(dev->id)) { 1079 if (ata_id_current_chs_valid(dev->id)) {
1384 /* Current CHS translation is valid. */ 1080 /* Current CHS translation is valid. */
1385 dev->cylinders = dev->id[54]; 1081 dev->cylinders = dev->id[54];
1386 dev->heads = dev->id[55]; 1082 dev->heads = dev->id[55];
1387 dev->sectors = dev->id[56]; 1083 dev->sectors = dev->id[56];
1388
1389 dev->n_sectors = ata_id_u32(dev->id, 57);
1390 } 1084 }
1391 1085
1392 /* print device info to dmesg */ 1086 /* print device info to dmesg */
@@ -1399,7 +1093,7 @@ retry:
1399 1093
1400 } 1094 }
1401 1095
1402 ap->host->max_cmd_len = 16; 1096 dev->cdb_len = 16;
1403 } 1097 }
1404 1098
1405 /* ATAPI-specific feature tests */ 1099 /* ATAPI-specific feature tests */
@@ -1412,8 +1106,7 @@ retry:
1412 printk(KERN_WARNING "ata%u: unsupported CDB len\n", ap->id); 1106 printk(KERN_WARNING "ata%u: unsupported CDB len\n", ap->id);
1413 goto err_out_nosup; 1107 goto err_out_nosup;
1414 } 1108 }
1415 ap->cdb_len = (unsigned int) rc; 1109 dev->cdb_len = (unsigned int) rc;
1416 ap->host->max_cmd_len = (unsigned char) ap->cdb_len;
1417 1110
1418 /* print device info to dmesg */ 1111 /* print device info to dmesg */
1419 printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n", 1112 printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n",
@@ -1421,6 +1114,12 @@ retry:
1421 ata_mode_string(xfer_modes)); 1114 ata_mode_string(xfer_modes));
1422 } 1115 }
1423 1116
1117 ap->host->max_cmd_len = 0;
1118 for (i = 0; i < ATA_MAX_DEVICES; i++)
1119 ap->host->max_cmd_len = max_t(unsigned int,
1120 ap->host->max_cmd_len,
1121 ap->device[i].cdb_len);
1122
1424 DPRINTK("EXIT, drv_stat = 0x%x\n", ata_chk_status(ap)); 1123 DPRINTK("EXIT, drv_stat = 0x%x\n", ata_chk_status(ap));
1425 return; 1124 return;
1426 1125
@@ -1433,30 +1132,28 @@ err_out:
1433} 1132}
1434 1133
1435 1134
1436static inline u8 ata_dev_knobble(const struct ata_port *ap) 1135static inline u8 ata_dev_knobble(const struct ata_port *ap,
1136 struct ata_device *dev)
1437{ 1137{
1438 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(ap->device->id))); 1138 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
1439} 1139}
1440 1140
1441/** 1141/**
1442 * ata_dev_config - Run device specific handlers and check for 1142 * ata_dev_config - Run device specific handlers & check for SATA->PATA bridges
1443 * SATA->PATA bridges 1143 * @ap: Bus
1444 * @ap: Bus 1144 * @i: Device
1445 * @i: Device
1446 * 1145 *
1447 * LOCKING: 1146 * LOCKING:
1448 */ 1147 */
1449 1148
1450void ata_dev_config(struct ata_port *ap, unsigned int i) 1149void ata_dev_config(struct ata_port *ap, unsigned int i)
1451{ 1150{
1452 /* limit bridge transfers to udma5, 200 sectors */ 1151 /* limit bridge transfers to udma5, 200 sectors */
1453 if (ata_dev_knobble(ap)) { 1152 if (ata_dev_knobble(ap, &ap->device[i])) {
1454 printk(KERN_INFO "ata%u(%u): applying bridge limits\n", 1153 printk(KERN_INFO "ata%u(%u): applying bridge limits\n",
1455 ap->id, ap->device->devno); 1154 ap->id, i);
1456 ap->udma_mask &= ATA_UDMA5; 1155 ap->udma_mask &= ATA_UDMA5;
1457 ap->host->max_sectors = ATA_MAX_SECTORS; 1156 ap->device[i].max_sectors = ATA_MAX_SECTORS;
1458 ap->host->hostt->max_sectors = ATA_MAX_SECTORS;
1459 ap->device[i].flags |= ATA_DFLAG_LOCK_SECTORS;
1460 } 1157 }
1461 1158
1462 if (ap->ops->dev_config) 1159 if (ap->ops->dev_config)
@@ -1482,7 +1179,27 @@ static int ata_bus_probe(struct ata_port *ap)
1482{ 1179{
1483 unsigned int i, found = 0; 1180 unsigned int i, found = 0;
1484 1181
1485 ap->ops->phy_reset(ap); 1182 if (ap->ops->probe_reset) {
1183 unsigned int classes[ATA_MAX_DEVICES];
1184 int rc;
1185
1186 ata_port_probe(ap);
1187
1188 rc = ap->ops->probe_reset(ap, classes);
1189 if (rc == 0) {
1190 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1191 if (classes[i] == ATA_DEV_UNKNOWN)
1192 classes[i] = ATA_DEV_NONE;
1193 ap->device[i].class = classes[i];
1194 }
1195 } else {
1196 printk(KERN_ERR "ata%u: probe reset failed, "
1197 "disabling port\n", ap->id);
1198 ata_port_disable(ap);
1199 }
1200 } else
1201 ap->ops->phy_reset(ap);
1202
1486 if (ap->flags & ATA_FLAG_PORT_DISABLED) 1203 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1487 goto err_out; 1204 goto err_out;
1488 1205
@@ -1526,6 +1243,41 @@ void ata_port_probe(struct ata_port *ap)
1526} 1243}
1527 1244
1528/** 1245/**
1246 * sata_print_link_status - Print SATA link status
1247 * @ap: SATA port to printk link status about
1248 *
1249 * This function prints link speed and status of a SATA link.
1250 *
1251 * LOCKING:
1252 * None.
1253 */
1254static void sata_print_link_status(struct ata_port *ap)
1255{
1256 u32 sstatus, tmp;
1257 const char *speed;
1258
1259 if (!ap->ops->scr_read)
1260 return;
1261
1262 sstatus = scr_read(ap, SCR_STATUS);
1263
1264 if (sata_dev_present(ap)) {
1265 tmp = (sstatus >> 4) & 0xf;
1266 if (tmp & (1 << 0))
1267 speed = "1.5";
1268 else if (tmp & (1 << 1))
1269 speed = "3.0";
1270 else
1271 speed = "<unknown>";
1272 printk(KERN_INFO "ata%u: SATA link up %s Gbps (SStatus %X)\n",
1273 ap->id, speed, sstatus);
1274 } else {
1275 printk(KERN_INFO "ata%u: SATA link down (SStatus %X)\n",
1276 ap->id, sstatus);
1277 }
1278}
1279
1280/**
1529 * __sata_phy_reset - Wake/reset a low-level SATA PHY 1281 * __sata_phy_reset - Wake/reset a low-level SATA PHY
1530 * @ap: SATA port associated with target SATA PHY. 1282 * @ap: SATA port associated with target SATA PHY.
1531 * 1283 *
@@ -1559,27 +1311,14 @@ void __sata_phy_reset(struct ata_port *ap)
1559 break; 1311 break;
1560 } while (time_before(jiffies, timeout)); 1312 } while (time_before(jiffies, timeout));
1561 1313
1562 /* TODO: phy layer with polling, timeouts, etc. */ 1314 /* print link status */
1563 sstatus = scr_read(ap, SCR_STATUS); 1315 sata_print_link_status(ap);
1564 if (sata_dev_present(ap)) {
1565 const char *speed;
1566 u32 tmp;
1567 1316
1568 tmp = (sstatus >> 4) & 0xf; 1317 /* TODO: phy layer with polling, timeouts, etc. */
1569 if (tmp & (1 << 0)) 1318 if (sata_dev_present(ap))
1570 speed = "1.5";
1571 else if (tmp & (1 << 1))
1572 speed = "3.0";
1573 else
1574 speed = "<unknown>";
1575 printk(KERN_INFO "ata%u: SATA link up %s Gbps (SStatus %X)\n",
1576 ap->id, speed, sstatus);
1577 ata_port_probe(ap); 1319 ata_port_probe(ap);
1578 } else { 1320 else
1579 printk(KERN_INFO "ata%u: SATA link down (SStatus %X)\n",
1580 ap->id, sstatus);
1581 ata_port_disable(ap); 1321 ata_port_disable(ap);
1582 }
1583 1322
1584 if (ap->flags & ATA_FLAG_PORT_DISABLED) 1323 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1585 return; 1324 return;
@@ -1752,9 +1491,9 @@ int ata_timing_compute(struct ata_device *adev, unsigned short speed,
1752 ata_timing_quantize(t, t, T, UT); 1491 ata_timing_quantize(t, t, T, UT);
1753 1492
1754 /* 1493 /*
1755 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY, S.M.A.R.T 1494 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
1756 * and some other commands. We have to ensure that the DMA cycle timing is 1495 * S.M.A.R.T * and some other commands. We have to ensure that the
1757 * slower/equal than the fastest PIO timing. 1496 * DMA cycle timing is slower/equal than the fastest PIO timing.
1758 */ 1497 */
1759 1498
1760 if (speed > XFER_PIO_4) { 1499 if (speed > XFER_PIO_4) {
@@ -1763,7 +1502,7 @@ int ata_timing_compute(struct ata_device *adev, unsigned short speed,
1763 } 1502 }
1764 1503
1765 /* 1504 /*
1766 * Lenghten active & recovery time so that cycle time is correct. 1505 * Lengthen active & recovery time so that cycle time is correct.
1767 */ 1506 */
1768 1507
1769 if (t->act8b + t->rec8b < t->cyc8b) { 1508 if (t->act8b + t->rec8b < t->cyc8b) {
@@ -1882,7 +1621,6 @@ static void ata_host_set_dma(struct ata_port *ap, u8 xfer_mode,
1882 * 1621 *
1883 * LOCKING: 1622 * LOCKING:
1884 * PCI/etc. bus probe sem. 1623 * PCI/etc. bus probe sem.
1885 *
1886 */ 1624 */
1887static void ata_set_mode(struct ata_port *ap) 1625static void ata_set_mode(struct ata_port *ap)
1888{ 1626{
@@ -1922,6 +1660,26 @@ err_out:
1922} 1660}
1923 1661
1924/** 1662/**
1663 * ata_tf_to_host - issue ATA taskfile to host controller
1664 * @ap: port to which command is being issued
1665 * @tf: ATA taskfile register set
1666 *
1667 * Issues ATA taskfile register set to ATA host controller,
1668 * with proper synchronization with interrupt handler and
1669 * other threads.
1670 *
1671 * LOCKING:
1672 * spin_lock_irqsave(host_set lock)
1673 */
1674
1675static inline void ata_tf_to_host(struct ata_port *ap,
1676 const struct ata_taskfile *tf)
1677{
1678 ap->ops->tf_load(ap, tf);
1679 ap->ops->exec_command(ap, tf);
1680}
1681
1682/**
1925 * ata_busy_sleep - sleep until BSY clears, or timeout 1683 * ata_busy_sleep - sleep until BSY clears, or timeout
1926 * @ap: port containing status register to be polled 1684 * @ap: port containing status register to be polled
1927 * @tmout_pat: impatience timeout 1685 * @tmout_pat: impatience timeout
@@ -1931,12 +1689,10 @@ err_out:
1931 * or a timeout occurs. 1689 * or a timeout occurs.
1932 * 1690 *
1933 * LOCKING: None. 1691 * LOCKING: None.
1934 *
1935 */ 1692 */
1936 1693
1937static unsigned int ata_busy_sleep (struct ata_port *ap, 1694unsigned int ata_busy_sleep (struct ata_port *ap,
1938 unsigned long tmout_pat, 1695 unsigned long tmout_pat, unsigned long tmout)
1939 unsigned long tmout)
1940{ 1696{
1941 unsigned long timer_start, timeout; 1697 unsigned long timer_start, timeout;
1942 u8 status; 1698 u8 status;
@@ -2155,9 +1911,9 @@ void ata_bus_reset(struct ata_port *ap)
2155 /* 1911 /*
2156 * determine by signature whether we have ATA or ATAPI devices 1912 * determine by signature whether we have ATA or ATAPI devices
2157 */ 1913 */
2158 err = ata_dev_try_classify(ap, 0); 1914 ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
2159 if ((slave_possible) && (err != 0x81)) 1915 if ((slave_possible) && (err != 0x81))
2160 ata_dev_try_classify(ap, 1); 1916 ap->device[1].class = ata_dev_try_classify(ap, 1, &err);
2161 1917
2162 /* re-enable interrupts */ 1918 /* re-enable interrupts */
2163 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */ 1919 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
@@ -2192,6 +1948,334 @@ err_out:
2192 DPRINTK("EXIT\n"); 1948 DPRINTK("EXIT\n");
2193} 1949}
2194 1950
1951static int sata_phy_resume(struct ata_port *ap)
1952{
1953 unsigned long timeout = jiffies + (HZ * 5);
1954 u32 sstatus;
1955
1956 scr_write_flush(ap, SCR_CONTROL, 0x300);
1957
1958 /* Wait for phy to become ready, if necessary. */
1959 do {
1960 msleep(200);
1961 sstatus = scr_read(ap, SCR_STATUS);
1962 if ((sstatus & 0xf) != 1)
1963 return 0;
1964 } while (time_before(jiffies, timeout));
1965
1966 return -1;
1967}
1968
1969/**
1970 * ata_std_probeinit - initialize probing
1971 * @ap: port to be probed
1972 *
1973 * @ap is about to be probed. Initialize it. This function is
1974 * to be used as standard callback for ata_drive_probe_reset().
1975 *
1976 * NOTE!!! Do not use this function as probeinit if a low level
1977 * driver implements only hardreset. Just pass NULL as probeinit
1978 * in that case. Using this function is probably okay but doing
1979 * so makes reset sequence different from the original
1980 * ->phy_reset implementation and Jeff nervous. :-P
1981 */
1982extern void ata_std_probeinit(struct ata_port *ap)
1983{
1984 if (ap->flags & ATA_FLAG_SATA && ap->ops->scr_read) {
1985 sata_phy_resume(ap);
1986 if (sata_dev_present(ap))
1987 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
1988 }
1989}
1990
1991/**
1992 * ata_std_softreset - reset host port via ATA SRST
1993 * @ap: port to reset
1994 * @verbose: fail verbosely
1995 * @classes: resulting classes of attached devices
1996 *
1997 * Reset host port using ATA SRST. This function is to be used
1998 * as standard callback for ata_drive_*_reset() functions.
1999 *
2000 * LOCKING:
2001 * Kernel thread context (may sleep)
2002 *
2003 * RETURNS:
2004 * 0 on success, -errno otherwise.
2005 */
2006int ata_std_softreset(struct ata_port *ap, int verbose, unsigned int *classes)
2007{
2008 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2009 unsigned int devmask = 0, err_mask;
2010 u8 err;
2011
2012 DPRINTK("ENTER\n");
2013
2014 if (ap->ops->scr_read && !sata_dev_present(ap)) {
2015 classes[0] = ATA_DEV_NONE;
2016 goto out;
2017 }
2018
2019 /* determine if device 0/1 are present */
2020 if (ata_devchk(ap, 0))
2021 devmask |= (1 << 0);
2022 if (slave_possible && ata_devchk(ap, 1))
2023 devmask |= (1 << 1);
2024
2025 /* select device 0 again */
2026 ap->ops->dev_select(ap, 0);
2027
2028 /* issue bus reset */
2029 DPRINTK("about to softreset, devmask=%x\n", devmask);
2030 err_mask = ata_bus_softreset(ap, devmask);
2031 if (err_mask) {
2032 if (verbose)
2033 printk(KERN_ERR "ata%u: SRST failed (err_mask=0x%x)\n",
2034 ap->id, err_mask);
2035 else
2036 DPRINTK("EXIT, softreset failed (err_mask=0x%x)\n",
2037 err_mask);
2038 return -EIO;
2039 }
2040
2041 /* determine by signature whether we have ATA or ATAPI devices */
2042 classes[0] = ata_dev_try_classify(ap, 0, &err);
2043 if (slave_possible && err != 0x81)
2044 classes[1] = ata_dev_try_classify(ap, 1, &err);
2045
2046 out:
2047 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
2048 return 0;
2049}
2050
2051/**
2052 * sata_std_hardreset - reset host port via SATA phy reset
2053 * @ap: port to reset
2054 * @verbose: fail verbosely
2055 * @class: resulting class of attached device
2056 *
2057 * SATA phy-reset host port using DET bits of SControl register.
2058 * This function is to be used as standard callback for
2059 * ata_drive_*_reset().
2060 *
2061 * LOCKING:
2062 * Kernel thread context (may sleep)
2063 *
2064 * RETURNS:
2065 * 0 on success, -errno otherwise.
2066 */
2067int sata_std_hardreset(struct ata_port *ap, int verbose, unsigned int *class)
2068{
2069 DPRINTK("ENTER\n");
2070
2071 /* Issue phy wake/reset */
2072 scr_write_flush(ap, SCR_CONTROL, 0x301);
2073
2074 /*
2075 * Couldn't find anything in SATA I/II specs, but AHCI-1.1
2076 * 10.4.2 says at least 1 ms.
2077 */
2078 msleep(1);
2079
2080 /* Bring phy back */
2081 sata_phy_resume(ap);
2082
2083 /* TODO: phy layer with polling, timeouts, etc. */
2084 if (!sata_dev_present(ap)) {
2085 *class = ATA_DEV_NONE;
2086 DPRINTK("EXIT, link offline\n");
2087 return 0;
2088 }
2089
2090 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
2091 if (verbose)
2092 printk(KERN_ERR "ata%u: COMRESET failed "
2093 "(device not ready)\n", ap->id);
2094 else
2095 DPRINTK("EXIT, device not ready\n");
2096 return -EIO;
2097 }
2098
2099 ap->ops->dev_select(ap, 0); /* probably unnecessary */
2100
2101 *class = ata_dev_try_classify(ap, 0, NULL);
2102
2103 DPRINTK("EXIT, class=%u\n", *class);
2104 return 0;
2105}
2106
2107/**
2108 * ata_std_postreset - standard postreset callback
2109 * @ap: the target ata_port
2110 * @classes: classes of attached devices
2111 *
2112 * This function is invoked after a successful reset. Note that
2113 * the device might have been reset more than once using
2114 * different reset methods before postreset is invoked.
2115 *
2116 * This function is to be used as standard callback for
2117 * ata_drive_*_reset().
2118 *
2119 * LOCKING:
2120 * Kernel thread context (may sleep)
2121 */
2122void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
2123{
2124 DPRINTK("ENTER\n");
2125
2126 /* set cable type if it isn't already set */
2127 if (ap->cbl == ATA_CBL_NONE && ap->flags & ATA_FLAG_SATA)
2128 ap->cbl = ATA_CBL_SATA;
2129
2130 /* print link status */
2131 if (ap->cbl == ATA_CBL_SATA)
2132 sata_print_link_status(ap);
2133
2134 /* re-enable interrupts */
2135 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
2136 ata_irq_on(ap);
2137
2138 /* is double-select really necessary? */
2139 if (classes[0] != ATA_DEV_NONE)
2140 ap->ops->dev_select(ap, 1);
2141 if (classes[1] != ATA_DEV_NONE)
2142 ap->ops->dev_select(ap, 0);
2143
2144 /* bail out if no device is present */
2145 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2146 DPRINTK("EXIT, no device\n");
2147 return;
2148 }
2149
2150 /* set up device control */
2151 if (ap->ioaddr.ctl_addr) {
2152 if (ap->flags & ATA_FLAG_MMIO)
2153 writeb(ap->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
2154 else
2155 outb(ap->ctl, ap->ioaddr.ctl_addr);
2156 }
2157
2158 DPRINTK("EXIT\n");
2159}
2160
2161/**
2162 * ata_std_probe_reset - standard probe reset method
 2163 * @ap: port to perform probe-reset
2164 * @classes: resulting classes of attached devices
2165 *
2166 * The stock off-the-shelf ->probe_reset method.
2167 *
2168 * LOCKING:
2169 * Kernel thread context (may sleep)
2170 *
2171 * RETURNS:
2172 * 0 on success, -errno otherwise.
2173 */
2174int ata_std_probe_reset(struct ata_port *ap, unsigned int *classes)
2175{
2176 ata_reset_fn_t hardreset;
2177
2178 hardreset = NULL;
2179 if (ap->flags & ATA_FLAG_SATA && ap->ops->scr_read)
2180 hardreset = sata_std_hardreset;
2181
2182 return ata_drive_probe_reset(ap, ata_std_probeinit,
2183 ata_std_softreset, hardreset,
2184 ata_std_postreset, classes);
2185}
2186
2187static int do_probe_reset(struct ata_port *ap, ata_reset_fn_t reset,
2188 ata_postreset_fn_t postreset,
2189 unsigned int *classes)
2190{
2191 int i, rc;
2192
2193 for (i = 0; i < ATA_MAX_DEVICES; i++)
2194 classes[i] = ATA_DEV_UNKNOWN;
2195
2196 rc = reset(ap, 0, classes);
2197 if (rc)
2198 return rc;
2199
2200 /* If any class isn't ATA_DEV_UNKNOWN, consider classification
2201 * is complete and convert all ATA_DEV_UNKNOWN to
2202 * ATA_DEV_NONE.
2203 */
2204 for (i = 0; i < ATA_MAX_DEVICES; i++)
2205 if (classes[i] != ATA_DEV_UNKNOWN)
2206 break;
2207
2208 if (i < ATA_MAX_DEVICES)
2209 for (i = 0; i < ATA_MAX_DEVICES; i++)
2210 if (classes[i] == ATA_DEV_UNKNOWN)
2211 classes[i] = ATA_DEV_NONE;
2212
2213 if (postreset)
2214 postreset(ap, classes);
2215
2216 return classes[0] != ATA_DEV_UNKNOWN ? 0 : -ENODEV;
2217}
2218
2219/**
2220 * ata_drive_probe_reset - Perform probe reset with given methods
2221 * @ap: port to reset
2222 * @probeinit: probeinit method (can be NULL)
2223 * @softreset: softreset method (can be NULL)
2224 * @hardreset: hardreset method (can be NULL)
2225 * @postreset: postreset method (can be NULL)
2226 * @classes: resulting classes of attached devices
2227 *
2228 * Reset the specified port and classify attached devices using
2229 * given methods. This function prefers softreset but tries all
2230 * possible reset sequences to reset and classify devices. This
2231 * function is intended to be used for constructing ->probe_reset
2232 * callback by low level drivers.
2233 *
2234 * Reset methods should follow the following rules.
2235 *
 2236 * - Return 0 on sucess, -errno on failure.  ("success")
2237 * - If classification is supported, fill classes[] with
2238 * recognized class codes.
2239 * - If classification is not supported, leave classes[] alone.
2240 * - If verbose is non-zero, print error message on failure;
2241 * otherwise, shut up.
2242 *
2243 * LOCKING:
2244 * Kernel thread context (may sleep)
2245 *
2246 * RETURNS:
 2247 * 0 on success, -EINVAL if no reset method is available, -ENODEV
2248 * if classification fails, and any error code from reset
2249 * methods.
2250 */
2251int ata_drive_probe_reset(struct ata_port *ap, ata_probeinit_fn_t probeinit,
2252 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
2253 ata_postreset_fn_t postreset, unsigned int *classes)
2254{
2255 int rc = -EINVAL;
2256
2257 if (probeinit)
2258 probeinit(ap);
2259
2260 if (softreset) {
2261 rc = do_probe_reset(ap, softreset, postreset, classes);
2262 if (rc == 0)
2263 return 0;
2264 }
2265
2266 if (!hardreset)
2267 return rc;
2268
2269 rc = do_probe_reset(ap, hardreset, postreset, classes);
2270 if (rc == 0 || rc != -ENODEV)
2271 return rc;
2272
2273 if (softreset)
2274 rc = do_probe_reset(ap, softreset, postreset, classes);
2275
2276 return rc;
2277}
2278
2195static void ata_pr_blacklisted(const struct ata_port *ap, 2279static void ata_pr_blacklisted(const struct ata_port *ap,
2196 const struct ata_device *dev) 2280 const struct ata_device *dev)
2197{ 2281{
@@ -2233,24 +2317,13 @@ static const char * const ata_dma_blacklist [] = {
2233 2317
2234static int ata_dma_blacklisted(const struct ata_device *dev) 2318static int ata_dma_blacklisted(const struct ata_device *dev)
2235{ 2319{
2236 unsigned char model_num[40]; 2320 unsigned char model_num[41];
2237 char *s;
2238 unsigned int len;
2239 int i; 2321 int i;
2240 2322
2241 ata_dev_id_string(dev->id, model_num, ATA_ID_PROD_OFS, 2323 ata_id_c_string(dev->id, model_num, ATA_ID_PROD_OFS, sizeof(model_num));
2242 sizeof(model_num));
2243 s = &model_num[0];
2244 len = strnlen(s, sizeof(model_num));
2245
2246 /* ATAPI specifies that empty space is blank-filled; remove blanks */
2247 while ((len > 0) && (s[len - 1] == ' ')) {
2248 len--;
2249 s[len] = 0;
2250 }
2251 2324
2252 for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i++) 2325 for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i++)
2253 if (!strncmp(ata_dma_blacklist[i], s, len)) 2326 if (!strcmp(ata_dma_blacklist[i], model_num))
2254 return 1; 2327 return 1;
2255 2328
2256 return 0; 2329 return 0;
@@ -2264,7 +2337,7 @@ static unsigned int ata_get_mode_mask(const struct ata_port *ap, int shift)
2264 master = &ap->device[0]; 2337 master = &ap->device[0];
2265 slave = &ap->device[1]; 2338 slave = &ap->device[1];
2266 2339
2267 assert (ata_dev_present(master) || ata_dev_present(slave)); 2340 WARN_ON(!ata_dev_present(master) && !ata_dev_present(slave));
2268 2341
2269 if (shift == ATA_SHIFT_UDMA) { 2342 if (shift == ATA_SHIFT_UDMA) {
2270 mask = ap->udma_mask; 2343 mask = ap->udma_mask;
@@ -2446,7 +2519,7 @@ static void ata_dev_reread_id(struct ata_port *ap, struct ata_device *dev)
2446 2519
2447 swap_buf_le16(dev->id, ATA_ID_WORDS); 2520 swap_buf_le16(dev->id, ATA_ID_WORDS);
2448 2521
2449 ata_dump_id(dev); 2522 ata_dump_id(dev->id);
2450 2523
2451 DPRINTK("EXIT\n"); 2524 DPRINTK("EXIT\n");
2452 2525
@@ -2462,17 +2535,23 @@ err_out:
2462 * @dev: Device to which command will be sent 2535 * @dev: Device to which command will be sent
2463 * 2536 *
2464 * LOCKING: 2537 * LOCKING:
2538 * Kernel thread context (may sleep)
2539 *
2540 * RETURNS:
2541 * 0 on success, AC_ERR_* mask otherwise.
2465 */ 2542 */
2466 2543
2467static void ata_dev_init_params(struct ata_port *ap, struct ata_device *dev) 2544static unsigned int ata_dev_init_params(struct ata_port *ap,
2545 struct ata_device *dev)
2468{ 2546{
2469 struct ata_taskfile tf; 2547 struct ata_taskfile tf;
2548 unsigned int err_mask;
2470 u16 sectors = dev->id[6]; 2549 u16 sectors = dev->id[6];
2471 u16 heads = dev->id[3]; 2550 u16 heads = dev->id[3];
2472 2551
2473 /* Number of sectors per track 1-255. Number of heads 1-16 */ 2552 /* Number of sectors per track 1-255. Number of heads 1-16 */
2474 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16) 2553 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
2475 return; 2554 return 0;
2476 2555
2477 /* set up init dev params taskfile */ 2556 /* set up init dev params taskfile */
2478 DPRINTK("init dev params \n"); 2557 DPRINTK("init dev params \n");
@@ -2484,13 +2563,10 @@ static void ata_dev_init_params(struct ata_port *ap, struct ata_device *dev)
2484 tf.nsect = sectors; 2563 tf.nsect = sectors;
2485 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */ 2564 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
2486 2565
2487 if (ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0)) { 2566 err_mask = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
2488 printk(KERN_ERR "ata%u: failed to init parameters, disabled\n",
2489 ap->id);
2490 ata_port_disable(ap);
2491 }
2492 2567
2493 DPRINTK("EXIT\n"); 2568 DPRINTK("EXIT, err_mask=%x\n", err_mask);
2569 return err_mask;
2494} 2570}
2495 2571
2496/** 2572/**
@@ -2510,11 +2586,11 @@ static void ata_sg_clean(struct ata_queued_cmd *qc)
2510 int dir = qc->dma_dir; 2586 int dir = qc->dma_dir;
2511 void *pad_buf = NULL; 2587 void *pad_buf = NULL;
2512 2588
2513 assert(qc->flags & ATA_QCFLAG_DMAMAP); 2589 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
2514 assert(sg != NULL); 2590 WARN_ON(sg == NULL);
2515 2591
2516 if (qc->flags & ATA_QCFLAG_SINGLE) 2592 if (qc->flags & ATA_QCFLAG_SINGLE)
2517 assert(qc->n_elem <= 1); 2593 WARN_ON(qc->n_elem > 1);
2518 2594
2519 VPRINTK("unmapping %u sg elements\n", qc->n_elem); 2595 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
2520 2596
@@ -2569,8 +2645,8 @@ static void ata_fill_sg(struct ata_queued_cmd *qc)
2569 struct scatterlist *sg; 2645 struct scatterlist *sg;
2570 unsigned int idx; 2646 unsigned int idx;
2571 2647
2572 assert(qc->__sg != NULL); 2648 WARN_ON(qc->__sg == NULL);
2573 assert(qc->n_elem > 0 || qc->pad_len > 0); 2649 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
2574 2650
2575 idx = 0; 2651 idx = 0;
2576 ata_for_each_sg(sg, qc) { 2652 ata_for_each_sg(sg, qc) {
@@ -2723,7 +2799,7 @@ static int ata_sg_setup_one(struct ata_queued_cmd *qc)
2723 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ); 2799 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
2724 struct scatterlist *psg = &qc->pad_sgent; 2800 struct scatterlist *psg = &qc->pad_sgent;
2725 2801
2726 assert(qc->dev->class == ATA_DEV_ATAPI); 2802 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
2727 2803
2728 memset(pad_buf, 0, ATA_DMA_PAD_SZ); 2804 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
2729 2805
@@ -2787,7 +2863,7 @@ static int ata_sg_setup(struct ata_queued_cmd *qc)
2787 int n_elem, pre_n_elem, dir, trim_sg = 0; 2863 int n_elem, pre_n_elem, dir, trim_sg = 0;
2788 2864
2789 VPRINTK("ENTER, ata%u\n", ap->id); 2865 VPRINTK("ENTER, ata%u\n", ap->id);
2790 assert(qc->flags & ATA_QCFLAG_SG); 2866 WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
2791 2867
2792 /* we must lengthen transfers to end on a 32-bit boundary */ 2868 /* we must lengthen transfers to end on a 32-bit boundary */
2793 qc->pad_len = lsg->length & 3; 2869 qc->pad_len = lsg->length & 3;
@@ -2796,7 +2872,7 @@ static int ata_sg_setup(struct ata_queued_cmd *qc)
2796 struct scatterlist *psg = &qc->pad_sgent; 2872 struct scatterlist *psg = &qc->pad_sgent;
2797 unsigned int offset; 2873 unsigned int offset;
2798 2874
2799 assert(qc->dev->class == ATA_DEV_ATAPI); 2875 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
2800 2876
2801 memset(pad_buf, 0, ATA_DMA_PAD_SZ); 2877 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
2802 2878
@@ -2872,7 +2948,7 @@ void ata_poll_qc_complete(struct ata_queued_cmd *qc)
2872} 2948}
2873 2949
2874/** 2950/**
2875 * ata_pio_poll - 2951 * ata_pio_poll - poll using PIO, depending on current state
2876 * @ap: the target ata_port 2952 * @ap: the target ata_port
2877 * 2953 *
2878 * LOCKING: 2954 * LOCKING:
@@ -2890,7 +2966,7 @@ static unsigned long ata_pio_poll(struct ata_port *ap)
2890 unsigned int reg_state = HSM_ST_UNKNOWN; 2966 unsigned int reg_state = HSM_ST_UNKNOWN;
2891 2967
2892 qc = ata_qc_from_tag(ap, ap->active_tag); 2968 qc = ata_qc_from_tag(ap, ap->active_tag);
2893 assert(qc != NULL); 2969 WARN_ON(qc == NULL);
2894 2970
2895 switch (ap->hsm_task_state) { 2971 switch (ap->hsm_task_state) {
2896 case HSM_ST: 2972 case HSM_ST:
@@ -2911,7 +2987,7 @@ static unsigned long ata_pio_poll(struct ata_port *ap)
2911 status = ata_chk_status(ap); 2987 status = ata_chk_status(ap);
2912 if (status & ATA_BUSY) { 2988 if (status & ATA_BUSY) {
2913 if (time_after(jiffies, ap->pio_task_timeout)) { 2989 if (time_after(jiffies, ap->pio_task_timeout)) {
2914 qc->err_mask |= AC_ERR_ATA_BUS; 2990 qc->err_mask |= AC_ERR_TIMEOUT;
2915 ap->hsm_task_state = HSM_ST_TMOUT; 2991 ap->hsm_task_state = HSM_ST_TMOUT;
2916 return 0; 2992 return 0;
2917 } 2993 }
@@ -2958,7 +3034,7 @@ static int ata_pio_complete (struct ata_port *ap)
2958 } 3034 }
2959 3035
2960 qc = ata_qc_from_tag(ap, ap->active_tag); 3036 qc = ata_qc_from_tag(ap, ap->active_tag);
2961 assert(qc != NULL); 3037 WARN_ON(qc == NULL);
2962 3038
2963 drv_stat = ata_wait_idle(ap); 3039 drv_stat = ata_wait_idle(ap);
2964 if (!ata_ok(drv_stat)) { 3040 if (!ata_ok(drv_stat)) {
@@ -2969,7 +3045,7 @@ static int ata_pio_complete (struct ata_port *ap)
2969 3045
2970 ap->hsm_task_state = HSM_ST_IDLE; 3046 ap->hsm_task_state = HSM_ST_IDLE;
2971 3047
2972 assert(qc->err_mask == 0); 3048 WARN_ON(qc->err_mask);
2973 ata_poll_qc_complete(qc); 3049 ata_poll_qc_complete(qc);
2974 3050
2975 /* another command may start at this point */ 3051 /* another command may start at this point */
@@ -2979,7 +3055,7 @@ static int ata_pio_complete (struct ata_port *ap)
2979 3055
2980 3056
2981/** 3057/**
2982 * swap_buf_le16 - swap halves of 16-words in place 3058 * swap_buf_le16 - swap halves of 16-bit words in place
2983 * @buf: Buffer to swap 3059 * @buf: Buffer to swap
2984 * @buf_words: Number of 16-bit words in buffer. 3060 * @buf_words: Number of 16-bit words in buffer.
2985 * 3061 *
@@ -3289,7 +3365,7 @@ static void atapi_pio_bytes(struct ata_queued_cmd *qc)
3289err_out: 3365err_out:
3290 printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n", 3366 printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n",
3291 ap->id, dev->devno); 3367 ap->id, dev->devno);
3292 qc->err_mask |= AC_ERR_ATA_BUS; 3368 qc->err_mask |= AC_ERR_HSM;
3293 ap->hsm_task_state = HSM_ST_ERR; 3369 ap->hsm_task_state = HSM_ST_ERR;
3294} 3370}
3295 3371
@@ -3326,7 +3402,7 @@ static void ata_pio_block(struct ata_port *ap)
3326 } 3402 }
3327 3403
3328 qc = ata_qc_from_tag(ap, ap->active_tag); 3404 qc = ata_qc_from_tag(ap, ap->active_tag);
3329 assert(qc != NULL); 3405 WARN_ON(qc == NULL);
3330 3406
3331 /* check error */ 3407 /* check error */
3332 if (status & (ATA_ERR | ATA_DF)) { 3408 if (status & (ATA_ERR | ATA_DF)) {
@@ -3347,7 +3423,7 @@ static void ata_pio_block(struct ata_port *ap)
3347 } else { 3423 } else {
3348 /* handle BSY=0, DRQ=0 as error */ 3424 /* handle BSY=0, DRQ=0 as error */
3349 if ((status & ATA_DRQ) == 0) { 3425 if ((status & ATA_DRQ) == 0) {
3350 qc->err_mask |= AC_ERR_ATA_BUS; 3426 qc->err_mask |= AC_ERR_HSM;
3351 ap->hsm_task_state = HSM_ST_ERR; 3427 ap->hsm_task_state = HSM_ST_ERR;
3352 return; 3428 return;
3353 } 3429 }
@@ -3361,7 +3437,7 @@ static void ata_pio_error(struct ata_port *ap)
3361 struct ata_queued_cmd *qc; 3437 struct ata_queued_cmd *qc;
3362 3438
3363 qc = ata_qc_from_tag(ap, ap->active_tag); 3439 qc = ata_qc_from_tag(ap, ap->active_tag);
3364 assert(qc != NULL); 3440 WARN_ON(qc == NULL);
3365 3441
3366 if (qc->tf.command != ATA_CMD_PACKET) 3442 if (qc->tf.command != ATA_CMD_PACKET)
3367 printk(KERN_WARNING "ata%u: PIO error\n", ap->id); 3443 printk(KERN_WARNING "ata%u: PIO error\n", ap->id);
@@ -3369,7 +3445,7 @@ static void ata_pio_error(struct ata_port *ap)
3369 /* make sure qc->err_mask is available to 3445 /* make sure qc->err_mask is available to
3370 * know what's wrong and recover 3446 * know what's wrong and recover
3371 */ 3447 */
3372 assert(qc->err_mask); 3448 WARN_ON(qc->err_mask == 0);
3373 3449
3374 ap->hsm_task_state = HSM_ST_IDLE; 3450 ap->hsm_task_state = HSM_ST_IDLE;
3375 3451
@@ -3410,7 +3486,7 @@ fsm_start:
3410 } 3486 }
3411 3487
3412 if (timeout) 3488 if (timeout)
3413 queue_delayed_work(ata_wq, &ap->pio_task, timeout); 3489 ata_queue_delayed_pio_task(ap, timeout);
3414 else if (!qc_completed) 3490 else if (!qc_completed)
3415 goto fsm_start; 3491 goto fsm_start;
3416} 3492}
@@ -3443,15 +3519,10 @@ static void ata_qc_timeout(struct ata_queued_cmd *qc)
3443 3519
3444 DPRINTK("ENTER\n"); 3520 DPRINTK("ENTER\n");
3445 3521
3446 spin_lock_irqsave(&host_set->lock, flags); 3522 ata_flush_pio_tasks(ap);
3523 ap->hsm_task_state = HSM_ST_IDLE;
3447 3524
3448 /* hack alert! We cannot use the supplied completion 3525 spin_lock_irqsave(&host_set->lock, flags);
3449 * function from inside the ->eh_strategy_handler() thread.
3450 * libata is the only user of ->eh_strategy_handler() in
3451 * any kernel, so the default scsi_done() assumes it is
3452 * not being called from the SCSI EH.
3453 */
3454 qc->scsidone = scsi_finish_command;
3455 3526
3456 switch (qc->tf.protocol) { 3527 switch (qc->tf.protocol) {
3457 3528
@@ -3476,12 +3547,13 @@ static void ata_qc_timeout(struct ata_queued_cmd *qc)
3476 3547
3477 /* complete taskfile transaction */ 3548 /* complete taskfile transaction */
3478 qc->err_mask |= ac_err_mask(drv_stat); 3549 qc->err_mask |= ac_err_mask(drv_stat);
3479 ata_qc_complete(qc);
3480 break; 3550 break;
3481 } 3551 }
3482 3552
3483 spin_unlock_irqrestore(&host_set->lock, flags); 3553 spin_unlock_irqrestore(&host_set->lock, flags);
3484 3554
3555 ata_eh_qc_complete(qc);
3556
3485 DPRINTK("EXIT\n"); 3557 DPRINTK("EXIT\n");
3486} 3558}
3487 3559
@@ -3506,20 +3578,10 @@ static void ata_qc_timeout(struct ata_queued_cmd *qc)
3506 3578
3507void ata_eng_timeout(struct ata_port *ap) 3579void ata_eng_timeout(struct ata_port *ap)
3508{ 3580{
3509 struct ata_queued_cmd *qc;
3510
3511 DPRINTK("ENTER\n"); 3581 DPRINTK("ENTER\n");
3512 3582
3513 qc = ata_qc_from_tag(ap, ap->active_tag); 3583 ata_qc_timeout(ata_qc_from_tag(ap, ap->active_tag));
3514 if (qc)
3515 ata_qc_timeout(qc);
3516 else {
3517 printk(KERN_ERR "ata%u: BUG: timeout without command\n",
3518 ap->id);
3519 goto out;
3520 }
3521 3584
3522out:
3523 DPRINTK("EXIT\n"); 3585 DPRINTK("EXIT\n");
3524} 3586}
3525 3587
@@ -3575,21 +3637,6 @@ struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
3575 return qc; 3637 return qc;
3576} 3638}
3577 3639
3578static void __ata_qc_complete(struct ata_queued_cmd *qc)
3579{
3580 struct ata_port *ap = qc->ap;
3581 unsigned int tag;
3582
3583 qc->flags = 0;
3584 tag = qc->tag;
3585 if (likely(ata_tag_valid(tag))) {
3586 if (tag == ap->active_tag)
3587 ap->active_tag = ATA_TAG_POISON;
3588 qc->tag = ATA_TAG_POISON;
3589 clear_bit(tag, &ap->qactive);
3590 }
3591}
3592
3593/** 3640/**
3594 * ata_qc_free - free unused ata_queued_cmd 3641 * ata_qc_free - free unused ata_queued_cmd
3595 * @qc: Command to complete 3642 * @qc: Command to complete
@@ -3602,29 +3649,25 @@ static void __ata_qc_complete(struct ata_queued_cmd *qc)
3602 */ 3649 */
3603void ata_qc_free(struct ata_queued_cmd *qc) 3650void ata_qc_free(struct ata_queued_cmd *qc)
3604{ 3651{
3605 assert(qc != NULL); /* ata_qc_from_tag _might_ return NULL */ 3652 struct ata_port *ap = qc->ap;
3653 unsigned int tag;
3606 3654
3607 __ata_qc_complete(qc); 3655 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
3608}
3609 3656
3610/** 3657 qc->flags = 0;
3611 * ata_qc_complete - Complete an active ATA command 3658 tag = qc->tag;
3612 * @qc: Command to complete 3659 if (likely(ata_tag_valid(tag))) {
3613 * @err_mask: ATA Status register contents 3660 if (tag == ap->active_tag)
3614 * 3661 ap->active_tag = ATA_TAG_POISON;
3615 * Indicate to the mid and upper layers that an ATA 3662 qc->tag = ATA_TAG_POISON;
3616 * command has completed, with either an ok or not-ok status. 3663 clear_bit(tag, &ap->qactive);
3617 * 3664 }
3618 * LOCKING: 3665}
3619 * spin_lock_irqsave(host_set lock)
3620 */
3621 3666
3622void ata_qc_complete(struct ata_queued_cmd *qc) 3667void __ata_qc_complete(struct ata_queued_cmd *qc)
3623{ 3668{
3624 int rc; 3669 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
3625 3670 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
3626 assert(qc != NULL); /* ata_qc_from_tag _might_ return NULL */
3627 assert(qc->flags & ATA_QCFLAG_ACTIVE);
3628 3671
3629 if (likely(qc->flags & ATA_QCFLAG_DMAMAP)) 3672 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
3630 ata_sg_clean(qc); 3673 ata_sg_clean(qc);
@@ -3636,17 +3679,7 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
3636 qc->flags &= ~ATA_QCFLAG_ACTIVE; 3679 qc->flags &= ~ATA_QCFLAG_ACTIVE;
3637 3680
3638 /* call completion callback */ 3681 /* call completion callback */
3639 rc = qc->complete_fn(qc); 3682 qc->complete_fn(qc);
3640
3641 /* if callback indicates not to complete command (non-zero),
3642 * return immediately
3643 */
3644 if (rc != 0)
3645 return;
3646
3647 __ata_qc_complete(qc);
3648
3649 VPRINTK("EXIT\n");
3650} 3683}
3651 3684
3652static inline int ata_should_dma_map(struct ata_queued_cmd *qc) 3685static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
@@ -3686,20 +3719,20 @@ static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
3686 * spin_lock_irqsave(host_set lock) 3719 * spin_lock_irqsave(host_set lock)
3687 * 3720 *
3688 * RETURNS: 3721 * RETURNS:
3689 * Zero on success, negative on error. 3722 * Zero on success, AC_ERR_* mask on failure
3690 */ 3723 */
3691 3724
3692int ata_qc_issue(struct ata_queued_cmd *qc) 3725unsigned int ata_qc_issue(struct ata_queued_cmd *qc)
3693{ 3726{
3694 struct ata_port *ap = qc->ap; 3727 struct ata_port *ap = qc->ap;
3695 3728
3696 if (ata_should_dma_map(qc)) { 3729 if (ata_should_dma_map(qc)) {
3697 if (qc->flags & ATA_QCFLAG_SG) { 3730 if (qc->flags & ATA_QCFLAG_SG) {
3698 if (ata_sg_setup(qc)) 3731 if (ata_sg_setup(qc))
3699 goto err_out; 3732 goto sg_err;
3700 } else if (qc->flags & ATA_QCFLAG_SINGLE) { 3733 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
3701 if (ata_sg_setup_one(qc)) 3734 if (ata_sg_setup_one(qc))
3702 goto err_out; 3735 goto sg_err;
3703 } 3736 }
3704 } else { 3737 } else {
3705 qc->flags &= ~ATA_QCFLAG_DMAMAP; 3738 qc->flags &= ~ATA_QCFLAG_DMAMAP;
@@ -3712,8 +3745,9 @@ int ata_qc_issue(struct ata_queued_cmd *qc)
3712 3745
3713 return ap->ops->qc_issue(qc); 3746 return ap->ops->qc_issue(qc);
3714 3747
3715err_out: 3748sg_err:
3716 return -1; 3749 qc->flags &= ~ATA_QCFLAG_DMAMAP;
3750 return AC_ERR_SYSTEM;
3717} 3751}
3718 3752
3719 3753
@@ -3732,10 +3766,10 @@ err_out:
3732 * spin_lock_irqsave(host_set lock) 3766 * spin_lock_irqsave(host_set lock)
3733 * 3767 *
3734 * RETURNS: 3768 * RETURNS:
3735 * Zero on success, negative on error. 3769 * Zero on success, AC_ERR_* mask on failure
3736 */ 3770 */
3737 3771
3738int ata_qc_issue_prot(struct ata_queued_cmd *qc) 3772unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
3739{ 3773{
3740 struct ata_port *ap = qc->ap; 3774 struct ata_port *ap = qc->ap;
3741 3775
@@ -3756,31 +3790,31 @@ int ata_qc_issue_prot(struct ata_queued_cmd *qc)
3756 ata_qc_set_polling(qc); 3790 ata_qc_set_polling(qc);
3757 ata_tf_to_host(ap, &qc->tf); 3791 ata_tf_to_host(ap, &qc->tf);
3758 ap->hsm_task_state = HSM_ST; 3792 ap->hsm_task_state = HSM_ST;
3759 queue_work(ata_wq, &ap->pio_task); 3793 ata_queue_pio_task(ap);
3760 break; 3794 break;
3761 3795
3762 case ATA_PROT_ATAPI: 3796 case ATA_PROT_ATAPI:
3763 ata_qc_set_polling(qc); 3797 ata_qc_set_polling(qc);
3764 ata_tf_to_host(ap, &qc->tf); 3798 ata_tf_to_host(ap, &qc->tf);
3765 queue_work(ata_wq, &ap->packet_task); 3799 ata_queue_packet_task(ap);
3766 break; 3800 break;
3767 3801
3768 case ATA_PROT_ATAPI_NODATA: 3802 case ATA_PROT_ATAPI_NODATA:
3769 ap->flags |= ATA_FLAG_NOINTR; 3803 ap->flags |= ATA_FLAG_NOINTR;
3770 ata_tf_to_host(ap, &qc->tf); 3804 ata_tf_to_host(ap, &qc->tf);
3771 queue_work(ata_wq, &ap->packet_task); 3805 ata_queue_packet_task(ap);
3772 break; 3806 break;
3773 3807
3774 case ATA_PROT_ATAPI_DMA: 3808 case ATA_PROT_ATAPI_DMA:
3775 ap->flags |= ATA_FLAG_NOINTR; 3809 ap->flags |= ATA_FLAG_NOINTR;
3776 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */ 3810 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
3777 ap->ops->bmdma_setup(qc); /* set up bmdma */ 3811 ap->ops->bmdma_setup(qc); /* set up bmdma */
3778 queue_work(ata_wq, &ap->packet_task); 3812 ata_queue_packet_task(ap);
3779 break; 3813 break;
3780 3814
3781 default: 3815 default:
3782 WARN_ON(1); 3816 WARN_ON(1);
3783 return -1; 3817 return AC_ERR_SYSTEM;
3784 } 3818 }
3785 3819
3786 return 0; 3820 return 0;
@@ -4164,26 +4198,26 @@ static void atapi_packet_task(void *_data)
4164 u8 status; 4198 u8 status;
4165 4199
4166 qc = ata_qc_from_tag(ap, ap->active_tag); 4200 qc = ata_qc_from_tag(ap, ap->active_tag);
4167 assert(qc != NULL); 4201 WARN_ON(qc == NULL);
4168 assert(qc->flags & ATA_QCFLAG_ACTIVE); 4202 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
4169 4203
4170 /* sleep-wait for BSY to clear */ 4204 /* sleep-wait for BSY to clear */
4171 DPRINTK("busy wait\n"); 4205 DPRINTK("busy wait\n");
4172 if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB)) { 4206 if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB)) {
4173 qc->err_mask |= AC_ERR_ATA_BUS; 4207 qc->err_mask |= AC_ERR_TIMEOUT;
4174 goto err_out; 4208 goto err_out;
4175 } 4209 }
4176 4210
4177 /* make sure DRQ is set */ 4211 /* make sure DRQ is set */
4178 status = ata_chk_status(ap); 4212 status = ata_chk_status(ap);
4179 if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ) { 4213 if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ) {
4180 qc->err_mask |= AC_ERR_ATA_BUS; 4214 qc->err_mask |= AC_ERR_HSM;
4181 goto err_out; 4215 goto err_out;
4182 } 4216 }
4183 4217
4184 /* send SCSI cdb */ 4218 /* send SCSI cdb */
4185 DPRINTK("send cdb\n"); 4219 DPRINTK("send cdb\n");
4186 assert(ap->cdb_len >= 12); 4220 WARN_ON(qc->dev->cdb_len < 12);
4187 4221
4188 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA || 4222 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA ||
4189 qc->tf.protocol == ATA_PROT_ATAPI_NODATA) { 4223 qc->tf.protocol == ATA_PROT_ATAPI_NODATA) {
@@ -4197,16 +4231,16 @@ static void atapi_packet_task(void *_data)
4197 */ 4231 */
4198 spin_lock_irqsave(&ap->host_set->lock, flags); 4232 spin_lock_irqsave(&ap->host_set->lock, flags);
4199 ap->flags &= ~ATA_FLAG_NOINTR; 4233 ap->flags &= ~ATA_FLAG_NOINTR;
4200 ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1); 4234 ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);
4201 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA) 4235 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA)
4202 ap->ops->bmdma_start(qc); /* initiate bmdma */ 4236 ap->ops->bmdma_start(qc); /* initiate bmdma */
4203 spin_unlock_irqrestore(&ap->host_set->lock, flags); 4237 spin_unlock_irqrestore(&ap->host_set->lock, flags);
4204 } else { 4238 } else {
4205 ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1); 4239 ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);
4206 4240
4207 /* PIO commands are handled by polling */ 4241 /* PIO commands are handled by polling */
4208 ap->hsm_task_state = HSM_ST; 4242 ap->hsm_task_state = HSM_ST;
4209 queue_work(ata_wq, &ap->pio_task); 4243 ata_queue_pio_task(ap);
4210 } 4244 }
4211 4245
4212 return; 4246 return;
@@ -4216,19 +4250,6 @@ err_out:
4216} 4250}
4217 4251
4218 4252
4219/**
4220 * ata_port_start - Set port up for dma.
4221 * @ap: Port to initialize
4222 *
4223 * Called just after data structures for each port are
4224 * initialized. Allocates space for PRD table.
4225 *
4226 * May be used as the port_start() entry in ata_port_operations.
4227 *
4228 * LOCKING:
4229 * Inherited from caller.
4230 */
4231
4232/* 4253/*
4233 * Execute a 'simple' command, that only consists of the opcode 'cmd' itself, 4254 * Execute a 'simple' command, that only consists of the opcode 'cmd' itself,
4234 * without filling any other registers 4255 * without filling any other registers
@@ -4280,6 +4301,8 @@ static int ata_start_drive(struct ata_port *ap, struct ata_device *dev)
4280 4301
4281/** 4302/**
4282 * ata_device_resume - wakeup a previously suspended devices 4303 * ata_device_resume - wakeup a previously suspended devices
4304 * @ap: port the device is connected to
4305 * @dev: the device to resume
4283 * 4306 *
4284 * Kick the drive back into action, by sending it an idle immediate 4307 * Kick the drive back into action, by sending it an idle immediate
4285 * command and making sure its transfer mode matches between drive 4308 * command and making sure its transfer mode matches between drive
@@ -4302,10 +4325,11 @@ int ata_device_resume(struct ata_port *ap, struct ata_device *dev)
4302 4325
4303/** 4326/**
4304 * ata_device_suspend - prepare a device for suspend 4327 * ata_device_suspend - prepare a device for suspend
4328 * @ap: port the device is connected to
4329 * @dev: the device to suspend
4305 * 4330 *
4306 * Flush the cache on the drive, if appropriate, then issue a 4331 * Flush the cache on the drive, if appropriate, then issue a
4307 * standbynow command. 4332 * standbynow command.
4308 *
4309 */ 4333 */
4310int ata_device_suspend(struct ata_port *ap, struct ata_device *dev) 4334int ata_device_suspend(struct ata_port *ap, struct ata_device *dev)
4311{ 4335{
@@ -4319,6 +4343,19 @@ int ata_device_suspend(struct ata_port *ap, struct ata_device *dev)
4319 return 0; 4343 return 0;
4320} 4344}
4321 4345
4346/**
4347 * ata_port_start - Set port up for dma.
4348 * @ap: Port to initialize
4349 *
4350 * Called just after data structures for each port are
4351 * initialized. Allocates space for PRD table.
4352 *
4353 * May be used as the port_start() entry in ata_port_operations.
4354 *
4355 * LOCKING:
4356 * Inherited from caller.
4357 */
4358
4322int ata_port_start (struct ata_port *ap) 4359int ata_port_start (struct ata_port *ap)
4323{ 4360{
4324 struct device *dev = ap->host_set->dev; 4361 struct device *dev = ap->host_set->dev;
@@ -4434,6 +4471,7 @@ static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
4434 4471
4435 INIT_WORK(&ap->packet_task, atapi_packet_task, ap); 4472 INIT_WORK(&ap->packet_task, atapi_packet_task, ap);
4436 INIT_WORK(&ap->pio_task, ata_pio_task, ap); 4473 INIT_WORK(&ap->pio_task, ata_pio_task, ap);
4474 INIT_LIST_HEAD(&ap->eh_done_q);
4437 4475
4438 for (i = 0; i < ATA_MAX_DEVICES; i++) 4476 for (i = 0; i < ATA_MAX_DEVICES; i++)
4439 ap->device[i].devno = i; 4477 ap->device[i].devno = i;
@@ -4575,9 +4613,9 @@ int ata_device_add(const struct ata_probe_ent *ent)
4575 4613
4576 ap = host_set->ports[i]; 4614 ap = host_set->ports[i];
4577 4615
4578 DPRINTK("ata%u: probe begin\n", ap->id); 4616 DPRINTK("ata%u: bus probe begin\n", ap->id);
4579 rc = ata_bus_probe(ap); 4617 rc = ata_bus_probe(ap);
4580 DPRINTK("ata%u: probe end\n", ap->id); 4618 DPRINTK("ata%u: bus probe end\n", ap->id);
4581 4619
4582 if (rc) { 4620 if (rc) {
4583 /* FIXME: do something useful here? 4621 /* FIXME: do something useful here?
@@ -4601,7 +4639,7 @@ int ata_device_add(const struct ata_probe_ent *ent)
4601 } 4639 }
4602 4640
4603 /* probes are done, now scan each port's disk(s) */ 4641 /* probes are done, now scan each port's disk(s) */
4604 DPRINTK("probe begin\n"); 4642 DPRINTK("host probe begin\n");
4605 for (i = 0; i < count; i++) { 4643 for (i = 0; i < count; i++) {
4606 struct ata_port *ap = host_set->ports[i]; 4644 struct ata_port *ap = host_set->ports[i];
4607 4645
@@ -4723,32 +4761,6 @@ void ata_std_ports(struct ata_ioports *ioaddr)
4723 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD; 4761 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
4724} 4762}
4725 4763
4726static struct ata_probe_ent *
4727ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
4728{
4729 struct ata_probe_ent *probe_ent;
4730
4731 probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
4732 if (!probe_ent) {
4733 printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
4734 kobject_name(&(dev->kobj)));
4735 return NULL;
4736 }
4737
4738 INIT_LIST_HEAD(&probe_ent->node);
4739 probe_ent->dev = dev;
4740
4741 probe_ent->sht = port->sht;
4742 probe_ent->host_flags = port->host_flags;
4743 probe_ent->pio_mask = port->pio_mask;
4744 probe_ent->mwdma_mask = port->mwdma_mask;
4745 probe_ent->udma_mask = port->udma_mask;
4746 probe_ent->port_ops = port->port_ops;
4747
4748 return probe_ent;
4749}
4750
4751
4752 4764
4753#ifdef CONFIG_PCI 4765#ifdef CONFIG_PCI
4754 4766
@@ -4760,256 +4772,6 @@ void ata_pci_host_stop (struct ata_host_set *host_set)
4760} 4772}
4761 4773
4762/** 4774/**
4763 * ata_pci_init_native_mode - Initialize native-mode driver
4764 * @pdev: pci device to be initialized
4765 * @port: array[2] of pointers to port info structures.
4766 * @ports: bitmap of ports present
4767 *
4768 * Utility function which allocates and initializes an
4769 * ata_probe_ent structure for a standard dual-port
4770 * PIO-based IDE controller. The returned ata_probe_ent
4771 * structure can be passed to ata_device_add(). The returned
4772 * ata_probe_ent structure should then be freed with kfree().
4773 *
4774 * The caller need only pass the address of the primary port, the
4775 * secondary will be deduced automatically. If the device has non
4776 * standard secondary port mappings this function can be called twice,
4777 * once for each interface.
4778 */
4779
4780struct ata_probe_ent *
4781ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int ports)
4782{
4783 struct ata_probe_ent *probe_ent =
4784 ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
4785 int p = 0;
4786
4787 if (!probe_ent)
4788 return NULL;
4789
4790 probe_ent->irq = pdev->irq;
4791 probe_ent->irq_flags = SA_SHIRQ;
4792 probe_ent->private_data = port[0]->private_data;
4793
4794 if (ports & ATA_PORT_PRIMARY) {
4795 probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 0);
4796 probe_ent->port[p].altstatus_addr =
4797 probe_ent->port[p].ctl_addr =
4798 pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS;
4799 probe_ent->port[p].bmdma_addr = pci_resource_start(pdev, 4);
4800 ata_std_ports(&probe_ent->port[p]);
4801 p++;
4802 }
4803
4804 if (ports & ATA_PORT_SECONDARY) {
4805 probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 2);
4806 probe_ent->port[p].altstatus_addr =
4807 probe_ent->port[p].ctl_addr =
4808 pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS;
4809 probe_ent->port[p].bmdma_addr = pci_resource_start(pdev, 4) + 8;
4810 ata_std_ports(&probe_ent->port[p]);
4811 p++;
4812 }
4813
4814 probe_ent->n_ports = p;
4815 return probe_ent;
4816}
4817
4818static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev, struct ata_port_info *port, int port_num)
4819{
4820 struct ata_probe_ent *probe_ent;
4821
4822 probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port);
4823 if (!probe_ent)
4824 return NULL;
4825
4826 probe_ent->legacy_mode = 1;
4827 probe_ent->n_ports = 1;
4828 probe_ent->hard_port_no = port_num;
4829 probe_ent->private_data = port->private_data;
4830
4831 switch(port_num)
4832 {
4833 case 0:
4834 probe_ent->irq = 14;
4835 probe_ent->port[0].cmd_addr = 0x1f0;
4836 probe_ent->port[0].altstatus_addr =
4837 probe_ent->port[0].ctl_addr = 0x3f6;
4838 break;
4839 case 1:
4840 probe_ent->irq = 15;
4841 probe_ent->port[0].cmd_addr = 0x170;
4842 probe_ent->port[0].altstatus_addr =
4843 probe_ent->port[0].ctl_addr = 0x376;
4844 break;
4845 }
4846 probe_ent->port[0].bmdma_addr = pci_resource_start(pdev, 4) + 8 * port_num;
4847 ata_std_ports(&probe_ent->port[0]);
4848 return probe_ent;
4849}
4850
4851/**
4852 * ata_pci_init_one - Initialize/register PCI IDE host controller
4853 * @pdev: Controller to be initialized
4854 * @port_info: Information from low-level host driver
4855 * @n_ports: Number of ports attached to host controller
4856 *
4857 * This is a helper function which can be called from a driver's
4858 * xxx_init_one() probe function if the hardware uses traditional
4859 * IDE taskfile registers.
4860 *
4861 * This function calls pci_enable_device(), reserves its register
4862 * regions, sets the dma mask, enables bus master mode, and calls
4863 * ata_device_add()
4864 *
4865 * LOCKING:
4866 * Inherited from PCI layer (may sleep).
4867 *
4868 * RETURNS:
4869 * Zero on success, negative on errno-based value on error.
4870 */
4871
4872int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
4873 unsigned int n_ports)
4874{
4875 struct ata_probe_ent *probe_ent = NULL, *probe_ent2 = NULL;
4876 struct ata_port_info *port[2];
4877 u8 tmp8, mask;
4878 unsigned int legacy_mode = 0;
4879 int disable_dev_on_err = 1;
4880 int rc;
4881
4882 DPRINTK("ENTER\n");
4883
4884 port[0] = port_info[0];
4885 if (n_ports > 1)
4886 port[1] = port_info[1];
4887 else
4888 port[1] = port[0];
4889
4890 if ((port[0]->host_flags & ATA_FLAG_NO_LEGACY) == 0
4891 && (pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
4892 /* TODO: What if one channel is in native mode ... */
4893 pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
4894 mask = (1 << 2) | (1 << 0);
4895 if ((tmp8 & mask) != mask)
4896 legacy_mode = (1 << 3);
4897 }
4898
4899 /* FIXME... */
4900 if ((!legacy_mode) && (n_ports > 2)) {
4901 printk(KERN_ERR "ata: BUG: native mode, n_ports > 2\n");
4902 n_ports = 2;
4903 /* For now */
4904 }
4905
4906 /* FIXME: Really for ATA it isn't safe because the device may be
4907 multi-purpose and we want to leave it alone if it was already
4908 enabled. Secondly for shared use as Arjan says we want refcounting
4909
4910 Checking dev->is_enabled is insufficient as this is not set at
4911 boot for the primary video which is BIOS enabled
4912 */
4913
4914 rc = pci_enable_device(pdev);
4915 if (rc)
4916 return rc;
4917
4918 rc = pci_request_regions(pdev, DRV_NAME);
4919 if (rc) {
4920 disable_dev_on_err = 0;
4921 goto err_out;
4922 }
4923
4924 /* FIXME: Should use platform specific mappers for legacy port ranges */
4925 if (legacy_mode) {
4926 if (!request_region(0x1f0, 8, "libata")) {
4927 struct resource *conflict, res;
4928 res.start = 0x1f0;
4929 res.end = 0x1f0 + 8 - 1;
4930 conflict = ____request_resource(&ioport_resource, &res);
4931 if (!strcmp(conflict->name, "libata"))
4932 legacy_mode |= (1 << 0);
4933 else {
4934 disable_dev_on_err = 0;
4935 printk(KERN_WARNING "ata: 0x1f0 IDE port busy\n");
4936 }
4937 } else
4938 legacy_mode |= (1 << 0);
4939
4940 if (!request_region(0x170, 8, "libata")) {
4941 struct resource *conflict, res;
4942 res.start = 0x170;
4943 res.end = 0x170 + 8 - 1;
4944 conflict = ____request_resource(&ioport_resource, &res);
4945 if (!strcmp(conflict->name, "libata"))
4946 legacy_mode |= (1 << 1);
4947 else {
4948 disable_dev_on_err = 0;
4949 printk(KERN_WARNING "ata: 0x170 IDE port busy\n");
4950 }
4951 } else
4952 legacy_mode |= (1 << 1);
4953 }
4954
4955 /* we have legacy mode, but all ports are unavailable */
4956 if (legacy_mode == (1 << 3)) {
4957 rc = -EBUSY;
4958 goto err_out_regions;
4959 }
4960
4961 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
4962 if (rc)
4963 goto err_out_regions;
4964 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
4965 if (rc)
4966 goto err_out_regions;
4967
4968 if (legacy_mode) {
4969 if (legacy_mode & (1 << 0))
4970 probe_ent = ata_pci_init_legacy_port(pdev, port[0], 0);
4971 if (legacy_mode & (1 << 1))
4972 probe_ent2 = ata_pci_init_legacy_port(pdev, port[1], 1);
4973 } else {
4974 if (n_ports == 2)
4975 probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
4976 else
4977 probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY);
4978 }
4979 if (!probe_ent && !probe_ent2) {
4980 rc = -ENOMEM;
4981 goto err_out_regions;
4982 }
4983
4984 pci_set_master(pdev);
4985
4986 /* FIXME: check ata_device_add return */
4987 if (legacy_mode) {
4988 if (legacy_mode & (1 << 0))
4989 ata_device_add(probe_ent);
4990 if (legacy_mode & (1 << 1))
4991 ata_device_add(probe_ent2);
4992 } else
4993 ata_device_add(probe_ent);
4994
4995 kfree(probe_ent);
4996 kfree(probe_ent2);
4997
4998 return 0;
4999
5000err_out_regions:
5001 if (legacy_mode & (1 << 0))
5002 release_region(0x1f0, 8);
5003 if (legacy_mode & (1 << 1))
5004 release_region(0x170, 8);
5005 pci_release_regions(pdev);
5006err_out:
5007 if (disable_dev_on_err)
5008 pci_disable_device(pdev);
5009 return rc;
5010}
5011
5012/**
5013 * ata_pci_remove_one - PCI layer callback for device removal 4775 * ata_pci_remove_one - PCI layer callback for device removal
5014 * @pdev: PCI device that was removed 4776 * @pdev: PCI device that was removed
5015 * 4777 *
@@ -5139,7 +4901,7 @@ EXPORT_SYMBOL_GPL(ata_device_add);
5139EXPORT_SYMBOL_GPL(ata_host_set_remove); 4901EXPORT_SYMBOL_GPL(ata_host_set_remove);
5140EXPORT_SYMBOL_GPL(ata_sg_init); 4902EXPORT_SYMBOL_GPL(ata_sg_init);
5141EXPORT_SYMBOL_GPL(ata_sg_init_one); 4903EXPORT_SYMBOL_GPL(ata_sg_init_one);
5142EXPORT_SYMBOL_GPL(ata_qc_complete); 4904EXPORT_SYMBOL_GPL(__ata_qc_complete);
5143EXPORT_SYMBOL_GPL(ata_qc_issue_prot); 4905EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
5144EXPORT_SYMBOL_GPL(ata_eng_timeout); 4906EXPORT_SYMBOL_GPL(ata_eng_timeout);
5145EXPORT_SYMBOL_GPL(ata_tf_load); 4907EXPORT_SYMBOL_GPL(ata_tf_load);
@@ -5165,18 +4927,29 @@ EXPORT_SYMBOL_GPL(ata_port_probe);
5165EXPORT_SYMBOL_GPL(sata_phy_reset); 4927EXPORT_SYMBOL_GPL(sata_phy_reset);
5166EXPORT_SYMBOL_GPL(__sata_phy_reset); 4928EXPORT_SYMBOL_GPL(__sata_phy_reset);
5167EXPORT_SYMBOL_GPL(ata_bus_reset); 4929EXPORT_SYMBOL_GPL(ata_bus_reset);
4930EXPORT_SYMBOL_GPL(ata_std_probeinit);
4931EXPORT_SYMBOL_GPL(ata_std_softreset);
4932EXPORT_SYMBOL_GPL(sata_std_hardreset);
4933EXPORT_SYMBOL_GPL(ata_std_postreset);
4934EXPORT_SYMBOL_GPL(ata_std_probe_reset);
4935EXPORT_SYMBOL_GPL(ata_drive_probe_reset);
5168EXPORT_SYMBOL_GPL(ata_port_disable); 4936EXPORT_SYMBOL_GPL(ata_port_disable);
5169EXPORT_SYMBOL_GPL(ata_ratelimit); 4937EXPORT_SYMBOL_GPL(ata_ratelimit);
4938EXPORT_SYMBOL_GPL(ata_busy_sleep);
5170EXPORT_SYMBOL_GPL(ata_scsi_ioctl); 4939EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
5171EXPORT_SYMBOL_GPL(ata_scsi_queuecmd); 4940EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
4941EXPORT_SYMBOL_GPL(ata_scsi_timed_out);
5172EXPORT_SYMBOL_GPL(ata_scsi_error); 4942EXPORT_SYMBOL_GPL(ata_scsi_error);
5173EXPORT_SYMBOL_GPL(ata_scsi_slave_config); 4943EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
5174EXPORT_SYMBOL_GPL(ata_scsi_release); 4944EXPORT_SYMBOL_GPL(ata_scsi_release);
5175EXPORT_SYMBOL_GPL(ata_host_intr); 4945EXPORT_SYMBOL_GPL(ata_host_intr);
5176EXPORT_SYMBOL_GPL(ata_dev_classify); 4946EXPORT_SYMBOL_GPL(ata_dev_classify);
5177EXPORT_SYMBOL_GPL(ata_dev_id_string); 4947EXPORT_SYMBOL_GPL(ata_id_string);
4948EXPORT_SYMBOL_GPL(ata_id_c_string);
5178EXPORT_SYMBOL_GPL(ata_dev_config); 4949EXPORT_SYMBOL_GPL(ata_dev_config);
5179EXPORT_SYMBOL_GPL(ata_scsi_simulate); 4950EXPORT_SYMBOL_GPL(ata_scsi_simulate);
4951EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
4952EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
5180 4953
5181EXPORT_SYMBOL_GPL(ata_pio_need_iordy); 4954EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
5182EXPORT_SYMBOL_GPL(ata_timing_compute); 4955EXPORT_SYMBOL_GPL(ata_timing_compute);
diff --git a/drivers/scsi/libata-scsi.c b/drivers/scsi/libata-scsi.c
index 07b1e7cc61df..538784e65cdc 100644
--- a/drivers/scsi/libata-scsi.c
+++ b/drivers/scsi/libata-scsi.c
@@ -151,7 +151,7 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
151 struct scsi_sense_hdr sshdr; 151 struct scsi_sense_hdr sshdr;
152 enum dma_data_direction data_dir; 152 enum dma_data_direction data_dir;
153 153
154 if (NULL == (void *)arg) 154 if (arg == NULL)
155 return -EINVAL; 155 return -EINVAL;
156 156
157 if (copy_from_user(args, arg, sizeof(args))) 157 if (copy_from_user(args, arg, sizeof(args)))
@@ -201,7 +201,7 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
201 /* Need code to retrieve data from check condition? */ 201 /* Need code to retrieve data from check condition? */
202 202
203 if ((argbuf) 203 if ((argbuf)
204 && copy_to_user((void *)(arg + sizeof(args)), argbuf, argsize)) 204 && copy_to_user(arg + sizeof(args), argbuf, argsize))
205 rc = -EFAULT; 205 rc = -EFAULT;
206error: 206error:
207 if (argbuf) 207 if (argbuf)
@@ -228,7 +228,7 @@ int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg)
228 u8 args[7]; 228 u8 args[7];
229 struct scsi_sense_hdr sshdr; 229 struct scsi_sense_hdr sshdr;
230 230
231 if (NULL == (void *)arg) 231 if (arg == NULL)
232 return -EINVAL; 232 return -EINVAL;
233 233
234 if (copy_from_user(args, arg, sizeof(args))) 234 if (copy_from_user(args, arg, sizeof(args)))
@@ -553,7 +553,7 @@ void ata_gen_ata_desc_sense(struct ata_queued_cmd *qc)
553 /* 553 /*
554 * Read the controller registers. 554 * Read the controller registers.
555 */ 555 */
556 assert(NULL != qc->ap->ops->tf_read); 556 WARN_ON(qc->ap->ops->tf_read == NULL);
557 qc->ap->ops->tf_read(qc->ap, tf); 557 qc->ap->ops->tf_read(qc->ap, tf);
558 558
559 /* 559 /*
@@ -628,7 +628,7 @@ void ata_gen_fixed_sense(struct ata_queued_cmd *qc)
628 /* 628 /*
629 * Read the controller registers. 629 * Read the controller registers.
630 */ 630 */
631 assert(NULL != qc->ap->ops->tf_read); 631 WARN_ON(qc->ap->ops->tf_read == NULL);
632 qc->ap->ops->tf_read(qc->ap, tf); 632 qc->ap->ops->tf_read(qc->ap, tf);
633 633
634 /* 634 /*
@@ -684,23 +684,23 @@ int ata_scsi_slave_config(struct scsi_device *sdev)
684 if (sdev->id < ATA_MAX_DEVICES) { 684 if (sdev->id < ATA_MAX_DEVICES) {
685 struct ata_port *ap; 685 struct ata_port *ap;
686 struct ata_device *dev; 686 struct ata_device *dev;
687 unsigned int max_sectors;
687 688
688 ap = (struct ata_port *) &sdev->host->hostdata[0]; 689 ap = (struct ata_port *) &sdev->host->hostdata[0];
689 dev = &ap->device[sdev->id]; 690 dev = &ap->device[sdev->id];
690 691
691 /* TODO: 1024 is an arbitrary number, not the 692 /* TODO: 2048 is an arbitrary number, not the
692 * hardware maximum. This should be increased to 693 * hardware maximum. This should be increased to
693 * 65534 when Jens Axboe's patch for dynamically 694 * 65534 when Jens Axboe's patch for dynamically
694 * determining max_sectors is merged. 695 * determining max_sectors is merged.
695 */ 696 */
696 if ((dev->flags & ATA_DFLAG_LBA48) && 697 max_sectors = ATA_MAX_SECTORS;
697 ((dev->flags & ATA_DFLAG_LOCK_SECTORS) == 0)) { 698 if (dev->flags & ATA_DFLAG_LBA48)
698 /* 699 max_sectors = 2048;
699 * do not overwrite sdev->host->max_sectors, since 700 if (dev->max_sectors)
700 * other drives on this host may not support LBA48 701 max_sectors = dev->max_sectors;
701 */ 702
702 blk_queue_max_sectors(sdev->request_queue, 2048); 703 blk_queue_max_sectors(sdev->request_queue, max_sectors);
703 }
704 704
705 /* 705 /*
706 * SATA DMA transfers must be multiples of 4 byte, so 706 * SATA DMA transfers must be multiples of 4 byte, so
@@ -717,6 +717,47 @@ int ata_scsi_slave_config(struct scsi_device *sdev)
717} 717}
718 718
719/** 719/**
720 * ata_scsi_timed_out - SCSI layer time out callback
721 * @cmd: timed out SCSI command
722 *
723 * Handles SCSI layer timeout. We race with normal completion of
724 * the qc for @cmd. If the qc is already gone, we lose and let
725 * the scsi command finish (EH_HANDLED). Otherwise, the qc has
726 * timed out and EH should be invoked. Prevent ata_qc_complete()
727 * from finishing it by setting EH_SCHEDULED and return
728 * EH_NOT_HANDLED.
729 *
730 * LOCKING:
731 * Called from timer context
732 *
733 * RETURNS:
734 * EH_HANDLED or EH_NOT_HANDLED
735 */
736enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
737{
738 struct Scsi_Host *host = cmd->device->host;
739 struct ata_port *ap = (struct ata_port *) &host->hostdata[0];
740 unsigned long flags;
741 struct ata_queued_cmd *qc;
742 enum scsi_eh_timer_return ret = EH_HANDLED;
743
744 DPRINTK("ENTER\n");
745
746 spin_lock_irqsave(&ap->host_set->lock, flags);
747 qc = ata_qc_from_tag(ap, ap->active_tag);
748 if (qc) {
749 WARN_ON(qc->scsicmd != cmd);
750 qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
751 qc->err_mask |= AC_ERR_TIMEOUT;
752 ret = EH_NOT_HANDLED;
753 }
754 spin_unlock_irqrestore(&ap->host_set->lock, flags);
755
756 DPRINTK("EXIT, ret=%d\n", ret);
757 return ret;
758}
759
760/**
720 * ata_scsi_error - SCSI layer error handler callback 761 * ata_scsi_error - SCSI layer error handler callback
721 * @host: SCSI host on which error occurred 762 * @host: SCSI host on which error occurred
722 * 763 *
@@ -732,23 +773,82 @@ int ata_scsi_slave_config(struct scsi_device *sdev)
732int ata_scsi_error(struct Scsi_Host *host) 773int ata_scsi_error(struct Scsi_Host *host)
733{ 774{
734 struct ata_port *ap; 775 struct ata_port *ap;
776 unsigned long flags;
735 777
736 DPRINTK("ENTER\n"); 778 DPRINTK("ENTER\n");
737 779
738 ap = (struct ata_port *) &host->hostdata[0]; 780 ap = (struct ata_port *) &host->hostdata[0];
781
782 spin_lock_irqsave(&ap->host_set->lock, flags);
783 WARN_ON(ap->flags & ATA_FLAG_IN_EH);
784 ap->flags |= ATA_FLAG_IN_EH;
785 WARN_ON(ata_qc_from_tag(ap, ap->active_tag) == NULL);
786 spin_unlock_irqrestore(&ap->host_set->lock, flags);
787
739 ap->ops->eng_timeout(ap); 788 ap->ops->eng_timeout(ap);
740 789
741 /* TODO: this is per-command; when queueing is supported 790 WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q));
742 * this code will either change or move to a more 791
743 * appropriate place 792 scsi_eh_flush_done_q(&ap->eh_done_q);
744 */ 793
745 host->host_failed--; 794 spin_lock_irqsave(&ap->host_set->lock, flags);
746 INIT_LIST_HEAD(&host->eh_cmd_q); 795 ap->flags &= ~ATA_FLAG_IN_EH;
796 spin_unlock_irqrestore(&ap->host_set->lock, flags);
747 797
748 DPRINTK("EXIT\n"); 798 DPRINTK("EXIT\n");
749 return 0; 799 return 0;
750} 800}
751 801
802static void ata_eh_scsidone(struct scsi_cmnd *scmd)
803{
804 /* nada */
805}
806
807static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
808{
809 struct ata_port *ap = qc->ap;
810 struct scsi_cmnd *scmd = qc->scsicmd;
811 unsigned long flags;
812
813 spin_lock_irqsave(&ap->host_set->lock, flags);
814 qc->scsidone = ata_eh_scsidone;
815 __ata_qc_complete(qc);
816 WARN_ON(ata_tag_valid(qc->tag));
817 spin_unlock_irqrestore(&ap->host_set->lock, flags);
818
819 scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
820}
821
822/**
823 * ata_eh_qc_complete - Complete an active ATA command from EH
824 * @qc: Command to complete
825 *
826 * Indicate to the mid and upper layers that an ATA command has
827 * completed. To be used from EH.
828 */
829void ata_eh_qc_complete(struct ata_queued_cmd *qc)
830{
831 struct scsi_cmnd *scmd = qc->scsicmd;
832 scmd->retries = scmd->allowed;
833 __ata_eh_qc_complete(qc);
834}
835
836/**
837 * ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
838 * @qc: Command to retry
839 *
840 * Indicate to the mid and upper layers that an ATA command
841 * should be retried. To be used from EH.
842 *
843 * SCSI midlayer limits the number of retries to scmd->allowed.
844 * This function might need to adjust scmd->retries for commands
845 * which get retried due to unrelated NCQ failures.
846 */
847void ata_eh_qc_retry(struct ata_queued_cmd *qc)
848{
849 __ata_eh_qc_complete(qc);
850}
851
752/** 852/**
753 * ata_scsi_start_stop_xlat - Translate SCSI START STOP UNIT command 853 * ata_scsi_start_stop_xlat - Translate SCSI START STOP UNIT command
754 * @qc: Storage for translated ATA taskfile 854 * @qc: Storage for translated ATA taskfile
@@ -985,9 +1085,13 @@ static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc, const u8 *sc
985 if (dev->flags & ATA_DFLAG_LBA) { 1085 if (dev->flags & ATA_DFLAG_LBA) {
986 tf->flags |= ATA_TFLAG_LBA; 1086 tf->flags |= ATA_TFLAG_LBA;
987 1087
988 if (dev->flags & ATA_DFLAG_LBA48) { 1088 if (lba_28_ok(block, n_block)) {
989 if (n_block > (64 * 1024)) 1089 /* use LBA28 */
990 goto invalid_fld; 1090 tf->command = ATA_CMD_VERIFY;
1091 tf->device |= (block >> 24) & 0xf;
1092 } else if (lba_48_ok(block, n_block)) {
1093 if (!(dev->flags & ATA_DFLAG_LBA48))
1094 goto out_of_range;
991 1095
992 /* use LBA48 */ 1096 /* use LBA48 */
993 tf->flags |= ATA_TFLAG_LBA48; 1097 tf->flags |= ATA_TFLAG_LBA48;
@@ -998,15 +1102,9 @@ static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc, const u8 *sc
998 tf->hob_lbah = (block >> 40) & 0xff; 1102 tf->hob_lbah = (block >> 40) & 0xff;
999 tf->hob_lbam = (block >> 32) & 0xff; 1103 tf->hob_lbam = (block >> 32) & 0xff;
1000 tf->hob_lbal = (block >> 24) & 0xff; 1104 tf->hob_lbal = (block >> 24) & 0xff;
1001 } else { 1105 } else
1002 if (n_block > 256) 1106 /* request too large even for LBA48 */
1003 goto invalid_fld; 1107 goto out_of_range;
1004
1005 /* use LBA28 */
1006 tf->command = ATA_CMD_VERIFY;
1007
1008 tf->device |= (block >> 24) & 0xf;
1009 }
1010 1108
1011 tf->nsect = n_block & 0xff; 1109 tf->nsect = n_block & 0xff;
1012 1110
@@ -1019,8 +1117,8 @@ static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc, const u8 *sc
1019 /* CHS */ 1117 /* CHS */
1020 u32 sect, head, cyl, track; 1118 u32 sect, head, cyl, track;
1021 1119
1022 if (n_block > 256) 1120 if (!lba_28_ok(block, n_block))
1023 goto invalid_fld; 1121 goto out_of_range;
1024 1122
1025 /* Convert LBA to CHS */ 1123 /* Convert LBA to CHS */
1026 track = (u32)block / dev->sectors; 1124 track = (u32)block / dev->sectors;
@@ -1139,9 +1237,11 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, const u8 *scsicm
1139 if (dev->flags & ATA_DFLAG_LBA) { 1237 if (dev->flags & ATA_DFLAG_LBA) {
1140 tf->flags |= ATA_TFLAG_LBA; 1238 tf->flags |= ATA_TFLAG_LBA;
1141 1239
1142 if (dev->flags & ATA_DFLAG_LBA48) { 1240 if (lba_28_ok(block, n_block)) {
1143 /* The request -may- be too large for LBA48. */ 1241 /* use LBA28 */
1144 if ((block >> 48) || (n_block > 65536)) 1242 tf->device |= (block >> 24) & 0xf;
1243 } else if (lba_48_ok(block, n_block)) {
1244 if (!(dev->flags & ATA_DFLAG_LBA48))
1145 goto out_of_range; 1245 goto out_of_range;
1146 1246
1147 /* use LBA48 */ 1247 /* use LBA48 */
@@ -1152,15 +1252,9 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, const u8 *scsicm
1152 tf->hob_lbah = (block >> 40) & 0xff; 1252 tf->hob_lbah = (block >> 40) & 0xff;
1153 tf->hob_lbam = (block >> 32) & 0xff; 1253 tf->hob_lbam = (block >> 32) & 0xff;
1154 tf->hob_lbal = (block >> 24) & 0xff; 1254 tf->hob_lbal = (block >> 24) & 0xff;
1155 } else { 1255 } else
1156 /* use LBA28 */ 1256 /* request too large even for LBA48 */
1157 1257 goto out_of_range;
1158 /* The request -may- be too large for LBA28. */
1159 if ((block >> 28) || (n_block > 256))
1160 goto out_of_range;
1161
1162 tf->device |= (block >> 24) & 0xf;
1163 }
1164 1258
1165 if (unlikely(ata_rwcmd_protocol(qc) < 0)) 1259 if (unlikely(ata_rwcmd_protocol(qc) < 0))
1166 goto invalid_fld; 1260 goto invalid_fld;
@@ -1178,7 +1272,7 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, const u8 *scsicm
1178 u32 sect, head, cyl, track; 1272 u32 sect, head, cyl, track;
1179 1273
1180 /* The request -may- be too large for CHS addressing. */ 1274 /* The request -may- be too large for CHS addressing. */
1181 if ((block >> 28) || (n_block > 256)) 1275 if (!lba_28_ok(block, n_block))
1182 goto out_of_range; 1276 goto out_of_range;
1183 1277
1184 if (unlikely(ata_rwcmd_protocol(qc) < 0)) 1278 if (unlikely(ata_rwcmd_protocol(qc) < 0))
@@ -1225,7 +1319,7 @@ nothing_to_do:
1225 return 1; 1319 return 1;
1226} 1320}
1227 1321
1228static int ata_scsi_qc_complete(struct ata_queued_cmd *qc) 1322static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
1229{ 1323{
1230 struct scsi_cmnd *cmd = qc->scsicmd; 1324 struct scsi_cmnd *cmd = qc->scsicmd;
1231 u8 *cdb = cmd->cmnd; 1325 u8 *cdb = cmd->cmnd;
@@ -1262,7 +1356,7 @@ static int ata_scsi_qc_complete(struct ata_queued_cmd *qc)
1262 1356
1263 qc->scsidone(cmd); 1357 qc->scsidone(cmd);
1264 1358
1265 return 0; 1359 ata_qc_free(qc);
1266} 1360}
1267 1361
1268/** 1362/**
@@ -1328,8 +1422,9 @@ static void ata_scsi_translate(struct ata_port *ap, struct ata_device *dev,
1328 goto early_finish; 1422 goto early_finish;
1329 1423
1330 /* select device, send command to hardware */ 1424 /* select device, send command to hardware */
1331 if (ata_qc_issue(qc)) 1425 qc->err_mask = ata_qc_issue(qc);
1332 goto err_did; 1426 if (qc->err_mask)
1427 ata_qc_complete(qc);
1333 1428
1334 VPRINTK("EXIT\n"); 1429 VPRINTK("EXIT\n");
1335 return; 1430 return;
@@ -1472,8 +1567,8 @@ unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf,
1472 1567
1473 if (buflen > 35) { 1568 if (buflen > 35) {
1474 memcpy(&rbuf[8], "ATA ", 8); 1569 memcpy(&rbuf[8], "ATA ", 8);
1475 ata_dev_id_string(args->id, &rbuf[16], ATA_ID_PROD_OFS, 16); 1570 ata_id_string(args->id, &rbuf[16], ATA_ID_PROD_OFS, 16);
1476 ata_dev_id_string(args->id, &rbuf[32], ATA_ID_FW_REV_OFS, 4); 1571 ata_id_string(args->id, &rbuf[32], ATA_ID_FW_REV_OFS, 4);
1477 if (rbuf[32] == 0 || rbuf[32] == ' ') 1572 if (rbuf[32] == 0 || rbuf[32] == ' ')
1478 memcpy(&rbuf[32], "n/a ", 4); 1573 memcpy(&rbuf[32], "n/a ", 4);
1479 } 1574 }
@@ -1547,8 +1642,8 @@ unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf,
1547 memcpy(rbuf, hdr, sizeof(hdr)); 1642 memcpy(rbuf, hdr, sizeof(hdr));
1548 1643
1549 if (buflen > (ATA_SERNO_LEN + 4 - 1)) 1644 if (buflen > (ATA_SERNO_LEN + 4 - 1))
1550 ata_dev_id_string(args->id, (unsigned char *) &rbuf[4], 1645 ata_id_string(args->id, (unsigned char *) &rbuf[4],
1551 ATA_ID_SERNO_OFS, ATA_SERNO_LEN); 1646 ATA_ID_SERNO_OFS, ATA_SERNO_LEN);
1552 1647
1553 return 0; 1648 return 0;
1554} 1649}
@@ -1711,15 +1806,12 @@ static int ata_dev_supports_fua(u16 *id)
1711 if (!ata_id_has_fua(id)) 1806 if (!ata_id_has_fua(id))
1712 return 0; 1807 return 0;
1713 1808
1714 model[40] = '\0'; 1809 ata_id_c_string(id, model, ATA_ID_PROD_OFS, sizeof(model));
1715 fw[8] = '\0'; 1810 ata_id_c_string(id, fw, ATA_ID_FW_REV_OFS, sizeof(fw));
1716
1717 ata_dev_id_string(id, model, ATA_ID_PROD_OFS, sizeof(model) - 1);
1718 ata_dev_id_string(id, fw, ATA_ID_FW_REV_OFS, sizeof(fw) - 1);
1719 1811
1720 if (strncmp(model, "Maxtor", 6)) 1812 if (strcmp(model, "Maxtor"))
1721 return 1; 1813 return 1;
1722 if (strncmp(fw, "BANC1G10", 8)) 1814 if (strcmp(fw, "BANC1G10"))
1723 return 1; 1815 return 1;
1724 1816
1725 return 0; /* blacklisted */ 1817 return 0; /* blacklisted */
@@ -2013,7 +2105,7 @@ void ata_scsi_badcmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *), u8
2013 done(cmd); 2105 done(cmd);
2014} 2106}
2015 2107
2016static int atapi_sense_complete(struct ata_queued_cmd *qc) 2108static void atapi_sense_complete(struct ata_queued_cmd *qc)
2017{ 2109{
2018 if (qc->err_mask && ((qc->err_mask & AC_ERR_DEV) == 0)) 2110 if (qc->err_mask && ((qc->err_mask & AC_ERR_DEV) == 0))
2019 /* FIXME: not quite right; we don't want the 2111 /* FIXME: not quite right; we don't want the
@@ -2024,7 +2116,7 @@ static int atapi_sense_complete(struct ata_queued_cmd *qc)
2024 ata_gen_ata_desc_sense(qc); 2116 ata_gen_ata_desc_sense(qc);
2025 2117
2026 qc->scsidone(qc->scsicmd); 2118 qc->scsidone(qc->scsicmd);
2027 return 0; 2119 ata_qc_free(qc);
2028} 2120}
2029 2121
2030/* is it pointless to prefer PIO for "safety reasons"? */ 2122/* is it pointless to prefer PIO for "safety reasons"? */
@@ -2054,7 +2146,7 @@ static void atapi_request_sense(struct ata_queued_cmd *qc)
2054 ata_sg_init_one(qc, cmd->sense_buffer, sizeof(cmd->sense_buffer)); 2146 ata_sg_init_one(qc, cmd->sense_buffer, sizeof(cmd->sense_buffer));
2055 qc->dma_dir = DMA_FROM_DEVICE; 2147 qc->dma_dir = DMA_FROM_DEVICE;
2056 2148
2057 memset(&qc->cdb, 0, ap->cdb_len); 2149 memset(&qc->cdb, 0, qc->dev->cdb_len);
2058 qc->cdb[0] = REQUEST_SENSE; 2150 qc->cdb[0] = REQUEST_SENSE;
2059 qc->cdb[4] = SCSI_SENSE_BUFFERSIZE; 2151 qc->cdb[4] = SCSI_SENSE_BUFFERSIZE;
2060 2152
@@ -2073,15 +2165,14 @@ static void atapi_request_sense(struct ata_queued_cmd *qc)
2073 2165
2074 qc->complete_fn = atapi_sense_complete; 2166 qc->complete_fn = atapi_sense_complete;
2075 2167
2076 if (ata_qc_issue(qc)) { 2168 qc->err_mask = ata_qc_issue(qc);
2077 qc->err_mask |= AC_ERR_OTHER; 2169 if (qc->err_mask)
2078 ata_qc_complete(qc); 2170 ata_qc_complete(qc);
2079 }
2080 2171
2081 DPRINTK("EXIT\n"); 2172 DPRINTK("EXIT\n");
2082} 2173}
2083 2174
2084static int atapi_qc_complete(struct ata_queued_cmd *qc) 2175static void atapi_qc_complete(struct ata_queued_cmd *qc)
2085{ 2176{
2086 struct scsi_cmnd *cmd = qc->scsicmd; 2177 struct scsi_cmnd *cmd = qc->scsicmd;
2087 unsigned int err_mask = qc->err_mask; 2178 unsigned int err_mask = qc->err_mask;
@@ -2091,7 +2182,7 @@ static int atapi_qc_complete(struct ata_queued_cmd *qc)
2091 if (unlikely(err_mask & AC_ERR_DEV)) { 2182 if (unlikely(err_mask & AC_ERR_DEV)) {
2092 cmd->result = SAM_STAT_CHECK_CONDITION; 2183 cmd->result = SAM_STAT_CHECK_CONDITION;
2093 atapi_request_sense(qc); 2184 atapi_request_sense(qc);
2094 return 1; 2185 return;
2095 } 2186 }
2096 2187
2097 else if (unlikely(err_mask)) 2188 else if (unlikely(err_mask))
@@ -2131,7 +2222,7 @@ static int atapi_qc_complete(struct ata_queued_cmd *qc)
2131 } 2222 }
2132 2223
2133 qc->scsidone(cmd); 2224 qc->scsidone(cmd);
2134 return 0; 2225 ata_qc_free(qc);
2135} 2226}
2136/** 2227/**
2137 * atapi_xlat - Initialize PACKET taskfile 2228 * atapi_xlat - Initialize PACKET taskfile
@@ -2157,7 +2248,7 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc, const u8 *scsicmd)
2157 if (ata_check_atapi_dma(qc)) 2248 if (ata_check_atapi_dma(qc))
2158 using_pio = 1; 2249 using_pio = 1;
2159 2250
2160 memcpy(&qc->cdb, scsicmd, qc->ap->cdb_len); 2251 memcpy(&qc->cdb, scsicmd, dev->cdb_len);
2161 2252
2162 qc->complete_fn = atapi_qc_complete; 2253 qc->complete_fn = atapi_qc_complete;
2163 2254
@@ -2517,7 +2608,8 @@ out_unlock:
2517 2608
2518/** 2609/**
2519 * ata_scsi_simulate - simulate SCSI command on ATA device 2610 * ata_scsi_simulate - simulate SCSI command on ATA device
2520 * @id: current IDENTIFY data for target device. 2611 * @ap: port the device is connected to
2612 * @dev: the target device
2521 * @cmd: SCSI command being sent to device. 2613 * @cmd: SCSI command being sent to device.
2522 * @done: SCSI command completion function. 2614 * @done: SCSI command completion function.
2523 * 2615 *
diff --git a/drivers/scsi/libata.h b/drivers/scsi/libata.h
index e03ce48b7b4b..9d76923a2253 100644
--- a/drivers/scsi/libata.h
+++ b/drivers/scsi/libata.h
@@ -45,7 +45,7 @@ extern struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
45 struct ata_device *dev); 45 struct ata_device *dev);
46extern int ata_rwcmd_protocol(struct ata_queued_cmd *qc); 46extern int ata_rwcmd_protocol(struct ata_queued_cmd *qc);
47extern void ata_qc_free(struct ata_queued_cmd *qc); 47extern void ata_qc_free(struct ata_queued_cmd *qc);
48extern int ata_qc_issue(struct ata_queued_cmd *qc); 48extern unsigned int ata_qc_issue(struct ata_queued_cmd *qc);
49extern int ata_check_atapi_dma(struct ata_queued_cmd *qc); 49extern int ata_check_atapi_dma(struct ata_queued_cmd *qc);
50extern void ata_dev_select(struct ata_port *ap, unsigned int device, 50extern void ata_dev_select(struct ata_port *ap, unsigned int device,
51 unsigned int wait, unsigned int can_sleep); 51 unsigned int wait, unsigned int can_sleep);
diff --git a/drivers/scsi/pdc_adma.c b/drivers/scsi/pdc_adma.c
index e8df0c9ec1e6..5f33cc932e70 100644
--- a/drivers/scsi/pdc_adma.c
+++ b/drivers/scsi/pdc_adma.c
@@ -131,7 +131,7 @@ static void adma_host_stop(struct ata_host_set *host_set);
131static void adma_port_stop(struct ata_port *ap); 131static void adma_port_stop(struct ata_port *ap);
132static void adma_phy_reset(struct ata_port *ap); 132static void adma_phy_reset(struct ata_port *ap);
133static void adma_qc_prep(struct ata_queued_cmd *qc); 133static void adma_qc_prep(struct ata_queued_cmd *qc);
134static int adma_qc_issue(struct ata_queued_cmd *qc); 134static unsigned int adma_qc_issue(struct ata_queued_cmd *qc);
135static int adma_check_atapi_dma(struct ata_queued_cmd *qc); 135static int adma_check_atapi_dma(struct ata_queued_cmd *qc);
136static void adma_bmdma_stop(struct ata_queued_cmd *qc); 136static void adma_bmdma_stop(struct ata_queued_cmd *qc);
137static u8 adma_bmdma_status(struct ata_port *ap); 137static u8 adma_bmdma_status(struct ata_port *ap);
@@ -143,11 +143,11 @@ static struct scsi_host_template adma_ata_sht = {
143 .name = DRV_NAME, 143 .name = DRV_NAME,
144 .ioctl = ata_scsi_ioctl, 144 .ioctl = ata_scsi_ioctl,
145 .queuecommand = ata_scsi_queuecmd, 145 .queuecommand = ata_scsi_queuecmd,
146 .eh_timed_out = ata_scsi_timed_out,
146 .eh_strategy_handler = ata_scsi_error, 147 .eh_strategy_handler = ata_scsi_error,
147 .can_queue = ATA_DEF_QUEUE, 148 .can_queue = ATA_DEF_QUEUE,
148 .this_id = ATA_SHT_THIS_ID, 149 .this_id = ATA_SHT_THIS_ID,
149 .sg_tablesize = LIBATA_MAX_PRD, 150 .sg_tablesize = LIBATA_MAX_PRD,
150 .max_sectors = ATA_MAX_SECTORS,
151 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 151 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
152 .emulated = ATA_SHT_EMULATED, 152 .emulated = ATA_SHT_EMULATED,
153 .use_clustering = ENABLE_CLUSTERING, 153 .use_clustering = ENABLE_CLUSTERING,
@@ -419,7 +419,7 @@ static inline void adma_packet_start(struct ata_queued_cmd *qc)
419 writew(aPIOMD4 | aGO, chan + ADMA_CONTROL); 419 writew(aPIOMD4 | aGO, chan + ADMA_CONTROL);
420} 420}
421 421
422static int adma_qc_issue(struct ata_queued_cmd *qc) 422static unsigned int adma_qc_issue(struct ata_queued_cmd *qc)
423{ 423{
424 struct adma_port_priv *pp = qc->ap->private_data; 424 struct adma_port_priv *pp = qc->ap->private_data;
425 425
diff --git a/drivers/scsi/sata_mv.c b/drivers/scsi/sata_mv.c
index 2770005324b4..aceaf56999a5 100644
--- a/drivers/scsi/sata_mv.c
+++ b/drivers/scsi/sata_mv.c
@@ -37,7 +37,7 @@
37#include <asm/io.h> 37#include <asm/io.h>
38 38
39#define DRV_NAME "sata_mv" 39#define DRV_NAME "sata_mv"
40#define DRV_VERSION "0.5" 40#define DRV_VERSION "0.6"
41 41
42enum { 42enum {
43 /* BAR's are enumerated in terms of pci_resource_start() terms */ 43 /* BAR's are enumerated in terms of pci_resource_start() terms */
@@ -228,7 +228,9 @@ enum {
228 MV_HP_ERRATA_50XXB2 = (1 << 2), 228 MV_HP_ERRATA_50XXB2 = (1 << 2),
229 MV_HP_ERRATA_60X1B2 = (1 << 3), 229 MV_HP_ERRATA_60X1B2 = (1 << 3),
230 MV_HP_ERRATA_60X1C0 = (1 << 4), 230 MV_HP_ERRATA_60X1C0 = (1 << 4),
231 MV_HP_50XX = (1 << 5), 231 MV_HP_ERRATA_XX42A0 = (1 << 5),
232 MV_HP_50XX = (1 << 6),
233 MV_HP_GEN_IIE = (1 << 7),
232 234
233 /* Port private flags (pp_flags) */ 235 /* Port private flags (pp_flags) */
234 MV_PP_FLAG_EDMA_EN = (1 << 0), 236 MV_PP_FLAG_EDMA_EN = (1 << 0),
@@ -237,6 +239,9 @@ enum {
237 239
238#define IS_50XX(hpriv) ((hpriv)->hp_flags & MV_HP_50XX) 240#define IS_50XX(hpriv) ((hpriv)->hp_flags & MV_HP_50XX)
239#define IS_60XX(hpriv) (((hpriv)->hp_flags & MV_HP_50XX) == 0) 241#define IS_60XX(hpriv) (((hpriv)->hp_flags & MV_HP_50XX) == 0)
242#define IS_GEN_I(hpriv) IS_50XX(hpriv)
243#define IS_GEN_II(hpriv) IS_60XX(hpriv)
244#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
240 245
241enum { 246enum {
242 /* Our DMA boundary is determined by an ePRD being unable to handle 247 /* Our DMA boundary is determined by an ePRD being unable to handle
@@ -255,6 +260,8 @@ enum chip_type {
255 chip_5080, 260 chip_5080,
256 chip_604x, 261 chip_604x,
257 chip_608x, 262 chip_608x,
263 chip_6042,
264 chip_7042,
258}; 265};
259 266
260/* Command ReQuest Block: 32B */ 267/* Command ReQuest Block: 32B */
@@ -265,6 +272,14 @@ struct mv_crqb {
265 u16 ata_cmd[11]; 272 u16 ata_cmd[11];
266}; 273};
267 274
275struct mv_crqb_iie {
276 u32 addr;
277 u32 addr_hi;
278 u32 flags;
279 u32 len;
280 u32 ata_cmd[4];
281};
282
268/* Command ResPonse Block: 8B */ 283/* Command ResPonse Block: 8B */
269struct mv_crpb { 284struct mv_crpb {
270 u16 id; 285 u16 id;
@@ -328,7 +343,8 @@ static void mv_host_stop(struct ata_host_set *host_set);
328static int mv_port_start(struct ata_port *ap); 343static int mv_port_start(struct ata_port *ap);
329static void mv_port_stop(struct ata_port *ap); 344static void mv_port_stop(struct ata_port *ap);
330static void mv_qc_prep(struct ata_queued_cmd *qc); 345static void mv_qc_prep(struct ata_queued_cmd *qc);
331static int mv_qc_issue(struct ata_queued_cmd *qc); 346static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
347static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
332static irqreturn_t mv_interrupt(int irq, void *dev_instance, 348static irqreturn_t mv_interrupt(int irq, void *dev_instance,
333 struct pt_regs *regs); 349 struct pt_regs *regs);
334static void mv_eng_timeout(struct ata_port *ap); 350static void mv_eng_timeout(struct ata_port *ap);
@@ -362,11 +378,11 @@ static struct scsi_host_template mv_sht = {
362 .name = DRV_NAME, 378 .name = DRV_NAME,
363 .ioctl = ata_scsi_ioctl, 379 .ioctl = ata_scsi_ioctl,
364 .queuecommand = ata_scsi_queuecmd, 380 .queuecommand = ata_scsi_queuecmd,
381 .eh_timed_out = ata_scsi_timed_out,
365 .eh_strategy_handler = ata_scsi_error, 382 .eh_strategy_handler = ata_scsi_error,
366 .can_queue = MV_USE_Q_DEPTH, 383 .can_queue = MV_USE_Q_DEPTH,
367 .this_id = ATA_SHT_THIS_ID, 384 .this_id = ATA_SHT_THIS_ID,
368 .sg_tablesize = MV_MAX_SG_CT / 2, 385 .sg_tablesize = MV_MAX_SG_CT / 2,
369 .max_sectors = ATA_MAX_SECTORS,
370 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 386 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
371 .emulated = ATA_SHT_EMULATED, 387 .emulated = ATA_SHT_EMULATED,
372 .use_clustering = ATA_SHT_USE_CLUSTERING, 388 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -430,6 +446,33 @@ static const struct ata_port_operations mv6_ops = {
430 .host_stop = mv_host_stop, 446 .host_stop = mv_host_stop,
431}; 447};
432 448
449static const struct ata_port_operations mv_iie_ops = {
450 .port_disable = ata_port_disable,
451
452 .tf_load = ata_tf_load,
453 .tf_read = ata_tf_read,
454 .check_status = ata_check_status,
455 .exec_command = ata_exec_command,
456 .dev_select = ata_std_dev_select,
457
458 .phy_reset = mv_phy_reset,
459
460 .qc_prep = mv_qc_prep_iie,
461 .qc_issue = mv_qc_issue,
462
463 .eng_timeout = mv_eng_timeout,
464
465 .irq_handler = mv_interrupt,
466 .irq_clear = mv_irq_clear,
467
468 .scr_read = mv_scr_read,
469 .scr_write = mv_scr_write,
470
471 .port_start = mv_port_start,
472 .port_stop = mv_port_stop,
473 .host_stop = mv_host_stop,
474};
475
433static const struct ata_port_info mv_port_info[] = { 476static const struct ata_port_info mv_port_info[] = {
434 { /* chip_504x */ 477 { /* chip_504x */
435 .sht = &mv_sht, 478 .sht = &mv_sht,
@@ -467,6 +510,21 @@ static const struct ata_port_info mv_port_info[] = {
467 .udma_mask = 0x7f, /* udma0-6 */ 510 .udma_mask = 0x7f, /* udma0-6 */
468 .port_ops = &mv6_ops, 511 .port_ops = &mv6_ops,
469 }, 512 },
513 { /* chip_6042 */
514 .sht = &mv_sht,
515 .host_flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
516 .pio_mask = 0x1f, /* pio0-4 */
517 .udma_mask = 0x7f, /* udma0-6 */
518 .port_ops = &mv_iie_ops,
519 },
520 { /* chip_7042 */
521 .sht = &mv_sht,
522 .host_flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
523 MV_FLAG_DUAL_HC),
524 .pio_mask = 0x1f, /* pio0-4 */
525 .udma_mask = 0x7f, /* udma0-6 */
526 .port_ops = &mv_iie_ops,
527 },
470}; 528};
471 529
472static const struct pci_device_id mv_pci_tbl[] = { 530static const struct pci_device_id mv_pci_tbl[] = {
@@ -477,6 +535,7 @@ static const struct pci_device_id mv_pci_tbl[] = {
477 535
478 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6040), 0, 0, chip_604x}, 536 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6040), 0, 0, chip_604x},
479 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6041), 0, 0, chip_604x}, 537 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6041), 0, 0, chip_604x},
538 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6042), 0, 0, chip_6042},
480 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6080), 0, 0, chip_608x}, 539 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6080), 0, 0, chip_608x},
481 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6081), 0, 0, chip_608x}, 540 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6081), 0, 0, chip_608x},
482 541
@@ -572,8 +631,8 @@ static void mv_irq_clear(struct ata_port *ap)
572 * @base: port base address 631 * @base: port base address
573 * @pp: port private data 632 * @pp: port private data
574 * 633 *
575 * Verify the local cache of the eDMA state is accurate with an 634 * Verify the local cache of the eDMA state is accurate with a
576 * assert. 635 * WARN_ON.
577 * 636 *
578 * LOCKING: 637 * LOCKING:
579 * Inherited from caller. 638 * Inherited from caller.
@@ -584,15 +643,15 @@ static void mv_start_dma(void __iomem *base, struct mv_port_priv *pp)
584 writelfl(EDMA_EN, base + EDMA_CMD_OFS); 643 writelfl(EDMA_EN, base + EDMA_CMD_OFS);
585 pp->pp_flags |= MV_PP_FLAG_EDMA_EN; 644 pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
586 } 645 }
587 assert(EDMA_EN & readl(base + EDMA_CMD_OFS)); 646 WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));
588} 647}
589 648
590/** 649/**
591 * mv_stop_dma - Disable eDMA engine 650 * mv_stop_dma - Disable eDMA engine
592 * @ap: ATA channel to manipulate 651 * @ap: ATA channel to manipulate
593 * 652 *
594 * Verify the local cache of the eDMA state is accurate with an 653 * Verify the local cache of the eDMA state is accurate with a
595 * assert. 654 * WARN_ON.
596 * 655 *
597 * LOCKING: 656 * LOCKING:
598 * Inherited from caller. 657 * Inherited from caller.
@@ -610,7 +669,7 @@ static void mv_stop_dma(struct ata_port *ap)
610 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS); 669 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
611 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; 670 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
612 } else { 671 } else {
613 assert(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS))); 672 WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
614 } 673 }
615 674
616 /* now properly wait for the eDMA to stop */ 675 /* now properly wait for the eDMA to stop */
@@ -773,6 +832,33 @@ static inline void mv_priv_free(struct mv_port_priv *pp, struct device *dev)
773 dma_free_coherent(dev, MV_PORT_PRIV_DMA_SZ, pp->crpb, pp->crpb_dma); 832 dma_free_coherent(dev, MV_PORT_PRIV_DMA_SZ, pp->crpb, pp->crpb_dma);
774} 833}
775 834
835static void mv_edma_cfg(struct mv_host_priv *hpriv, void __iomem *port_mmio)
836{
837 u32 cfg = readl(port_mmio + EDMA_CFG_OFS);
838
839 /* set up non-NCQ EDMA configuration */
840 cfg &= ~0x1f; /* clear queue depth */
841 cfg &= ~EDMA_CFG_NCQ; /* clear NCQ mode */
842 cfg &= ~(1 << 9); /* disable equeue */
843
844 if (IS_GEN_I(hpriv))
845 cfg |= (1 << 8); /* enab config burst size mask */
846
847 else if (IS_GEN_II(hpriv))
848 cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
849
850 else if (IS_GEN_IIE(hpriv)) {
851 cfg |= (1 << 23); /* dis RX PM port mask */
852 cfg &= ~(1 << 16); /* dis FIS-based switching (for now) */
853 cfg &= ~(1 << 19); /* dis 128-entry queue (for now?) */
854 cfg |= (1 << 18); /* enab early completion */
855 cfg |= (1 << 17); /* enab host q cache */
856 cfg |= (1 << 22); /* enab cutthrough */
857 }
858
859 writelfl(cfg, port_mmio + EDMA_CFG_OFS);
860}
861
776/** 862/**
777 * mv_port_start - Port specific init/start routine. 863 * mv_port_start - Port specific init/start routine.
778 * @ap: ATA channel to manipulate 864 * @ap: ATA channel to manipulate
@@ -786,6 +872,7 @@ static inline void mv_priv_free(struct mv_port_priv *pp, struct device *dev)
786static int mv_port_start(struct ata_port *ap) 872static int mv_port_start(struct ata_port *ap)
787{ 873{
788 struct device *dev = ap->host_set->dev; 874 struct device *dev = ap->host_set->dev;
875 struct mv_host_priv *hpriv = ap->host_set->private_data;
789 struct mv_port_priv *pp; 876 struct mv_port_priv *pp;
790 void __iomem *port_mmio = mv_ap_base(ap); 877 void __iomem *port_mmio = mv_ap_base(ap);
791 void *mem; 878 void *mem;
@@ -829,17 +916,26 @@ static int mv_port_start(struct ata_port *ap)
829 pp->sg_tbl = mem; 916 pp->sg_tbl = mem;
830 pp->sg_tbl_dma = mem_dma; 917 pp->sg_tbl_dma = mem_dma;
831 918
832 writelfl(EDMA_CFG_Q_DEPTH | EDMA_CFG_RD_BRST_EXT | 919 mv_edma_cfg(hpriv, port_mmio);
833 EDMA_CFG_WR_BUFF_LEN, port_mmio + EDMA_CFG_OFS);
834 920
835 writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS); 921 writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
836 writelfl(pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK, 922 writelfl(pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK,
837 port_mmio + EDMA_REQ_Q_IN_PTR_OFS); 923 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
838 924
839 writelfl(0, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS); 925 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
840 writelfl(0, port_mmio + EDMA_RSP_Q_IN_PTR_OFS); 926 writelfl(pp->crqb_dma & 0xffffffff,
927 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
928 else
929 writelfl(0, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
841 930
842 writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS); 931 writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
932
933 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
934 writelfl(pp->crpb_dma & 0xffffffff,
935 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
936 else
937 writelfl(0, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
938
843 writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK, 939 writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK,
844 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); 940 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
845 941
@@ -960,21 +1056,19 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
960 struct ata_taskfile *tf; 1056 struct ata_taskfile *tf;
961 u16 flags = 0; 1057 u16 flags = 0;
962 1058
963 if (ATA_PROT_DMA != qc->tf.protocol) { 1059 if (ATA_PROT_DMA != qc->tf.protocol)
964 return; 1060 return;
965 }
966 1061
967 /* the req producer index should be the same as we remember it */ 1062 /* the req producer index should be the same as we remember it */
968 assert(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >> 1063 WARN_ON(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >>
969 EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) == 1064 EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
970 pp->req_producer); 1065 pp->req_producer);
971 1066
972 /* Fill in command request block 1067 /* Fill in command request block
973 */ 1068 */
974 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) { 1069 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
975 flags |= CRQB_FLAG_READ; 1070 flags |= CRQB_FLAG_READ;
976 } 1071 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
977 assert(MV_MAX_Q_DEPTH > qc->tag);
978 flags |= qc->tag << CRQB_TAG_SHIFT; 1072 flags |= qc->tag << CRQB_TAG_SHIFT;
979 1073
980 pp->crqb[pp->req_producer].sg_addr = 1074 pp->crqb[pp->req_producer].sg_addr =
@@ -1029,9 +1123,76 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
1029 mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0); 1123 mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
1030 mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */ 1124 mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */
1031 1125
1032 if (!(qc->flags & ATA_QCFLAG_DMAMAP)) { 1126 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1127 return;
1128 mv_fill_sg(qc);
1129}
1130
1131/**
1132 * mv_qc_prep_iie - Host specific command preparation.
1133 * @qc: queued command to prepare
1134 *
1135 * This routine simply redirects to the general purpose routine
1136 * if command is not DMA. Else, it handles prep of the CRQB
1137 * (command request block), does some sanity checking, and calls
1138 * the SG load routine.
1139 *
1140 * LOCKING:
1141 * Inherited from caller.
1142 */
1143static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
1144{
1145 struct ata_port *ap = qc->ap;
1146 struct mv_port_priv *pp = ap->private_data;
1147 struct mv_crqb_iie *crqb;
1148 struct ata_taskfile *tf;
1149 u32 flags = 0;
1150
1151 if (ATA_PROT_DMA != qc->tf.protocol)
1152 return;
1153
1154 /* the req producer index should be the same as we remember it */
1155 WARN_ON(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >>
1156 EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
1157 pp->req_producer);
1158
1159 /* Fill in Gen IIE command request block
1160 */
1161 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1162 flags |= CRQB_FLAG_READ;
1163
1164 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1165 flags |= qc->tag << CRQB_TAG_SHIFT;
1166
1167 crqb = (struct mv_crqb_iie *) &pp->crqb[pp->req_producer];
1168 crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
1169 crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
1170 crqb->flags = cpu_to_le32(flags);
1171
1172 tf = &qc->tf;
1173 crqb->ata_cmd[0] = cpu_to_le32(
1174 (tf->command << 16) |
1175 (tf->feature << 24)
1176 );
1177 crqb->ata_cmd[1] = cpu_to_le32(
1178 (tf->lbal << 0) |
1179 (tf->lbam << 8) |
1180 (tf->lbah << 16) |
1181 (tf->device << 24)
1182 );
1183 crqb->ata_cmd[2] = cpu_to_le32(
1184 (tf->hob_lbal << 0) |
1185 (tf->hob_lbam << 8) |
1186 (tf->hob_lbah << 16) |
1187 (tf->hob_feature << 24)
1188 );
1189 crqb->ata_cmd[3] = cpu_to_le32(
1190 (tf->nsect << 0) |
1191 (tf->hob_nsect << 8)
1192 );
1193
1194 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1033 return; 1195 return;
1034 }
1035 mv_fill_sg(qc); 1196 mv_fill_sg(qc);
1036} 1197}
1037 1198
@@ -1047,7 +1208,7 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
1047 * LOCKING: 1208 * LOCKING:
1048 * Inherited from caller. 1209 * Inherited from caller.
1049 */ 1210 */
1050static int mv_qc_issue(struct ata_queued_cmd *qc) 1211static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
1051{ 1212{
1052 void __iomem *port_mmio = mv_ap_base(qc->ap); 1213 void __iomem *port_mmio = mv_ap_base(qc->ap);
1053 struct mv_port_priv *pp = qc->ap->private_data; 1214 struct mv_port_priv *pp = qc->ap->private_data;
@@ -1065,12 +1226,12 @@ static int mv_qc_issue(struct ata_queued_cmd *qc)
1065 in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS); 1226 in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
1066 1227
1067 /* the req producer index should be the same as we remember it */ 1228 /* the req producer index should be the same as we remember it */
1068 assert(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) == 1229 WARN_ON(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
1069 pp->req_producer); 1230 pp->req_producer);
1070 /* until we do queuing, the queue should be empty at this point */ 1231 /* until we do queuing, the queue should be empty at this point */
1071 assert(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) == 1232 WARN_ON(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
1072 ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS) >> 1233 ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS) >>
1073 EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK)); 1234 EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
1074 1235
1075 mv_inc_q_index(&pp->req_producer); /* now incr producer index */ 1236 mv_inc_q_index(&pp->req_producer); /* now incr producer index */
1076 1237
@@ -1090,7 +1251,7 @@ static int mv_qc_issue(struct ata_queued_cmd *qc)
1090 * 1251 *
1091 * This routine is for use when the port is in DMA mode, when it 1252 * This routine is for use when the port is in DMA mode, when it
1092 * will be using the CRPB (command response block) method of 1253 * will be using the CRPB (command response block) method of
1093 * returning command completion information. We assert indices 1254 * returning command completion information. We check indices
1094 * are good, grab status, and bump the response consumer index to 1255 * are good, grab status, and bump the response consumer index to
1095 * prove that we're up to date. 1256 * prove that we're up to date.
1096 * 1257 *
@@ -1106,16 +1267,16 @@ static u8 mv_get_crpb_status(struct ata_port *ap)
1106 out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); 1267 out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
1107 1268
1108 /* the response consumer index should be the same as we remember it */ 1269 /* the response consumer index should be the same as we remember it */
1109 assert(((out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) == 1270 WARN_ON(((out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
1110 pp->rsp_consumer); 1271 pp->rsp_consumer);
1111 1272
1112 /* increment our consumer index... */ 1273 /* increment our consumer index... */
1113 pp->rsp_consumer = mv_inc_q_index(&pp->rsp_consumer); 1274 pp->rsp_consumer = mv_inc_q_index(&pp->rsp_consumer);
1114 1275
1115 /* and, until we do NCQ, there should only be 1 CRPB waiting */ 1276 /* and, until we do NCQ, there should only be 1 CRPB waiting */
1116 assert(((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS) >> 1277 WARN_ON(((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS) >>
1117 EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) == 1278 EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
1118 pp->rsp_consumer); 1279 pp->rsp_consumer);
1119 1280
1120 /* write out our inc'd consumer index so EDMA knows we're caught up */ 1281 /* write out our inc'd consumer index so EDMA knows we're caught up */
1121 out_ptr &= EDMA_RSP_Q_BASE_LO_MASK; 1282 out_ptr &= EDMA_RSP_Q_BASE_LO_MASK;
@@ -1681,6 +1842,12 @@ static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1681 m2 |= hpriv->signal[port].pre; 1842 m2 |= hpriv->signal[port].pre;
1682 m2 &= ~(1 << 16); 1843 m2 &= ~(1 << 16);
1683 1844
1845 /* according to mvSata 3.6.1, some IIE values are fixed */
1846 if (IS_GEN_IIE(hpriv)) {
1847 m2 &= ~0xC30FF01F;
1848 m2 |= 0x0000900F;
1849 }
1850
1684 writel(m2, port_mmio + PHY_MODE2); 1851 writel(m2, port_mmio + PHY_MODE2);
1685} 1852}
1686 1853
@@ -1846,7 +2013,6 @@ static void mv_phy_reset(struct ata_port *ap)
1846static void mv_eng_timeout(struct ata_port *ap) 2013static void mv_eng_timeout(struct ata_port *ap)
1847{ 2014{
1848 struct ata_queued_cmd *qc; 2015 struct ata_queued_cmd *qc;
1849 unsigned long flags;
1850 2016
1851 printk(KERN_ERR "ata%u: Entering mv_eng_timeout\n",ap->id); 2017 printk(KERN_ERR "ata%u: Entering mv_eng_timeout\n",ap->id);
1852 DPRINTK("All regs @ start of eng_timeout\n"); 2018 DPRINTK("All regs @ start of eng_timeout\n");
@@ -1861,22 +2027,8 @@ static void mv_eng_timeout(struct ata_port *ap)
1861 mv_err_intr(ap); 2027 mv_err_intr(ap);
1862 mv_stop_and_reset(ap); 2028 mv_stop_and_reset(ap);
1863 2029
1864 if (!qc) { 2030 qc->err_mask |= AC_ERR_TIMEOUT;
1865 printk(KERN_ERR "ata%u: BUG: timeout without command\n", 2031 ata_eh_qc_complete(qc);
1866 ap->id);
1867 } else {
1868 /* hack alert! We cannot use the supplied completion
1869 * function from inside the ->eh_strategy_handler() thread.
1870 * libata is the only user of ->eh_strategy_handler() in
1871 * any kernel, so the default scsi_done() assumes it is
1872 * not being called from the SCSI EH.
1873 */
1874 spin_lock_irqsave(&ap->host_set->lock, flags);
1875 qc->scsidone = scsi_finish_command;
1876 qc->err_mask |= AC_ERR_OTHER;
1877 ata_qc_complete(qc);
1878 spin_unlock_irqrestore(&ap->host_set->lock, flags);
1879 }
1880} 2032}
1881 2033
1882/** 2034/**
@@ -1995,6 +2147,27 @@ static int mv_chip_id(struct pci_dev *pdev, struct mv_host_priv *hpriv,
1995 } 2147 }
1996 break; 2148 break;
1997 2149
2150 case chip_7042:
2151 case chip_6042:
2152 hpriv->ops = &mv6xxx_ops;
2153
2154 hp_flags |= MV_HP_GEN_IIE;
2155
2156 switch (rev_id) {
2157 case 0x0:
2158 hp_flags |= MV_HP_ERRATA_XX42A0;
2159 break;
2160 case 0x1:
2161 hp_flags |= MV_HP_ERRATA_60X1C0;
2162 break;
2163 default:
2164 dev_printk(KERN_WARNING, &pdev->dev,
2165 "Applying 60X1C0 workarounds to unknown rev\n");
2166 hp_flags |= MV_HP_ERRATA_60X1C0;
2167 break;
2168 }
2169 break;
2170
1998 default: 2171 default:
1999 printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n", board_idx); 2172 printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n", board_idx);
2000 return 1; 2173 return 1;
diff --git a/drivers/scsi/sata_nv.c b/drivers/scsi/sata_nv.c
index bbbb55eeb73a..caffadc2e0ae 100644
--- a/drivers/scsi/sata_nv.c
+++ b/drivers/scsi/sata_nv.c
@@ -229,11 +229,11 @@ static struct scsi_host_template nv_sht = {
229 .name = DRV_NAME, 229 .name = DRV_NAME,
230 .ioctl = ata_scsi_ioctl, 230 .ioctl = ata_scsi_ioctl,
231 .queuecommand = ata_scsi_queuecmd, 231 .queuecommand = ata_scsi_queuecmd,
232 .eh_timed_out = ata_scsi_timed_out,
232 .eh_strategy_handler = ata_scsi_error, 233 .eh_strategy_handler = ata_scsi_error,
233 .can_queue = ATA_DEF_QUEUE, 234 .can_queue = ATA_DEF_QUEUE,
234 .this_id = ATA_SHT_THIS_ID, 235 .this_id = ATA_SHT_THIS_ID,
235 .sg_tablesize = LIBATA_MAX_PRD, 236 .sg_tablesize = LIBATA_MAX_PRD,
236 .max_sectors = ATA_MAX_SECTORS,
237 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 237 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
238 .emulated = ATA_SHT_EMULATED, 238 .emulated = ATA_SHT_EMULATED,
239 .use_clustering = ATA_SHT_USE_CLUSTERING, 239 .use_clustering = ATA_SHT_USE_CLUSTERING,
diff --git a/drivers/scsi/sata_promise.c b/drivers/scsi/sata_promise.c
index b0b0a69b3563..ba2b7a0983db 100644
--- a/drivers/scsi/sata_promise.c
+++ b/drivers/scsi/sata_promise.c
@@ -46,7 +46,7 @@
46#include "sata_promise.h" 46#include "sata_promise.h"
47 47
48#define DRV_NAME "sata_promise" 48#define DRV_NAME "sata_promise"
49#define DRV_VERSION "1.03" 49#define DRV_VERSION "1.04"
50 50
51 51
52enum { 52enum {
@@ -58,6 +58,7 @@ enum {
58 PDC_GLOBAL_CTL = 0x48, /* Global control/status (per port) */ 58 PDC_GLOBAL_CTL = 0x48, /* Global control/status (per port) */
59 PDC_CTLSTAT = 0x60, /* IDE control and status (per port) */ 59 PDC_CTLSTAT = 0x60, /* IDE control and status (per port) */
60 PDC_SATA_PLUG_CSR = 0x6C, /* SATA Plug control/status reg */ 60 PDC_SATA_PLUG_CSR = 0x6C, /* SATA Plug control/status reg */
61 PDC2_SATA_PLUG_CSR = 0x60, /* SATAII Plug control/status reg */
61 PDC_SLEW_CTL = 0x470, /* slew rate control reg */ 62 PDC_SLEW_CTL = 0x470, /* slew rate control reg */
62 63
63 PDC_ERR_MASK = (1<<19) | (1<<20) | (1<<21) | (1<<22) | 64 PDC_ERR_MASK = (1<<19) | (1<<20) | (1<<21) | (1<<22) |
@@ -67,8 +68,10 @@ enum {
67 board_20319 = 1, /* FastTrak S150 TX4 */ 68 board_20319 = 1, /* FastTrak S150 TX4 */
68 board_20619 = 2, /* FastTrak TX4000 */ 69 board_20619 = 2, /* FastTrak TX4000 */
69 board_20771 = 3, /* FastTrak TX2300 */ 70 board_20771 = 3, /* FastTrak TX2300 */
71 board_2057x = 4, /* SATAII150 Tx2plus */
72 board_40518 = 5, /* SATAII150 Tx4 */
70 73
71 PDC_HAS_PATA = (1 << 1), /* PDC20375 has PATA */ 74 PDC_HAS_PATA = (1 << 1), /* PDC20375/20575 has PATA */
72 75
73 PDC_RESET = (1 << 11), /* HDMA reset */ 76 PDC_RESET = (1 << 11), /* HDMA reset */
74 77
@@ -82,6 +85,10 @@ struct pdc_port_priv {
82 dma_addr_t pkt_dma; 85 dma_addr_t pkt_dma;
83}; 86};
84 87
88struct pdc_host_priv {
89 int hotplug_offset;
90};
91
85static u32 pdc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg); 92static u32 pdc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg);
86static void pdc_sata_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); 93static void pdc_sata_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
87static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); 94static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
@@ -95,7 +102,8 @@ static void pdc_qc_prep(struct ata_queued_cmd *qc);
95static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf); 102static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
96static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf); 103static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
97static void pdc_irq_clear(struct ata_port *ap); 104static void pdc_irq_clear(struct ata_port *ap);
98static int pdc_qc_issue_prot(struct ata_queued_cmd *qc); 105static unsigned int pdc_qc_issue_prot(struct ata_queued_cmd *qc);
106static void pdc_host_stop(struct ata_host_set *host_set);
99 107
100 108
101static struct scsi_host_template pdc_ata_sht = { 109static struct scsi_host_template pdc_ata_sht = {
@@ -103,11 +111,11 @@ static struct scsi_host_template pdc_ata_sht = {
103 .name = DRV_NAME, 111 .name = DRV_NAME,
104 .ioctl = ata_scsi_ioctl, 112 .ioctl = ata_scsi_ioctl,
105 .queuecommand = ata_scsi_queuecmd, 113 .queuecommand = ata_scsi_queuecmd,
114 .eh_timed_out = ata_scsi_timed_out,
106 .eh_strategy_handler = ata_scsi_error, 115 .eh_strategy_handler = ata_scsi_error,
107 .can_queue = ATA_DEF_QUEUE, 116 .can_queue = ATA_DEF_QUEUE,
108 .this_id = ATA_SHT_THIS_ID, 117 .this_id = ATA_SHT_THIS_ID,
109 .sg_tablesize = LIBATA_MAX_PRD, 118 .sg_tablesize = LIBATA_MAX_PRD,
110 .max_sectors = ATA_MAX_SECTORS,
111 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 119 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
112 .emulated = ATA_SHT_EMULATED, 120 .emulated = ATA_SHT_EMULATED,
113 .use_clustering = ATA_SHT_USE_CLUSTERING, 121 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -137,7 +145,7 @@ static const struct ata_port_operations pdc_sata_ops = {
137 .scr_write = pdc_sata_scr_write, 145 .scr_write = pdc_sata_scr_write,
138 .port_start = pdc_port_start, 146 .port_start = pdc_port_start,
139 .port_stop = pdc_port_stop, 147 .port_stop = pdc_port_stop,
140 .host_stop = ata_pci_host_stop, 148 .host_stop = pdc_host_stop,
141}; 149};
142 150
143static const struct ata_port_operations pdc_pata_ops = { 151static const struct ata_port_operations pdc_pata_ops = {
@@ -158,7 +166,7 @@ static const struct ata_port_operations pdc_pata_ops = {
158 166
159 .port_start = pdc_port_start, 167 .port_start = pdc_port_start,
160 .port_stop = pdc_port_stop, 168 .port_stop = pdc_port_stop,
161 .host_stop = ata_pci_host_stop, 169 .host_stop = pdc_host_stop,
162}; 170};
163 171
164static const struct ata_port_info pdc_port_info[] = { 172static const struct ata_port_info pdc_port_info[] = {
@@ -201,6 +209,26 @@ static const struct ata_port_info pdc_port_info[] = {
201 .udma_mask = 0x7f, /* udma0-6 ; FIXME */ 209 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
202 .port_ops = &pdc_sata_ops, 210 .port_ops = &pdc_sata_ops,
203 }, 211 },
212
213 /* board_2057x */
214 {
215 .sht = &pdc_ata_sht,
216 .host_flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA,
217 .pio_mask = 0x1f, /* pio0-4 */
218 .mwdma_mask = 0x07, /* mwdma0-2 */
219 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
220 .port_ops = &pdc_sata_ops,
221 },
222
223 /* board_40518 */
224 {
225 .sht = &pdc_ata_sht,
226 .host_flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA,
227 .pio_mask = 0x1f, /* pio0-4 */
228 .mwdma_mask = 0x07, /* mwdma0-2 */
229 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
230 .port_ops = &pdc_sata_ops,
231 },
204}; 232};
205 233
206static const struct pci_device_id pdc_ata_pci_tbl[] = { 234static const struct pci_device_id pdc_ata_pci_tbl[] = {
@@ -217,9 +245,9 @@ static const struct pci_device_id pdc_ata_pci_tbl[] = {
217 { PCI_VENDOR_ID_PROMISE, 0x3376, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 245 { PCI_VENDOR_ID_PROMISE, 0x3376, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
218 board_2037x }, 246 board_2037x },
219 { PCI_VENDOR_ID_PROMISE, 0x3574, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 247 { PCI_VENDOR_ID_PROMISE, 0x3574, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
220 board_2037x }, 248 board_2057x },
221 { PCI_VENDOR_ID_PROMISE, 0x3d75, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 249 { PCI_VENDOR_ID_PROMISE, 0x3d75, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
222 board_2037x }, 250 board_2057x },
223 { PCI_VENDOR_ID_PROMISE, 0x3d73, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 251 { PCI_VENDOR_ID_PROMISE, 0x3d73, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
224 board_2037x }, 252 board_2037x },
225 253
@@ -232,7 +260,7 @@ static const struct pci_device_id pdc_ata_pci_tbl[] = {
232 { PCI_VENDOR_ID_PROMISE, 0x3d17, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 260 { PCI_VENDOR_ID_PROMISE, 0x3d17, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
233 board_20319 }, 261 board_20319 },
234 { PCI_VENDOR_ID_PROMISE, 0x3d18, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 262 { PCI_VENDOR_ID_PROMISE, 0x3d18, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
235 board_20319 }, 263 board_40518 },
236 264
237 { PCI_VENDOR_ID_PROMISE, 0x6629, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 265 { PCI_VENDOR_ID_PROMISE, 0x6629, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
238 board_20619 }, 266 board_20619 },
@@ -261,12 +289,11 @@ static int pdc_port_start(struct ata_port *ap)
261 if (rc) 289 if (rc)
262 return rc; 290 return rc;
263 291
264 pp = kmalloc(sizeof(*pp), GFP_KERNEL); 292 pp = kzalloc(sizeof(*pp), GFP_KERNEL);
265 if (!pp) { 293 if (!pp) {
266 rc = -ENOMEM; 294 rc = -ENOMEM;
267 goto err_out; 295 goto err_out;
268 } 296 }
269 memset(pp, 0, sizeof(*pp));
270 297
271 pp->pkt = dma_alloc_coherent(dev, 128, &pp->pkt_dma, GFP_KERNEL); 298 pp->pkt = dma_alloc_coherent(dev, 128, &pp->pkt_dma, GFP_KERNEL);
272 if (!pp->pkt) { 299 if (!pp->pkt) {
@@ -298,6 +325,16 @@ static void pdc_port_stop(struct ata_port *ap)
298} 325}
299 326
300 327
328static void pdc_host_stop(struct ata_host_set *host_set)
329{
330 struct pdc_host_priv *hp = host_set->private_data;
331
332 ata_pci_host_stop(host_set);
333
334 kfree(hp);
335}
336
337
301static void pdc_reset_port(struct ata_port *ap) 338static void pdc_reset_port(struct ata_port *ap)
302{ 339{
303 void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr + PDC_CTLSTAT; 340 void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr + PDC_CTLSTAT;
@@ -394,19 +431,6 @@ static void pdc_eng_timeout(struct ata_port *ap)
394 spin_lock_irqsave(&host_set->lock, flags); 431 spin_lock_irqsave(&host_set->lock, flags);
395 432
396 qc = ata_qc_from_tag(ap, ap->active_tag); 433 qc = ata_qc_from_tag(ap, ap->active_tag);
397 if (!qc) {
398 printk(KERN_ERR "ata%u: BUG: timeout without command\n",
399 ap->id);
400 goto out;
401 }
402
403 /* hack alert! We cannot use the supplied completion
404 * function from inside the ->eh_strategy_handler() thread.
405 * libata is the only user of ->eh_strategy_handler() in
406 * any kernel, so the default scsi_done() assumes it is
407 * not being called from the SCSI EH.
408 */
409 qc->scsidone = scsi_finish_command;
410 434
411 switch (qc->tf.protocol) { 435 switch (qc->tf.protocol) {
412 case ATA_PROT_DMA: 436 case ATA_PROT_DMA:
@@ -414,7 +438,6 @@ static void pdc_eng_timeout(struct ata_port *ap)
414 printk(KERN_ERR "ata%u: command timeout\n", ap->id); 438 printk(KERN_ERR "ata%u: command timeout\n", ap->id);
415 drv_stat = ata_wait_idle(ap); 439 drv_stat = ata_wait_idle(ap);
416 qc->err_mask |= __ac_err_mask(drv_stat); 440 qc->err_mask |= __ac_err_mask(drv_stat);
417 ata_qc_complete(qc);
418 break; 441 break;
419 442
420 default: 443 default:
@@ -424,12 +447,11 @@ static void pdc_eng_timeout(struct ata_port *ap)
424 ap->id, qc->tf.command, drv_stat); 447 ap->id, qc->tf.command, drv_stat);
425 448
426 qc->err_mask |= ac_err_mask(drv_stat); 449 qc->err_mask |= ac_err_mask(drv_stat);
427 ata_qc_complete(qc);
428 break; 450 break;
429 } 451 }
430 452
431out:
432 spin_unlock_irqrestore(&host_set->lock, flags); 453 spin_unlock_irqrestore(&host_set->lock, flags);
454 ata_eh_qc_complete(qc);
433 DPRINTK("EXIT\n"); 455 DPRINTK("EXIT\n");
434} 456}
435 457
@@ -495,14 +517,15 @@ static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *r
495 VPRINTK("QUICK EXIT 2\n"); 517 VPRINTK("QUICK EXIT 2\n");
496 return IRQ_NONE; 518 return IRQ_NONE;
497 } 519 }
520
521 spin_lock(&host_set->lock);
522
498 mask &= 0xffff; /* only 16 tags possible */ 523 mask &= 0xffff; /* only 16 tags possible */
499 if (!mask) { 524 if (!mask) {
500 VPRINTK("QUICK EXIT 3\n"); 525 VPRINTK("QUICK EXIT 3\n");
501 return IRQ_NONE; 526 goto done_irq;
502 } 527 }
503 528
504 spin_lock(&host_set->lock);
505
506 writel(mask, mmio_base + PDC_INT_SEQMASK); 529 writel(mask, mmio_base + PDC_INT_SEQMASK);
507 530
508 for (i = 0; i < host_set->n_ports; i++) { 531 for (i = 0; i < host_set->n_ports; i++) {
@@ -519,10 +542,10 @@ static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *r
519 } 542 }
520 } 543 }
521 544
522 spin_unlock(&host_set->lock);
523
524 VPRINTK("EXIT\n"); 545 VPRINTK("EXIT\n");
525 546
547done_irq:
548 spin_unlock(&host_set->lock);
526 return IRQ_RETVAL(handled); 549 return IRQ_RETVAL(handled);
527} 550}
528 551
@@ -544,7 +567,7 @@ static inline void pdc_packet_start(struct ata_queued_cmd *qc)
544 readl((void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); /* flush */ 567 readl((void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); /* flush */
545} 568}
546 569
547static int pdc_qc_issue_prot(struct ata_queued_cmd *qc) 570static unsigned int pdc_qc_issue_prot(struct ata_queued_cmd *qc)
548{ 571{
549 switch (qc->tf.protocol) { 572 switch (qc->tf.protocol) {
550 case ATA_PROT_DMA: 573 case ATA_PROT_DMA:
@@ -600,6 +623,8 @@ static void pdc_ata_setup_port(struct ata_ioports *port, unsigned long base)
600static void pdc_host_init(unsigned int chip_id, struct ata_probe_ent *pe) 623static void pdc_host_init(unsigned int chip_id, struct ata_probe_ent *pe)
601{ 624{
602 void __iomem *mmio = pe->mmio_base; 625 void __iomem *mmio = pe->mmio_base;
626 struct pdc_host_priv *hp = pe->private_data;
627 int hotplug_offset = hp->hotplug_offset;
603 u32 tmp; 628 u32 tmp;
604 629
605 /* 630 /*
@@ -614,12 +639,12 @@ static void pdc_host_init(unsigned int chip_id, struct ata_probe_ent *pe)
614 writel(tmp, mmio + PDC_FLASH_CTL); 639 writel(tmp, mmio + PDC_FLASH_CTL);
615 640
616 /* clear plug/unplug flags for all ports */ 641 /* clear plug/unplug flags for all ports */
617 tmp = readl(mmio + PDC_SATA_PLUG_CSR); 642 tmp = readl(mmio + hotplug_offset);
618 writel(tmp | 0xff, mmio + PDC_SATA_PLUG_CSR); 643 writel(tmp | 0xff, mmio + hotplug_offset);
619 644
620 /* mask plug/unplug ints */ 645 /* mask plug/unplug ints */
621 tmp = readl(mmio + PDC_SATA_PLUG_CSR); 646 tmp = readl(mmio + hotplug_offset);
622 writel(tmp | 0xff0000, mmio + PDC_SATA_PLUG_CSR); 647 writel(tmp | 0xff0000, mmio + hotplug_offset);
623 648
624 /* reduce TBG clock to 133 Mhz. */ 649 /* reduce TBG clock to 133 Mhz. */
625 tmp = readl(mmio + PDC_TBG_MODE); 650 tmp = readl(mmio + PDC_TBG_MODE);
@@ -641,6 +666,7 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
641{ 666{
642 static int printed_version; 667 static int printed_version;
643 struct ata_probe_ent *probe_ent = NULL; 668 struct ata_probe_ent *probe_ent = NULL;
669 struct pdc_host_priv *hp;
644 unsigned long base; 670 unsigned long base;
645 void __iomem *mmio_base; 671 void __iomem *mmio_base;
646 unsigned int board_idx = (unsigned int) ent->driver_data; 672 unsigned int board_idx = (unsigned int) ent->driver_data;
@@ -671,13 +697,12 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
671 if (rc) 697 if (rc)
672 goto err_out_regions; 698 goto err_out_regions;
673 699
674 probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL); 700 probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
675 if (probe_ent == NULL) { 701 if (probe_ent == NULL) {
676 rc = -ENOMEM; 702 rc = -ENOMEM;
677 goto err_out_regions; 703 goto err_out_regions;
678 } 704 }
679 705
680 memset(probe_ent, 0, sizeof(*probe_ent));
681 probe_ent->dev = pci_dev_to_dev(pdev); 706 probe_ent->dev = pci_dev_to_dev(pdev);
682 INIT_LIST_HEAD(&probe_ent->node); 707 INIT_LIST_HEAD(&probe_ent->node);
683 708
@@ -688,6 +713,16 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
688 } 713 }
689 base = (unsigned long) mmio_base; 714 base = (unsigned long) mmio_base;
690 715
716 hp = kzalloc(sizeof(*hp), GFP_KERNEL);
717 if (hp == NULL) {
718 rc = -ENOMEM;
719 goto err_out_free_ent;
720 }
721
722 /* Set default hotplug offset */
723 hp->hotplug_offset = PDC_SATA_PLUG_CSR;
724 probe_ent->private_data = hp;
725
691 probe_ent->sht = pdc_port_info[board_idx].sht; 726 probe_ent->sht = pdc_port_info[board_idx].sht;
692 probe_ent->host_flags = pdc_port_info[board_idx].host_flags; 727 probe_ent->host_flags = pdc_port_info[board_idx].host_flags;
693 probe_ent->pio_mask = pdc_port_info[board_idx].pio_mask; 728 probe_ent->pio_mask = pdc_port_info[board_idx].pio_mask;
@@ -707,6 +742,10 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
707 742
708 /* notice 4-port boards */ 743 /* notice 4-port boards */
709 switch (board_idx) { 744 switch (board_idx) {
745 case board_40518:
746 /* Override hotplug offset for SATAII150 */
747 hp->hotplug_offset = PDC2_SATA_PLUG_CSR;
748 /* Fall through */
710 case board_20319: 749 case board_20319:
711 probe_ent->n_ports = 4; 750 probe_ent->n_ports = 4;
712 751
@@ -716,6 +755,10 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
716 probe_ent->port[2].scr_addr = base + 0x600; 755 probe_ent->port[2].scr_addr = base + 0x600;
717 probe_ent->port[3].scr_addr = base + 0x700; 756 probe_ent->port[3].scr_addr = base + 0x700;
718 break; 757 break;
758 case board_2057x:
759 /* Override hotplug offset for SATAII150 */
760 hp->hotplug_offset = PDC2_SATA_PLUG_CSR;
761 /* Fall through */
719 case board_2037x: 762 case board_2037x:
720 probe_ent->n_ports = 2; 763 probe_ent->n_ports = 2;
721 break; 764 break;
@@ -741,8 +784,10 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
741 /* initialize adapter */ 784 /* initialize adapter */
742 pdc_host_init(board_idx, probe_ent); 785 pdc_host_init(board_idx, probe_ent);
743 786
744 /* FIXME: check ata_device_add return value */ 787 /* FIXME: Need any other frees than hp? */
745 ata_device_add(probe_ent); 788 if (!ata_device_add(probe_ent))
789 kfree(hp);
790
746 kfree(probe_ent); 791 kfree(probe_ent);
747 792
748 return 0; 793 return 0;
diff --git a/drivers/scsi/sata_qstor.c b/drivers/scsi/sata_qstor.c
index 80480f0fb2b8..9602f43a298e 100644
--- a/drivers/scsi/sata_qstor.c
+++ b/drivers/scsi/sata_qstor.c
@@ -120,7 +120,7 @@ static void qs_host_stop(struct ata_host_set *host_set);
120static void qs_port_stop(struct ata_port *ap); 120static void qs_port_stop(struct ata_port *ap);
121static void qs_phy_reset(struct ata_port *ap); 121static void qs_phy_reset(struct ata_port *ap);
122static void qs_qc_prep(struct ata_queued_cmd *qc); 122static void qs_qc_prep(struct ata_queued_cmd *qc);
123static int qs_qc_issue(struct ata_queued_cmd *qc); 123static unsigned int qs_qc_issue(struct ata_queued_cmd *qc);
124static int qs_check_atapi_dma(struct ata_queued_cmd *qc); 124static int qs_check_atapi_dma(struct ata_queued_cmd *qc);
125static void qs_bmdma_stop(struct ata_queued_cmd *qc); 125static void qs_bmdma_stop(struct ata_queued_cmd *qc);
126static u8 qs_bmdma_status(struct ata_port *ap); 126static u8 qs_bmdma_status(struct ata_port *ap);
@@ -132,11 +132,11 @@ static struct scsi_host_template qs_ata_sht = {
132 .name = DRV_NAME, 132 .name = DRV_NAME,
133 .ioctl = ata_scsi_ioctl, 133 .ioctl = ata_scsi_ioctl,
134 .queuecommand = ata_scsi_queuecmd, 134 .queuecommand = ata_scsi_queuecmd,
135 .eh_timed_out = ata_scsi_timed_out,
135 .eh_strategy_handler = ata_scsi_error, 136 .eh_strategy_handler = ata_scsi_error,
136 .can_queue = ATA_DEF_QUEUE, 137 .can_queue = ATA_DEF_QUEUE,
137 .this_id = ATA_SHT_THIS_ID, 138 .this_id = ATA_SHT_THIS_ID,
138 .sg_tablesize = QS_MAX_PRD, 139 .sg_tablesize = QS_MAX_PRD,
139 .max_sectors = ATA_MAX_SECTORS,
140 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 140 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
141 .emulated = ATA_SHT_EMULATED, 141 .emulated = ATA_SHT_EMULATED,
142 //FIXME .use_clustering = ATA_SHT_USE_CLUSTERING, 142 //FIXME .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -276,8 +276,8 @@ static unsigned int qs_fill_sg(struct ata_queued_cmd *qc)
276 unsigned int nelem; 276 unsigned int nelem;
277 u8 *prd = pp->pkt + QS_CPB_BYTES; 277 u8 *prd = pp->pkt + QS_CPB_BYTES;
278 278
279 assert(qc->__sg != NULL); 279 WARN_ON(qc->__sg == NULL);
280 assert(qc->n_elem > 0 || qc->pad_len > 0); 280 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
281 281
282 nelem = 0; 282 nelem = 0;
283 ata_for_each_sg(sg, qc) { 283 ata_for_each_sg(sg, qc) {
@@ -352,7 +352,7 @@ static inline void qs_packet_start(struct ata_queued_cmd *qc)
352 readl(chan + QS_CCT_CFF); /* flush */ 352 readl(chan + QS_CCT_CFF); /* flush */
353} 353}
354 354
355static int qs_qc_issue(struct ata_queued_cmd *qc) 355static unsigned int qs_qc_issue(struct ata_queued_cmd *qc)
356{ 356{
357 struct qs_port_priv *pp = qc->ap->private_data; 357 struct qs_port_priv *pp = qc->ap->private_data;
358 358
diff --git a/drivers/scsi/sata_sil.c b/drivers/scsi/sata_sil.c
index 17f74d3c10e7..e14ed4ebbeed 100644
--- a/drivers/scsi/sata_sil.c
+++ b/drivers/scsi/sata_sil.c
@@ -135,11 +135,11 @@ static struct scsi_host_template sil_sht = {
135 .name = DRV_NAME, 135 .name = DRV_NAME,
136 .ioctl = ata_scsi_ioctl, 136 .ioctl = ata_scsi_ioctl,
137 .queuecommand = ata_scsi_queuecmd, 137 .queuecommand = ata_scsi_queuecmd,
138 .eh_timed_out = ata_scsi_timed_out,
138 .eh_strategy_handler = ata_scsi_error, 139 .eh_strategy_handler = ata_scsi_error,
139 .can_queue = ATA_DEF_QUEUE, 140 .can_queue = ATA_DEF_QUEUE,
140 .this_id = ATA_SHT_THIS_ID, 141 .this_id = ATA_SHT_THIS_ID,
141 .sg_tablesize = LIBATA_MAX_PRD, 142 .sg_tablesize = LIBATA_MAX_PRD,
142 .max_sectors = ATA_MAX_SECTORS,
143 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 143 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
144 .emulated = ATA_SHT_EMULATED, 144 .emulated = ATA_SHT_EMULATED,
145 .use_clustering = ATA_SHT_USE_CLUSTERING, 145 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -157,7 +157,7 @@ static const struct ata_port_operations sil_ops = {
157 .check_status = ata_check_status, 157 .check_status = ata_check_status,
158 .exec_command = ata_exec_command, 158 .exec_command = ata_exec_command,
159 .dev_select = ata_std_dev_select, 159 .dev_select = ata_std_dev_select,
160 .phy_reset = sata_phy_reset, 160 .probe_reset = ata_std_probe_reset,
161 .post_set_mode = sil_post_set_mode, 161 .post_set_mode = sil_post_set_mode,
162 .bmdma_setup = ata_bmdma_setup, 162 .bmdma_setup = ata_bmdma_setup,
163 .bmdma_start = ata_bmdma_start, 163 .bmdma_start = ata_bmdma_start,
@@ -180,7 +180,7 @@ static const struct ata_port_info sil_port_info[] = {
180 { 180 {
181 .sht = &sil_sht, 181 .sht = &sil_sht,
182 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 182 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
183 ATA_FLAG_SRST | ATA_FLAG_MMIO, 183 ATA_FLAG_MMIO,
184 .pio_mask = 0x1f, /* pio0-4 */ 184 .pio_mask = 0x1f, /* pio0-4 */
185 .mwdma_mask = 0x07, /* mwdma0-2 */ 185 .mwdma_mask = 0x07, /* mwdma0-2 */
186 .udma_mask = 0x3f, /* udma0-5 */ 186 .udma_mask = 0x3f, /* udma0-5 */
@@ -189,8 +189,7 @@ static const struct ata_port_info sil_port_info[] = {
189 { 189 {
190 .sht = &sil_sht, 190 .sht = &sil_sht,
191 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 191 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
192 ATA_FLAG_SRST | ATA_FLAG_MMIO | 192 ATA_FLAG_MMIO | SIL_FLAG_MOD15WRITE,
193 SIL_FLAG_MOD15WRITE,
194 .pio_mask = 0x1f, /* pio0-4 */ 193 .pio_mask = 0x1f, /* pio0-4 */
195 .mwdma_mask = 0x07, /* mwdma0-2 */ 194 .mwdma_mask = 0x07, /* mwdma0-2 */
196 .udma_mask = 0x3f, /* udma0-5 */ 195 .udma_mask = 0x3f, /* udma0-5 */
@@ -199,7 +198,7 @@ static const struct ata_port_info sil_port_info[] = {
199 { 198 {
200 .sht = &sil_sht, 199 .sht = &sil_sht,
201 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 200 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
202 ATA_FLAG_SRST | ATA_FLAG_MMIO, 201 ATA_FLAG_MMIO,
203 .pio_mask = 0x1f, /* pio0-4 */ 202 .pio_mask = 0x1f, /* pio0-4 */
204 .mwdma_mask = 0x07, /* mwdma0-2 */ 203 .mwdma_mask = 0x07, /* mwdma0-2 */
205 .udma_mask = 0x3f, /* udma0-5 */ 204 .udma_mask = 0x3f, /* udma0-5 */
@@ -337,22 +336,12 @@ static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
337static void sil_dev_config(struct ata_port *ap, struct ata_device *dev) 336static void sil_dev_config(struct ata_port *ap, struct ata_device *dev)
338{ 337{
339 unsigned int n, quirks = 0; 338 unsigned int n, quirks = 0;
340 unsigned char model_num[40]; 339 unsigned char model_num[41];
341 const char *s;
342 unsigned int len;
343 340
344 ata_dev_id_string(dev->id, model_num, ATA_ID_PROD_OFS, 341 ata_id_c_string(dev->id, model_num, ATA_ID_PROD_OFS, sizeof(model_num));
345 sizeof(model_num));
346 s = &model_num[0];
347 len = strnlen(s, sizeof(model_num));
348
349 /* ATAPI specifies that empty space is blank-filled; remove blanks */
350 while ((len > 0) && (s[len - 1] == ' '))
351 len--;
352 342
353 for (n = 0; sil_blacklist[n].product; n++) 343 for (n = 0; sil_blacklist[n].product; n++)
354 if (!memcmp(sil_blacklist[n].product, s, 344 if (!strcmp(sil_blacklist[n].product, model_num)) {
355 strlen(sil_blacklist[n].product))) {
356 quirks = sil_blacklist[n].quirk; 345 quirks = sil_blacklist[n].quirk;
357 break; 346 break;
358 } 347 }
@@ -363,16 +352,14 @@ static void sil_dev_config(struct ata_port *ap, struct ata_device *dev)
363 (quirks & SIL_QUIRK_MOD15WRITE))) { 352 (quirks & SIL_QUIRK_MOD15WRITE))) {
364 printk(KERN_INFO "ata%u(%u): applying Seagate errata fix (mod15write workaround)\n", 353 printk(KERN_INFO "ata%u(%u): applying Seagate errata fix (mod15write workaround)\n",
365 ap->id, dev->devno); 354 ap->id, dev->devno);
366 ap->host->max_sectors = 15; 355 dev->max_sectors = 15;
367 ap->host->hostt->max_sectors = 15;
368 dev->flags |= ATA_DFLAG_LOCK_SECTORS;
369 return; 356 return;
370 } 357 }
371 358
372 /* limit to udma5 */ 359 /* limit to udma5 */
373 if (quirks & SIL_QUIRK_UDMA5MAX) { 360 if (quirks & SIL_QUIRK_UDMA5MAX) {
374 printk(KERN_INFO "ata%u(%u): applying Maxtor errata fix %s\n", 361 printk(KERN_INFO "ata%u(%u): applying Maxtor errata fix %s\n",
375 ap->id, dev->devno, s); 362 ap->id, dev->devno, model_num);
376 ap->udma_mask &= ATA_UDMA5; 363 ap->udma_mask &= ATA_UDMA5;
377 return; 364 return;
378 } 365 }
diff --git a/drivers/scsi/sata_sil24.c b/drivers/scsi/sata_sil24.c
index 923130185a9e..a0e35a262156 100644
--- a/drivers/scsi/sata_sil24.c
+++ b/drivers/scsi/sata_sil24.c
@@ -249,9 +249,9 @@ static u8 sil24_check_status(struct ata_port *ap);
249static u32 sil24_scr_read(struct ata_port *ap, unsigned sc_reg); 249static u32 sil24_scr_read(struct ata_port *ap, unsigned sc_reg);
250static void sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val); 250static void sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val);
251static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf); 251static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
252static void sil24_phy_reset(struct ata_port *ap); 252static int sil24_probe_reset(struct ata_port *ap, unsigned int *classes);
253static void sil24_qc_prep(struct ata_queued_cmd *qc); 253static void sil24_qc_prep(struct ata_queued_cmd *qc);
254static int sil24_qc_issue(struct ata_queued_cmd *qc); 254static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc);
255static void sil24_irq_clear(struct ata_port *ap); 255static void sil24_irq_clear(struct ata_port *ap);
256static void sil24_eng_timeout(struct ata_port *ap); 256static void sil24_eng_timeout(struct ata_port *ap);
257static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *regs); 257static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
@@ -280,11 +280,11 @@ static struct scsi_host_template sil24_sht = {
280 .name = DRV_NAME, 280 .name = DRV_NAME,
281 .ioctl = ata_scsi_ioctl, 281 .ioctl = ata_scsi_ioctl,
282 .queuecommand = ata_scsi_queuecmd, 282 .queuecommand = ata_scsi_queuecmd,
283 .eh_timed_out = ata_scsi_timed_out,
283 .eh_strategy_handler = ata_scsi_error, 284 .eh_strategy_handler = ata_scsi_error,
284 .can_queue = ATA_DEF_QUEUE, 285 .can_queue = ATA_DEF_QUEUE,
285 .this_id = ATA_SHT_THIS_ID, 286 .this_id = ATA_SHT_THIS_ID,
286 .sg_tablesize = LIBATA_MAX_PRD, 287 .sg_tablesize = LIBATA_MAX_PRD,
287 .max_sectors = ATA_MAX_SECTORS,
288 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 288 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
289 .emulated = ATA_SHT_EMULATED, 289 .emulated = ATA_SHT_EMULATED,
290 .use_clustering = ATA_SHT_USE_CLUSTERING, 290 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -305,7 +305,7 @@ static const struct ata_port_operations sil24_ops = {
305 305
306 .tf_read = sil24_tf_read, 306 .tf_read = sil24_tf_read,
307 307
308 .phy_reset = sil24_phy_reset, 308 .probe_reset = sil24_probe_reset,
309 309
310 .qc_prep = sil24_qc_prep, 310 .qc_prep = sil24_qc_prep,
311 .qc_issue = sil24_qc_issue, 311 .qc_issue = sil24_qc_issue,
@@ -335,8 +335,8 @@ static struct ata_port_info sil24_port_info[] = {
335 { 335 {
336 .sht = &sil24_sht, 336 .sht = &sil24_sht,
337 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 337 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
338 ATA_FLAG_SRST | ATA_FLAG_MMIO | 338 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
339 ATA_FLAG_PIO_DMA | SIL24_NPORTS2FLAG(4), 339 SIL24_NPORTS2FLAG(4),
340 .pio_mask = 0x1f, /* pio0-4 */ 340 .pio_mask = 0x1f, /* pio0-4 */
341 .mwdma_mask = 0x07, /* mwdma0-2 */ 341 .mwdma_mask = 0x07, /* mwdma0-2 */
342 .udma_mask = 0x3f, /* udma0-5 */ 342 .udma_mask = 0x3f, /* udma0-5 */
@@ -346,8 +346,8 @@ static struct ata_port_info sil24_port_info[] = {
346 { 346 {
347 .sht = &sil24_sht, 347 .sht = &sil24_sht,
348 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 348 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
349 ATA_FLAG_SRST | ATA_FLAG_MMIO | 349 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
350 ATA_FLAG_PIO_DMA | SIL24_NPORTS2FLAG(2), 350 SIL24_NPORTS2FLAG(2),
351 .pio_mask = 0x1f, /* pio0-4 */ 351 .pio_mask = 0x1f, /* pio0-4 */
352 .mwdma_mask = 0x07, /* mwdma0-2 */ 352 .mwdma_mask = 0x07, /* mwdma0-2 */
353 .udma_mask = 0x3f, /* udma0-5 */ 353 .udma_mask = 0x3f, /* udma0-5 */
@@ -357,8 +357,8 @@ static struct ata_port_info sil24_port_info[] = {
357 { 357 {
358 .sht = &sil24_sht, 358 .sht = &sil24_sht,
359 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 359 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
360 ATA_FLAG_SRST | ATA_FLAG_MMIO | 360 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
361 ATA_FLAG_PIO_DMA | SIL24_NPORTS2FLAG(1), 361 SIL24_NPORTS2FLAG(1),
362 .pio_mask = 0x1f, /* pio0-4 */ 362 .pio_mask = 0x1f, /* pio0-4 */
363 .mwdma_mask = 0x07, /* mwdma0-2 */ 363 .mwdma_mask = 0x07, /* mwdma0-2 */
364 .udma_mask = 0x3f, /* udma0-5 */ 364 .udma_mask = 0x3f, /* udma0-5 */
@@ -370,7 +370,7 @@ static void sil24_dev_config(struct ata_port *ap, struct ata_device *dev)
370{ 370{
371 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr; 371 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
372 372
373 if (ap->cdb_len == 16) 373 if (dev->cdb_len == 16)
374 writel(PORT_CS_CDB16, port + PORT_CTRL_STAT); 374 writel(PORT_CS_CDB16, port + PORT_CTRL_STAT);
375 else 375 else
376 writel(PORT_CS_CDB16, port + PORT_CTRL_CLR); 376 writel(PORT_CS_CDB16, port + PORT_CTRL_CLR);
@@ -427,7 +427,8 @@ static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
427 *tf = pp->tf; 427 *tf = pp->tf;
428} 428}
429 429
430static int sil24_issue_SRST(struct ata_port *ap) 430static int sil24_softreset(struct ata_port *ap, int verbose,
431 unsigned int *class)
431{ 432{
432 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr; 433 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
433 struct sil24_port_priv *pp = ap->private_data; 434 struct sil24_port_priv *pp = ap->private_data;
@@ -436,6 +437,8 @@ static int sil24_issue_SRST(struct ata_port *ap)
436 u32 irq_enable, irq_stat; 437 u32 irq_enable, irq_stat;
437 int cnt; 438 int cnt;
438 439
440 DPRINTK("ENTER\n");
441
439 /* temporarily turn off IRQs during SRST */ 442 /* temporarily turn off IRQs during SRST */
440 irq_enable = readl(port + PORT_IRQ_ENABLE_SET); 443 irq_enable = readl(port + PORT_IRQ_ENABLE_SET);
441 writel(irq_enable, port + PORT_IRQ_ENABLE_CLR); 444 writel(irq_enable, port + PORT_IRQ_ENABLE_CLR);
@@ -465,30 +468,36 @@ static int sil24_issue_SRST(struct ata_port *ap)
465 /* restore IRQs */ 468 /* restore IRQs */
466 writel(irq_enable, port + PORT_IRQ_ENABLE_SET); 469 writel(irq_enable, port + PORT_IRQ_ENABLE_SET);
467 470
468 if (!(irq_stat & PORT_IRQ_COMPLETE)) 471 if (sata_dev_present(ap)) {
469 return -1; 472 if (!(irq_stat & PORT_IRQ_COMPLETE)) {
473 DPRINTK("EXIT, srst failed\n");
474 return -EIO;
475 }
470 476
471 /* update TF */ 477 sil24_update_tf(ap);
472 sil24_update_tf(ap); 478 *class = ata_dev_classify(&pp->tf);
479 }
480 if (*class == ATA_DEV_UNKNOWN)
481 *class = ATA_DEV_NONE;
482
483 DPRINTK("EXIT, class=%u\n", *class);
473 return 0; 484 return 0;
474} 485}
475 486
476static void sil24_phy_reset(struct ata_port *ap) 487static int sil24_hardreset(struct ata_port *ap, int verbose,
488 unsigned int *class)
477{ 489{
478 struct sil24_port_priv *pp = ap->private_data; 490 unsigned int dummy_class;
479
480 __sata_phy_reset(ap);
481 if (ap->flags & ATA_FLAG_PORT_DISABLED)
482 return;
483 491
484 if (sil24_issue_SRST(ap) < 0) { 492 /* sil24 doesn't report device signature after hard reset */
485 printk(KERN_ERR DRV_NAME 493 return sata_std_hardreset(ap, verbose, &dummy_class);
486 " ata%u: SRST failed, disabling port\n", ap->id); 494}
487 ap->ops->port_disable(ap);
488 return;
489 }
490 495
491 ap->device->class = ata_dev_classify(&pp->tf); 496static int sil24_probe_reset(struct ata_port *ap, unsigned int *classes)
497{
498 return ata_drive_probe_reset(ap, ata_std_probeinit,
499 sil24_softreset, sil24_hardreset,
500 ata_std_postreset, classes);
492} 501}
493 502
494static inline void sil24_fill_sg(struct ata_queued_cmd *qc, 503static inline void sil24_fill_sg(struct ata_queued_cmd *qc,
@@ -533,7 +542,7 @@ static void sil24_qc_prep(struct ata_queued_cmd *qc)
533 prb = &cb->atapi.prb; 542 prb = &cb->atapi.prb;
534 sge = cb->atapi.sge; 543 sge = cb->atapi.sge;
535 memset(cb->atapi.cdb, 0, 32); 544 memset(cb->atapi.cdb, 0, 32);
536 memcpy(cb->atapi.cdb, qc->cdb, ap->cdb_len); 545 memcpy(cb->atapi.cdb, qc->cdb, qc->dev->cdb_len);
537 546
538 if (qc->tf.protocol != ATA_PROT_ATAPI_NODATA) { 547 if (qc->tf.protocol != ATA_PROT_ATAPI_NODATA) {
539 if (qc->tf.flags & ATA_TFLAG_WRITE) 548 if (qc->tf.flags & ATA_TFLAG_WRITE)
@@ -557,7 +566,7 @@ static void sil24_qc_prep(struct ata_queued_cmd *qc)
557 sil24_fill_sg(qc, sge); 566 sil24_fill_sg(qc, sge);
558} 567}
559 568
560static int sil24_qc_issue(struct ata_queued_cmd *qc) 569static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc)
561{ 570{
562 struct ata_port *ap = qc->ap; 571 struct ata_port *ap = qc->ap;
563 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr; 572 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
@@ -638,23 +647,10 @@ static void sil24_eng_timeout(struct ata_port *ap)
638 struct ata_queued_cmd *qc; 647 struct ata_queued_cmd *qc;
639 648
640 qc = ata_qc_from_tag(ap, ap->active_tag); 649 qc = ata_qc_from_tag(ap, ap->active_tag);
641 if (!qc) {
642 printk(KERN_ERR "ata%u: BUG: timeout without command\n",
643 ap->id);
644 return;
645 }
646 650
647 /*
648 * hack alert! We cannot use the supplied completion
649 * function from inside the ->eh_strategy_handler() thread.
650 * libata is the only user of ->eh_strategy_handler() in
651 * any kernel, so the default scsi_done() assumes it is
652 * not being called from the SCSI EH.
653 */
654 printk(KERN_ERR "ata%u: command timeout\n", ap->id); 651 printk(KERN_ERR "ata%u: command timeout\n", ap->id);
655 qc->scsidone = scsi_finish_command; 652 qc->err_mask |= AC_ERR_TIMEOUT;
656 qc->err_mask |= AC_ERR_OTHER; 653 ata_eh_qc_complete(qc);
657 ata_qc_complete(qc);
658 654
659 sil24_reset_controller(ap); 655 sil24_reset_controller(ap);
660} 656}
diff --git a/drivers/scsi/sata_sis.c b/drivers/scsi/sata_sis.c
index 2df8c5632ac3..7fd45f86de99 100644
--- a/drivers/scsi/sata_sis.c
+++ b/drivers/scsi/sata_sis.c
@@ -87,11 +87,11 @@ static struct scsi_host_template sis_sht = {
87 .name = DRV_NAME, 87 .name = DRV_NAME,
88 .ioctl = ata_scsi_ioctl, 88 .ioctl = ata_scsi_ioctl,
89 .queuecommand = ata_scsi_queuecmd, 89 .queuecommand = ata_scsi_queuecmd,
90 .eh_timed_out = ata_scsi_timed_out,
90 .eh_strategy_handler = ata_scsi_error, 91 .eh_strategy_handler = ata_scsi_error,
91 .can_queue = ATA_DEF_QUEUE, 92 .can_queue = ATA_DEF_QUEUE,
92 .this_id = ATA_SHT_THIS_ID, 93 .this_id = ATA_SHT_THIS_ID,
93 .sg_tablesize = ATA_MAX_PRD, 94 .sg_tablesize = ATA_MAX_PRD,
94 .max_sectors = ATA_MAX_SECTORS,
95 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 95 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
96 .emulated = ATA_SHT_EMULATED, 96 .emulated = ATA_SHT_EMULATED,
97 .use_clustering = ATA_SHT_USE_CLUSTERING, 97 .use_clustering = ATA_SHT_USE_CLUSTERING,
diff --git a/drivers/scsi/sata_svw.c b/drivers/scsi/sata_svw.c
index d8472563fde8..4aaccd53e736 100644
--- a/drivers/scsi/sata_svw.c
+++ b/drivers/scsi/sata_svw.c
@@ -288,11 +288,11 @@ static struct scsi_host_template k2_sata_sht = {
288 .name = DRV_NAME, 288 .name = DRV_NAME,
289 .ioctl = ata_scsi_ioctl, 289 .ioctl = ata_scsi_ioctl,
290 .queuecommand = ata_scsi_queuecmd, 290 .queuecommand = ata_scsi_queuecmd,
291 .eh_timed_out = ata_scsi_timed_out,
291 .eh_strategy_handler = ata_scsi_error, 292 .eh_strategy_handler = ata_scsi_error,
292 .can_queue = ATA_DEF_QUEUE, 293 .can_queue = ATA_DEF_QUEUE,
293 .this_id = ATA_SHT_THIS_ID, 294 .this_id = ATA_SHT_THIS_ID,
294 .sg_tablesize = LIBATA_MAX_PRD, 295 .sg_tablesize = LIBATA_MAX_PRD,
295 .max_sectors = ATA_MAX_SECTORS,
296 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 296 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
297 .emulated = ATA_SHT_EMULATED, 297 .emulated = ATA_SHT_EMULATED,
298 .use_clustering = ATA_SHT_USE_CLUSTERING, 298 .use_clustering = ATA_SHT_USE_CLUSTERING,
diff --git a/drivers/scsi/sata_sx4.c b/drivers/scsi/sata_sx4.c
index bc87c16c80d2..9f8a76815402 100644
--- a/drivers/scsi/sata_sx4.c
+++ b/drivers/scsi/sata_sx4.c
@@ -174,7 +174,7 @@ static void pdc20621_get_from_dimm(struct ata_probe_ent *pe,
174static void pdc20621_put_to_dimm(struct ata_probe_ent *pe, 174static void pdc20621_put_to_dimm(struct ata_probe_ent *pe,
175 void *psource, u32 offset, u32 size); 175 void *psource, u32 offset, u32 size);
176static void pdc20621_irq_clear(struct ata_port *ap); 176static void pdc20621_irq_clear(struct ata_port *ap);
177static int pdc20621_qc_issue_prot(struct ata_queued_cmd *qc); 177static unsigned int pdc20621_qc_issue_prot(struct ata_queued_cmd *qc);
178 178
179 179
180static struct scsi_host_template pdc_sata_sht = { 180static struct scsi_host_template pdc_sata_sht = {
@@ -182,11 +182,11 @@ static struct scsi_host_template pdc_sata_sht = {
182 .name = DRV_NAME, 182 .name = DRV_NAME,
183 .ioctl = ata_scsi_ioctl, 183 .ioctl = ata_scsi_ioctl,
184 .queuecommand = ata_scsi_queuecmd, 184 .queuecommand = ata_scsi_queuecmd,
185 .eh_timed_out = ata_scsi_timed_out,
185 .eh_strategy_handler = ata_scsi_error, 186 .eh_strategy_handler = ata_scsi_error,
186 .can_queue = ATA_DEF_QUEUE, 187 .can_queue = ATA_DEF_QUEUE,
187 .this_id = ATA_SHT_THIS_ID, 188 .this_id = ATA_SHT_THIS_ID,
188 .sg_tablesize = LIBATA_MAX_PRD, 189 .sg_tablesize = LIBATA_MAX_PRD,
189 .max_sectors = ATA_MAX_SECTORS,
190 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 190 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
191 .emulated = ATA_SHT_EMULATED, 191 .emulated = ATA_SHT_EMULATED,
192 .use_clustering = ATA_SHT_USE_CLUSTERING, 192 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -460,7 +460,7 @@ static void pdc20621_dma_prep(struct ata_queued_cmd *qc)
460 unsigned int i, idx, total_len = 0, sgt_len; 460 unsigned int i, idx, total_len = 0, sgt_len;
461 u32 *buf = (u32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ]; 461 u32 *buf = (u32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ];
462 462
463 assert(qc->flags & ATA_QCFLAG_DMAMAP); 463 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
464 464
465 VPRINTK("ata%u: ENTER\n", ap->id); 465 VPRINTK("ata%u: ENTER\n", ap->id);
466 466
@@ -678,7 +678,7 @@ static void pdc20621_packet_start(struct ata_queued_cmd *qc)
678 } 678 }
679} 679}
680 680
681static int pdc20621_qc_issue_prot(struct ata_queued_cmd *qc) 681static unsigned int pdc20621_qc_issue_prot(struct ata_queued_cmd *qc)
682{ 682{
683 switch (qc->tf.protocol) { 683 switch (qc->tf.protocol) {
684 case ATA_PROT_DMA: 684 case ATA_PROT_DMA:
@@ -866,26 +866,12 @@ static void pdc_eng_timeout(struct ata_port *ap)
866 spin_lock_irqsave(&host_set->lock, flags); 866 spin_lock_irqsave(&host_set->lock, flags);
867 867
868 qc = ata_qc_from_tag(ap, ap->active_tag); 868 qc = ata_qc_from_tag(ap, ap->active_tag);
869 if (!qc) {
870 printk(KERN_ERR "ata%u: BUG: timeout without command\n",
871 ap->id);
872 goto out;
873 }
874
875 /* hack alert! We cannot use the supplied completion
876 * function from inside the ->eh_strategy_handler() thread.
877 * libata is the only user of ->eh_strategy_handler() in
878 * any kernel, so the default scsi_done() assumes it is
879 * not being called from the SCSI EH.
880 */
881 qc->scsidone = scsi_finish_command;
882 869
883 switch (qc->tf.protocol) { 870 switch (qc->tf.protocol) {
884 case ATA_PROT_DMA: 871 case ATA_PROT_DMA:
885 case ATA_PROT_NODATA: 872 case ATA_PROT_NODATA:
886 printk(KERN_ERR "ata%u: command timeout\n", ap->id); 873 printk(KERN_ERR "ata%u: command timeout\n", ap->id);
887 qc->err_mask |= __ac_err_mask(ata_wait_idle(ap)); 874 qc->err_mask |= __ac_err_mask(ata_wait_idle(ap));
888 ata_qc_complete(qc);
889 break; 875 break;
890 876
891 default: 877 default:
@@ -895,12 +881,11 @@ static void pdc_eng_timeout(struct ata_port *ap)
895 ap->id, qc->tf.command, drv_stat); 881 ap->id, qc->tf.command, drv_stat);
896 882
897 qc->err_mask |= ac_err_mask(drv_stat); 883 qc->err_mask |= ac_err_mask(drv_stat);
898 ata_qc_complete(qc);
899 break; 884 break;
900 } 885 }
901 886
902out:
903 spin_unlock_irqrestore(&host_set->lock, flags); 887 spin_unlock_irqrestore(&host_set->lock, flags);
888 ata_eh_qc_complete(qc);
904 DPRINTK("EXIT\n"); 889 DPRINTK("EXIT\n");
905} 890}
906 891
diff --git a/drivers/scsi/sata_uli.c b/drivers/scsi/sata_uli.c
index 9635ca700977..37a487b7d655 100644
--- a/drivers/scsi/sata_uli.c
+++ b/drivers/scsi/sata_uli.c
@@ -75,11 +75,11 @@ static struct scsi_host_template uli_sht = {
75 .name = DRV_NAME, 75 .name = DRV_NAME,
76 .ioctl = ata_scsi_ioctl, 76 .ioctl = ata_scsi_ioctl,
77 .queuecommand = ata_scsi_queuecmd, 77 .queuecommand = ata_scsi_queuecmd,
78 .eh_timed_out = ata_scsi_timed_out,
78 .eh_strategy_handler = ata_scsi_error, 79 .eh_strategy_handler = ata_scsi_error,
79 .can_queue = ATA_DEF_QUEUE, 80 .can_queue = ATA_DEF_QUEUE,
80 .this_id = ATA_SHT_THIS_ID, 81 .this_id = ATA_SHT_THIS_ID,
81 .sg_tablesize = LIBATA_MAX_PRD, 82 .sg_tablesize = LIBATA_MAX_PRD,
82 .max_sectors = ATA_MAX_SECTORS,
83 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 83 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
84 .emulated = ATA_SHT_EMULATED, 84 .emulated = ATA_SHT_EMULATED,
85 .use_clustering = ATA_SHT_USE_CLUSTERING, 85 .use_clustering = ATA_SHT_USE_CLUSTERING,
diff --git a/drivers/scsi/sata_via.c b/drivers/scsi/sata_via.c
index 6d5b0a794cfd..ff65a0b0457f 100644
--- a/drivers/scsi/sata_via.c
+++ b/drivers/scsi/sata_via.c
@@ -94,11 +94,11 @@ static struct scsi_host_template svia_sht = {
94 .name = DRV_NAME, 94 .name = DRV_NAME,
95 .ioctl = ata_scsi_ioctl, 95 .ioctl = ata_scsi_ioctl,
96 .queuecommand = ata_scsi_queuecmd, 96 .queuecommand = ata_scsi_queuecmd,
97 .eh_timed_out = ata_scsi_timed_out,
97 .eh_strategy_handler = ata_scsi_error, 98 .eh_strategy_handler = ata_scsi_error,
98 .can_queue = ATA_DEF_QUEUE, 99 .can_queue = ATA_DEF_QUEUE,
99 .this_id = ATA_SHT_THIS_ID, 100 .this_id = ATA_SHT_THIS_ID,
100 .sg_tablesize = LIBATA_MAX_PRD, 101 .sg_tablesize = LIBATA_MAX_PRD,
101 .max_sectors = ATA_MAX_SECTORS,
102 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 102 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
103 .emulated = ATA_SHT_EMULATED, 103 .emulated = ATA_SHT_EMULATED,
104 .use_clustering = ATA_SHT_USE_CLUSTERING, 104 .use_clustering = ATA_SHT_USE_CLUSTERING,
diff --git a/drivers/scsi/sata_vsc.c b/drivers/scsi/sata_vsc.c
index e484e8db6810..b574379a7a82 100644
--- a/drivers/scsi/sata_vsc.c
+++ b/drivers/scsi/sata_vsc.c
@@ -251,11 +251,11 @@ static struct scsi_host_template vsc_sata_sht = {
251 .name = DRV_NAME, 251 .name = DRV_NAME,
252 .ioctl = ata_scsi_ioctl, 252 .ioctl = ata_scsi_ioctl,
253 .queuecommand = ata_scsi_queuecmd, 253 .queuecommand = ata_scsi_queuecmd,
254 .eh_timed_out = ata_scsi_timed_out,
254 .eh_strategy_handler = ata_scsi_error, 255 .eh_strategy_handler = ata_scsi_error,
255 .can_queue = ATA_DEF_QUEUE, 256 .can_queue = ATA_DEF_QUEUE,
256 .this_id = ATA_SHT_THIS_ID, 257 .this_id = ATA_SHT_THIS_ID,
257 .sg_tablesize = LIBATA_MAX_PRD, 258 .sg_tablesize = LIBATA_MAX_PRD,
258 .max_sectors = ATA_MAX_SECTORS,
259 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 259 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
260 .emulated = ATA_SHT_EMULATED, 260 .emulated = ATA_SHT_EMULATED,
261 .use_clustering = ATA_SHT_USE_CLUSTERING, 261 .use_clustering = ATA_SHT_USE_CLUSTERING,
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 5cc97b721661..50f8057be75d 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -584,8 +584,7 @@ static int scsi_request_sense(struct scsi_cmnd *scmd)
584 * keep a list of pending commands for final completion, and once we 584 * keep a list of pending commands for final completion, and once we
585 * are ready to leave error handling we handle completion for real. 585 * are ready to leave error handling we handle completion for real.
586 **/ 586 **/
587static void scsi_eh_finish_cmd(struct scsi_cmnd *scmd, 587void scsi_eh_finish_cmd(struct scsi_cmnd *scmd, struct list_head *done_q)
588 struct list_head *done_q)
589{ 588{
590 scmd->device->host->host_failed--; 589 scmd->device->host->host_failed--;
591 scmd->eh_eflags = 0; 590 scmd->eh_eflags = 0;
@@ -597,6 +596,7 @@ static void scsi_eh_finish_cmd(struct scsi_cmnd *scmd,
597 scsi_setup_cmd_retry(scmd); 596 scsi_setup_cmd_retry(scmd);
598 list_move_tail(&scmd->eh_entry, done_q); 597 list_move_tail(&scmd->eh_entry, done_q);
599} 598}
599EXPORT_SYMBOL(scsi_eh_finish_cmd);
600 600
601/** 601/**
602 * scsi_eh_get_sense - Get device sense data. 602 * scsi_eh_get_sense - Get device sense data.
@@ -1425,7 +1425,7 @@ static void scsi_eh_ready_devs(struct Scsi_Host *shost,
1425 * @done_q: list_head of processed commands. 1425 * @done_q: list_head of processed commands.
1426 * 1426 *
1427 **/ 1427 **/
1428static void scsi_eh_flush_done_q(struct list_head *done_q) 1428void scsi_eh_flush_done_q(struct list_head *done_q)
1429{ 1429{
1430 struct scsi_cmnd *scmd, *next; 1430 struct scsi_cmnd *scmd, *next;
1431 1431
@@ -1454,6 +1454,7 @@ static void scsi_eh_flush_done_q(struct list_head *done_q)
1454 } 1454 }
1455 } 1455 }
1456} 1456}
1457EXPORT_SYMBOL(scsi_eh_flush_done_q);
1457 1458
1458/** 1459/**
1459 * scsi_unjam_host - Attempt to fix a host which has a cmd that failed. 1460 * scsi_unjam_host - Attempt to fix a host which has a cmd that failed.
diff --git a/include/linux/ata.h b/include/linux/ata.h
index 94f77cce27fa..b02a16c435e7 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -267,6 +267,16 @@ struct ata_taskfile {
267 ((u64) (id)[(n) + 1] << 16) | \ 267 ((u64) (id)[(n) + 1] << 16) | \
268 ((u64) (id)[(n) + 0]) ) 268 ((u64) (id)[(n) + 0]) )
269 269
270static inline unsigned int ata_id_major_version(const u16 *id)
271{
272 unsigned int mver;
273
274 for (mver = 14; mver >= 1; mver--)
275 if (id[ATA_ID_MAJOR_VER] & (1 << mver))
276 break;
277 return mver;
278}
279
270static inline int ata_id_current_chs_valid(const u16 *id) 280static inline int ata_id_current_chs_valid(const u16 *id)
271{ 281{
272 /* For ATA-1 devices, if the INITIALIZE DEVICE PARAMETERS command 282 /* For ATA-1 devices, if the INITIALIZE DEVICE PARAMETERS command
@@ -302,4 +312,16 @@ static inline int ata_ok(u8 status)
302 == ATA_DRDY); 312 == ATA_DRDY);
303} 313}
304 314
315static inline int lba_28_ok(u64 block, u32 n_block)
316{
317 /* check the ending block number */
318 return ((block + n_block - 1) < ((u64)1 << 28)) && (n_block <= 256);
319}
320
321static inline int lba_48_ok(u64 block, u32 n_block)
322{
323 /* check the ending block number */
324 return ((block + n_block - 1) < ((u64)1 << 48)) && (n_block <= 65536);
325}
326
305#endif /* __LINUX_ATA_H__ */ 327#endif /* __LINUX_ATA_H__ */
diff --git a/include/linux/libata.h b/include/linux/libata.h
index c91be5e64ede..66b6847225df 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -35,7 +35,8 @@
35#include <linux/workqueue.h> 35#include <linux/workqueue.h>
36 36
37/* 37/*
38 * compile-time options 38 * compile-time options: to be removed as soon as all the drivers are
39 * converted to the new debugging mechanism
39 */ 40 */
40#undef ATA_DEBUG /* debugging output */ 41#undef ATA_DEBUG /* debugging output */
41#undef ATA_VERBOSE_DEBUG /* yet more debugging output */ 42#undef ATA_VERBOSE_DEBUG /* yet more debugging output */
@@ -61,15 +62,37 @@
61 62
62#define BPRINTK(fmt, args...) if (ap->flags & ATA_FLAG_DEBUGMSG) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args) 63#define BPRINTK(fmt, args...) if (ap->flags & ATA_FLAG_DEBUGMSG) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args)
63 64
64#ifdef ATA_NDEBUG 65/* NEW: debug levels */
65#define assert(expr) 66#define HAVE_LIBATA_MSG 1
66#else 67
67#define assert(expr) \ 68enum {
68 if(unlikely(!(expr))) { \ 69 ATA_MSG_DRV = 0x0001,
69 printk(KERN_ERR "Assertion failed! %s,%s,%s,line=%d\n", \ 70 ATA_MSG_INFO = 0x0002,
70 #expr,__FILE__,__FUNCTION__,__LINE__); \ 71 ATA_MSG_PROBE = 0x0004,
71 } 72 ATA_MSG_WARN = 0x0008,
72#endif 73 ATA_MSG_MALLOC = 0x0010,
74 ATA_MSG_CTL = 0x0020,
75 ATA_MSG_INTR = 0x0040,
76 ATA_MSG_ERR = 0x0080,
77};
78
79#define ata_msg_drv(p) ((p)->msg_enable & ATA_MSG_DRV)
80#define ata_msg_info(p) ((p)->msg_enable & ATA_MSG_INFO)
81#define ata_msg_probe(p) ((p)->msg_enable & ATA_MSG_PROBE)
82#define ata_msg_warn(p) ((p)->msg_enable & ATA_MSG_WARN)
83#define ata_msg_malloc(p) ((p)->msg_enable & ATA_MSG_MALLOC)
84#define ata_msg_ctl(p) ((p)->msg_enable & ATA_MSG_CTL)
85#define ata_msg_intr(p) ((p)->msg_enable & ATA_MSG_INTR)
86#define ata_msg_err(p) ((p)->msg_enable & ATA_MSG_ERR)
87
88static inline u32 ata_msg_init(int dval, int default_msg_enable_bits)
89{
90 if (dval < 0 || dval >= (sizeof(u32) * 8))
91 return default_msg_enable_bits; /* should be 0x1 - only driver info msgs */
92 if (!dval)
93 return 0;
94 return (1 << dval) - 1;
95}
73 96
74/* defines only for the constants which don't work well as enums */ 97/* defines only for the constants which don't work well as enums */
75#define ATA_TAG_POISON 0xfafbfcfdU 98#define ATA_TAG_POISON 0xfafbfcfdU
@@ -99,8 +122,7 @@ enum {
99 /* struct ata_device stuff */ 122 /* struct ata_device stuff */
100 ATA_DFLAG_LBA48 = (1 << 0), /* device supports LBA48 */ 123 ATA_DFLAG_LBA48 = (1 << 0), /* device supports LBA48 */
101 ATA_DFLAG_PIO = (1 << 1), /* device currently in PIO mode */ 124 ATA_DFLAG_PIO = (1 << 1), /* device currently in PIO mode */
102 ATA_DFLAG_LOCK_SECTORS = (1 << 2), /* don't adjust max_sectors */ 125 ATA_DFLAG_LBA = (1 << 2), /* device supports LBA */
103 ATA_DFLAG_LBA = (1 << 3), /* device supports LBA */
104 126
105 ATA_DEV_UNKNOWN = 0, /* unknown device */ 127 ATA_DEV_UNKNOWN = 0, /* unknown device */
106 ATA_DEV_ATA = 1, /* ATA device */ 128 ATA_DEV_ATA = 1, /* ATA device */
@@ -115,9 +137,9 @@ enum {
115 ATA_FLAG_PORT_DISABLED = (1 << 2), /* port is disabled, ignore it */ 137 ATA_FLAG_PORT_DISABLED = (1 << 2), /* port is disabled, ignore it */
116 ATA_FLAG_SATA = (1 << 3), 138 ATA_FLAG_SATA = (1 << 3),
117 ATA_FLAG_NO_LEGACY = (1 << 4), /* no legacy mode check */ 139 ATA_FLAG_NO_LEGACY = (1 << 4), /* no legacy mode check */
118 ATA_FLAG_SRST = (1 << 5), /* use ATA SRST, not E.D.D. */ 140 ATA_FLAG_SRST = (1 << 5), /* (obsolete) use ATA SRST, not E.D.D. */
119 ATA_FLAG_MMIO = (1 << 6), /* use MMIO, not PIO */ 141 ATA_FLAG_MMIO = (1 << 6), /* use MMIO, not PIO */
120 ATA_FLAG_SATA_RESET = (1 << 7), /* use COMRESET */ 142 ATA_FLAG_SATA_RESET = (1 << 7), /* (obsolete) use COMRESET */
121 ATA_FLAG_PIO_DMA = (1 << 8), /* PIO cmds via DMA */ 143 ATA_FLAG_PIO_DMA = (1 << 8), /* PIO cmds via DMA */
122 ATA_FLAG_NOINTR = (1 << 9), /* FIXME: Remove this once 144 ATA_FLAG_NOINTR = (1 << 9), /* FIXME: Remove this once
123 * proper HSM is in place. */ 145 * proper HSM is in place. */
@@ -129,10 +151,14 @@ enum {
129 ATA_FLAG_PIO_LBA48 = (1 << 13), /* Host DMA engine is LBA28 only */ 151 ATA_FLAG_PIO_LBA48 = (1 << 13), /* Host DMA engine is LBA28 only */
130 ATA_FLAG_IRQ_MASK = (1 << 14), /* Mask IRQ in PIO xfers */ 152 ATA_FLAG_IRQ_MASK = (1 << 14), /* Mask IRQ in PIO xfers */
131 153
154 ATA_FLAG_FLUSH_PIO_TASK = (1 << 15), /* Flush PIO task */
155 ATA_FLAG_IN_EH = (1 << 16), /* EH in progress */
156
132 ATA_QCFLAG_ACTIVE = (1 << 1), /* cmd not yet ack'd to scsi lyer */ 157 ATA_QCFLAG_ACTIVE = (1 << 1), /* cmd not yet ack'd to scsi lyer */
133 ATA_QCFLAG_SG = (1 << 3), /* have s/g table? */ 158 ATA_QCFLAG_SG = (1 << 3), /* have s/g table? */
134 ATA_QCFLAG_SINGLE = (1 << 4), /* no s/g, just a single buffer */ 159 ATA_QCFLAG_SINGLE = (1 << 4), /* no s/g, just a single buffer */
135 ATA_QCFLAG_DMAMAP = ATA_QCFLAG_SG | ATA_QCFLAG_SINGLE, 160 ATA_QCFLAG_DMAMAP = ATA_QCFLAG_SG | ATA_QCFLAG_SINGLE,
161 ATA_QCFLAG_EH_SCHEDULED = (1 << 5), /* EH scheduled */
136 162
137 /* various lengths of time */ 163 /* various lengths of time */
138 ATA_TMOUT_EDD = 5 * HZ, /* heuristic */ 164 ATA_TMOUT_EDD = 5 * HZ, /* heuristic */
@@ -189,10 +215,15 @@ enum hsm_task_states {
189}; 215};
190 216
191enum ata_completion_errors { 217enum ata_completion_errors {
192 AC_ERR_OTHER = (1 << 0), 218 AC_ERR_DEV = (1 << 0), /* device reported error */
193 AC_ERR_DEV = (1 << 1), 219 AC_ERR_HSM = (1 << 1), /* host state machine violation */
194 AC_ERR_ATA_BUS = (1 << 2), 220 AC_ERR_TIMEOUT = (1 << 2), /* timeout */
195 AC_ERR_HOST_BUS = (1 << 3), 221 AC_ERR_MEDIA = (1 << 3), /* media error */
222 AC_ERR_ATA_BUS = (1 << 4), /* ATA bus error */
223 AC_ERR_HOST_BUS = (1 << 5), /* host bus error */
224 AC_ERR_SYSTEM = (1 << 6), /* system error */
225 AC_ERR_INVALID = (1 << 7), /* invalid argument */
226 AC_ERR_OTHER = (1 << 8), /* unknown */
196}; 227};
197 228
198/* forward declarations */ 229/* forward declarations */
@@ -202,7 +233,10 @@ struct ata_port;
202struct ata_queued_cmd; 233struct ata_queued_cmd;
203 234
204/* typedefs */ 235/* typedefs */
205typedef int (*ata_qc_cb_t) (struct ata_queued_cmd *qc); 236typedef void (*ata_qc_cb_t) (struct ata_queued_cmd *qc);
237typedef void (*ata_probeinit_fn_t)(struct ata_port *);
238typedef int (*ata_reset_fn_t)(struct ata_port *, int, unsigned int *);
239typedef void (*ata_postreset_fn_t)(struct ata_port *ap, unsigned int *);
206 240
207struct ata_ioports { 241struct ata_ioports {
208 unsigned long cmd_addr; 242 unsigned long cmd_addr;
@@ -313,6 +347,8 @@ struct ata_device {
313 347
314 unsigned int multi_count; /* sectors count for 348 unsigned int multi_count; /* sectors count for
315 READ/WRITE MULTIPLE */ 349 READ/WRITE MULTIPLE */
350 unsigned int max_sectors; /* per-device max sectors */
351 unsigned int cdb_len;
316 352
317 /* for CHS addressing */ 353 /* for CHS addressing */
318 u16 cylinders; /* Number of cylinders */ 354 u16 cylinders; /* Number of cylinders */
@@ -342,7 +378,6 @@ struct ata_port {
342 unsigned int mwdma_mask; 378 unsigned int mwdma_mask;
343 unsigned int udma_mask; 379 unsigned int udma_mask;
344 unsigned int cbl; /* cable type; ATA_CBL_xxx */ 380 unsigned int cbl; /* cable type; ATA_CBL_xxx */
345 unsigned int cdb_len;
346 381
347 struct ata_device device[ATA_MAX_DEVICES]; 382 struct ata_device device[ATA_MAX_DEVICES];
348 383
@@ -359,6 +394,9 @@ struct ata_port {
359 unsigned int hsm_task_state; 394 unsigned int hsm_task_state;
360 unsigned long pio_task_timeout; 395 unsigned long pio_task_timeout;
361 396
397 u32 msg_enable;
398 struct list_head eh_done_q;
399
362 void *private_data; 400 void *private_data;
363}; 401};
364 402
@@ -378,7 +416,9 @@ struct ata_port_operations {
378 u8 (*check_altstatus)(struct ata_port *ap); 416 u8 (*check_altstatus)(struct ata_port *ap);
379 void (*dev_select)(struct ata_port *ap, unsigned int device); 417 void (*dev_select)(struct ata_port *ap, unsigned int device);
380 418
381 void (*phy_reset) (struct ata_port *ap); 419 void (*phy_reset) (struct ata_port *ap); /* obsolete */
420 int (*probe_reset) (struct ata_port *ap, unsigned int *classes);
421
382 void (*post_set_mode) (struct ata_port *ap); 422 void (*post_set_mode) (struct ata_port *ap);
383 423
384 int (*check_atapi_dma) (struct ata_queued_cmd *qc); 424 int (*check_atapi_dma) (struct ata_queued_cmd *qc);
@@ -387,7 +427,7 @@ struct ata_port_operations {
387 void (*bmdma_start) (struct ata_queued_cmd *qc); 427 void (*bmdma_start) (struct ata_queued_cmd *qc);
388 428
389 void (*qc_prep) (struct ata_queued_cmd *qc); 429 void (*qc_prep) (struct ata_queued_cmd *qc);
390 int (*qc_issue) (struct ata_queued_cmd *qc); 430 unsigned int (*qc_issue) (struct ata_queued_cmd *qc);
391 431
392 void (*eng_timeout) (struct ata_port *ap); 432 void (*eng_timeout) (struct ata_port *ap);
393 433
@@ -435,6 +475,16 @@ extern void ata_port_probe(struct ata_port *);
435extern void __sata_phy_reset(struct ata_port *ap); 475extern void __sata_phy_reset(struct ata_port *ap);
436extern void sata_phy_reset(struct ata_port *ap); 476extern void sata_phy_reset(struct ata_port *ap);
437extern void ata_bus_reset(struct ata_port *ap); 477extern void ata_bus_reset(struct ata_port *ap);
478extern int ata_drive_probe_reset(struct ata_port *ap,
479 ata_probeinit_fn_t probeinit,
480 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
481 ata_postreset_fn_t postreset, unsigned int *classes);
482extern void ata_std_probeinit(struct ata_port *ap);
483extern int ata_std_softreset(struct ata_port *ap, int verbose,
484 unsigned int *classes);
485extern int sata_std_hardreset(struct ata_port *ap, int verbose,
486 unsigned int *class);
487extern void ata_std_postreset(struct ata_port *ap, unsigned int *classes);
438extern void ata_port_disable(struct ata_port *); 488extern void ata_port_disable(struct ata_port *);
439extern void ata_std_ports(struct ata_ioports *ioaddr); 489extern void ata_std_ports(struct ata_ioports *ioaddr);
440#ifdef CONFIG_PCI 490#ifdef CONFIG_PCI
@@ -449,7 +499,10 @@ extern void ata_host_set_remove(struct ata_host_set *host_set);
449extern int ata_scsi_detect(struct scsi_host_template *sht); 499extern int ata_scsi_detect(struct scsi_host_template *sht);
450extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg); 500extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
451extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)); 501extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
502extern enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd);
452extern int ata_scsi_error(struct Scsi_Host *host); 503extern int ata_scsi_error(struct Scsi_Host *host);
504extern void ata_eh_qc_complete(struct ata_queued_cmd *qc);
505extern void ata_eh_qc_retry(struct ata_queued_cmd *qc);
453extern int ata_scsi_release(struct Scsi_Host *host); 506extern int ata_scsi_release(struct Scsi_Host *host);
454extern unsigned int ata_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc); 507extern unsigned int ata_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc);
455extern int ata_scsi_device_resume(struct scsi_device *); 508extern int ata_scsi_device_resume(struct scsi_device *);
@@ -457,6 +510,9 @@ extern int ata_scsi_device_suspend(struct scsi_device *);
457extern int ata_device_resume(struct ata_port *, struct ata_device *); 510extern int ata_device_resume(struct ata_port *, struct ata_device *);
458extern int ata_device_suspend(struct ata_port *, struct ata_device *); 511extern int ata_device_suspend(struct ata_port *, struct ata_device *);
459extern int ata_ratelimit(void); 512extern int ata_ratelimit(void);
513extern unsigned int ata_busy_sleep(struct ata_port *ap,
514 unsigned long timeout_pat,
515 unsigned long timeout);
460 516
461/* 517/*
462 * Default driver ops implementations 518 * Default driver ops implementations
@@ -470,26 +526,29 @@ extern void ata_std_dev_select (struct ata_port *ap, unsigned int device);
470extern u8 ata_check_status(struct ata_port *ap); 526extern u8 ata_check_status(struct ata_port *ap);
471extern u8 ata_altstatus(struct ata_port *ap); 527extern u8 ata_altstatus(struct ata_port *ap);
472extern void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf); 528extern void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf);
529extern int ata_std_probe_reset(struct ata_port *ap, unsigned int *classes);
473extern int ata_port_start (struct ata_port *ap); 530extern int ata_port_start (struct ata_port *ap);
474extern void ata_port_stop (struct ata_port *ap); 531extern void ata_port_stop (struct ata_port *ap);
475extern void ata_host_stop (struct ata_host_set *host_set); 532extern void ata_host_stop (struct ata_host_set *host_set);
476extern irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs); 533extern irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs);
477extern void ata_qc_prep(struct ata_queued_cmd *qc); 534extern void ata_qc_prep(struct ata_queued_cmd *qc);
478extern int ata_qc_issue_prot(struct ata_queued_cmd *qc); 535extern unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc);
479extern void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, 536extern void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf,
480 unsigned int buflen); 537 unsigned int buflen);
481extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg, 538extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
482 unsigned int n_elem); 539 unsigned int n_elem);
483extern unsigned int ata_dev_classify(const struct ata_taskfile *tf); 540extern unsigned int ata_dev_classify(const struct ata_taskfile *tf);
484extern void ata_dev_id_string(const u16 *id, unsigned char *s, 541extern void ata_id_string(const u16 *id, unsigned char *s,
485 unsigned int ofs, unsigned int len); 542 unsigned int ofs, unsigned int len);
543extern void ata_id_c_string(const u16 *id, unsigned char *s,
544 unsigned int ofs, unsigned int len);
486extern void ata_dev_config(struct ata_port *ap, unsigned int i); 545extern void ata_dev_config(struct ata_port *ap, unsigned int i);
487extern void ata_bmdma_setup (struct ata_queued_cmd *qc); 546extern void ata_bmdma_setup (struct ata_queued_cmd *qc);
488extern void ata_bmdma_start (struct ata_queued_cmd *qc); 547extern void ata_bmdma_start (struct ata_queued_cmd *qc);
489extern void ata_bmdma_stop(struct ata_queued_cmd *qc); 548extern void ata_bmdma_stop(struct ata_queued_cmd *qc);
490extern u8 ata_bmdma_status(struct ata_port *ap); 549extern u8 ata_bmdma_status(struct ata_port *ap);
491extern void ata_bmdma_irq_clear(struct ata_port *ap); 550extern void ata_bmdma_irq_clear(struct ata_port *ap);
492extern void ata_qc_complete(struct ata_queued_cmd *qc); 551extern void __ata_qc_complete(struct ata_queued_cmd *qc);
493extern void ata_eng_timeout(struct ata_port *ap); 552extern void ata_eng_timeout(struct ata_port *ap);
494extern void ata_scsi_simulate(struct ata_port *ap, struct ata_device *dev, 553extern void ata_scsi_simulate(struct ata_port *ap, struct ata_device *dev,
495 struct scsi_cmnd *cmd, 554 struct scsi_cmnd *cmd,
@@ -657,9 +716,9 @@ static inline u8 ata_wait_idle(struct ata_port *ap)
657 716
658 if (status & (ATA_BUSY | ATA_DRQ)) { 717 if (status & (ATA_BUSY | ATA_DRQ)) {
659 unsigned long l = ap->ioaddr.status_addr; 718 unsigned long l = ap->ioaddr.status_addr;
660 printk(KERN_WARNING 719 if (ata_msg_warn(ap))
661 "ATA: abnormal status 0x%X on port 0x%lX\n", 720 printk(KERN_WARNING "ATA: abnormal status 0x%X on port 0x%lX\n",
662 status, l); 721 status, l);
663 } 722 }
664 723
665 return status; 724 return status;
@@ -701,6 +760,24 @@ static inline void ata_qc_reinit(struct ata_queued_cmd *qc)
701 ata_tf_init(qc->ap, &qc->tf, qc->dev->devno); 760 ata_tf_init(qc->ap, &qc->tf, qc->dev->devno);
702} 761}
703 762
763/**
764 * ata_qc_complete - Complete an active ATA command
765 * @qc: Command to complete
766 * @err_mask: ATA Status register contents
767 *
768 * Indicate to the mid and upper layers that an ATA
769 * command has completed, with either an ok or not-ok status.
770 *
771 * LOCKING:
772 * spin_lock_irqsave(host_set lock)
773 */
774static inline void ata_qc_complete(struct ata_queued_cmd *qc)
775{
776 if (unlikely(qc->flags & ATA_QCFLAG_EH_SCHEDULED))
777 return;
778
779 __ata_qc_complete(qc);
780}
704 781
705/** 782/**
706 * ata_irq_on - Enable interrupts on a port. 783 * ata_irq_on - Enable interrupts on a port.
@@ -751,7 +828,8 @@ static inline u8 ata_irq_ack(struct ata_port *ap, unsigned int chk_drq)
751 828
752 status = ata_busy_wait(ap, bits, 1000); 829 status = ata_busy_wait(ap, bits, 1000);
753 if (status & bits) 830 if (status & bits)
754 DPRINTK("abnormal status 0x%X\n", status); 831 if (ata_msg_err(ap))
832 printk(KERN_ERR "abnormal status 0x%X\n", status);
755 833
756 /* get controller status; clear intr, err bits */ 834 /* get controller status; clear intr, err bits */
757 if (ap->flags & ATA_FLAG_MMIO) { 835 if (ap->flags & ATA_FLAG_MMIO) {
@@ -769,8 +847,10 @@ static inline u8 ata_irq_ack(struct ata_port *ap, unsigned int chk_drq)
769 post_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); 847 post_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
770 } 848 }
771 849
772 VPRINTK("irq ack: host_stat 0x%X, new host_stat 0x%X, drv_stat 0x%X\n", 850 if (ata_msg_intr(ap))
773 host_stat, post_stat, status); 851 printk(KERN_INFO "%s: irq ack: host_stat 0x%X, new host_stat 0x%X, drv_stat 0x%X\n",
852 __FUNCTION__,
853 host_stat, post_stat, status);
774 854
775 return status; 855 return status;
776} 856}
@@ -807,7 +887,7 @@ static inline int ata_try_flush_cache(const struct ata_device *dev)
807static inline unsigned int ac_err_mask(u8 status) 887static inline unsigned int ac_err_mask(u8 status)
808{ 888{
809 if (status & ATA_BUSY) 889 if (status & ATA_BUSY)
810 return AC_ERR_ATA_BUS; 890 return AC_ERR_HSM;
811 if (status & (ATA_ERR | ATA_DF)) 891 if (status & (ATA_ERR | ATA_DF))
812 return AC_ERR_DEV; 892 return AC_ERR_DEV;
813 return 0; 893 return 0;
diff --git a/include/scsi/scsi_eh.h b/include/scsi/scsi_eh.h
index fabd879c2f2e..d160880b2a87 100644
--- a/include/scsi/scsi_eh.h
+++ b/include/scsi/scsi_eh.h
@@ -35,6 +35,9 @@ static inline int scsi_sense_valid(struct scsi_sense_hdr *sshdr)
35} 35}
36 36
37 37
38extern void scsi_eh_finish_cmd(struct scsi_cmnd *scmd,
39 struct list_head *done_q);
40extern void scsi_eh_flush_done_q(struct list_head *done_q);
38extern void scsi_report_bus_reset(struct Scsi_Host *, int); 41extern void scsi_report_bus_reset(struct Scsi_Host *, int);
39extern void scsi_report_device_reset(struct Scsi_Host *, int, int); 42extern void scsi_report_device_reset(struct Scsi_Host *, int, int);
40extern int scsi_block_when_processing_errors(struct scsi_device *); 43extern int scsi_block_when_processing_errors(struct scsi_device *);