aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--drivers/scsi/Makefile2
-rw-r--r--drivers/scsi/ahci.c196
-rw-r--r--drivers/scsi/ata_piix.c392
-rw-r--r--drivers/scsi/libata-bmdma.c703
-rw-r--r--drivers/scsi/libata-core.c2128
-rw-r--r--drivers/scsi/libata-scsi.c238
-rw-r--r--drivers/scsi/libata.h2
-rw-r--r--drivers/scsi/pdc_adma.c6
-rw-r--r--drivers/scsi/sata_mv.c279
-rw-r--r--drivers/scsi/sata_nv.c2
-rw-r--r--drivers/scsi/sata_promise.c129
-rw-r--r--drivers/scsi/sata_qstor.c10
-rw-r--r--drivers/scsi/sata_sil.c126
-rw-r--r--drivers/scsi/sata_sil24.c90
-rw-r--r--drivers/scsi/sata_sis.c2
-rw-r--r--drivers/scsi/sata_svw.c2
-rw-r--r--drivers/scsi/sata_sx4.c25
-rw-r--r--drivers/scsi/sata_uli.c2
-rw-r--r--drivers/scsi/sata_via.c2
-rw-r--r--drivers/scsi/sata_vsc.c2
-rw-r--r--drivers/scsi/scsi_error.c7
-rw-r--r--include/linux/ata.h22
-rw-r--r--include/linux/libata.h159
-rw-r--r--include/scsi/scsi_eh.h3
24 files changed, 2858 insertions, 1671 deletions
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 320e765fa0cd..15dc2e00e1b2 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -163,7 +163,7 @@ ncr53c8xx-flags-$(CONFIG_SCSI_ZALON) \
163CFLAGS_ncr53c8xx.o := $(ncr53c8xx-flags-y) $(ncr53c8xx-flags-m) 163CFLAGS_ncr53c8xx.o := $(ncr53c8xx-flags-y) $(ncr53c8xx-flags-m)
164zalon7xx-objs := zalon.o ncr53c8xx.o 164zalon7xx-objs := zalon.o ncr53c8xx.o
165NCR_Q720_mod-objs := NCR_Q720.o ncr53c8xx.o 165NCR_Q720_mod-objs := NCR_Q720.o ncr53c8xx.o
166libata-objs := libata-core.o libata-scsi.o 166libata-objs := libata-core.o libata-scsi.o libata-bmdma.o
167oktagon_esp_mod-objs := oktagon_esp.o oktagon_io.o 167oktagon_esp_mod-objs := oktagon_esp.o oktagon_io.o
168 168
169# Files generated that shall be removed upon make clean 169# Files generated that shall be removed upon make clean
diff --git a/drivers/scsi/ahci.c b/drivers/scsi/ahci.c
index a800fb51168b..1c2ab3dede71 100644
--- a/drivers/scsi/ahci.c
+++ b/drivers/scsi/ahci.c
@@ -66,6 +66,8 @@ enum {
66 AHCI_IRQ_ON_SG = (1 << 31), 66 AHCI_IRQ_ON_SG = (1 << 31),
67 AHCI_CMD_ATAPI = (1 << 5), 67 AHCI_CMD_ATAPI = (1 << 5),
68 AHCI_CMD_WRITE = (1 << 6), 68 AHCI_CMD_WRITE = (1 << 6),
69 AHCI_CMD_RESET = (1 << 8),
70 AHCI_CMD_CLR_BUSY = (1 << 10),
69 71
70 RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */ 72 RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */
71 73
@@ -85,6 +87,7 @@ enum {
85 87
86 /* HOST_CAP bits */ 88 /* HOST_CAP bits */
87 HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */ 89 HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */
90 HOST_CAP_CLO = (1 << 24), /* Command List Override support */
88 91
89 /* registers for each SATA port */ 92 /* registers for each SATA port */
90 PORT_LST_ADDR = 0x00, /* command list DMA addr */ 93 PORT_LST_ADDR = 0x00, /* command list DMA addr */
@@ -138,6 +141,7 @@ enum {
138 PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */ 141 PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */
139 PORT_CMD_FIS_ON = (1 << 14), /* FIS DMA engine running */ 142 PORT_CMD_FIS_ON = (1 << 14), /* FIS DMA engine running */
140 PORT_CMD_FIS_RX = (1 << 4), /* Enable FIS receive DMA engine */ 143 PORT_CMD_FIS_RX = (1 << 4), /* Enable FIS receive DMA engine */
144 PORT_CMD_CLO = (1 << 3), /* Command list override */
141 PORT_CMD_POWER_ON = (1 << 2), /* Power up device */ 145 PORT_CMD_POWER_ON = (1 << 2), /* Power up device */
142 PORT_CMD_SPIN_UP = (1 << 1), /* Spin up device */ 146 PORT_CMD_SPIN_UP = (1 << 1), /* Spin up device */
143 PORT_CMD_START = (1 << 0), /* Enable port DMA engine */ 147 PORT_CMD_START = (1 << 0), /* Enable port DMA engine */
@@ -184,9 +188,9 @@ struct ahci_port_priv {
184static u32 ahci_scr_read (struct ata_port *ap, unsigned int sc_reg); 188static u32 ahci_scr_read (struct ata_port *ap, unsigned int sc_reg);
185static void ahci_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); 189static void ahci_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
186static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); 190static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
187static int ahci_qc_issue(struct ata_queued_cmd *qc); 191static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
188static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *regs); 192static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *regs);
189static void ahci_phy_reset(struct ata_port *ap); 193static int ahci_probe_reset(struct ata_port *ap, unsigned int *classes);
190static void ahci_irq_clear(struct ata_port *ap); 194static void ahci_irq_clear(struct ata_port *ap);
191static void ahci_eng_timeout(struct ata_port *ap); 195static void ahci_eng_timeout(struct ata_port *ap);
192static int ahci_port_start(struct ata_port *ap); 196static int ahci_port_start(struct ata_port *ap);
@@ -202,11 +206,11 @@ static struct scsi_host_template ahci_sht = {
202 .name = DRV_NAME, 206 .name = DRV_NAME,
203 .ioctl = ata_scsi_ioctl, 207 .ioctl = ata_scsi_ioctl,
204 .queuecommand = ata_scsi_queuecmd, 208 .queuecommand = ata_scsi_queuecmd,
209 .eh_timed_out = ata_scsi_timed_out,
205 .eh_strategy_handler = ata_scsi_error, 210 .eh_strategy_handler = ata_scsi_error,
206 .can_queue = ATA_DEF_QUEUE, 211 .can_queue = ATA_DEF_QUEUE,
207 .this_id = ATA_SHT_THIS_ID, 212 .this_id = ATA_SHT_THIS_ID,
208 .sg_tablesize = AHCI_MAX_SG, 213 .sg_tablesize = AHCI_MAX_SG,
209 .max_sectors = ATA_MAX_SECTORS,
210 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 214 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
211 .emulated = ATA_SHT_EMULATED, 215 .emulated = ATA_SHT_EMULATED,
212 .use_clustering = AHCI_USE_CLUSTERING, 216 .use_clustering = AHCI_USE_CLUSTERING,
@@ -225,7 +229,7 @@ static const struct ata_port_operations ahci_ops = {
225 229
226 .tf_read = ahci_tf_read, 230 .tf_read = ahci_tf_read,
227 231
228 .phy_reset = ahci_phy_reset, 232 .probe_reset = ahci_probe_reset,
229 233
230 .qc_prep = ahci_qc_prep, 234 .qc_prep = ahci_qc_prep,
231 .qc_issue = ahci_qc_issue, 235 .qc_issue = ahci_qc_issue,
@@ -247,8 +251,7 @@ static const struct ata_port_info ahci_port_info[] = {
247 { 251 {
248 .sht = &ahci_sht, 252 .sht = &ahci_sht,
249 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 253 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
250 ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO | 254 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
251 ATA_FLAG_PIO_DMA,
252 .pio_mask = 0x1f, /* pio0-4 */ 255 .pio_mask = 0x1f, /* pio0-4 */
253 .udma_mask = 0x7f, /* udma0-6 ; FIXME */ 256 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
254 .port_ops = &ahci_ops, 257 .port_ops = &ahci_ops,
@@ -450,17 +453,48 @@ static void ahci_scr_write (struct ata_port *ap, unsigned int sc_reg_in,
450 writel(val, (void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4)); 453 writel(val, (void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
451} 454}
452 455
453static void ahci_phy_reset(struct ata_port *ap) 456static int ahci_stop_engine(struct ata_port *ap)
454{ 457{
455 void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr; 458 void __iomem *mmio = ap->host_set->mmio_base;
456 struct ata_taskfile tf; 459 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
457 struct ata_device *dev = &ap->device[0]; 460 int work;
458 u32 new_tmp, tmp; 461 u32 tmp;
459 462
460 __sata_phy_reset(ap); 463 tmp = readl(port_mmio + PORT_CMD);
464 tmp &= ~PORT_CMD_START;
465 writel(tmp, port_mmio + PORT_CMD);
461 466
462 if (ap->flags & ATA_FLAG_PORT_DISABLED) 467 /* wait for engine to stop. TODO: this could be
463 return; 468 * as long as 500 msec
469 */
470 work = 1000;
471 while (work-- > 0) {
472 tmp = readl(port_mmio + PORT_CMD);
473 if ((tmp & PORT_CMD_LIST_ON) == 0)
474 return 0;
475 udelay(10);
476 }
477
478 return -EIO;
479}
480
481static void ahci_start_engine(struct ata_port *ap)
482{
483 void __iomem *mmio = ap->host_set->mmio_base;
484 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
485 u32 tmp;
486
487 tmp = readl(port_mmio + PORT_CMD);
488 tmp |= PORT_CMD_START;
489 writel(tmp, port_mmio + PORT_CMD);
490 readl(port_mmio + PORT_CMD); /* flush */
491}
492
493static unsigned int ahci_dev_classify(struct ata_port *ap)
494{
495 void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
496 struct ata_taskfile tf;
497 u32 tmp;
464 498
465 tmp = readl(port_mmio + PORT_SIG); 499 tmp = readl(port_mmio + PORT_SIG);
466 tf.lbah = (tmp >> 24) & 0xff; 500 tf.lbah = (tmp >> 24) & 0xff;
@@ -468,15 +502,46 @@ static void ahci_phy_reset(struct ata_port *ap)
468 tf.lbal = (tmp >> 8) & 0xff; 502 tf.lbal = (tmp >> 8) & 0xff;
469 tf.nsect = (tmp) & 0xff; 503 tf.nsect = (tmp) & 0xff;
470 504
471 dev->class = ata_dev_classify(&tf); 505 return ata_dev_classify(&tf);
472 if (!ata_dev_present(dev)) { 506}
473 ata_port_disable(ap); 507
474 return; 508static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, u32 opts)
475 } 509{
510 pp->cmd_slot[0].opts = cpu_to_le32(opts);
511 pp->cmd_slot[0].status = 0;
512 pp->cmd_slot[0].tbl_addr = cpu_to_le32(pp->cmd_tbl_dma & 0xffffffff);
513 pp->cmd_slot[0].tbl_addr_hi = cpu_to_le32((pp->cmd_tbl_dma >> 16) >> 16);
514}
515
516static int ahci_hardreset(struct ata_port *ap, int verbose, unsigned int *class)
517{
518 int rc;
519
520 DPRINTK("ENTER\n");
521
522 ahci_stop_engine(ap);
523 rc = sata_std_hardreset(ap, verbose, class);
524 ahci_start_engine(ap);
525
526 if (rc == 0)
527 *class = ahci_dev_classify(ap);
528 if (*class == ATA_DEV_UNKNOWN)
529 *class = ATA_DEV_NONE;
530
531 DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
532 return rc;
533}
534
535static void ahci_postreset(struct ata_port *ap, unsigned int *class)
536{
537 void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
538 u32 new_tmp, tmp;
539
540 ata_std_postreset(ap, class);
476 541
477 /* Make sure port's ATAPI bit is set appropriately */ 542 /* Make sure port's ATAPI bit is set appropriately */
478 new_tmp = tmp = readl(port_mmio + PORT_CMD); 543 new_tmp = tmp = readl(port_mmio + PORT_CMD);
479 if (dev->class == ATA_DEV_ATAPI) 544 if (*class == ATA_DEV_ATAPI)
480 new_tmp |= PORT_CMD_ATAPI; 545 new_tmp |= PORT_CMD_ATAPI;
481 else 546 else
482 new_tmp &= ~PORT_CMD_ATAPI; 547 new_tmp &= ~PORT_CMD_ATAPI;
@@ -486,6 +551,12 @@ static void ahci_phy_reset(struct ata_port *ap)
486 } 551 }
487} 552}
488 553
554static int ahci_probe_reset(struct ata_port *ap, unsigned int *classes)
555{
556 return ata_drive_probe_reset(ap, NULL, NULL, ahci_hardreset,
557 ahci_postreset, classes);
558}
559
489static u8 ahci_check_status(struct ata_port *ap) 560static u8 ahci_check_status(struct ata_port *ap)
490{ 561{
491 void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr; 562 void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr;
@@ -533,42 +604,36 @@ static void ahci_qc_prep(struct ata_queued_cmd *qc)
533{ 604{
534 struct ata_port *ap = qc->ap; 605 struct ata_port *ap = qc->ap;
535 struct ahci_port_priv *pp = ap->private_data; 606 struct ahci_port_priv *pp = ap->private_data;
607 int is_atapi = is_atapi_taskfile(&qc->tf);
536 u32 opts; 608 u32 opts;
537 const u32 cmd_fis_len = 5; /* five dwords */ 609 const u32 cmd_fis_len = 5; /* five dwords */
538 unsigned int n_elem; 610 unsigned int n_elem;
539 611
540 /* 612 /*
541 * Fill in command slot information (currently only one slot,
542 * slot 0, is currently since we don't do queueing)
543 */
544
545 opts = cmd_fis_len;
546 if (qc->tf.flags & ATA_TFLAG_WRITE)
547 opts |= AHCI_CMD_WRITE;
548 if (is_atapi_taskfile(&qc->tf))
549 opts |= AHCI_CMD_ATAPI;
550
551 pp->cmd_slot[0].opts = cpu_to_le32(opts);
552 pp->cmd_slot[0].status = 0;
553 pp->cmd_slot[0].tbl_addr = cpu_to_le32(pp->cmd_tbl_dma & 0xffffffff);
554 pp->cmd_slot[0].tbl_addr_hi = cpu_to_le32((pp->cmd_tbl_dma >> 16) >> 16);
555
556 /*
557 * Fill in command table information. First, the header, 613 * Fill in command table information. First, the header,
558 * a SATA Register - Host to Device command FIS. 614 * a SATA Register - Host to Device command FIS.
559 */ 615 */
560 ata_tf_to_fis(&qc->tf, pp->cmd_tbl, 0); 616 ata_tf_to_fis(&qc->tf, pp->cmd_tbl, 0);
561 if (opts & AHCI_CMD_ATAPI) { 617 if (is_atapi) {
562 memset(pp->cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32); 618 memset(pp->cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
563 memcpy(pp->cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, ap->cdb_len); 619 memcpy(pp->cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb,
620 qc->dev->cdb_len);
564 } 621 }
565 622
566 if (!(qc->flags & ATA_QCFLAG_DMAMAP)) 623 n_elem = 0;
567 return; 624 if (qc->flags & ATA_QCFLAG_DMAMAP)
625 n_elem = ahci_fill_sg(qc);
568 626
569 n_elem = ahci_fill_sg(qc); 627 /*
628 * Fill in command slot information.
629 */
630 opts = cmd_fis_len | n_elem << 16;
631 if (qc->tf.flags & ATA_TFLAG_WRITE)
632 opts |= AHCI_CMD_WRITE;
633 if (is_atapi)
634 opts |= AHCI_CMD_ATAPI;
570 635
571 pp->cmd_slot[0].opts |= cpu_to_le32(n_elem << 16); 636 ahci_fill_cmd_slot(pp, opts);
572} 637}
573 638
574static void ahci_restart_port(struct ata_port *ap, u32 irq_stat) 639static void ahci_restart_port(struct ata_port *ap, u32 irq_stat)
@@ -576,7 +641,6 @@ static void ahci_restart_port(struct ata_port *ap, u32 irq_stat)
576 void __iomem *mmio = ap->host_set->mmio_base; 641 void __iomem *mmio = ap->host_set->mmio_base;
577 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no); 642 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
578 u32 tmp; 643 u32 tmp;
579 int work;
580 644
581 if ((ap->device[0].class != ATA_DEV_ATAPI) || 645 if ((ap->device[0].class != ATA_DEV_ATAPI) ||
582 ((irq_stat & PORT_IRQ_TF_ERR) == 0)) 646 ((irq_stat & PORT_IRQ_TF_ERR) == 0))
@@ -592,20 +656,7 @@ static void ahci_restart_port(struct ata_port *ap, u32 irq_stat)
592 readl(port_mmio + PORT_SCR_ERR)); 656 readl(port_mmio + PORT_SCR_ERR));
593 657
594 /* stop DMA */ 658 /* stop DMA */
595 tmp = readl(port_mmio + PORT_CMD); 659 ahci_stop_engine(ap);
596 tmp &= ~PORT_CMD_START;
597 writel(tmp, port_mmio + PORT_CMD);
598
599 /* wait for engine to stop. TODO: this could be
600 * as long as 500 msec
601 */
602 work = 1000;
603 while (work-- > 0) {
604 tmp = readl(port_mmio + PORT_CMD);
605 if ((tmp & PORT_CMD_LIST_ON) == 0)
606 break;
607 udelay(10);
608 }
609 660
610 /* clear SATA phy error, if any */ 661 /* clear SATA phy error, if any */
611 tmp = readl(port_mmio + PORT_SCR_ERR); 662 tmp = readl(port_mmio + PORT_SCR_ERR);
@@ -624,10 +675,7 @@ static void ahci_restart_port(struct ata_port *ap, u32 irq_stat)
624 } 675 }
625 676
626 /* re-start DMA */ 677 /* re-start DMA */
627 tmp = readl(port_mmio + PORT_CMD); 678 ahci_start_engine(ap);
628 tmp |= PORT_CMD_START;
629 writel(tmp, port_mmio + PORT_CMD);
630 readl(port_mmio + PORT_CMD); /* flush */
631} 679}
632 680
633static void ahci_eng_timeout(struct ata_port *ap) 681static void ahci_eng_timeout(struct ata_port *ap)
@@ -642,25 +690,13 @@ static void ahci_eng_timeout(struct ata_port *ap)
642 690
643 spin_lock_irqsave(&host_set->lock, flags); 691 spin_lock_irqsave(&host_set->lock, flags);
644 692
693 ahci_restart_port(ap, readl(port_mmio + PORT_IRQ_STAT));
645 qc = ata_qc_from_tag(ap, ap->active_tag); 694 qc = ata_qc_from_tag(ap, ap->active_tag);
646 if (!qc) { 695 qc->err_mask |= AC_ERR_TIMEOUT;
647 printk(KERN_ERR "ata%u: BUG: timeout without command\n",
648 ap->id);
649 } else {
650 ahci_restart_port(ap, readl(port_mmio + PORT_IRQ_STAT));
651
652 /* hack alert! We cannot use the supplied completion
653 * function from inside the ->eh_strategy_handler() thread.
654 * libata is the only user of ->eh_strategy_handler() in
655 * any kernel, so the default scsi_done() assumes it is
656 * not being called from the SCSI EH.
657 */
658 qc->scsidone = scsi_finish_command;
659 qc->err_mask |= AC_ERR_OTHER;
660 ata_qc_complete(qc);
661 }
662 696
663 spin_unlock_irqrestore(&host_set->lock, flags); 697 spin_unlock_irqrestore(&host_set->lock, flags);
698
699 ata_eh_qc_complete(qc);
664} 700}
665 701
666static inline int ahci_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc) 702static inline int ahci_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
@@ -678,7 +714,7 @@ static inline int ahci_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
678 ci = readl(port_mmio + PORT_CMD_ISSUE); 714 ci = readl(port_mmio + PORT_CMD_ISSUE);
679 if (likely((ci & 0x1) == 0)) { 715 if (likely((ci & 0x1) == 0)) {
680 if (qc) { 716 if (qc) {
681 assert(qc->err_mask == 0); 717 WARN_ON(qc->err_mask);
682 ata_qc_complete(qc); 718 ata_qc_complete(qc);
683 qc = NULL; 719 qc = NULL;
684 } 720 }
@@ -697,7 +733,7 @@ static inline int ahci_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
697 ahci_restart_port(ap, status); 733 ahci_restart_port(ap, status);
698 734
699 if (qc) { 735 if (qc) {
700 qc->err_mask |= AC_ERR_OTHER; 736 qc->err_mask |= err_mask;
701 ata_qc_complete(qc); 737 ata_qc_complete(qc);
702 } 738 }
703 } 739 }
@@ -776,7 +812,7 @@ static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *
776 return IRQ_RETVAL(handled); 812 return IRQ_RETVAL(handled);
777} 813}
778 814
779static int ahci_qc_issue(struct ata_queued_cmd *qc) 815static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
780{ 816{
781 struct ata_port *ap = qc->ap; 817 struct ata_port *ap = qc->ap;
782 void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr; 818 void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
diff --git a/drivers/scsi/ata_piix.c b/drivers/scsi/ata_piix.c
index fc3ca051ceed..9327b62f97de 100644
--- a/drivers/scsi/ata_piix.c
+++ b/drivers/scsi/ata_piix.c
@@ -101,36 +101,54 @@ enum {
101 ICH5_PCS = 0x92, /* port control and status */ 101 ICH5_PCS = 0x92, /* port control and status */
102 PIIX_SCC = 0x0A, /* sub-class code register */ 102 PIIX_SCC = 0x0A, /* sub-class code register */
103 103
104 PIIX_FLAG_AHCI = (1 << 28), /* AHCI possible */ 104 PIIX_FLAG_IGNORE_PCS = (1 << 25), /* ignore PCS present bits */
105 PIIX_FLAG_CHECKINTR = (1 << 29), /* make sure PCI INTx enabled */ 105 PIIX_FLAG_SCR = (1 << 26), /* SCR available */
106 PIIX_FLAG_COMBINED = (1 << 30), /* combined mode possible */ 106 PIIX_FLAG_AHCI = (1 << 27), /* AHCI possible */
107 PIIX_FLAG_CHECKINTR = (1 << 28), /* make sure PCI INTx enabled */
108 PIIX_FLAG_COMBINED = (1 << 29), /* combined mode possible */
109 /* ICH6/7 use different scheme for map value */
110 PIIX_FLAG_COMBINED_ICH6 = PIIX_FLAG_COMBINED | (1 << 30),
107 111
108 /* combined mode. if set, PATA is channel 0. 112 /* combined mode. if set, PATA is channel 0.
109 * if clear, PATA is channel 1. 113 * if clear, PATA is channel 1.
110 */ 114 */
111 PIIX_COMB_PATA_P0 = (1 << 1),
112 PIIX_COMB = (1 << 2), /* combined mode enabled? */
113
114 PIIX_PORT_ENABLED = (1 << 0), 115 PIIX_PORT_ENABLED = (1 << 0),
115 PIIX_PORT_PRESENT = (1 << 4), 116 PIIX_PORT_PRESENT = (1 << 4),
116 117
117 PIIX_80C_PRI = (1 << 5) | (1 << 4), 118 PIIX_80C_PRI = (1 << 5) | (1 << 4),
118 PIIX_80C_SEC = (1 << 7) | (1 << 6), 119 PIIX_80C_SEC = (1 << 7) | (1 << 6),
119 120
120 ich5_pata = 0, 121 /* controller IDs */
121 ich5_sata = 1, 122 piix4_pata = 0,
122 piix4_pata = 2, 123 ich5_pata = 1,
123 ich6_sata = 3, 124 ich5_sata = 2,
124 ich6_sata_ahci = 4, 125 esb_sata = 3,
126 ich6_sata = 4,
127 ich6_sata_ahci = 5,
128 ich6m_sata_ahci = 6,
129
130 /* constants for mapping table */
131 P0 = 0, /* port 0 */
132 P1 = 1, /* port 1 */
133 P2 = 2, /* port 2 */
134 P3 = 3, /* port 3 */
135 IDE = -1, /* IDE */
136 NA = -2, /* not avaliable */
137 RV = -3, /* reserved */
125 138
126 PIIX_AHCI_DEVICE = 6, 139 PIIX_AHCI_DEVICE = 6,
127}; 140};
128 141
142struct piix_map_db {
143 const u32 mask;
144 const int map[][4];
145};
146
129static int piix_init_one (struct pci_dev *pdev, 147static int piix_init_one (struct pci_dev *pdev,
130 const struct pci_device_id *ent); 148 const struct pci_device_id *ent);
131 149
132static void piix_pata_phy_reset(struct ata_port *ap); 150static int piix_pata_probe_reset(struct ata_port *ap, unsigned int *classes);
133static void piix_sata_phy_reset(struct ata_port *ap); 151static int piix_sata_probe_reset(struct ata_port *ap, unsigned int *classes);
134static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev); 152static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev);
135static void piix_set_dmamode (struct ata_port *ap, struct ata_device *adev); 153static void piix_set_dmamode (struct ata_port *ap, struct ata_device *adev);
136 154
@@ -147,19 +165,32 @@ static const struct pci_device_id piix_pci_tbl[] = {
147 * list in drivers/pci/quirks.c. 165 * list in drivers/pci/quirks.c.
148 */ 166 */
149 167
168 /* 82801EB (ICH5) */
150 { 0x8086, 0x24d1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata }, 169 { 0x8086, 0x24d1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata },
170 /* 82801EB (ICH5) */
151 { 0x8086, 0x24df, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata }, 171 { 0x8086, 0x24df, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata },
152 { 0x8086, 0x25a3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata }, 172 /* 6300ESB (ICH5 variant with broken PCS present bits) */
153 { 0x8086, 0x25b0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata }, 173 { 0x8086, 0x25a3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, esb_sata },
174 /* 6300ESB pretending RAID */
175 { 0x8086, 0x25b0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, esb_sata },
176 /* 82801FB/FW (ICH6/ICH6W) */
154 { 0x8086, 0x2651, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata }, 177 { 0x8086, 0x2651, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata },
178 /* 82801FR/FRW (ICH6R/ICH6RW) */
155 { 0x8086, 0x2652, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci }, 179 { 0x8086, 0x2652, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
156 { 0x8086, 0x2653, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci }, 180 /* 82801FBM ICH6M (ICH6R with only port 0 and 2 implemented) */
181 { 0x8086, 0x2653, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6m_sata_ahci },
182 /* 82801GB/GR/GH (ICH7, identical to ICH6) */
157 { 0x8086, 0x27c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci }, 183 { 0x8086, 0x27c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
158 { 0x8086, 0x27c4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci }, 184 /* 2801GBM/GHM (ICH7M, identical to ICH6M) */
185 { 0x8086, 0x27c4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6m_sata_ahci },
186 /* Enterprise Southbridge 2 (where's the datasheet?) */
159 { 0x8086, 0x2680, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci }, 187 { 0x8086, 0x2680, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
188 /* SATA Controller 1 IDE (ICH8, no datasheet yet) */
160 { 0x8086, 0x2820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci }, 189 { 0x8086, 0x2820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
190 /* SATA Controller 2 IDE (ICH8, ditto) */
161 { 0x8086, 0x2825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci }, 191 { 0x8086, 0x2825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
162 { 0x8086, 0x2828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci }, 192 /* Mobile SATA Controller IDE (ICH8M, ditto) */
193 { 0x8086, 0x2828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6m_sata_ahci },
163 194
164 { } /* terminate list */ 195 { } /* terminate list */
165}; 196};
@@ -178,11 +209,11 @@ static struct scsi_host_template piix_sht = {
178 .name = DRV_NAME, 209 .name = DRV_NAME,
179 .ioctl = ata_scsi_ioctl, 210 .ioctl = ata_scsi_ioctl,
180 .queuecommand = ata_scsi_queuecmd, 211 .queuecommand = ata_scsi_queuecmd,
212 .eh_timed_out = ata_scsi_timed_out,
181 .eh_strategy_handler = ata_scsi_error, 213 .eh_strategy_handler = ata_scsi_error,
182 .can_queue = ATA_DEF_QUEUE, 214 .can_queue = ATA_DEF_QUEUE,
183 .this_id = ATA_SHT_THIS_ID, 215 .this_id = ATA_SHT_THIS_ID,
184 .sg_tablesize = LIBATA_MAX_PRD, 216 .sg_tablesize = LIBATA_MAX_PRD,
185 .max_sectors = ATA_MAX_SECTORS,
186 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 217 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
187 .emulated = ATA_SHT_EMULATED, 218 .emulated = ATA_SHT_EMULATED,
188 .use_clustering = ATA_SHT_USE_CLUSTERING, 219 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -205,7 +236,7 @@ static const struct ata_port_operations piix_pata_ops = {
205 .exec_command = ata_exec_command, 236 .exec_command = ata_exec_command,
206 .dev_select = ata_std_dev_select, 237 .dev_select = ata_std_dev_select,
207 238
208 .phy_reset = piix_pata_phy_reset, 239 .probe_reset = piix_pata_probe_reset,
209 240
210 .bmdma_setup = ata_bmdma_setup, 241 .bmdma_setup = ata_bmdma_setup,
211 .bmdma_start = ata_bmdma_start, 242 .bmdma_start = ata_bmdma_start,
@@ -233,7 +264,7 @@ static const struct ata_port_operations piix_sata_ops = {
233 .exec_command = ata_exec_command, 264 .exec_command = ata_exec_command,
234 .dev_select = ata_std_dev_select, 265 .dev_select = ata_std_dev_select,
235 266
236 .phy_reset = piix_sata_phy_reset, 267 .probe_reset = piix_sata_probe_reset,
237 268
238 .bmdma_setup = ata_bmdma_setup, 269 .bmdma_setup = ata_bmdma_setup,
239 .bmdma_start = ata_bmdma_start, 270 .bmdma_start = ata_bmdma_start,
@@ -252,12 +283,62 @@ static const struct ata_port_operations piix_sata_ops = {
252 .host_stop = ata_host_stop, 283 .host_stop = ata_host_stop,
253}; 284};
254 285
286static struct piix_map_db ich5_map_db = {
287 .mask = 0x7,
288 .map = {
289 /* PM PS SM SS MAP */
290 { P0, NA, P1, NA }, /* 000b */
291 { P1, NA, P0, NA }, /* 001b */
292 { RV, RV, RV, RV },
293 { RV, RV, RV, RV },
294 { P0, P1, IDE, IDE }, /* 100b */
295 { P1, P0, IDE, IDE }, /* 101b */
296 { IDE, IDE, P0, P1 }, /* 110b */
297 { IDE, IDE, P1, P0 }, /* 111b */
298 },
299};
300
301static struct piix_map_db ich6_map_db = {
302 .mask = 0x3,
303 .map = {
304 /* PM PS SM SS MAP */
305 { P0, P1, P2, P3 }, /* 00b */
306 { IDE, IDE, P1, P3 }, /* 01b */
307 { P0, P2, IDE, IDE }, /* 10b */
308 { RV, RV, RV, RV },
309 },
310};
311
312static struct piix_map_db ich6m_map_db = {
313 .mask = 0x3,
314 .map = {
315 /* PM PS SM SS MAP */
316 { P0, P1, P2, P3 }, /* 00b */
317 { RV, RV, RV, RV },
318 { P0, P2, IDE, IDE }, /* 10b */
319 { RV, RV, RV, RV },
320 },
321};
322
255static struct ata_port_info piix_port_info[] = { 323static struct ata_port_info piix_port_info[] = {
324 /* piix4_pata */
325 {
326 .sht = &piix_sht,
327 .host_flags = ATA_FLAG_SLAVE_POSS,
328 .pio_mask = 0x1f, /* pio0-4 */
329#if 0
330 .mwdma_mask = 0x06, /* mwdma1-2 */
331#else
332 .mwdma_mask = 0x00, /* mwdma broken */
333#endif
334 .udma_mask = ATA_UDMA_MASK_40C,
335 .port_ops = &piix_pata_ops,
336 },
337
256 /* ich5_pata */ 338 /* ich5_pata */
257 { 339 {
258 .sht = &piix_sht, 340 .sht = &piix_sht,
259 .host_flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST | 341 .host_flags = ATA_FLAG_SLAVE_POSS | PIIX_FLAG_CHECKINTR,
260 PIIX_FLAG_CHECKINTR,
261 .pio_mask = 0x1f, /* pio0-4 */ 342 .pio_mask = 0x1f, /* pio0-4 */
262#if 0 343#if 0
263 .mwdma_mask = 0x06, /* mwdma1-2 */ 344 .mwdma_mask = 0x06, /* mwdma1-2 */
@@ -271,50 +352,63 @@ static struct ata_port_info piix_port_info[] = {
271 /* ich5_sata */ 352 /* ich5_sata */
272 { 353 {
273 .sht = &piix_sht, 354 .sht = &piix_sht,
274 .host_flags = ATA_FLAG_SATA | ATA_FLAG_SRST | 355 .host_flags = ATA_FLAG_SATA | PIIX_FLAG_COMBINED |
275 PIIX_FLAG_COMBINED | PIIX_FLAG_CHECKINTR, 356 PIIX_FLAG_CHECKINTR,
276 .pio_mask = 0x1f, /* pio0-4 */ 357 .pio_mask = 0x1f, /* pio0-4 */
277 .mwdma_mask = 0x07, /* mwdma0-2 */ 358 .mwdma_mask = 0x07, /* mwdma0-2 */
278 .udma_mask = 0x7f, /* udma0-6 */ 359 .udma_mask = 0x7f, /* udma0-6 */
279 .port_ops = &piix_sata_ops, 360 .port_ops = &piix_sata_ops,
361 .private_data = &ich5_map_db,
280 }, 362 },
281 363
282 /* piix4_pata */ 364 /* i6300esb_sata */
283 { 365 {
284 .sht = &piix_sht, 366 .sht = &piix_sht,
285 .host_flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, 367 .host_flags = ATA_FLAG_SATA | PIIX_FLAG_COMBINED |
368 PIIX_FLAG_CHECKINTR | PIIX_FLAG_IGNORE_PCS,
286 .pio_mask = 0x1f, /* pio0-4 */ 369 .pio_mask = 0x1f, /* pio0-4 */
287#if 0 370 .mwdma_mask = 0x07, /* mwdma0-2 */
288 .mwdma_mask = 0x06, /* mwdma1-2 */ 371 .udma_mask = 0x7f, /* udma0-6 */
289#else 372 .port_ops = &piix_sata_ops,
290 .mwdma_mask = 0x00, /* mwdma broken */ 373 .private_data = &ich5_map_db,
291#endif
292 .udma_mask = ATA_UDMA_MASK_40C,
293 .port_ops = &piix_pata_ops,
294 }, 374 },
295 375
296 /* ich6_sata */ 376 /* ich6_sata */
297 { 377 {
298 .sht = &piix_sht, 378 .sht = &piix_sht,
299 .host_flags = ATA_FLAG_SATA | ATA_FLAG_SRST | 379 .host_flags = ATA_FLAG_SATA | PIIX_FLAG_COMBINED_ICH6 |
300 PIIX_FLAG_COMBINED | PIIX_FLAG_CHECKINTR | 380 PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR,
301 ATA_FLAG_SLAVE_POSS,
302 .pio_mask = 0x1f, /* pio0-4 */ 381 .pio_mask = 0x1f, /* pio0-4 */
303 .mwdma_mask = 0x07, /* mwdma0-2 */ 382 .mwdma_mask = 0x07, /* mwdma0-2 */
304 .udma_mask = 0x7f, /* udma0-6 */ 383 .udma_mask = 0x7f, /* udma0-6 */
305 .port_ops = &piix_sata_ops, 384 .port_ops = &piix_sata_ops,
385 .private_data = &ich6_map_db,
306 }, 386 },
307 387
308 /* ich6_sata_ahci */ 388 /* ich6_sata_ahci */
309 { 389 {
310 .sht = &piix_sht, 390 .sht = &piix_sht,
311 .host_flags = ATA_FLAG_SATA | ATA_FLAG_SRST | 391 .host_flags = ATA_FLAG_SATA | PIIX_FLAG_COMBINED_ICH6 |
312 PIIX_FLAG_COMBINED | PIIX_FLAG_CHECKINTR | 392 PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR |
313 ATA_FLAG_SLAVE_POSS | PIIX_FLAG_AHCI, 393 PIIX_FLAG_AHCI,
394 .pio_mask = 0x1f, /* pio0-4 */
395 .mwdma_mask = 0x07, /* mwdma0-2 */
396 .udma_mask = 0x7f, /* udma0-6 */
397 .port_ops = &piix_sata_ops,
398 .private_data = &ich6_map_db,
399 },
400
401 /* ich6m_sata_ahci */
402 {
403 .sht = &piix_sht,
404 .host_flags = ATA_FLAG_SATA | PIIX_FLAG_COMBINED_ICH6 |
405 PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR |
406 PIIX_FLAG_AHCI,
314 .pio_mask = 0x1f, /* pio0-4 */ 407 .pio_mask = 0x1f, /* pio0-4 */
315 .mwdma_mask = 0x07, /* mwdma0-2 */ 408 .mwdma_mask = 0x07, /* mwdma0-2 */
316 .udma_mask = 0x7f, /* udma0-6 */ 409 .udma_mask = 0x7f, /* udma0-6 */
317 .port_ops = &piix_sata_ops, 410 .port_ops = &piix_sata_ops,
411 .private_data = &ich6m_map_db,
318 }, 412 },
319}; 413};
320 414
@@ -363,102 +457,123 @@ cbl40:
363} 457}
364 458
365/** 459/**
366 * piix_pata_phy_reset - Probe specified port on PATA host controller 460 * piix_pata_probeinit - probeinit for PATA host controller
367 * @ap: Port to probe 461 * @ap: Target port
368 * 462 *
369 * Probe PATA phy. 463 * Probeinit including cable detection.
370 * 464 *
371 * LOCKING: 465 * LOCKING:
372 * None (inherited from caller). 466 * None (inherited from caller).
373 */ 467 */
468static void piix_pata_probeinit(struct ata_port *ap)
469{
470 piix_pata_cbl_detect(ap);
471 ata_std_probeinit(ap);
472}
374 473
375static void piix_pata_phy_reset(struct ata_port *ap) 474/**
475 * piix_pata_probe_reset - Perform reset on PATA port and classify
476 * @ap: Port to reset
477 * @classes: Resulting classes of attached devices
478 *
479 * Reset PATA phy and classify attached devices.
480 *
481 * LOCKING:
482 * None (inherited from caller).
483 */
484static int piix_pata_probe_reset(struct ata_port *ap, unsigned int *classes)
376{ 485{
377 struct pci_dev *pdev = to_pci_dev(ap->host_set->dev); 486 struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
378 487
379 if (!pci_test_config_bits(pdev, &piix_enable_bits[ap->hard_port_no])) { 488 if (!pci_test_config_bits(pdev, &piix_enable_bits[ap->hard_port_no])) {
380 ata_port_disable(ap);
381 printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id); 489 printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id);
382 return; 490 return 0;
383 } 491 }
384 492
385 piix_pata_cbl_detect(ap); 493 return ata_drive_probe_reset(ap, piix_pata_probeinit,
386 494 ata_std_softreset, NULL,
387 ata_port_probe(ap); 495 ata_std_postreset, classes);
388
389 ata_bus_reset(ap);
390} 496}
391 497
392/** 498/**
393 * piix_sata_probe - Probe PCI device for present SATA devices 499 * piix_sata_probe - Probe PCI device for present SATA devices
394 * @ap: Port associated with the PCI device we wish to probe 500 * @ap: Port associated with the PCI device we wish to probe
395 * 501 *
396 * Reads SATA PCI device's PCI config register Port Configuration 502 * Reads and configures SATA PCI device's PCI config register
397 * and Status (PCS) to determine port and device availability. 503 * Port Configuration and Status (PCS) to determine port and
504 * device availability.
398 * 505 *
399 * LOCKING: 506 * LOCKING:
400 * None (inherited from caller). 507 * None (inherited from caller).
401 * 508 *
402 * RETURNS: 509 * RETURNS:
403 * Non-zero if port is enabled, it may or may not have a device 510 * Mask of avaliable devices on the port.
404 * attached in that case (PRESENT bit would only be set if BIOS probe
405 * was done). Zero is returned if port is disabled.
406 */ 511 */
407static int piix_sata_probe (struct ata_port *ap) 512static unsigned int piix_sata_probe (struct ata_port *ap)
408{ 513{
409 struct pci_dev *pdev = to_pci_dev(ap->host_set->dev); 514 struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
410 int combined = (ap->flags & ATA_FLAG_SLAVE_POSS); 515 const unsigned int *map = ap->host_set->private_data;
411 int orig_mask, mask, i; 516 int base = 2 * ap->hard_port_no;
517 unsigned int present_mask = 0;
518 int port, i;
412 u8 pcs; 519 u8 pcs;
413 520
414 mask = (PIIX_PORT_PRESENT << ap->hard_port_no) |
415 (PIIX_PORT_ENABLED << ap->hard_port_no);
416
417 pci_read_config_byte(pdev, ICH5_PCS, &pcs); 521 pci_read_config_byte(pdev, ICH5_PCS, &pcs);
418 orig_mask = (int) pcs & 0xff; 522 DPRINTK("ata%u: ENTER, pcs=0x%x base=%d\n", ap->id, pcs, base);
419
420 /* TODO: this is vaguely wrong for ICH6 combined mode,
421 * where only two of the four SATA ports are mapped
422 * onto a single ATA channel. It is also vaguely inaccurate
423 * for ICH5, which has only two ports. However, this is ok,
424 * as further device presence detection code will handle
425 * any false positives produced here.
426 */
427 523
428 for (i = 0; i < 4; i++) { 524 /* enable all ports on this ap and wait for them to settle */
429 mask = (PIIX_PORT_ENABLED << i); 525 for (i = 0; i < 2; i++) {
526 port = map[base + i];
527 if (port >= 0)
528 pcs |= 1 << port;
529 }
530
531 pci_write_config_byte(pdev, ICH5_PCS, pcs);
532 msleep(100);
430 533
431 if ((orig_mask & mask) == mask) 534 /* let's see which devices are present */
432 if (combined || (i == ap->hard_port_no)) 535 pci_read_config_byte(pdev, ICH5_PCS, &pcs);
433 return 1; 536
537 for (i = 0; i < 2; i++) {
538 port = map[base + i];
539 if (port < 0)
540 continue;
541 if (ap->flags & PIIX_FLAG_IGNORE_PCS || pcs & 1 << (4 + port))
542 present_mask |= 1 << i;
543 else
544 pcs &= ~(1 << port);
434 } 545 }
435 546
436 return 0; 547 /* disable offline ports on non-AHCI controllers */
548 if (!(ap->flags & PIIX_FLAG_AHCI))
549 pci_write_config_byte(pdev, ICH5_PCS, pcs);
550
551 DPRINTK("ata%u: LEAVE, pcs=0x%x present_mask=0x%x\n",
552 ap->id, pcs, present_mask);
553
554 return present_mask;
437} 555}
438 556
439/** 557/**
440 * piix_sata_phy_reset - Probe specified port on SATA host controller 558 * piix_sata_probe_reset - Perform reset on SATA port and classify
441 * @ap: Port to probe 559 * @ap: Port to reset
560 * @classes: Resulting classes of attached devices
442 * 561 *
443 * Probe SATA phy. 562 * Reset SATA phy and classify attached devices.
444 * 563 *
445 * LOCKING: 564 * LOCKING:
446 * None (inherited from caller). 565 * None (inherited from caller).
447 */ 566 */
448 567static int piix_sata_probe_reset(struct ata_port *ap, unsigned int *classes)
449static void piix_sata_phy_reset(struct ata_port *ap)
450{ 568{
451 if (!piix_sata_probe(ap)) { 569 if (!piix_sata_probe(ap)) {
452 ata_port_disable(ap);
453 printk(KERN_INFO "ata%u: SATA port has no device.\n", ap->id); 570 printk(KERN_INFO "ata%u: SATA port has no device.\n", ap->id);
454 return; 571 return 0;
455 } 572 }
456 573
457 ap->cbl = ATA_CBL_SATA; 574 return ata_drive_probe_reset(ap, ata_std_probeinit,
458 575 ata_std_softreset, NULL,
459 ata_port_probe(ap); 576 ata_std_postreset, classes);
460
461 ata_bus_reset(ap);
462} 577}
463 578
464/** 579/**
@@ -627,6 +742,7 @@ static int piix_disable_ahci(struct pci_dev *pdev)
627 742
628/** 743/**
629 * piix_check_450nx_errata - Check for problem 450NX setup 744 * piix_check_450nx_errata - Check for problem 450NX setup
745 * @ata_dev: the PCI device to check
630 * 746 *
631 * Check for the present of 450NX errata #19 and errata #25. If 747 * Check for the present of 450NX errata #19 and errata #25. If
632 * they are found return an error code so we can turn off DMA 748 * they are found return an error code so we can turn off DMA
@@ -659,6 +775,54 @@ static int __devinit piix_check_450nx_errata(struct pci_dev *ata_dev)
659 return no_piix_dma; 775 return no_piix_dma;
660} 776}
661 777
778static void __devinit piix_init_sata_map(struct pci_dev *pdev,
779 struct ata_port_info *pinfo)
780{
781 struct piix_map_db *map_db = pinfo[0].private_data;
782 const unsigned int *map;
783 int i, invalid_map = 0;
784 u8 map_value;
785
786 pci_read_config_byte(pdev, ICH5_PMR, &map_value);
787
788 map = map_db->map[map_value & map_db->mask];
789
790 dev_printk(KERN_INFO, &pdev->dev, "MAP [");
791 for (i = 0; i < 4; i++) {
792 switch (map[i]) {
793 case RV:
794 invalid_map = 1;
795 printk(" XX");
796 break;
797
798 case NA:
799 printk(" --");
800 break;
801
802 case IDE:
803 WARN_ON((i & 1) || map[i + 1] != IDE);
804 pinfo[i / 2] = piix_port_info[ich5_pata];
805 i++;
806 printk(" IDE IDE");
807 break;
808
809 default:
810 printk(" P%d", map[i]);
811 if (i & 1)
812 pinfo[i / 2].host_flags |= ATA_FLAG_SLAVE_POSS;
813 break;
814 }
815 }
816 printk(" ]\n");
817
818 if (invalid_map)
819 dev_printk(KERN_ERR, &pdev->dev,
820 "invalid MAP value %u\n", map_value);
821
822 pinfo[0].private_data = (void *)map;
823 pinfo[1].private_data = (void *)map;
824}
825
662/** 826/**
663 * piix_init_one - Register PIIX ATA PCI device with kernel services 827 * piix_init_one - Register PIIX ATA PCI device with kernel services
664 * @pdev: PCI device to register 828 * @pdev: PCI device to register
@@ -677,9 +841,9 @@ static int __devinit piix_check_450nx_errata(struct pci_dev *ata_dev)
677static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) 841static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
678{ 842{
679 static int printed_version; 843 static int printed_version;
680 struct ata_port_info *port_info[2]; 844 struct ata_port_info port_info[2];
681 unsigned int combined = 0; 845 struct ata_port_info *ppinfo[2] = { &port_info[0], &port_info[1] };
682 unsigned int pata_chan = 0, sata_chan = 0; 846 unsigned long host_flags;
683 847
684 if (!printed_version++) 848 if (!printed_version++)
685 dev_printk(KERN_DEBUG, &pdev->dev, 849 dev_printk(KERN_DEBUG, &pdev->dev,
@@ -689,10 +853,12 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
689 if (!in_module_init) 853 if (!in_module_init)
690 return -ENODEV; 854 return -ENODEV;
691 855
692 port_info[0] = &piix_port_info[ent->driver_data]; 856 port_info[0] = piix_port_info[ent->driver_data];
693 port_info[1] = &piix_port_info[ent->driver_data]; 857 port_info[1] = piix_port_info[ent->driver_data];
858
859 host_flags = port_info[0].host_flags;
694 860
695 if (port_info[0]->host_flags & PIIX_FLAG_AHCI) { 861 if (host_flags & PIIX_FLAG_AHCI) {
696 u8 tmp; 862 u8 tmp;
697 pci_read_config_byte(pdev, PIIX_SCC, &tmp); 863 pci_read_config_byte(pdev, PIIX_SCC, &tmp);
698 if (tmp == PIIX_AHCI_DEVICE) { 864 if (tmp == PIIX_AHCI_DEVICE) {
@@ -702,18 +868,9 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
702 } 868 }
703 } 869 }
704 870
705 if (port_info[0]->host_flags & PIIX_FLAG_COMBINED) { 871 /* Initialize SATA map */
706 u8 tmp; 872 if (host_flags & ATA_FLAG_SATA)
707 pci_read_config_byte(pdev, ICH5_PMR, &tmp); 873 piix_init_sata_map(pdev, port_info);
708
709 if (tmp & PIIX_COMB) {
710 combined = 1;
711 if (tmp & PIIX_COMB_PATA_P0)
712 sata_chan = 1;
713 else
714 pata_chan = 1;
715 }
716 }
717 874
718 /* On ICH5, some BIOSen disable the interrupt using the 875 /* On ICH5, some BIOSen disable the interrupt using the
719 * PCI_COMMAND_INTX_DISABLE bit added in PCI 2.3. 876 * PCI_COMMAND_INTX_DISABLE bit added in PCI 2.3.
@@ -721,28 +878,19 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
721 * MSI is disabled (and it is disabled, as we don't use 878 * MSI is disabled (and it is disabled, as we don't use
722 * message-signalled interrupts currently). 879 * message-signalled interrupts currently).
723 */ 880 */
724 if (port_info[0]->host_flags & PIIX_FLAG_CHECKINTR) 881 if (host_flags & PIIX_FLAG_CHECKINTR)
725 pci_intx(pdev, 1); 882 pci_intx(pdev, 1);
726 883
727 if (combined) {
728 port_info[sata_chan] = &piix_port_info[ent->driver_data];
729 port_info[sata_chan]->host_flags |= ATA_FLAG_SLAVE_POSS;
730 port_info[pata_chan] = &piix_port_info[ich5_pata];
731
732 dev_printk(KERN_WARNING, &pdev->dev,
733 "combined mode detected (p=%u, s=%u)\n",
734 pata_chan, sata_chan);
735 }
736 if (piix_check_450nx_errata(pdev)) { 884 if (piix_check_450nx_errata(pdev)) {
737 /* This writes into the master table but it does not 885 /* This writes into the master table but it does not
738 really matter for this errata as we will apply it to 886 really matter for this errata as we will apply it to
739 all the PIIX devices on the board */ 887 all the PIIX devices on the board */
740 port_info[0]->mwdma_mask = 0; 888 port_info[0].mwdma_mask = 0;
741 port_info[0]->udma_mask = 0; 889 port_info[0].udma_mask = 0;
742 port_info[1]->mwdma_mask = 0; 890 port_info[1].mwdma_mask = 0;
743 port_info[1]->udma_mask = 0; 891 port_info[1].udma_mask = 0;
744 } 892 }
745 return ata_pci_init_one(pdev, port_info, 2); 893 return ata_pci_init_one(pdev, ppinfo, 2);
746} 894}
747 895
748static int __init piix_init(void) 896static int __init piix_init(void)
diff --git a/drivers/scsi/libata-bmdma.c b/drivers/scsi/libata-bmdma.c
new file mode 100644
index 000000000000..a93336adcd23
--- /dev/null
+++ b/drivers/scsi/libata-bmdma.c
@@ -0,0 +1,703 @@
1/*
2 * libata-bmdma.c - helper library for PCI IDE BMDMA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2006 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2006 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
33 */
34
35#include <linux/config.h>
36#include <linux/kernel.h>
37#include <linux/pci.h>
38#include <linux/libata.h>
39
40#include "libata.h"
41
42/**
43 * ata_tf_load_pio - send taskfile registers to host controller
44 * @ap: Port to which output is sent
45 * @tf: ATA taskfile register set
46 *
47 * Outputs ATA taskfile to standard ATA host controller.
48 *
49 * LOCKING:
50 * Inherited from caller.
51 */
52
53static void ata_tf_load_pio(struct ata_port *ap, const struct ata_taskfile *tf)
54{
55 struct ata_ioports *ioaddr = &ap->ioaddr;
56 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
57
58 if (tf->ctl != ap->last_ctl) {
59 outb(tf->ctl, ioaddr->ctl_addr);
60 ap->last_ctl = tf->ctl;
61 ata_wait_idle(ap);
62 }
63
64 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
65 outb(tf->hob_feature, ioaddr->feature_addr);
66 outb(tf->hob_nsect, ioaddr->nsect_addr);
67 outb(tf->hob_lbal, ioaddr->lbal_addr);
68 outb(tf->hob_lbam, ioaddr->lbam_addr);
69 outb(tf->hob_lbah, ioaddr->lbah_addr);
70 VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
71 tf->hob_feature,
72 tf->hob_nsect,
73 tf->hob_lbal,
74 tf->hob_lbam,
75 tf->hob_lbah);
76 }
77
78 if (is_addr) {
79 outb(tf->feature, ioaddr->feature_addr);
80 outb(tf->nsect, ioaddr->nsect_addr);
81 outb(tf->lbal, ioaddr->lbal_addr);
82 outb(tf->lbam, ioaddr->lbam_addr);
83 outb(tf->lbah, ioaddr->lbah_addr);
84 VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
85 tf->feature,
86 tf->nsect,
87 tf->lbal,
88 tf->lbam,
89 tf->lbah);
90 }
91
92 if (tf->flags & ATA_TFLAG_DEVICE) {
93 outb(tf->device, ioaddr->device_addr);
94 VPRINTK("device 0x%X\n", tf->device);
95 }
96
97 ata_wait_idle(ap);
98}
99
100/**
101 * ata_tf_load_mmio - send taskfile registers to host controller
102 * @ap: Port to which output is sent
103 * @tf: ATA taskfile register set
104 *
105 * Outputs ATA taskfile to standard ATA host controller using MMIO.
106 *
107 * LOCKING:
108 * Inherited from caller.
109 */
110
111static void ata_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
112{
113 struct ata_ioports *ioaddr = &ap->ioaddr;
114 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
115
116 if (tf->ctl != ap->last_ctl) {
117 writeb(tf->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
118 ap->last_ctl = tf->ctl;
119 ata_wait_idle(ap);
120 }
121
122 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
123 writeb(tf->hob_feature, (void __iomem *) ioaddr->feature_addr);
124 writeb(tf->hob_nsect, (void __iomem *) ioaddr->nsect_addr);
125 writeb(tf->hob_lbal, (void __iomem *) ioaddr->lbal_addr);
126 writeb(tf->hob_lbam, (void __iomem *) ioaddr->lbam_addr);
127 writeb(tf->hob_lbah, (void __iomem *) ioaddr->lbah_addr);
128 VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
129 tf->hob_feature,
130 tf->hob_nsect,
131 tf->hob_lbal,
132 tf->hob_lbam,
133 tf->hob_lbah);
134 }
135
136 if (is_addr) {
137 writeb(tf->feature, (void __iomem *) ioaddr->feature_addr);
138 writeb(tf->nsect, (void __iomem *) ioaddr->nsect_addr);
139 writeb(tf->lbal, (void __iomem *) ioaddr->lbal_addr);
140 writeb(tf->lbam, (void __iomem *) ioaddr->lbam_addr);
141 writeb(tf->lbah, (void __iomem *) ioaddr->lbah_addr);
142 VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
143 tf->feature,
144 tf->nsect,
145 tf->lbal,
146 tf->lbam,
147 tf->lbah);
148 }
149
150 if (tf->flags & ATA_TFLAG_DEVICE) {
151 writeb(tf->device, (void __iomem *) ioaddr->device_addr);
152 VPRINTK("device 0x%X\n", tf->device);
153 }
154
155 ata_wait_idle(ap);
156}
157
158
159/**
160 * ata_tf_load - send taskfile registers to host controller
161 * @ap: Port to which output is sent
162 * @tf: ATA taskfile register set
163 *
164 * Outputs ATA taskfile to standard ATA host controller using MMIO
165 * or PIO as indicated by the ATA_FLAG_MMIO flag.
166 * Writes the control, feature, nsect, lbal, lbam, and lbah registers.
167 * Optionally (ATA_TFLAG_LBA48) writes hob_feature, hob_nsect,
168 * hob_lbal, hob_lbam, and hob_lbah.
169 *
170 * This function waits for idle (!BUSY and !DRQ) after writing
171 * registers. If the control register has a new value, this
172 * function also waits for idle after writing control and before
173 * writing the remaining registers.
174 *
175 * May be used as the tf_load() entry in ata_port_operations.
176 *
177 * LOCKING:
178 * Inherited from caller.
179 */
180void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
181{
182 if (ap->flags & ATA_FLAG_MMIO)
183 ata_tf_load_mmio(ap, tf);
184 else
185 ata_tf_load_pio(ap, tf);
186}
187
188/**
189 * ata_exec_command_pio - issue ATA command to host controller
190 * @ap: port to which command is being issued
191 * @tf: ATA taskfile register set
192 *
193 * Issues PIO write to ATA command register, with proper
194 * synchronization with interrupt handler / other threads.
195 *
196 * LOCKING:
197 * spin_lock_irqsave(host_set lock)
198 */
199
200static void ata_exec_command_pio(struct ata_port *ap, const struct ata_taskfile *tf)
201{
202 DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
203
204 outb(tf->command, ap->ioaddr.command_addr);
205 ata_pause(ap);
206}
207
208
209/**
210 * ata_exec_command_mmio - issue ATA command to host controller
211 * @ap: port to which command is being issued
212 * @tf: ATA taskfile register set
213 *
214 * Issues MMIO write to ATA command register, with proper
215 * synchronization with interrupt handler / other threads.
216 *
217 * LOCKING:
218 * spin_lock_irqsave(host_set lock)
219 */
220
221static void ata_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
222{
223 DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
224
225 writeb(tf->command, (void __iomem *) ap->ioaddr.command_addr);
226 ata_pause(ap);
227}
228
229
230/**
231 * ata_exec_command - issue ATA command to host controller
232 * @ap: port to which command is being issued
233 * @tf: ATA taskfile register set
234 *
235 * Issues PIO/MMIO write to ATA command register, with proper
236 * synchronization with interrupt handler / other threads.
237 *
238 * LOCKING:
239 * spin_lock_irqsave(host_set lock)
240 */
241void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
242{
243 if (ap->flags & ATA_FLAG_MMIO)
244 ata_exec_command_mmio(ap, tf);
245 else
246 ata_exec_command_pio(ap, tf);
247}
248
249/**
250 * ata_tf_read_pio - input device's ATA taskfile shadow registers
251 * @ap: Port from which input is read
252 * @tf: ATA taskfile register set for storing input
253 *
254 * Reads ATA taskfile registers for currently-selected device
255 * into @tf.
256 *
257 * LOCKING:
258 * Inherited from caller.
259 */
260
261static void ata_tf_read_pio(struct ata_port *ap, struct ata_taskfile *tf)
262{
263 struct ata_ioports *ioaddr = &ap->ioaddr;
264
265 tf->command = ata_check_status(ap);
266 tf->feature = inb(ioaddr->error_addr);
267 tf->nsect = inb(ioaddr->nsect_addr);
268 tf->lbal = inb(ioaddr->lbal_addr);
269 tf->lbam = inb(ioaddr->lbam_addr);
270 tf->lbah = inb(ioaddr->lbah_addr);
271 tf->device = inb(ioaddr->device_addr);
272
273 if (tf->flags & ATA_TFLAG_LBA48) {
274 outb(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
275 tf->hob_feature = inb(ioaddr->error_addr);
276 tf->hob_nsect = inb(ioaddr->nsect_addr);
277 tf->hob_lbal = inb(ioaddr->lbal_addr);
278 tf->hob_lbam = inb(ioaddr->lbam_addr);
279 tf->hob_lbah = inb(ioaddr->lbah_addr);
280 }
281}
282
283/**
284 * ata_tf_read_mmio - input device's ATA taskfile shadow registers
285 * @ap: Port from which input is read
286 * @tf: ATA taskfile register set for storing input
287 *
288 * Reads ATA taskfile registers for currently-selected device
289 * into @tf via MMIO.
290 *
291 * LOCKING:
292 * Inherited from caller.
293 */
294
295static void ata_tf_read_mmio(struct ata_port *ap, struct ata_taskfile *tf)
296{
297 struct ata_ioports *ioaddr = &ap->ioaddr;
298
299 tf->command = ata_check_status(ap);
300 tf->feature = readb((void __iomem *)ioaddr->error_addr);
301 tf->nsect = readb((void __iomem *)ioaddr->nsect_addr);
302 tf->lbal = readb((void __iomem *)ioaddr->lbal_addr);
303 tf->lbam = readb((void __iomem *)ioaddr->lbam_addr);
304 tf->lbah = readb((void __iomem *)ioaddr->lbah_addr);
305 tf->device = readb((void __iomem *)ioaddr->device_addr);
306
307 if (tf->flags & ATA_TFLAG_LBA48) {
308 writeb(tf->ctl | ATA_HOB, (void __iomem *) ap->ioaddr.ctl_addr);
309 tf->hob_feature = readb((void __iomem *)ioaddr->error_addr);
310 tf->hob_nsect = readb((void __iomem *)ioaddr->nsect_addr);
311 tf->hob_lbal = readb((void __iomem *)ioaddr->lbal_addr);
312 tf->hob_lbam = readb((void __iomem *)ioaddr->lbam_addr);
313 tf->hob_lbah = readb((void __iomem *)ioaddr->lbah_addr);
314 }
315}
316
317
318/**
319 * ata_tf_read - input device's ATA taskfile shadow registers
320 * @ap: Port from which input is read
321 * @tf: ATA taskfile register set for storing input
322 *
323 * Reads ATA taskfile registers for currently-selected device
324 * into @tf.
325 *
326 * Reads nsect, lbal, lbam, lbah, and device. If ATA_TFLAG_LBA48
327 * is set, also reads the hob registers.
328 *
329 * May be used as the tf_read() entry in ata_port_operations.
330 *
331 * LOCKING:
332 * Inherited from caller.
333 */
334void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
335{
336 if (ap->flags & ATA_FLAG_MMIO)
337 ata_tf_read_mmio(ap, tf);
338 else
339 ata_tf_read_pio(ap, tf);
340}
341
342/**
343 * ata_check_status_pio - Read device status reg & clear interrupt
344 * @ap: port where the device is
345 *
346 * Reads ATA taskfile status register for currently-selected device
347 * and return its value. This also clears pending interrupts
348 * from this device
349 *
350 * LOCKING:
351 * Inherited from caller.
352 */
353static u8 ata_check_status_pio(struct ata_port *ap)
354{
355 return inb(ap->ioaddr.status_addr);
356}
357
358/**
359 * ata_check_status_mmio - Read device status reg & clear interrupt
360 * @ap: port where the device is
361 *
362 * Reads ATA taskfile status register for currently-selected device
363 * via MMIO and return its value. This also clears pending interrupts
364 * from this device
365 *
366 * LOCKING:
367 * Inherited from caller.
368 */
369static u8 ata_check_status_mmio(struct ata_port *ap)
370{
371 return readb((void __iomem *) ap->ioaddr.status_addr);
372}
373
374
375/**
376 * ata_check_status - Read device status reg & clear interrupt
377 * @ap: port where the device is
378 *
379 * Reads ATA taskfile status register for currently-selected device
380 * and return its value. This also clears pending interrupts
381 * from this device
382 *
383 * May be used as the check_status() entry in ata_port_operations.
384 *
385 * LOCKING:
386 * Inherited from caller.
387 */
388u8 ata_check_status(struct ata_port *ap)
389{
390 if (ap->flags & ATA_FLAG_MMIO)
391 return ata_check_status_mmio(ap);
392 return ata_check_status_pio(ap);
393}
394
395
396/**
397 * ata_altstatus - Read device alternate status reg
398 * @ap: port where the device is
399 *
400 * Reads ATA taskfile alternate status register for
401 * currently-selected device and return its value.
402 *
403 * Note: may NOT be used as the check_altstatus() entry in
404 * ata_port_operations.
405 *
406 * LOCKING:
407 * Inherited from caller.
408 */
409u8 ata_altstatus(struct ata_port *ap)
410{
411 if (ap->ops->check_altstatus)
412 return ap->ops->check_altstatus(ap);
413
414 if (ap->flags & ATA_FLAG_MMIO)
415 return readb((void __iomem *)ap->ioaddr.altstatus_addr);
416 return inb(ap->ioaddr.altstatus_addr);
417}
418
419#ifdef CONFIG_PCI
420static struct ata_probe_ent *
421ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
422{
423 struct ata_probe_ent *probe_ent;
424
425 probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
426 if (!probe_ent) {
427 printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
428 kobject_name(&(dev->kobj)));
429 return NULL;
430 }
431
432 INIT_LIST_HEAD(&probe_ent->node);
433 probe_ent->dev = dev;
434
435 probe_ent->sht = port->sht;
436 probe_ent->host_flags = port->host_flags;
437 probe_ent->pio_mask = port->pio_mask;
438 probe_ent->mwdma_mask = port->mwdma_mask;
439 probe_ent->udma_mask = port->udma_mask;
440 probe_ent->port_ops = port->port_ops;
441
442 return probe_ent;
443}
444
445
446/**
447 * ata_pci_init_native_mode - Initialize native-mode driver
448 * @pdev: pci device to be initialized
449 * @port: array[2] of pointers to port info structures.
450 * @ports: bitmap of ports present
451 *
452 * Utility function which allocates and initializes an
453 * ata_probe_ent structure for a standard dual-port
454 * PIO-based IDE controller. The returned ata_probe_ent
455 * structure can be passed to ata_device_add(). The returned
456 * ata_probe_ent structure should then be freed with kfree().
457 *
458 * The caller need only pass the address of the primary port, the
459 * secondary will be deduced automatically. If the device has non
460 * standard secondary port mappings this function can be called twice,
461 * once for each interface.
462 */
463
464struct ata_probe_ent *
465ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int ports)
466{
467 struct ata_probe_ent *probe_ent =
468 ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
469 int p = 0;
470
471 if (!probe_ent)
472 return NULL;
473
474 probe_ent->irq = pdev->irq;
475 probe_ent->irq_flags = SA_SHIRQ;
476 probe_ent->private_data = port[0]->private_data;
477
478 if (ports & ATA_PORT_PRIMARY) {
479 probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 0);
480 probe_ent->port[p].altstatus_addr =
481 probe_ent->port[p].ctl_addr =
482 pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS;
483 probe_ent->port[p].bmdma_addr = pci_resource_start(pdev, 4);
484 ata_std_ports(&probe_ent->port[p]);
485 p++;
486 }
487
488 if (ports & ATA_PORT_SECONDARY) {
489 probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 2);
490 probe_ent->port[p].altstatus_addr =
491 probe_ent->port[p].ctl_addr =
492 pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS;
493 probe_ent->port[p].bmdma_addr = pci_resource_start(pdev, 4) + 8;
494 ata_std_ports(&probe_ent->port[p]);
495 p++;
496 }
497
498 probe_ent->n_ports = p;
499 return probe_ent;
500}
501
502
503static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev,
504 struct ata_port_info *port, int port_num)
505{
506 struct ata_probe_ent *probe_ent;
507
508 probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port);
509 if (!probe_ent)
510 return NULL;
511
512 probe_ent->legacy_mode = 1;
513 probe_ent->n_ports = 1;
514 probe_ent->hard_port_no = port_num;
515 probe_ent->private_data = port->private_data;
516
517 switch(port_num)
518 {
519 case 0:
520 probe_ent->irq = 14;
521 probe_ent->port[0].cmd_addr = 0x1f0;
522 probe_ent->port[0].altstatus_addr =
523 probe_ent->port[0].ctl_addr = 0x3f6;
524 break;
525 case 1:
526 probe_ent->irq = 15;
527 probe_ent->port[0].cmd_addr = 0x170;
528 probe_ent->port[0].altstatus_addr =
529 probe_ent->port[0].ctl_addr = 0x376;
530 break;
531 }
532
533 probe_ent->port[0].bmdma_addr =
534 pci_resource_start(pdev, 4) + 8 * port_num;
535 ata_std_ports(&probe_ent->port[0]);
536
537 return probe_ent;
538}
539
540
541/**
542 * ata_pci_init_one - Initialize/register PCI IDE host controller
543 * @pdev: Controller to be initialized
544 * @port_info: Information from low-level host driver
545 * @n_ports: Number of ports attached to host controller
546 *
547 * This is a helper function which can be called from a driver's
548 * xxx_init_one() probe function if the hardware uses traditional
549 * IDE taskfile registers.
550 *
551 * This function calls pci_enable_device(), reserves its register
552 * regions, sets the dma mask, enables bus master mode, and calls
553 * ata_device_add()
554 *
555 * LOCKING:
556 * Inherited from PCI layer (may sleep).
557 *
558 * RETURNS:
559 * Zero on success, negative on errno-based value on error.
560 */
561
562int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
563 unsigned int n_ports)
564{
565 struct ata_probe_ent *probe_ent = NULL, *probe_ent2 = NULL;
566 struct ata_port_info *port[2];
567 u8 tmp8, mask;
568 unsigned int legacy_mode = 0;
569 int disable_dev_on_err = 1;
570 int rc;
571
572 DPRINTK("ENTER\n");
573
574 port[0] = port_info[0];
575 if (n_ports > 1)
576 port[1] = port_info[1];
577 else
578 port[1] = port[0];
579
580 if ((port[0]->host_flags & ATA_FLAG_NO_LEGACY) == 0
581 && (pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
582 /* TODO: What if one channel is in native mode ... */
583 pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
584 mask = (1 << 2) | (1 << 0);
585 if ((tmp8 & mask) != mask)
586 legacy_mode = (1 << 3);
587 }
588
589 /* FIXME... */
590 if ((!legacy_mode) && (n_ports > 2)) {
591 printk(KERN_ERR "ata: BUG: native mode, n_ports > 2\n");
592 n_ports = 2;
593 /* For now */
594 }
595
596 /* FIXME: Really for ATA it isn't safe because the device may be
597 multi-purpose and we want to leave it alone if it was already
598 enabled. Secondly for shared use as Arjan says we want refcounting
599
600 Checking dev->is_enabled is insufficient as this is not set at
601 boot for the primary video which is BIOS enabled
602 */
603
604 rc = pci_enable_device(pdev);
605 if (rc)
606 return rc;
607
608 rc = pci_request_regions(pdev, DRV_NAME);
609 if (rc) {
610 disable_dev_on_err = 0;
611 goto err_out;
612 }
613
614 /* FIXME: Should use platform specific mappers for legacy port ranges */
615 if (legacy_mode) {
616 if (!request_region(0x1f0, 8, "libata")) {
617 struct resource *conflict, res;
618 res.start = 0x1f0;
619 res.end = 0x1f0 + 8 - 1;
620 conflict = ____request_resource(&ioport_resource, &res);
621 if (!strcmp(conflict->name, "libata"))
622 legacy_mode |= (1 << 0);
623 else {
624 disable_dev_on_err = 0;
625 printk(KERN_WARNING "ata: 0x1f0 IDE port busy\n");
626 }
627 } else
628 legacy_mode |= (1 << 0);
629
630 if (!request_region(0x170, 8, "libata")) {
631 struct resource *conflict, res;
632 res.start = 0x170;
633 res.end = 0x170 + 8 - 1;
634 conflict = ____request_resource(&ioport_resource, &res);
635 if (!strcmp(conflict->name, "libata"))
636 legacy_mode |= (1 << 1);
637 else {
638 disable_dev_on_err = 0;
639 printk(KERN_WARNING "ata: 0x170 IDE port busy\n");
640 }
641 } else
642 legacy_mode |= (1 << 1);
643 }
644
645 /* we have legacy mode, but all ports are unavailable */
646 if (legacy_mode == (1 << 3)) {
647 rc = -EBUSY;
648 goto err_out_regions;
649 }
650
651 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
652 if (rc)
653 goto err_out_regions;
654 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
655 if (rc)
656 goto err_out_regions;
657
658 if (legacy_mode) {
659 if (legacy_mode & (1 << 0))
660 probe_ent = ata_pci_init_legacy_port(pdev, port[0], 0);
661 if (legacy_mode & (1 << 1))
662 probe_ent2 = ata_pci_init_legacy_port(pdev, port[1], 1);
663 } else {
664 if (n_ports == 2)
665 probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
666 else
667 probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY);
668 }
669 if (!probe_ent && !probe_ent2) {
670 rc = -ENOMEM;
671 goto err_out_regions;
672 }
673
674 pci_set_master(pdev);
675
676 /* FIXME: check ata_device_add return */
677 if (legacy_mode) {
678 if (legacy_mode & (1 << 0))
679 ata_device_add(probe_ent);
680 if (legacy_mode & (1 << 1))
681 ata_device_add(probe_ent2);
682 } else
683 ata_device_add(probe_ent);
684
685 kfree(probe_ent);
686 kfree(probe_ent2);
687
688 return 0;
689
690err_out_regions:
691 if (legacy_mode & (1 << 0))
692 release_region(0x1f0, 8);
693 if (legacy_mode & (1 << 1))
694 release_region(0x170, 8);
695 pci_release_regions(pdev);
696err_out:
697 if (disable_dev_on_err)
698 pci_disable_device(pdev);
699 return rc;
700}
701
702#endif /* CONFIG_PCI */
703
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index 4f91b0dc572b..5dbcf0cf4a10 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -61,11 +61,8 @@
61 61
62#include "libata.h" 62#include "libata.h"
63 63
64static unsigned int ata_busy_sleep (struct ata_port *ap, 64static unsigned int ata_dev_init_params(struct ata_port *ap,
65 unsigned long tmout_pat, 65 struct ata_device *dev);
66 unsigned long tmout);
67static void ata_dev_reread_id(struct ata_port *ap, struct ata_device *dev);
68static void ata_dev_init_params(struct ata_port *ap, struct ata_device *dev);
69static void ata_set_mode(struct ata_port *ap); 66static void ata_set_mode(struct ata_port *ap);
70static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev); 67static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev);
71static unsigned int ata_get_mode_mask(const struct ata_port *ap, int shift); 68static unsigned int ata_get_mode_mask(const struct ata_port *ap, int shift);
@@ -73,7 +70,6 @@ static int fgb(u32 bitmap);
73static int ata_choose_xfer_mode(const struct ata_port *ap, 70static int ata_choose_xfer_mode(const struct ata_port *ap,
74 u8 *xfer_mode_out, 71 u8 *xfer_mode_out,
75 unsigned int *xfer_shift_out); 72 unsigned int *xfer_shift_out);
76static void __ata_qc_complete(struct ata_queued_cmd *qc);
77 73
78static unsigned int ata_unique_id = 1; 74static unsigned int ata_unique_id = 1;
79static struct workqueue_struct *ata_wq; 75static struct workqueue_struct *ata_wq;
@@ -91,403 +87,6 @@ MODULE_DESCRIPTION("Library module for ATA devices");
91MODULE_LICENSE("GPL"); 87MODULE_LICENSE("GPL");
92MODULE_VERSION(DRV_VERSION); 88MODULE_VERSION(DRV_VERSION);
93 89
94/**
95 * ata_tf_load_pio - send taskfile registers to host controller
96 * @ap: Port to which output is sent
97 * @tf: ATA taskfile register set
98 *
99 * Outputs ATA taskfile to standard ATA host controller.
100 *
101 * LOCKING:
102 * Inherited from caller.
103 */
104
105static void ata_tf_load_pio(struct ata_port *ap, const struct ata_taskfile *tf)
106{
107 struct ata_ioports *ioaddr = &ap->ioaddr;
108 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
109
110 if (tf->ctl != ap->last_ctl) {
111 outb(tf->ctl, ioaddr->ctl_addr);
112 ap->last_ctl = tf->ctl;
113 ata_wait_idle(ap);
114 }
115
116 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
117 outb(tf->hob_feature, ioaddr->feature_addr);
118 outb(tf->hob_nsect, ioaddr->nsect_addr);
119 outb(tf->hob_lbal, ioaddr->lbal_addr);
120 outb(tf->hob_lbam, ioaddr->lbam_addr);
121 outb(tf->hob_lbah, ioaddr->lbah_addr);
122 VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
123 tf->hob_feature,
124 tf->hob_nsect,
125 tf->hob_lbal,
126 tf->hob_lbam,
127 tf->hob_lbah);
128 }
129
130 if (is_addr) {
131 outb(tf->feature, ioaddr->feature_addr);
132 outb(tf->nsect, ioaddr->nsect_addr);
133 outb(tf->lbal, ioaddr->lbal_addr);
134 outb(tf->lbam, ioaddr->lbam_addr);
135 outb(tf->lbah, ioaddr->lbah_addr);
136 VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
137 tf->feature,
138 tf->nsect,
139 tf->lbal,
140 tf->lbam,
141 tf->lbah);
142 }
143
144 if (tf->flags & ATA_TFLAG_DEVICE) {
145 outb(tf->device, ioaddr->device_addr);
146 VPRINTK("device 0x%X\n", tf->device);
147 }
148
149 ata_wait_idle(ap);
150}
151
152/**
153 * ata_tf_load_mmio - send taskfile registers to host controller
154 * @ap: Port to which output is sent
155 * @tf: ATA taskfile register set
156 *
157 * Outputs ATA taskfile to standard ATA host controller using MMIO.
158 *
159 * LOCKING:
160 * Inherited from caller.
161 */
162
163static void ata_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
164{
165 struct ata_ioports *ioaddr = &ap->ioaddr;
166 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
167
168 if (tf->ctl != ap->last_ctl) {
169 writeb(tf->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
170 ap->last_ctl = tf->ctl;
171 ata_wait_idle(ap);
172 }
173
174 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
175 writeb(tf->hob_feature, (void __iomem *) ioaddr->feature_addr);
176 writeb(tf->hob_nsect, (void __iomem *) ioaddr->nsect_addr);
177 writeb(tf->hob_lbal, (void __iomem *) ioaddr->lbal_addr);
178 writeb(tf->hob_lbam, (void __iomem *) ioaddr->lbam_addr);
179 writeb(tf->hob_lbah, (void __iomem *) ioaddr->lbah_addr);
180 VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
181 tf->hob_feature,
182 tf->hob_nsect,
183 tf->hob_lbal,
184 tf->hob_lbam,
185 tf->hob_lbah);
186 }
187
188 if (is_addr) {
189 writeb(tf->feature, (void __iomem *) ioaddr->feature_addr);
190 writeb(tf->nsect, (void __iomem *) ioaddr->nsect_addr);
191 writeb(tf->lbal, (void __iomem *) ioaddr->lbal_addr);
192 writeb(tf->lbam, (void __iomem *) ioaddr->lbam_addr);
193 writeb(tf->lbah, (void __iomem *) ioaddr->lbah_addr);
194 VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
195 tf->feature,
196 tf->nsect,
197 tf->lbal,
198 tf->lbam,
199 tf->lbah);
200 }
201
202 if (tf->flags & ATA_TFLAG_DEVICE) {
203 writeb(tf->device, (void __iomem *) ioaddr->device_addr);
204 VPRINTK("device 0x%X\n", tf->device);
205 }
206
207 ata_wait_idle(ap);
208}
209
210
211/**
212 * ata_tf_load - send taskfile registers to host controller
213 * @ap: Port to which output is sent
214 * @tf: ATA taskfile register set
215 *
216 * Outputs ATA taskfile to standard ATA host controller using MMIO
217 * or PIO as indicated by the ATA_FLAG_MMIO flag.
218 * Writes the control, feature, nsect, lbal, lbam, and lbah registers.
219 * Optionally (ATA_TFLAG_LBA48) writes hob_feature, hob_nsect,
220 * hob_lbal, hob_lbam, and hob_lbah.
221 *
222 * This function waits for idle (!BUSY and !DRQ) after writing
223 * registers. If the control register has a new value, this
224 * function also waits for idle after writing control and before
225 * writing the remaining registers.
226 *
227 * May be used as the tf_load() entry in ata_port_operations.
228 *
229 * LOCKING:
230 * Inherited from caller.
231 */
232void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
233{
234 if (ap->flags & ATA_FLAG_MMIO)
235 ata_tf_load_mmio(ap, tf);
236 else
237 ata_tf_load_pio(ap, tf);
238}
239
240/**
241 * ata_exec_command_pio - issue ATA command to host controller
242 * @ap: port to which command is being issued
243 * @tf: ATA taskfile register set
244 *
245 * Issues PIO write to ATA command register, with proper
246 * synchronization with interrupt handler / other threads.
247 *
248 * LOCKING:
249 * spin_lock_irqsave(host_set lock)
250 */
251
252static void ata_exec_command_pio(struct ata_port *ap, const struct ata_taskfile *tf)
253{
254 DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
255
256 outb(tf->command, ap->ioaddr.command_addr);
257 ata_pause(ap);
258}
259
260
261/**
262 * ata_exec_command_mmio - issue ATA command to host controller
263 * @ap: port to which command is being issued
264 * @tf: ATA taskfile register set
265 *
266 * Issues MMIO write to ATA command register, with proper
267 * synchronization with interrupt handler / other threads.
268 *
269 * LOCKING:
270 * spin_lock_irqsave(host_set lock)
271 */
272
273static void ata_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
274{
275 DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
276
277 writeb(tf->command, (void __iomem *) ap->ioaddr.command_addr);
278 ata_pause(ap);
279}
280
281
282/**
283 * ata_exec_command - issue ATA command to host controller
284 * @ap: port to which command is being issued
285 * @tf: ATA taskfile register set
286 *
287 * Issues PIO/MMIO write to ATA command register, with proper
288 * synchronization with interrupt handler / other threads.
289 *
290 * LOCKING:
291 * spin_lock_irqsave(host_set lock)
292 */
293void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
294{
295 if (ap->flags & ATA_FLAG_MMIO)
296 ata_exec_command_mmio(ap, tf);
297 else
298 ata_exec_command_pio(ap, tf);
299}
300
301/**
302 * ata_tf_to_host - issue ATA taskfile to host controller
303 * @ap: port to which command is being issued
304 * @tf: ATA taskfile register set
305 *
306 * Issues ATA taskfile register set to ATA host controller,
307 * with proper synchronization with interrupt handler and
308 * other threads.
309 *
310 * LOCKING:
311 * spin_lock_irqsave(host_set lock)
312 */
313
314static inline void ata_tf_to_host(struct ata_port *ap,
315 const struct ata_taskfile *tf)
316{
317 ap->ops->tf_load(ap, tf);
318 ap->ops->exec_command(ap, tf);
319}
320
321/**
322 * ata_tf_read_pio - input device's ATA taskfile shadow registers
323 * @ap: Port from which input is read
324 * @tf: ATA taskfile register set for storing input
325 *
326 * Reads ATA taskfile registers for currently-selected device
327 * into @tf.
328 *
329 * LOCKING:
330 * Inherited from caller.
331 */
332
333static void ata_tf_read_pio(struct ata_port *ap, struct ata_taskfile *tf)
334{
335 struct ata_ioports *ioaddr = &ap->ioaddr;
336
337 tf->command = ata_check_status(ap);
338 tf->feature = inb(ioaddr->error_addr);
339 tf->nsect = inb(ioaddr->nsect_addr);
340 tf->lbal = inb(ioaddr->lbal_addr);
341 tf->lbam = inb(ioaddr->lbam_addr);
342 tf->lbah = inb(ioaddr->lbah_addr);
343 tf->device = inb(ioaddr->device_addr);
344
345 if (tf->flags & ATA_TFLAG_LBA48) {
346 outb(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
347 tf->hob_feature = inb(ioaddr->error_addr);
348 tf->hob_nsect = inb(ioaddr->nsect_addr);
349 tf->hob_lbal = inb(ioaddr->lbal_addr);
350 tf->hob_lbam = inb(ioaddr->lbam_addr);
351 tf->hob_lbah = inb(ioaddr->lbah_addr);
352 }
353}
354
355/**
356 * ata_tf_read_mmio - input device's ATA taskfile shadow registers
357 * @ap: Port from which input is read
358 * @tf: ATA taskfile register set for storing input
359 *
360 * Reads ATA taskfile registers for currently-selected device
361 * into @tf via MMIO.
362 *
363 * LOCKING:
364 * Inherited from caller.
365 */
366
367static void ata_tf_read_mmio(struct ata_port *ap, struct ata_taskfile *tf)
368{
369 struct ata_ioports *ioaddr = &ap->ioaddr;
370
371 tf->command = ata_check_status(ap);
372 tf->feature = readb((void __iomem *)ioaddr->error_addr);
373 tf->nsect = readb((void __iomem *)ioaddr->nsect_addr);
374 tf->lbal = readb((void __iomem *)ioaddr->lbal_addr);
375 tf->lbam = readb((void __iomem *)ioaddr->lbam_addr);
376 tf->lbah = readb((void __iomem *)ioaddr->lbah_addr);
377 tf->device = readb((void __iomem *)ioaddr->device_addr);
378
379 if (tf->flags & ATA_TFLAG_LBA48) {
380 writeb(tf->ctl | ATA_HOB, (void __iomem *) ap->ioaddr.ctl_addr);
381 tf->hob_feature = readb((void __iomem *)ioaddr->error_addr);
382 tf->hob_nsect = readb((void __iomem *)ioaddr->nsect_addr);
383 tf->hob_lbal = readb((void __iomem *)ioaddr->lbal_addr);
384 tf->hob_lbam = readb((void __iomem *)ioaddr->lbam_addr);
385 tf->hob_lbah = readb((void __iomem *)ioaddr->lbah_addr);
386 }
387}
388
389
390/**
391 * ata_tf_read - input device's ATA taskfile shadow registers
392 * @ap: Port from which input is read
393 * @tf: ATA taskfile register set for storing input
394 *
395 * Reads ATA taskfile registers for currently-selected device
396 * into @tf.
397 *
398 * Reads nsect, lbal, lbam, lbah, and device. If ATA_TFLAG_LBA48
399 * is set, also reads the hob registers.
400 *
401 * May be used as the tf_read() entry in ata_port_operations.
402 *
403 * LOCKING:
404 * Inherited from caller.
405 */
406void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
407{
408 if (ap->flags & ATA_FLAG_MMIO)
409 ata_tf_read_mmio(ap, tf);
410 else
411 ata_tf_read_pio(ap, tf);
412}
413
414/**
415 * ata_check_status_pio - Read device status reg & clear interrupt
416 * @ap: port where the device is
417 *
418 * Reads ATA taskfile status register for currently-selected device
419 * and return its value. This also clears pending interrupts
420 * from this device
421 *
422 * LOCKING:
423 * Inherited from caller.
424 */
425static u8 ata_check_status_pio(struct ata_port *ap)
426{
427 return inb(ap->ioaddr.status_addr);
428}
429
430/**
431 * ata_check_status_mmio - Read device status reg & clear interrupt
432 * @ap: port where the device is
433 *
434 * Reads ATA taskfile status register for currently-selected device
435 * via MMIO and return its value. This also clears pending interrupts
436 * from this device
437 *
438 * LOCKING:
439 * Inherited from caller.
440 */
441static u8 ata_check_status_mmio(struct ata_port *ap)
442{
443 return readb((void __iomem *) ap->ioaddr.status_addr);
444}
445
446
447/**
448 * ata_check_status - Read device status reg & clear interrupt
449 * @ap: port where the device is
450 *
451 * Reads ATA taskfile status register for currently-selected device
452 * and return its value. This also clears pending interrupts
453 * from this device
454 *
455 * May be used as the check_status() entry in ata_port_operations.
456 *
457 * LOCKING:
458 * Inherited from caller.
459 */
460u8 ata_check_status(struct ata_port *ap)
461{
462 if (ap->flags & ATA_FLAG_MMIO)
463 return ata_check_status_mmio(ap);
464 return ata_check_status_pio(ap);
465}
466
467
468/**
469 * ata_altstatus - Read device alternate status reg
470 * @ap: port where the device is
471 *
472 * Reads ATA taskfile alternate status register for
473 * currently-selected device and return its value.
474 *
475 * Note: may NOT be used as the check_altstatus() entry in
476 * ata_port_operations.
477 *
478 * LOCKING:
479 * Inherited from caller.
480 */
481u8 ata_altstatus(struct ata_port *ap)
482{
483 if (ap->ops->check_altstatus)
484 return ap->ops->check_altstatus(ap);
485
486 if (ap->flags & ATA_FLAG_MMIO)
487 return readb((void __iomem *)ap->ioaddr.altstatus_addr);
488 return inb(ap->ioaddr.altstatus_addr);
489}
490
491 90
492/** 91/**
493 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure 92 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
@@ -838,6 +437,7 @@ unsigned int ata_dev_classify(const struct ata_taskfile *tf)
838 * ata_dev_try_classify - Parse returned ATA device signature 437 * ata_dev_try_classify - Parse returned ATA device signature
839 * @ap: ATA channel to examine 438 * @ap: ATA channel to examine
840 * @device: Device to examine (starting at zero) 439 * @device: Device to examine (starting at zero)
440 * @r_err: Value of error register on completion
841 * 441 *
842 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs, 442 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
843 * an ATA/ATAPI-defined set of values is placed in the ATA 443 * an ATA/ATAPI-defined set of values is placed in the ATA
@@ -850,11 +450,14 @@ unsigned int ata_dev_classify(const struct ata_taskfile *tf)
850 * 450 *
851 * LOCKING: 451 * LOCKING:
852 * caller. 452 * caller.
453 *
454 * RETURNS:
455 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
853 */ 456 */
854 457
855static u8 ata_dev_try_classify(struct ata_port *ap, unsigned int device) 458static unsigned int
459ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
856{ 460{
857 struct ata_device *dev = &ap->device[device];
858 struct ata_taskfile tf; 461 struct ata_taskfile tf;
859 unsigned int class; 462 unsigned int class;
860 u8 err; 463 u8 err;
@@ -865,8 +468,8 @@ static u8 ata_dev_try_classify(struct ata_port *ap, unsigned int device)
865 468
866 ap->ops->tf_read(ap, &tf); 469 ap->ops->tf_read(ap, &tf);
867 err = tf.feature; 470 err = tf.feature;
868 471 if (r_err)
869 dev->class = ATA_DEV_NONE; 472 *r_err = err;
870 473
871 /* see if device passed diags */ 474 /* see if device passed diags */
872 if (err == 1) 475 if (err == 1)
@@ -874,22 +477,20 @@ static u8 ata_dev_try_classify(struct ata_port *ap, unsigned int device)
874 else if ((device == 0) && (err == 0x81)) 477 else if ((device == 0) && (err == 0x81))
875 /* do nothing */ ; 478 /* do nothing */ ;
876 else 479 else
877 return err; 480 return ATA_DEV_NONE;
878 481
879 /* determine if device if ATA or ATAPI */ 482 /* determine if device is ATA or ATAPI */
880 class = ata_dev_classify(&tf); 483 class = ata_dev_classify(&tf);
484
881 if (class == ATA_DEV_UNKNOWN) 485 if (class == ATA_DEV_UNKNOWN)
882 return err; 486 return ATA_DEV_NONE;
883 if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0)) 487 if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
884 return err; 488 return ATA_DEV_NONE;
885 489 return class;
886 dev->class = class;
887
888 return err;
889} 490}
890 491
891/** 492/**
892 * ata_dev_id_string - Convert IDENTIFY DEVICE page into string 493 * ata_id_string - Convert IDENTIFY DEVICE page into string
893 * @id: IDENTIFY DEVICE results we will examine 494 * @id: IDENTIFY DEVICE results we will examine
894 * @s: string into which data is output 495 * @s: string into which data is output
895 * @ofs: offset into identify device page 496 * @ofs: offset into identify device page
@@ -903,8 +504,8 @@ static u8 ata_dev_try_classify(struct ata_port *ap, unsigned int device)
903 * caller. 504 * caller.
904 */ 505 */
905 506
906void ata_dev_id_string(const u16 *id, unsigned char *s, 507void ata_id_string(const u16 *id, unsigned char *s,
907 unsigned int ofs, unsigned int len) 508 unsigned int ofs, unsigned int len)
908{ 509{
909 unsigned int c; 510 unsigned int c;
910 511
@@ -922,6 +523,49 @@ void ata_dev_id_string(const u16 *id, unsigned char *s,
922 } 523 }
923} 524}
924 525
526/**
527 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
528 * @id: IDENTIFY DEVICE results we will examine
529 * @s: string into which data is output
530 * @ofs: offset into identify device page
531 * @len: length of string to return. must be an odd number.
532 *
533 * This function is identical to ata_id_string except that it
534 * trims trailing spaces and terminates the resulting string with
535 * null. @len must be actual maximum length (even number) + 1.
536 *
537 * LOCKING:
538 * caller.
539 */
540void ata_id_c_string(const u16 *id, unsigned char *s,
541 unsigned int ofs, unsigned int len)
542{
543 unsigned char *p;
544
545 WARN_ON(!(len & 1));
546
547 ata_id_string(id, s, ofs, len - 1);
548
549 p = s + strnlen(s, len - 1);
550 while (p > s && p[-1] == ' ')
551 p--;
552 *p = '\0';
553}
554
555static u64 ata_id_n_sectors(const u16 *id)
556{
557 if (ata_id_has_lba(id)) {
558 if (ata_id_has_lba48(id))
559 return ata_id_u64(id, 100);
560 else
561 return ata_id_u32(id, 60);
562 } else {
563 if (ata_id_current_chs_valid(id))
564 return ata_id_u32(id, 57);
565 else
566 return id[1] * id[3] * id[6];
567 }
568}
925 569
926/** 570/**
927 * ata_noop_dev_select - Select device 0/1 on ATA bus 571 * ata_noop_dev_select - Select device 0/1 on ATA bus
@@ -1011,41 +655,41 @@ void ata_dev_select(struct ata_port *ap, unsigned int device,
1011 655
1012/** 656/**
1013 * ata_dump_id - IDENTIFY DEVICE info debugging output 657 * ata_dump_id - IDENTIFY DEVICE info debugging output
1014 * @dev: Device whose IDENTIFY DEVICE page we will dump 658 * @id: IDENTIFY DEVICE page to dump
1015 * 659 *
1016 * Dump selected 16-bit words from a detected device's 660 * Dump selected 16-bit words from the given IDENTIFY DEVICE
1017 * IDENTIFY PAGE page. 661 * page.
1018 * 662 *
1019 * LOCKING: 663 * LOCKING:
1020 * caller. 664 * caller.
1021 */ 665 */
1022 666
1023static inline void ata_dump_id(const struct ata_device *dev) 667static inline void ata_dump_id(const u16 *id)
1024{ 668{
1025 DPRINTK("49==0x%04x " 669 DPRINTK("49==0x%04x "
1026 "53==0x%04x " 670 "53==0x%04x "
1027 "63==0x%04x " 671 "63==0x%04x "
1028 "64==0x%04x " 672 "64==0x%04x "
1029 "75==0x%04x \n", 673 "75==0x%04x \n",
1030 dev->id[49], 674 id[49],
1031 dev->id[53], 675 id[53],
1032 dev->id[63], 676 id[63],
1033 dev->id[64], 677 id[64],
1034 dev->id[75]); 678 id[75]);
1035 DPRINTK("80==0x%04x " 679 DPRINTK("80==0x%04x "
1036 "81==0x%04x " 680 "81==0x%04x "
1037 "82==0x%04x " 681 "82==0x%04x "
1038 "83==0x%04x " 682 "83==0x%04x "
1039 "84==0x%04x \n", 683 "84==0x%04x \n",
1040 dev->id[80], 684 id[80],
1041 dev->id[81], 685 id[81],
1042 dev->id[82], 686 id[82],
1043 dev->id[83], 687 id[83],
1044 dev->id[84]); 688 id[84]);
1045 DPRINTK("88==0x%04x " 689 DPRINTK("88==0x%04x "
1046 "93==0x%04x\n", 690 "93==0x%04x\n",
1047 dev->id[88], 691 id[88],
1048 dev->id[93]); 692 id[93]);
1049} 693}
1050 694
1051/* 695/*
@@ -1077,24 +721,77 @@ static unsigned int ata_pio_modes(const struct ata_device *adev)
1077 timing API will get this right anyway */ 721 timing API will get this right anyway */
1078} 722}
1079 723
1080struct ata_exec_internal_arg { 724static inline void
1081 unsigned int err_mask; 725ata_queue_packet_task(struct ata_port *ap)
1082 struct ata_taskfile *tf; 726{
1083 struct completion *waiting; 727 if (!(ap->flags & ATA_FLAG_FLUSH_PIO_TASK))
1084}; 728 queue_work(ata_wq, &ap->packet_task);
729}
1085 730
1086int ata_qc_complete_internal(struct ata_queued_cmd *qc) 731static inline void
732ata_queue_pio_task(struct ata_port *ap)
1087{ 733{
1088 struct ata_exec_internal_arg *arg = qc->private_data; 734 if (!(ap->flags & ATA_FLAG_FLUSH_PIO_TASK))
1089 struct completion *waiting = arg->waiting; 735 queue_work(ata_wq, &ap->pio_task);
736}
1090 737
1091 if (!(qc->err_mask & ~AC_ERR_DEV)) 738static inline void
1092 qc->ap->ops->tf_read(qc->ap, arg->tf); 739ata_queue_delayed_pio_task(struct ata_port *ap, unsigned long delay)
1093 arg->err_mask = qc->err_mask; 740{
1094 arg->waiting = NULL; 741 if (!(ap->flags & ATA_FLAG_FLUSH_PIO_TASK))
1095 complete(waiting); 742 queue_delayed_work(ata_wq, &ap->pio_task, delay);
743}
1096 744
1097 return 0; 745/**
746 * ata_flush_pio_tasks - Flush pio_task and packet_task
747 * @ap: the target ata_port
748 *
749 * After this function completes, pio_task and packet_task are
750 * guranteed not to be running or scheduled.
751 *
752 * LOCKING:
753 * Kernel thread context (may sleep)
754 */
755
756static void ata_flush_pio_tasks(struct ata_port *ap)
757{
758 int tmp = 0;
759 unsigned long flags;
760
761 DPRINTK("ENTER\n");
762
763 spin_lock_irqsave(&ap->host_set->lock, flags);
764 ap->flags |= ATA_FLAG_FLUSH_PIO_TASK;
765 spin_unlock_irqrestore(&ap->host_set->lock, flags);
766
767 DPRINTK("flush #1\n");
768 flush_workqueue(ata_wq);
769
770 /*
771 * At this point, if a task is running, it's guaranteed to see
772 * the FLUSH flag; thus, it will never queue pio tasks again.
773 * Cancel and flush.
774 */
775 tmp |= cancel_delayed_work(&ap->pio_task);
776 tmp |= cancel_delayed_work(&ap->packet_task);
777 if (!tmp) {
778 DPRINTK("flush #2\n");
779 flush_workqueue(ata_wq);
780 }
781
782 spin_lock_irqsave(&ap->host_set->lock, flags);
783 ap->flags &= ~ATA_FLAG_FLUSH_PIO_TASK;
784 spin_unlock_irqrestore(&ap->host_set->lock, flags);
785
786 DPRINTK("EXIT\n");
787}
788
789void ata_qc_complete_internal(struct ata_queued_cmd *qc)
790{
791 struct completion *waiting = qc->private_data;
792
793 qc->ap->ops->tf_read(qc->ap, &qc->tf);
794 complete(waiting);
1098} 795}
1099 796
1100/** 797/**
@@ -1125,7 +822,7 @@ ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
1125 struct ata_queued_cmd *qc; 822 struct ata_queued_cmd *qc;
1126 DECLARE_COMPLETION(wait); 823 DECLARE_COMPLETION(wait);
1127 unsigned long flags; 824 unsigned long flags;
1128 struct ata_exec_internal_arg arg; 825 unsigned int err_mask;
1129 826
1130 spin_lock_irqsave(&ap->host_set->lock, flags); 827 spin_lock_irqsave(&ap->host_set->lock, flags);
1131 828
@@ -1139,13 +836,12 @@ ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
1139 qc->nsect = buflen / ATA_SECT_SIZE; 836 qc->nsect = buflen / ATA_SECT_SIZE;
1140 } 837 }
1141 838
1142 arg.waiting = &wait; 839 qc->private_data = &wait;
1143 arg.tf = tf;
1144 qc->private_data = &arg;
1145 qc->complete_fn = ata_qc_complete_internal; 840 qc->complete_fn = ata_qc_complete_internal;
1146 841
1147 if (ata_qc_issue(qc)) 842 qc->err_mask = ata_qc_issue(qc);
1148 goto issue_fail; 843 if (qc->err_mask)
844 ata_qc_complete(qc);
1149 845
1150 spin_unlock_irqrestore(&ap->host_set->lock, flags); 846 spin_unlock_irqrestore(&ap->host_set->lock, flags);
1151 847
@@ -1158,8 +854,8 @@ ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
1158 * before the caller cleans up, it will result in a 854 * before the caller cleans up, it will result in a
1159 * spurious interrupt. We can live with that. 855 * spurious interrupt. We can live with that.
1160 */ 856 */
1161 if (arg.waiting) { 857 if (qc->flags & ATA_QCFLAG_ACTIVE) {
1162 qc->err_mask = AC_ERR_OTHER; 858 qc->err_mask = AC_ERR_TIMEOUT;
1163 ata_qc_complete(qc); 859 ata_qc_complete(qc);
1164 printk(KERN_WARNING "ata%u: qc timeout (cmd 0x%x)\n", 860 printk(KERN_WARNING "ata%u: qc timeout (cmd 0x%x)\n",
1165 ap->id, command); 861 ap->id, command);
@@ -1168,12 +864,12 @@ ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
1168 spin_unlock_irqrestore(&ap->host_set->lock, flags); 864 spin_unlock_irqrestore(&ap->host_set->lock, flags);
1169 } 865 }
1170 866
1171 return arg.err_mask; 867 *tf = qc->tf;
868 err_mask = qc->err_mask;
1172 869
1173 issue_fail:
1174 ata_qc_free(qc); 870 ata_qc_free(qc);
1175 spin_unlock_irqrestore(&ap->host_set->lock, flags); 871
1176 return AC_ERR_OTHER; 872 return err_mask;
1177} 873}
1178 874
1179/** 875/**
@@ -1210,73 +906,78 @@ unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1210} 906}
1211 907
1212/** 908/**
1213 * ata_dev_identify - obtain IDENTIFY x DEVICE page 909 * ata_dev_read_id - Read ID data from the specified device
1214 * @ap: port on which device we wish to probe resides 910 * @ap: port on which target device resides
1215 * @device: device bus address, starting at zero 911 * @dev: target device
1216 * 912 * @p_class: pointer to class of the target device (may be changed)
1217 * Following bus reset, we issue the IDENTIFY [PACKET] DEVICE 913 * @post_reset: is this read ID post-reset?
1218 * command, and read back the 512-byte device information page. 914 * @p_id: read IDENTIFY page (newly allocated)
1219 * The device information page is fed to us via the standard 915 *
1220 * PIO-IN protocol, but we hand-code it here. (TODO: investigate 916 * Read ID data from the specified device. ATA_CMD_ID_ATA is
1221 * using standard PIO-IN paths) 917 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1222 * 918 * devices. This function also takes care of EDD signature
1223 * After reading the device information page, we use several 919 * misreporting (to be removed once EDD support is gone) and
1224 * bits of information from it to initialize data structures 920 * issues ATA_CMD_INIT_DEV_PARAMS for pre-ATA4 drives.
1225 * that will be used during the lifetime of the ata_device.
1226 * Other data from the info page is used to disqualify certain
1227 * older ATA devices we do not wish to support.
1228 * 921 *
1229 * LOCKING: 922 * LOCKING:
1230 * Inherited from caller. Some functions called by this function 923 * Kernel thread context (may sleep)
1231 * obtain the host_set lock. 924 *
925 * RETURNS:
926 * 0 on success, -errno otherwise.
1232 */ 927 */
1233 928static int ata_dev_read_id(struct ata_port *ap, struct ata_device *dev,
1234static void ata_dev_identify(struct ata_port *ap, unsigned int device) 929 unsigned int *p_class, int post_reset, u16 **p_id)
1235{ 930{
1236 struct ata_device *dev = &ap->device[device]; 931 unsigned int class = *p_class;
1237 unsigned int major_version;
1238 u16 tmp;
1239 unsigned long xfer_modes;
1240 unsigned int using_edd; 932 unsigned int using_edd;
1241 struct ata_taskfile tf; 933 struct ata_taskfile tf;
1242 unsigned int err_mask; 934 unsigned int err_mask = 0;
935 u16 *id;
936 const char *reason;
1243 int rc; 937 int rc;
1244 938
1245 if (!ata_dev_present(dev)) { 939 DPRINTK("ENTER, host %u, dev %u\n", ap->id, dev->devno);
1246 DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n",
1247 ap->id, device);
1248 return;
1249 }
1250 940
1251 if (ap->flags & (ATA_FLAG_SRST | ATA_FLAG_SATA_RESET)) 941 if (ap->ops->probe_reset ||
942 ap->flags & (ATA_FLAG_SRST | ATA_FLAG_SATA_RESET))
1252 using_edd = 0; 943 using_edd = 0;
1253 else 944 else
1254 using_edd = 1; 945 using_edd = 1;
1255 946
1256 DPRINTK("ENTER, host %u, dev %u\n", ap->id, device); 947 ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
1257
1258 assert (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ATAPI ||
1259 dev->class == ATA_DEV_NONE);
1260 948
1261 ata_dev_select(ap, device, 1, 1); /* select device 0/1 */ 949 id = kmalloc(sizeof(id[0]) * ATA_ID_WORDS, GFP_KERNEL);
950 if (id == NULL) {
951 rc = -ENOMEM;
952 reason = "out of memory";
953 goto err_out;
954 }
1262 955
1263retry: 956 retry:
1264 ata_tf_init(ap, &tf, device); 957 ata_tf_init(ap, &tf, dev->devno);
1265 958
1266 if (dev->class == ATA_DEV_ATA) { 959 switch (class) {
960 case ATA_DEV_ATA:
1267 tf.command = ATA_CMD_ID_ATA; 961 tf.command = ATA_CMD_ID_ATA;
1268 DPRINTK("do ATA identify\n"); 962 break;
1269 } else { 963 case ATA_DEV_ATAPI:
1270 tf.command = ATA_CMD_ID_ATAPI; 964 tf.command = ATA_CMD_ID_ATAPI;
1271 DPRINTK("do ATAPI identify\n"); 965 break;
966 default:
967 rc = -ENODEV;
968 reason = "unsupported class";
969 goto err_out;
1272 } 970 }
1273 971
1274 tf.protocol = ATA_PROT_PIO; 972 tf.protocol = ATA_PROT_PIO;
1275 973
1276 err_mask = ata_exec_internal(ap, dev, &tf, DMA_FROM_DEVICE, 974 err_mask = ata_exec_internal(ap, dev, &tf, DMA_FROM_DEVICE,
1277 dev->id, sizeof(dev->id)); 975 id, sizeof(id[0]) * ATA_ID_WORDS);
1278 976
1279 if (err_mask) { 977 if (err_mask) {
978 rc = -EIO;
979 reason = "I/O error";
980
1280 if (err_mask & ~AC_ERR_DEV) 981 if (err_mask & ~AC_ERR_DEV)
1281 goto err_out; 982 goto err_out;
1282 983
@@ -1291,25 +992,110 @@ retry:
1291 * ATA software reset (SRST, the default) does not appear 992 * ATA software reset (SRST, the default) does not appear
1292 * to have this problem. 993 * to have this problem.
1293 */ 994 */
1294 if ((using_edd) && (dev->class == ATA_DEV_ATA)) { 995 if ((using_edd) && (class == ATA_DEV_ATA)) {
1295 u8 err = tf.feature; 996 u8 err = tf.feature;
1296 if (err & ATA_ABORTED) { 997 if (err & ATA_ABORTED) {
1297 dev->class = ATA_DEV_ATAPI; 998 class = ATA_DEV_ATAPI;
1298 goto retry; 999 goto retry;
1299 } 1000 }
1300 } 1001 }
1301 goto err_out; 1002 goto err_out;
1302 } 1003 }
1303 1004
1304 swap_buf_le16(dev->id, ATA_ID_WORDS); 1005 swap_buf_le16(id, ATA_ID_WORDS);
1305 1006
1306 /* print device capabilities */ 1007 /* print device capabilities */
1307 printk(KERN_DEBUG "ata%u: dev %u cfg " 1008 printk(KERN_DEBUG "ata%u: dev %u cfg "
1308 "49:%04x 82:%04x 83:%04x 84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n", 1009 "49:%04x 82:%04x 83:%04x 84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n",
1309 ap->id, device, dev->id[49], 1010 ap->id, dev->devno,
1310 dev->id[82], dev->id[83], dev->id[84], 1011 id[49], id[82], id[83], id[84], id[85], id[86], id[87], id[88]);
1311 dev->id[85], dev->id[86], dev->id[87], 1012
1312 dev->id[88]); 1013 /* sanity check */
1014 if ((class == ATA_DEV_ATA) != ata_id_is_ata(id)) {
1015 rc = -EINVAL;
1016 reason = "device reports illegal type";
1017 goto err_out;
1018 }
1019
1020 if (post_reset && class == ATA_DEV_ATA) {
1021 /*
1022 * The exact sequence expected by certain pre-ATA4 drives is:
1023 * SRST RESET
1024 * IDENTIFY
1025 * INITIALIZE DEVICE PARAMETERS
1026 * anything else..
1027 * Some drives were very specific about that exact sequence.
1028 */
1029 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
1030 err_mask = ata_dev_init_params(ap, dev);
1031 if (err_mask) {
1032 rc = -EIO;
1033 reason = "INIT_DEV_PARAMS failed";
1034 goto err_out;
1035 }
1036
1037 /* current CHS translation info (id[53-58]) might be
1038 * changed. reread the identify device info.
1039 */
1040 post_reset = 0;
1041 goto retry;
1042 }
1043 }
1044
1045 *p_class = class;
1046 *p_id = id;
1047 return 0;
1048
1049 err_out:
1050 printk(KERN_WARNING "ata%u: dev %u failed to IDENTIFY (%s)\n",
1051 ap->id, dev->devno, reason);
1052 kfree(id);
1053 return rc;
1054}
1055
1056static inline u8 ata_dev_knobble(const struct ata_port *ap,
1057 struct ata_device *dev)
1058{
1059 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
1060}
1061
1062/**
1063 * ata_dev_configure - Configure the specified ATA/ATAPI device
1064 * @ap: Port on which target device resides
1065 * @dev: Target device to configure
1066 * @print_info: Enable device info printout
1067 *
1068 * Configure @dev according to @dev->id. Generic and low-level
1069 * driver specific fixups are also applied.
1070 *
1071 * LOCKING:
1072 * Kernel thread context (may sleep)
1073 *
1074 * RETURNS:
1075 * 0 on success, -errno otherwise
1076 */
1077static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
1078 int print_info)
1079{
1080 unsigned long xfer_modes;
1081 int i, rc;
1082
1083 if (!ata_dev_present(dev)) {
1084 DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n",
1085 ap->id, dev->devno);
1086 return 0;
1087 }
1088
1089 DPRINTK("ENTER, host %u, dev %u\n", ap->id, dev->devno);
1090
1091 /* initialize to-be-configured parameters */
1092 dev->flags = 0;
1093 dev->max_sectors = 0;
1094 dev->cdb_len = 0;
1095 dev->n_sectors = 0;
1096 dev->cylinders = 0;
1097 dev->heads = 0;
1098 dev->sectors = 0;
1313 1099
1314 /* 1100 /*
1315 * common ATA, ATAPI feature tests 1101 * common ATA, ATAPI feature tests
@@ -1318,6 +1104,7 @@ retry:
1318 /* we require DMA support (bits 8 of word 49) */ 1104 /* we require DMA support (bits 8 of word 49) */
1319 if (!ata_id_has_dma(dev->id)) { 1105 if (!ata_id_has_dma(dev->id)) {
1320 printk(KERN_DEBUG "ata%u: no dma\n", ap->id); 1106 printk(KERN_DEBUG "ata%u: no dma\n", ap->id);
1107 rc = -EINVAL;
1321 goto err_out_nosup; 1108 goto err_out_nosup;
1322 } 1109 }
1323 1110
@@ -1328,143 +1115,102 @@ retry:
1328 if (!xfer_modes) 1115 if (!xfer_modes)
1329 xfer_modes = ata_pio_modes(dev); 1116 xfer_modes = ata_pio_modes(dev);
1330 1117
1331 ata_dump_id(dev); 1118 ata_dump_id(dev->id);
1332 1119
1333 /* ATA-specific feature tests */ 1120 /* ATA-specific feature tests */
1334 if (dev->class == ATA_DEV_ATA) { 1121 if (dev->class == ATA_DEV_ATA) {
1335 if (!ata_id_is_ata(dev->id)) /* sanity check */ 1122 dev->n_sectors = ata_id_n_sectors(dev->id);
1336 goto err_out_nosup;
1337
1338 /* get major version */
1339 tmp = dev->id[ATA_ID_MAJOR_VER];
1340 for (major_version = 14; major_version >= 1; major_version--)
1341 if (tmp & (1 << major_version))
1342 break;
1343
1344 /*
1345 * The exact sequence expected by certain pre-ATA4 drives is:
1346 * SRST RESET
1347 * IDENTIFY
1348 * INITIALIZE DEVICE PARAMETERS
1349 * anything else..
1350 * Some drives were very specific about that exact sequence.
1351 */
1352 if (major_version < 4 || (!ata_id_has_lba(dev->id))) {
1353 ata_dev_init_params(ap, dev);
1354
1355 /* current CHS translation info (id[53-58]) might be
1356 * changed. reread the identify device info.
1357 */
1358 ata_dev_reread_id(ap, dev);
1359 }
1360 1123
1361 if (ata_id_has_lba(dev->id)) { 1124 if (ata_id_has_lba(dev->id)) {
1362 dev->flags |= ATA_DFLAG_LBA; 1125 const char *lba_desc;
1363 1126
1127 lba_desc = "LBA";
1128 dev->flags |= ATA_DFLAG_LBA;
1364 if (ata_id_has_lba48(dev->id)) { 1129 if (ata_id_has_lba48(dev->id)) {
1365 dev->flags |= ATA_DFLAG_LBA48; 1130 dev->flags |= ATA_DFLAG_LBA48;
1366 dev->n_sectors = ata_id_u64(dev->id, 100); 1131 lba_desc = "LBA48";
1367 } else {
1368 dev->n_sectors = ata_id_u32(dev->id, 60);
1369 } 1132 }
1370 1133
1371 /* print device info to dmesg */ 1134 /* print device info to dmesg */
1372 printk(KERN_INFO "ata%u: dev %u ATA-%d, max %s, %Lu sectors:%s\n", 1135 if (print_info)
1373 ap->id, device, 1136 printk(KERN_INFO "ata%u: dev %u ATA-%d, "
1374 major_version, 1137 "max %s, %Lu sectors: %s\n",
1375 ata_mode_string(xfer_modes), 1138 ap->id, dev->devno,
1376 (unsigned long long)dev->n_sectors, 1139 ata_id_major_version(dev->id),
1377 dev->flags & ATA_DFLAG_LBA48 ? " LBA48" : " LBA"); 1140 ata_mode_string(xfer_modes),
1378 } else { 1141 (unsigned long long)dev->n_sectors,
1142 lba_desc);
1143 } else {
1379 /* CHS */ 1144 /* CHS */
1380 1145
1381 /* Default translation */ 1146 /* Default translation */
1382 dev->cylinders = dev->id[1]; 1147 dev->cylinders = dev->id[1];
1383 dev->heads = dev->id[3]; 1148 dev->heads = dev->id[3];
1384 dev->sectors = dev->id[6]; 1149 dev->sectors = dev->id[6];
1385 dev->n_sectors = dev->cylinders * dev->heads * dev->sectors;
1386 1150
1387 if (ata_id_current_chs_valid(dev->id)) { 1151 if (ata_id_current_chs_valid(dev->id)) {
1388 /* Current CHS translation is valid. */ 1152 /* Current CHS translation is valid. */
1389 dev->cylinders = dev->id[54]; 1153 dev->cylinders = dev->id[54];
1390 dev->heads = dev->id[55]; 1154 dev->heads = dev->id[55];
1391 dev->sectors = dev->id[56]; 1155 dev->sectors = dev->id[56];
1392
1393 dev->n_sectors = ata_id_u32(dev->id, 57);
1394 } 1156 }
1395 1157
1396 /* print device info to dmesg */ 1158 /* print device info to dmesg */
1397 printk(KERN_INFO "ata%u: dev %u ATA-%d, max %s, %Lu sectors: CHS %d/%d/%d\n", 1159 if (print_info)
1398 ap->id, device, 1160 printk(KERN_INFO "ata%u: dev %u ATA-%d, "
1399 major_version, 1161 "max %s, %Lu sectors: CHS %u/%u/%u\n",
1400 ata_mode_string(xfer_modes), 1162 ap->id, dev->devno,
1401 (unsigned long long)dev->n_sectors, 1163 ata_id_major_version(dev->id),
1402 (int)dev->cylinders, (int)dev->heads, (int)dev->sectors); 1164 ata_mode_string(xfer_modes),
1403 1165 (unsigned long long)dev->n_sectors,
1166 dev->cylinders, dev->heads, dev->sectors);
1404 } 1167 }
1405 1168
1406 ap->host->max_cmd_len = 16; 1169 dev->cdb_len = 16;
1407 } 1170 }
1408 1171
1409 /* ATAPI-specific feature tests */ 1172 /* ATAPI-specific feature tests */
1410 else if (dev->class == ATA_DEV_ATAPI) { 1173 else if (dev->class == ATA_DEV_ATAPI) {
1411 if (ata_id_is_ata(dev->id)) /* sanity check */
1412 goto err_out_nosup;
1413
1414 rc = atapi_cdb_len(dev->id); 1174 rc = atapi_cdb_len(dev->id);
1415 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) { 1175 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
1416 printk(KERN_WARNING "ata%u: unsupported CDB len\n", ap->id); 1176 printk(KERN_WARNING "ata%u: unsupported CDB len\n", ap->id);
1177 rc = -EINVAL;
1417 goto err_out_nosup; 1178 goto err_out_nosup;
1418 } 1179 }
1419 ap->cdb_len = (unsigned int) rc; 1180 dev->cdb_len = (unsigned int) rc;
1420 ap->host->max_cmd_len = (unsigned char) ap->cdb_len;
1421 1181
1422 /* print device info to dmesg */ 1182 /* print device info to dmesg */
1423 printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n", 1183 if (print_info)
1424 ap->id, device, 1184 printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n",
1425 ata_mode_string(xfer_modes)); 1185 ap->id, dev->devno, ata_mode_string(xfer_modes));
1426 } 1186 }
1427 1187
1428 DPRINTK("EXIT, drv_stat = 0x%x\n", ata_chk_status(ap)); 1188 ap->host->max_cmd_len = 0;
1429 return; 1189 for (i = 0; i < ATA_MAX_DEVICES; i++)
1430 1190 ap->host->max_cmd_len = max_t(unsigned int,
1431err_out_nosup: 1191 ap->host->max_cmd_len,
1432 printk(KERN_WARNING "ata%u: dev %u not supported, ignoring\n", 1192 ap->device[i].cdb_len);
1433 ap->id, device);
1434err_out:
1435 dev->class++; /* converts ATA_DEV_xxx into ATA_DEV_xxx_UNSUP */
1436 DPRINTK("EXIT, err\n");
1437}
1438
1439
1440static inline u8 ata_dev_knobble(const struct ata_port *ap)
1441{
1442 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(ap->device->id)));
1443}
1444
1445/**
1446 * ata_dev_config - Run device specific handlers and check for
1447 * SATA->PATA bridges
1448 * @ap: Bus
1449 * @i: Device
1450 *
1451 * LOCKING:
1452 */
1453 1193
1454void ata_dev_config(struct ata_port *ap, unsigned int i)
1455{
1456 /* limit bridge transfers to udma5, 200 sectors */ 1194 /* limit bridge transfers to udma5, 200 sectors */
1457 if (ata_dev_knobble(ap)) { 1195 if (ata_dev_knobble(ap, dev)) {
1458 printk(KERN_INFO "ata%u(%u): applying bridge limits\n", 1196 if (print_info)
1459 ap->id, ap->device->devno); 1197 printk(KERN_INFO "ata%u(%u): applying bridge limits\n",
1198 ap->id, dev->devno);
1460 ap->udma_mask &= ATA_UDMA5; 1199 ap->udma_mask &= ATA_UDMA5;
1461 ap->host->max_sectors = ATA_MAX_SECTORS; 1200 dev->max_sectors = ATA_MAX_SECTORS;
1462 ap->host->hostt->max_sectors = ATA_MAX_SECTORS;
1463 ap->device[i].flags |= ATA_DFLAG_LOCK_SECTORS;
1464 } 1201 }
1465 1202
1466 if (ap->ops->dev_config) 1203 if (ap->ops->dev_config)
1467 ap->ops->dev_config(ap, &ap->device[i]); 1204 ap->ops->dev_config(ap, dev);
1205
1206 DPRINTK("EXIT, drv_stat = 0x%x\n", ata_chk_status(ap));
1207 return 0;
1208
1209err_out_nosup:
1210 printk(KERN_WARNING "ata%u: dev %u not supported, ignoring\n",
1211 ap->id, dev->devno);
1212 DPRINTK("EXIT, err\n");
1213 return rc;
1468} 1214}
1469 1215
1470/** 1216/**
@@ -1484,21 +1230,58 @@ void ata_dev_config(struct ata_port *ap, unsigned int i)
1484 1230
1485static int ata_bus_probe(struct ata_port *ap) 1231static int ata_bus_probe(struct ata_port *ap)
1486{ 1232{
1487 unsigned int i, found = 0; 1233 unsigned int classes[ATA_MAX_DEVICES];
1234 unsigned int i, rc, found = 0;
1488 1235
1489 ap->ops->phy_reset(ap); 1236 ata_port_probe(ap);
1490 if (ap->flags & ATA_FLAG_PORT_DISABLED) 1237
1491 goto err_out; 1238 /* reset */
1239 if (ap->ops->probe_reset) {
1240 rc = ap->ops->probe_reset(ap, classes);
1241 if (rc) {
1242 printk("ata%u: reset failed (errno=%d)\n", ap->id, rc);
1243 return rc;
1244 }
1245
1246 for (i = 0; i < ATA_MAX_DEVICES; i++)
1247 if (classes[i] == ATA_DEV_UNKNOWN)
1248 classes[i] = ATA_DEV_NONE;
1249 } else {
1250 ap->ops->phy_reset(ap);
1492 1251
1252 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1253 if (!(ap->flags & ATA_FLAG_PORT_DISABLED))
1254 classes[i] = ap->device[i].class;
1255 else
1256 ap->device[i].class = ATA_DEV_UNKNOWN;
1257 }
1258 ata_port_probe(ap);
1259 }
1260
1261 /* read IDENTIFY page and configure devices */
1493 for (i = 0; i < ATA_MAX_DEVICES; i++) { 1262 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1494 ata_dev_identify(ap, i); 1263 struct ata_device *dev = &ap->device[i];
1495 if (ata_dev_present(&ap->device[i])) { 1264
1496 found = 1; 1265 dev->class = classes[i];
1497 ata_dev_config(ap,i); 1266
1267 if (!ata_dev_present(dev))
1268 continue;
1269
1270 WARN_ON(dev->id != NULL);
1271 if (ata_dev_read_id(ap, dev, &dev->class, 1, &dev->id)) {
1272 dev->class = ATA_DEV_NONE;
1273 continue;
1274 }
1275
1276 if (ata_dev_configure(ap, dev, 1)) {
1277 dev->class++; /* disable device */
1278 continue;
1498 } 1279 }
1280
1281 found = 1;
1499 } 1282 }
1500 1283
1501 if ((!found) || (ap->flags & ATA_FLAG_PORT_DISABLED)) 1284 if (!found)
1502 goto err_out_disable; 1285 goto err_out_disable;
1503 1286
1504 ata_set_mode(ap); 1287 ata_set_mode(ap);
@@ -1509,7 +1292,6 @@ static int ata_bus_probe(struct ata_port *ap)
1509 1292
1510err_out_disable: 1293err_out_disable:
1511 ap->ops->port_disable(ap); 1294 ap->ops->port_disable(ap);
1512err_out:
1513 return -1; 1295 return -1;
1514} 1296}
1515 1297
@@ -1530,6 +1312,41 @@ void ata_port_probe(struct ata_port *ap)
1530} 1312}
1531 1313
1532/** 1314/**
1315 * sata_print_link_status - Print SATA link status
1316 * @ap: SATA port to printk link status about
1317 *
1318 * This function prints link speed and status of a SATA link.
1319 *
1320 * LOCKING:
1321 * None.
1322 */
1323static void sata_print_link_status(struct ata_port *ap)
1324{
1325 u32 sstatus, tmp;
1326 const char *speed;
1327
1328 if (!ap->ops->scr_read)
1329 return;
1330
1331 sstatus = scr_read(ap, SCR_STATUS);
1332
1333 if (sata_dev_present(ap)) {
1334 tmp = (sstatus >> 4) & 0xf;
1335 if (tmp & (1 << 0))
1336 speed = "1.5";
1337 else if (tmp & (1 << 1))
1338 speed = "3.0";
1339 else
1340 speed = "<unknown>";
1341 printk(KERN_INFO "ata%u: SATA link up %s Gbps (SStatus %X)\n",
1342 ap->id, speed, sstatus);
1343 } else {
1344 printk(KERN_INFO "ata%u: SATA link down (SStatus %X)\n",
1345 ap->id, sstatus);
1346 }
1347}
1348
1349/**
1533 * __sata_phy_reset - Wake/reset a low-level SATA PHY 1350 * __sata_phy_reset - Wake/reset a low-level SATA PHY
1534 * @ap: SATA port associated with target SATA PHY. 1351 * @ap: SATA port associated with target SATA PHY.
1535 * 1352 *
@@ -1563,27 +1380,14 @@ void __sata_phy_reset(struct ata_port *ap)
1563 break; 1380 break;
1564 } while (time_before(jiffies, timeout)); 1381 } while (time_before(jiffies, timeout));
1565 1382
1566 /* TODO: phy layer with polling, timeouts, etc. */ 1383 /* print link status */
1567 sstatus = scr_read(ap, SCR_STATUS); 1384 sata_print_link_status(ap);
1568 if (sata_dev_present(ap)) {
1569 const char *speed;
1570 u32 tmp;
1571 1385
1572 tmp = (sstatus >> 4) & 0xf; 1386 /* TODO: phy layer with polling, timeouts, etc. */
1573 if (tmp & (1 << 0)) 1387 if (sata_dev_present(ap))
1574 speed = "1.5";
1575 else if (tmp & (1 << 1))
1576 speed = "3.0";
1577 else
1578 speed = "<unknown>";
1579 printk(KERN_INFO "ata%u: SATA link up %s Gbps (SStatus %X)\n",
1580 ap->id, speed, sstatus);
1581 ata_port_probe(ap); 1388 ata_port_probe(ap);
1582 } else { 1389 else
1583 printk(KERN_INFO "ata%u: SATA link down (SStatus %X)\n",
1584 ap->id, sstatus);
1585 ata_port_disable(ap); 1390 ata_port_disable(ap);
1586 }
1587 1391
1588 if (ap->flags & ATA_FLAG_PORT_DISABLED) 1392 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1589 return; 1393 return;
@@ -1756,9 +1560,9 @@ int ata_timing_compute(struct ata_device *adev, unsigned short speed,
1756 ata_timing_quantize(t, t, T, UT); 1560 ata_timing_quantize(t, t, T, UT);
1757 1561
1758 /* 1562 /*
1759 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY, S.M.A.R.T 1563 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
1760 * and some other commands. We have to ensure that the DMA cycle timing is 1564 * S.M.A.R.T. and some other commands. We have to ensure that the
1761 * slower/equal than the fastest PIO timing. 1565 * DMA cycle timing is slower/equal than the fastest PIO timing.
1762 */ 1566 */
1763 1567
1764 if (speed > XFER_PIO_4) { 1568 if (speed > XFER_PIO_4) {
@@ -1767,7 +1571,7 @@ int ata_timing_compute(struct ata_device *adev, unsigned short speed,
1767 } 1571 }
1768 1572
1769 /* 1573 /*
1770 * Lenghten active & recovery time so that cycle time is correct. 1574 * Lengthen active & recovery time so that cycle time is correct.
1771 */ 1575 */
1772 1576
1773 if (t->act8b + t->rec8b < t->cyc8b) { 1577 if (t->act8b + t->rec8b < t->cyc8b) {
@@ -1821,6 +1625,12 @@ static void ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev)
1821 idx = ofs + dev->xfer_shift; 1625 idx = ofs + dev->xfer_shift;
1822 WARN_ON(idx >= ARRAY_SIZE(xfer_mode_str)); 1626 WARN_ON(idx >= ARRAY_SIZE(xfer_mode_str));
1823 1627
1628 if (ata_dev_revalidate(ap, dev, 0)) {
1629 printk(KERN_ERR "ata%u: failed to revalidate after set "
1630 "xfermode, disabled\n", ap->id);
1631 ata_port_disable(ap);
1632 }
1633
1824 DPRINTK("idx=%d xfer_shift=%u, xfer_mode=0x%x, base=0x%x, offset=%d\n", 1634 DPRINTK("idx=%d xfer_shift=%u, xfer_mode=0x%x, base=0x%x, offset=%d\n",
1825 idx, dev->xfer_shift, (int)dev->xfer_mode, (int)base, ofs); 1635 idx, dev->xfer_shift, (int)dev->xfer_mode, (int)base, ofs);
1826 1636
@@ -1886,7 +1696,6 @@ static void ata_host_set_dma(struct ata_port *ap, u8 xfer_mode,
1886 * 1696 *
1887 * LOCKING: 1697 * LOCKING:
1888 * PCI/etc. bus probe sem. 1698 * PCI/etc. bus probe sem.
1889 *
1890 */ 1699 */
1891static void ata_set_mode(struct ata_port *ap) 1700static void ata_set_mode(struct ata_port *ap)
1892{ 1701{
@@ -1926,6 +1735,26 @@ err_out:
1926} 1735}
1927 1736
1928/** 1737/**
1738 * ata_tf_to_host - issue ATA taskfile to host controller
1739 * @ap: port to which command is being issued
1740 * @tf: ATA taskfile register set
1741 *
1742 * Issues ATA taskfile register set to ATA host controller,
1743 * with proper synchronization with interrupt handler and
1744 * other threads.
1745 *
1746 * LOCKING:
1747 * spin_lock_irqsave(host_set lock)
1748 */
1749
1750static inline void ata_tf_to_host(struct ata_port *ap,
1751 const struct ata_taskfile *tf)
1752{
1753 ap->ops->tf_load(ap, tf);
1754 ap->ops->exec_command(ap, tf);
1755}
1756
1757/**
1929 * ata_busy_sleep - sleep until BSY clears, or timeout 1758 * ata_busy_sleep - sleep until BSY clears, or timeout
1930 * @ap: port containing status register to be polled 1759 * @ap: port containing status register to be polled
1931 * @tmout_pat: impatience timeout 1760 * @tmout_pat: impatience timeout
@@ -1935,12 +1764,10 @@ err_out:
1935 * or a timeout occurs. 1764 * or a timeout occurs.
1936 * 1765 *
1937 * LOCKING: None. 1766 * LOCKING: None.
1938 *
1939 */ 1767 */
1940 1768
1941static unsigned int ata_busy_sleep (struct ata_port *ap, 1769unsigned int ata_busy_sleep (struct ata_port *ap,
1942 unsigned long tmout_pat, 1770 unsigned long tmout_pat, unsigned long tmout)
1943 unsigned long tmout)
1944{ 1771{
1945 unsigned long timer_start, timeout; 1772 unsigned long timer_start, timeout;
1946 u8 status; 1773 u8 status;
@@ -2159,9 +1986,9 @@ void ata_bus_reset(struct ata_port *ap)
2159 /* 1986 /*
2160 * determine by signature whether we have ATA or ATAPI devices 1987 * determine by signature whether we have ATA or ATAPI devices
2161 */ 1988 */
2162 err = ata_dev_try_classify(ap, 0); 1989 ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
2163 if ((slave_possible) && (err != 0x81)) 1990 if ((slave_possible) && (err != 0x81))
2164 ata_dev_try_classify(ap, 1); 1991 ap->device[1].class = ata_dev_try_classify(ap, 1, &err);
2165 1992
2166 /* re-enable interrupts */ 1993 /* re-enable interrupts */
2167 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */ 1994 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
@@ -2196,6 +2023,448 @@ err_out:
2196 DPRINTK("EXIT\n"); 2023 DPRINTK("EXIT\n");
2197} 2024}
2198 2025
2026static int sata_phy_resume(struct ata_port *ap)
2027{
2028 unsigned long timeout = jiffies + (HZ * 5);
2029 u32 sstatus;
2030
2031 scr_write_flush(ap, SCR_CONTROL, 0x300);
2032
2033 /* Wait for phy to become ready, if necessary. */
2034 do {
2035 msleep(200);
2036 sstatus = scr_read(ap, SCR_STATUS);
2037 if ((sstatus & 0xf) != 1)
2038 return 0;
2039 } while (time_before(jiffies, timeout));
2040
2041 return -1;
2042}
2043
2044/**
2045 * ata_std_probeinit - initialize probing
2046 * @ap: port to be probed
2047 *
2048 * @ap is about to be probed. Initialize it. This function is
2049 * to be used as standard callback for ata_drive_probe_reset().
2050 *
2051 * NOTE!!! Do not use this function as probeinit if a low level
2052 * driver implements only hardreset. Just pass NULL as probeinit
2053 * in that case. Using this function is probably okay but doing
2054 * so makes reset sequence different from the original
2055 * ->phy_reset implementation and Jeff nervous. :-P
2056 */
2057extern void ata_std_probeinit(struct ata_port *ap)
2058{
2059 if (ap->flags & ATA_FLAG_SATA && ap->ops->scr_read) {
2060 sata_phy_resume(ap);
2061 if (sata_dev_present(ap))
2062 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2063 }
2064}
2065
2066/**
2067 * ata_std_softreset - reset host port via ATA SRST
2068 * @ap: port to reset
2069 * @verbose: fail verbosely
2070 * @classes: resulting classes of attached devices
2071 *
2072 * Reset host port using ATA SRST. This function is to be used
2073 * as standard callback for ata_drive_*_reset() functions.
2074 *
2075 * LOCKING:
2076 * Kernel thread context (may sleep)
2077 *
2078 * RETURNS:
2079 * 0 on success, -errno otherwise.
2080 */
2081int ata_std_softreset(struct ata_port *ap, int verbose, unsigned int *classes)
2082{
2083 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2084 unsigned int devmask = 0, err_mask;
2085 u8 err;
2086
2087 DPRINTK("ENTER\n");
2088
2089 if (ap->ops->scr_read && !sata_dev_present(ap)) {
2090 classes[0] = ATA_DEV_NONE;
2091 goto out;
2092 }
2093
2094 /* determine if device 0/1 are present */
2095 if (ata_devchk(ap, 0))
2096 devmask |= (1 << 0);
2097 if (slave_possible && ata_devchk(ap, 1))
2098 devmask |= (1 << 1);
2099
2100 /* select device 0 again */
2101 ap->ops->dev_select(ap, 0);
2102
2103 /* issue bus reset */
2104 DPRINTK("about to softreset, devmask=%x\n", devmask);
2105 err_mask = ata_bus_softreset(ap, devmask);
2106 if (err_mask) {
2107 if (verbose)
2108 printk(KERN_ERR "ata%u: SRST failed (err_mask=0x%x)\n",
2109 ap->id, err_mask);
2110 else
2111 DPRINTK("EXIT, softreset failed (err_mask=0x%x)\n",
2112 err_mask);
2113 return -EIO;
2114 }
2115
2116 /* determine by signature whether we have ATA or ATAPI devices */
2117 classes[0] = ata_dev_try_classify(ap, 0, &err);
2118 if (slave_possible && err != 0x81)
2119 classes[1] = ata_dev_try_classify(ap, 1, &err);
2120
2121 out:
2122 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
2123 return 0;
2124}
2125
2126/**
2127 * sata_std_hardreset - reset host port via SATA phy reset
2128 * @ap: port to reset
2129 * @verbose: fail verbosely
2130 * @class: resulting class of attached device
2131 *
2132 * SATA phy-reset host port using DET bits of SControl register.
2133 * This function is to be used as standard callback for
2134 * ata_drive_*_reset().
2135 *
2136 * LOCKING:
2137 * Kernel thread context (may sleep)
2138 *
2139 * RETURNS:
2140 * 0 on success, -errno otherwise.
2141 */
2142int sata_std_hardreset(struct ata_port *ap, int verbose, unsigned int *class)
2143{
2144 DPRINTK("ENTER\n");
2145
2146 /* Issue phy wake/reset */
2147 scr_write_flush(ap, SCR_CONTROL, 0x301);
2148
2149 /*
2150 * Couldn't find anything in SATA I/II specs, but AHCI-1.1
2151 * 10.4.2 says at least 1 ms.
2152 */
2153 msleep(1);
2154
2155 /* Bring phy back */
2156 sata_phy_resume(ap);
2157
2158 /* TODO: phy layer with polling, timeouts, etc. */
2159 if (!sata_dev_present(ap)) {
2160 *class = ATA_DEV_NONE;
2161 DPRINTK("EXIT, link offline\n");
2162 return 0;
2163 }
2164
2165 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
2166 if (verbose)
2167 printk(KERN_ERR "ata%u: COMRESET failed "
2168 "(device not ready)\n", ap->id);
2169 else
2170 DPRINTK("EXIT, device not ready\n");
2171 return -EIO;
2172 }
2173
2174 ap->ops->dev_select(ap, 0); /* probably unnecessary */
2175
2176 *class = ata_dev_try_classify(ap, 0, NULL);
2177
2178 DPRINTK("EXIT, class=%u\n", *class);
2179 return 0;
2180}
2181
2182/**
2183 * ata_std_postreset - standard postreset callback
2184 * @ap: the target ata_port
2185 * @classes: classes of attached devices
2186 *
2187 * This function is invoked after a successful reset. Note that
2188 * the device might have been reset more than once using
2189 * different reset methods before postreset is invoked.
2190 *
2191 * This function is to be used as standard callback for
2192 * ata_drive_*_reset().
2193 *
2194 * LOCKING:
2195 * Kernel thread context (may sleep)
2196 */
2197void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
2198{
2199 DPRINTK("ENTER\n");
2200
2201 /* set cable type if it isn't already set */
2202 if (ap->cbl == ATA_CBL_NONE && ap->flags & ATA_FLAG_SATA)
2203 ap->cbl = ATA_CBL_SATA;
2204
2205 /* print link status */
2206 if (ap->cbl == ATA_CBL_SATA)
2207 sata_print_link_status(ap);
2208
2209 /* re-enable interrupts */
2210 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
2211 ata_irq_on(ap);
2212
2213 /* is double-select really necessary? */
2214 if (classes[0] != ATA_DEV_NONE)
2215 ap->ops->dev_select(ap, 1);
2216 if (classes[1] != ATA_DEV_NONE)
2217 ap->ops->dev_select(ap, 0);
2218
2219 /* bail out if no device is present */
2220 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2221 DPRINTK("EXIT, no device\n");
2222 return;
2223 }
2224
2225 /* set up device control */
2226 if (ap->ioaddr.ctl_addr) {
2227 if (ap->flags & ATA_FLAG_MMIO)
2228 writeb(ap->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
2229 else
2230 outb(ap->ctl, ap->ioaddr.ctl_addr);
2231 }
2232
2233 DPRINTK("EXIT\n");
2234}
2235
2236/**
2237 * ata_std_probe_reset - standard probe reset method
 2238 * @ap: port to perform probe-reset
2239 * @classes: resulting classes of attached devices
2240 *
2241 * The stock off-the-shelf ->probe_reset method.
2242 *
2243 * LOCKING:
2244 * Kernel thread context (may sleep)
2245 *
2246 * RETURNS:
2247 * 0 on success, -errno otherwise.
2248 */
2249int ata_std_probe_reset(struct ata_port *ap, unsigned int *classes)
2250{
2251 ata_reset_fn_t hardreset;
2252
2253 hardreset = NULL;
2254 if (ap->flags & ATA_FLAG_SATA && ap->ops->scr_read)
2255 hardreset = sata_std_hardreset;
2256
2257 return ata_drive_probe_reset(ap, ata_std_probeinit,
2258 ata_std_softreset, hardreset,
2259 ata_std_postreset, classes);
2260}
2261
2262static int do_probe_reset(struct ata_port *ap, ata_reset_fn_t reset,
2263 ata_postreset_fn_t postreset,
2264 unsigned int *classes)
2265{
2266 int i, rc;
2267
2268 for (i = 0; i < ATA_MAX_DEVICES; i++)
2269 classes[i] = ATA_DEV_UNKNOWN;
2270
2271 rc = reset(ap, 0, classes);
2272 if (rc)
2273 return rc;
2274
2275 /* If any class isn't ATA_DEV_UNKNOWN, consider classification
2276 * is complete and convert all ATA_DEV_UNKNOWN to
2277 * ATA_DEV_NONE.
2278 */
2279 for (i = 0; i < ATA_MAX_DEVICES; i++)
2280 if (classes[i] != ATA_DEV_UNKNOWN)
2281 break;
2282
2283 if (i < ATA_MAX_DEVICES)
2284 for (i = 0; i < ATA_MAX_DEVICES; i++)
2285 if (classes[i] == ATA_DEV_UNKNOWN)
2286 classes[i] = ATA_DEV_NONE;
2287
2288 if (postreset)
2289 postreset(ap, classes);
2290
2291 return classes[0] != ATA_DEV_UNKNOWN ? 0 : -ENODEV;
2292}
2293
2294/**
2295 * ata_drive_probe_reset - Perform probe reset with given methods
2296 * @ap: port to reset
2297 * @probeinit: probeinit method (can be NULL)
2298 * @softreset: softreset method (can be NULL)
2299 * @hardreset: hardreset method (can be NULL)
2300 * @postreset: postreset method (can be NULL)
2301 * @classes: resulting classes of attached devices
2302 *
2303 * Reset the specified port and classify attached devices using
2304 * given methods. This function prefers softreset but tries all
2305 * possible reset sequences to reset and classify devices. This
2306 * function is intended to be used for constructing ->probe_reset
2307 * callback by low level drivers.
2308 *
2309 * Reset methods should follow the following rules.
2310 *
 2311 * - Return 0 on success, -errno on failure.
2312 * - If classification is supported, fill classes[] with
2313 * recognized class codes.
2314 * - If classification is not supported, leave classes[] alone.
2315 * - If verbose is non-zero, print error message on failure;
2316 * otherwise, shut up.
2317 *
2318 * LOCKING:
2319 * Kernel thread context (may sleep)
2320 *
2321 * RETURNS:
 2322 * 0 on success, -EINVAL if no reset method is available, -ENODEV
2323 * if classification fails, and any error code from reset
2324 * methods.
2325 */
2326int ata_drive_probe_reset(struct ata_port *ap, ata_probeinit_fn_t probeinit,
2327 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
2328 ata_postreset_fn_t postreset, unsigned int *classes)
2329{
2330 int rc = -EINVAL;
2331
2332 if (probeinit)
2333 probeinit(ap);
2334
2335 if (softreset) {
2336 rc = do_probe_reset(ap, softreset, postreset, classes);
2337 if (rc == 0)
2338 return 0;
2339 }
2340
2341 if (!hardreset)
2342 return rc;
2343
2344 rc = do_probe_reset(ap, hardreset, postreset, classes);
2345 if (rc == 0 || rc != -ENODEV)
2346 return rc;
2347
2348 if (softreset)
2349 rc = do_probe_reset(ap, softreset, postreset, classes);
2350
2351 return rc;
2352}
2353
2354/**
2355 * ata_dev_same_device - Determine whether new ID matches configured device
2356 * @ap: port on which the device to compare against resides
2357 * @dev: device to compare against
2358 * @new_class: class of the new device
2359 * @new_id: IDENTIFY page of the new device
2360 *
2361 * Compare @new_class and @new_id against @dev and determine
2362 * whether @dev is the device indicated by @new_class and
2363 * @new_id.
2364 *
2365 * LOCKING:
2366 * None.
2367 *
2368 * RETURNS:
2369 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
2370 */
2371static int ata_dev_same_device(struct ata_port *ap, struct ata_device *dev,
2372 unsigned int new_class, const u16 *new_id)
2373{
2374 const u16 *old_id = dev->id;
2375 unsigned char model[2][41], serial[2][21];
2376 u64 new_n_sectors;
2377
2378 if (dev->class != new_class) {
2379 printk(KERN_INFO
2380 "ata%u: dev %u class mismatch %d != %d\n",
2381 ap->id, dev->devno, dev->class, new_class);
2382 return 0;
2383 }
2384
2385 ata_id_c_string(old_id, model[0], ATA_ID_PROD_OFS, sizeof(model[0]));
2386 ata_id_c_string(new_id, model[1], ATA_ID_PROD_OFS, sizeof(model[1]));
2387 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO_OFS, sizeof(serial[0]));
2388 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO_OFS, sizeof(serial[1]));
2389 new_n_sectors = ata_id_n_sectors(new_id);
2390
2391 if (strcmp(model[0], model[1])) {
2392 printk(KERN_INFO
2393 "ata%u: dev %u model number mismatch '%s' != '%s'\n",
2394 ap->id, dev->devno, model[0], model[1]);
2395 return 0;
2396 }
2397
2398 if (strcmp(serial[0], serial[1])) {
2399 printk(KERN_INFO
2400 "ata%u: dev %u serial number mismatch '%s' != '%s'\n",
2401 ap->id, dev->devno, serial[0], serial[1]);
2402 return 0;
2403 }
2404
2405 if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) {
2406 printk(KERN_INFO
2407 "ata%u: dev %u n_sectors mismatch %llu != %llu\n",
2408 ap->id, dev->devno, (unsigned long long)dev->n_sectors,
2409 (unsigned long long)new_n_sectors);
2410 return 0;
2411 }
2412
2413 return 1;
2414}
2415
2416/**
2417 * ata_dev_revalidate - Revalidate ATA device
2418 * @ap: port on which the device to revalidate resides
2419 * @dev: device to revalidate
2420 * @post_reset: is this revalidation after reset?
2421 *
2422 * Re-read IDENTIFY page and make sure @dev is still attached to
2423 * the port.
2424 *
2425 * LOCKING:
2426 * Kernel thread context (may sleep)
2427 *
2428 * RETURNS:
2429 * 0 on success, negative errno otherwise
2430 */
2431int ata_dev_revalidate(struct ata_port *ap, struct ata_device *dev,
2432 int post_reset)
2433{
2434 unsigned int class;
2435 u16 *id;
2436 int rc;
2437
2438 if (!ata_dev_present(dev))
2439 return -ENODEV;
2440
2441 class = dev->class;
2442 id = NULL;
2443
2444 /* allocate & read ID data */
2445 rc = ata_dev_read_id(ap, dev, &class, post_reset, &id);
2446 if (rc)
2447 goto fail;
2448
2449 /* is the device still there? */
2450 if (!ata_dev_same_device(ap, dev, class, id)) {
2451 rc = -ENODEV;
2452 goto fail;
2453 }
2454
2455 kfree(dev->id);
2456 dev->id = id;
2457
2458 /* configure device according to the new ID */
2459 return ata_dev_configure(ap, dev, 0);
2460
2461 fail:
2462 printk(KERN_ERR "ata%u: dev %u revalidation failed (errno=%d)\n",
2463 ap->id, dev->devno, rc);
2464 kfree(id);
2465 return rc;
2466}
2467
2199static void ata_pr_blacklisted(const struct ata_port *ap, 2468static void ata_pr_blacklisted(const struct ata_port *ap,
2200 const struct ata_device *dev) 2469 const struct ata_device *dev)
2201{ 2470{
@@ -2237,24 +2506,13 @@ static const char * const ata_dma_blacklist [] = {
2237 2506
2238static int ata_dma_blacklisted(const struct ata_device *dev) 2507static int ata_dma_blacklisted(const struct ata_device *dev)
2239{ 2508{
2240 unsigned char model_num[40]; 2509 unsigned char model_num[41];
2241 char *s;
2242 unsigned int len;
2243 int i; 2510 int i;
2244 2511
2245 ata_dev_id_string(dev->id, model_num, ATA_ID_PROD_OFS, 2512 ata_id_c_string(dev->id, model_num, ATA_ID_PROD_OFS, sizeof(model_num));
2246 sizeof(model_num));
2247 s = &model_num[0];
2248 len = strnlen(s, sizeof(model_num));
2249
2250 /* ATAPI specifies that empty space is blank-filled; remove blanks */
2251 while ((len > 0) && (s[len - 1] == ' ')) {
2252 len--;
2253 s[len] = 0;
2254 }
2255 2513
2256 for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i++) 2514 for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i++)
2257 if (!strncmp(ata_dma_blacklist[i], s, len)) 2515 if (!strcmp(ata_dma_blacklist[i], model_num))
2258 return 1; 2516 return 1;
2259 2517
2260 return 0; 2518 return 0;
@@ -2268,7 +2526,7 @@ static unsigned int ata_get_mode_mask(const struct ata_port *ap, int shift)
2268 master = &ap->device[0]; 2526 master = &ap->device[0];
2269 slave = &ap->device[1]; 2527 slave = &ap->device[1];
2270 2528
2271 assert (ata_dev_present(master) || ata_dev_present(slave)); 2529 WARN_ON(!ata_dev_present(master) && !ata_dev_present(slave));
2272 2530
2273 if (shift == ATA_SHIFT_UDMA) { 2531 if (shift == ATA_SHIFT_UDMA) {
2274 mask = ap->udma_mask; 2532 mask = ap->udma_mask;
@@ -2420,63 +2678,28 @@ static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev)
2420} 2678}
2421 2679
2422/** 2680/**
2423 * ata_dev_reread_id - Reread the device identify device info
2424 * @ap: port where the device is
2425 * @dev: device to reread the identify device info
2426 *
2427 * LOCKING:
2428 */
2429
2430static void ata_dev_reread_id(struct ata_port *ap, struct ata_device *dev)
2431{
2432 struct ata_taskfile tf;
2433
2434 ata_tf_init(ap, &tf, dev->devno);
2435
2436 if (dev->class == ATA_DEV_ATA) {
2437 tf.command = ATA_CMD_ID_ATA;
2438 DPRINTK("do ATA identify\n");
2439 } else {
2440 tf.command = ATA_CMD_ID_ATAPI;
2441 DPRINTK("do ATAPI identify\n");
2442 }
2443
2444 tf.flags |= ATA_TFLAG_DEVICE;
2445 tf.protocol = ATA_PROT_PIO;
2446
2447 if (ata_exec_internal(ap, dev, &tf, DMA_FROM_DEVICE,
2448 dev->id, sizeof(dev->id)))
2449 goto err_out;
2450
2451 swap_buf_le16(dev->id, ATA_ID_WORDS);
2452
2453 ata_dump_id(dev);
2454
2455 DPRINTK("EXIT\n");
2456
2457 return;
2458err_out:
2459 printk(KERN_ERR "ata%u: failed to reread ID, disabled\n", ap->id);
2460 ata_port_disable(ap);
2461}
2462
2463/**
2464 * ata_dev_init_params - Issue INIT DEV PARAMS command 2681 * ata_dev_init_params - Issue INIT DEV PARAMS command
2465 * @ap: Port associated with device @dev 2682 * @ap: Port associated with device @dev
2466 * @dev: Device to which command will be sent 2683 * @dev: Device to which command will be sent
2467 * 2684 *
2468 * LOCKING: 2685 * LOCKING:
2686 * Kernel thread context (may sleep)
2687 *
2688 * RETURNS:
2689 * 0 on success, AC_ERR_* mask otherwise.
2469 */ 2690 */
2470 2691
2471static void ata_dev_init_params(struct ata_port *ap, struct ata_device *dev) 2692static unsigned int ata_dev_init_params(struct ata_port *ap,
2693 struct ata_device *dev)
2472{ 2694{
2473 struct ata_taskfile tf; 2695 struct ata_taskfile tf;
2696 unsigned int err_mask;
2474 u16 sectors = dev->id[6]; 2697 u16 sectors = dev->id[6];
2475 u16 heads = dev->id[3]; 2698 u16 heads = dev->id[3];
2476 2699
2477 /* Number of sectors per track 1-255. Number of heads 1-16 */ 2700 /* Number of sectors per track 1-255. Number of heads 1-16 */
2478 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16) 2701 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
2479 return; 2702 return 0;
2480 2703
2481 /* set up init dev params taskfile */ 2704 /* set up init dev params taskfile */
2482 DPRINTK("init dev params \n"); 2705 DPRINTK("init dev params \n");
@@ -2488,13 +2711,10 @@ static void ata_dev_init_params(struct ata_port *ap, struct ata_device *dev)
2488 tf.nsect = sectors; 2711 tf.nsect = sectors;
2489 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */ 2712 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
2490 2713
2491 if (ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0)) { 2714 err_mask = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
2492 printk(KERN_ERR "ata%u: failed to init parameters, disabled\n",
2493 ap->id);
2494 ata_port_disable(ap);
2495 }
2496 2715
2497 DPRINTK("EXIT\n"); 2716 DPRINTK("EXIT, err_mask=%x\n", err_mask);
2717 return err_mask;
2498} 2718}
2499 2719
2500/** 2720/**
@@ -2514,11 +2734,11 @@ static void ata_sg_clean(struct ata_queued_cmd *qc)
2514 int dir = qc->dma_dir; 2734 int dir = qc->dma_dir;
2515 void *pad_buf = NULL; 2735 void *pad_buf = NULL;
2516 2736
2517 assert(qc->flags & ATA_QCFLAG_DMAMAP); 2737 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
2518 assert(sg != NULL); 2738 WARN_ON(sg == NULL);
2519 2739
2520 if (qc->flags & ATA_QCFLAG_SINGLE) 2740 if (qc->flags & ATA_QCFLAG_SINGLE)
2521 assert(qc->n_elem <= 1); 2741 WARN_ON(qc->n_elem > 1);
2522 2742
2523 VPRINTK("unmapping %u sg elements\n", qc->n_elem); 2743 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
2524 2744
@@ -2573,8 +2793,8 @@ static void ata_fill_sg(struct ata_queued_cmd *qc)
2573 struct scatterlist *sg; 2793 struct scatterlist *sg;
2574 unsigned int idx; 2794 unsigned int idx;
2575 2795
2576 assert(qc->__sg != NULL); 2796 WARN_ON(qc->__sg == NULL);
2577 assert(qc->n_elem > 0 || qc->pad_len > 0); 2797 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
2578 2798
2579 idx = 0; 2799 idx = 0;
2580 ata_for_each_sg(sg, qc) { 2800 ata_for_each_sg(sg, qc) {
@@ -2727,7 +2947,7 @@ static int ata_sg_setup_one(struct ata_queued_cmd *qc)
2727 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ); 2947 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
2728 struct scatterlist *psg = &qc->pad_sgent; 2948 struct scatterlist *psg = &qc->pad_sgent;
2729 2949
2730 assert(qc->dev->class == ATA_DEV_ATAPI); 2950 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
2731 2951
2732 memset(pad_buf, 0, ATA_DMA_PAD_SZ); 2952 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
2733 2953
@@ -2791,7 +3011,7 @@ static int ata_sg_setup(struct ata_queued_cmd *qc)
2791 int n_elem, pre_n_elem, dir, trim_sg = 0; 3011 int n_elem, pre_n_elem, dir, trim_sg = 0;
2792 3012
2793 VPRINTK("ENTER, ata%u\n", ap->id); 3013 VPRINTK("ENTER, ata%u\n", ap->id);
2794 assert(qc->flags & ATA_QCFLAG_SG); 3014 WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
2795 3015
2796 /* we must lengthen transfers to end on a 32-bit boundary */ 3016 /* we must lengthen transfers to end on a 32-bit boundary */
2797 qc->pad_len = lsg->length & 3; 3017 qc->pad_len = lsg->length & 3;
@@ -2800,7 +3020,7 @@ static int ata_sg_setup(struct ata_queued_cmd *qc)
2800 struct scatterlist *psg = &qc->pad_sgent; 3020 struct scatterlist *psg = &qc->pad_sgent;
2801 unsigned int offset; 3021 unsigned int offset;
2802 3022
2803 assert(qc->dev->class == ATA_DEV_ATAPI); 3023 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
2804 3024
2805 memset(pad_buf, 0, ATA_DMA_PAD_SZ); 3025 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
2806 3026
@@ -2876,7 +3096,7 @@ void ata_poll_qc_complete(struct ata_queued_cmd *qc)
2876} 3096}
2877 3097
2878/** 3098/**
2879 * ata_pio_poll - 3099 * ata_pio_poll - poll using PIO, depending on current state
2880 * @ap: the target ata_port 3100 * @ap: the target ata_port
2881 * 3101 *
2882 * LOCKING: 3102 * LOCKING:
@@ -2894,7 +3114,7 @@ static unsigned long ata_pio_poll(struct ata_port *ap)
2894 unsigned int reg_state = HSM_ST_UNKNOWN; 3114 unsigned int reg_state = HSM_ST_UNKNOWN;
2895 3115
2896 qc = ata_qc_from_tag(ap, ap->active_tag); 3116 qc = ata_qc_from_tag(ap, ap->active_tag);
2897 assert(qc != NULL); 3117 WARN_ON(qc == NULL);
2898 3118
2899 switch (ap->hsm_task_state) { 3119 switch (ap->hsm_task_state) {
2900 case HSM_ST: 3120 case HSM_ST:
@@ -2915,7 +3135,7 @@ static unsigned long ata_pio_poll(struct ata_port *ap)
2915 status = ata_chk_status(ap); 3135 status = ata_chk_status(ap);
2916 if (status & ATA_BUSY) { 3136 if (status & ATA_BUSY) {
2917 if (time_after(jiffies, ap->pio_task_timeout)) { 3137 if (time_after(jiffies, ap->pio_task_timeout)) {
2918 qc->err_mask |= AC_ERR_ATA_BUS; 3138 qc->err_mask |= AC_ERR_TIMEOUT;
2919 ap->hsm_task_state = HSM_ST_TMOUT; 3139 ap->hsm_task_state = HSM_ST_TMOUT;
2920 return 0; 3140 return 0;
2921 } 3141 }
@@ -2962,7 +3182,7 @@ static int ata_pio_complete (struct ata_port *ap)
2962 } 3182 }
2963 3183
2964 qc = ata_qc_from_tag(ap, ap->active_tag); 3184 qc = ata_qc_from_tag(ap, ap->active_tag);
2965 assert(qc != NULL); 3185 WARN_ON(qc == NULL);
2966 3186
2967 drv_stat = ata_wait_idle(ap); 3187 drv_stat = ata_wait_idle(ap);
2968 if (!ata_ok(drv_stat)) { 3188 if (!ata_ok(drv_stat)) {
@@ -2973,7 +3193,7 @@ static int ata_pio_complete (struct ata_port *ap)
2973 3193
2974 ap->hsm_task_state = HSM_ST_IDLE; 3194 ap->hsm_task_state = HSM_ST_IDLE;
2975 3195
2976 assert(qc->err_mask == 0); 3196 WARN_ON(qc->err_mask);
2977 ata_poll_qc_complete(qc); 3197 ata_poll_qc_complete(qc);
2978 3198
2979 /* another command may start at this point */ 3199 /* another command may start at this point */
@@ -2983,7 +3203,7 @@ static int ata_pio_complete (struct ata_port *ap)
2983 3203
2984 3204
2985/** 3205/**
2986 * swap_buf_le16 - swap halves of 16-words in place 3206 * swap_buf_le16 - swap halves of 16-bit words in place
2987 * @buf: Buffer to swap 3207 * @buf: Buffer to swap
2988 * @buf_words: Number of 16-bit words in buffer. 3208 * @buf_words: Number of 16-bit words in buffer.
2989 * 3209 *
@@ -3293,7 +3513,7 @@ static void atapi_pio_bytes(struct ata_queued_cmd *qc)
3293err_out: 3513err_out:
3294 printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n", 3514 printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n",
3295 ap->id, dev->devno); 3515 ap->id, dev->devno);
3296 qc->err_mask |= AC_ERR_ATA_BUS; 3516 qc->err_mask |= AC_ERR_HSM;
3297 ap->hsm_task_state = HSM_ST_ERR; 3517 ap->hsm_task_state = HSM_ST_ERR;
3298} 3518}
3299 3519
@@ -3330,7 +3550,7 @@ static void ata_pio_block(struct ata_port *ap)
3330 } 3550 }
3331 3551
3332 qc = ata_qc_from_tag(ap, ap->active_tag); 3552 qc = ata_qc_from_tag(ap, ap->active_tag);
3333 assert(qc != NULL); 3553 WARN_ON(qc == NULL);
3334 3554
3335 /* check error */ 3555 /* check error */
3336 if (status & (ATA_ERR | ATA_DF)) { 3556 if (status & (ATA_ERR | ATA_DF)) {
@@ -3351,7 +3571,7 @@ static void ata_pio_block(struct ata_port *ap)
3351 } else { 3571 } else {
3352 /* handle BSY=0, DRQ=0 as error */ 3572 /* handle BSY=0, DRQ=0 as error */
3353 if ((status & ATA_DRQ) == 0) { 3573 if ((status & ATA_DRQ) == 0) {
3354 qc->err_mask |= AC_ERR_ATA_BUS; 3574 qc->err_mask |= AC_ERR_HSM;
3355 ap->hsm_task_state = HSM_ST_ERR; 3575 ap->hsm_task_state = HSM_ST_ERR;
3356 return; 3576 return;
3357 } 3577 }
@@ -3365,7 +3585,7 @@ static void ata_pio_error(struct ata_port *ap)
3365 struct ata_queued_cmd *qc; 3585 struct ata_queued_cmd *qc;
3366 3586
3367 qc = ata_qc_from_tag(ap, ap->active_tag); 3587 qc = ata_qc_from_tag(ap, ap->active_tag);
3368 assert(qc != NULL); 3588 WARN_ON(qc == NULL);
3369 3589
3370 if (qc->tf.command != ATA_CMD_PACKET) 3590 if (qc->tf.command != ATA_CMD_PACKET)
3371 printk(KERN_WARNING "ata%u: PIO error\n", ap->id); 3591 printk(KERN_WARNING "ata%u: PIO error\n", ap->id);
@@ -3373,7 +3593,7 @@ static void ata_pio_error(struct ata_port *ap)
3373 /* make sure qc->err_mask is available to 3593 /* make sure qc->err_mask is available to
3374 * know what's wrong and recover 3594 * know what's wrong and recover
3375 */ 3595 */
3376 assert(qc->err_mask); 3596 WARN_ON(qc->err_mask == 0);
3377 3597
3378 ap->hsm_task_state = HSM_ST_IDLE; 3598 ap->hsm_task_state = HSM_ST_IDLE;
3379 3599
@@ -3414,7 +3634,7 @@ fsm_start:
3414 } 3634 }
3415 3635
3416 if (timeout) 3636 if (timeout)
3417 queue_delayed_work(ata_wq, &ap->pio_task, timeout); 3637 ata_queue_delayed_pio_task(ap, timeout);
3418 else if (!qc_completed) 3638 else if (!qc_completed)
3419 goto fsm_start; 3639 goto fsm_start;
3420} 3640}
@@ -3447,15 +3667,10 @@ static void ata_qc_timeout(struct ata_queued_cmd *qc)
3447 3667
3448 DPRINTK("ENTER\n"); 3668 DPRINTK("ENTER\n");
3449 3669
3450 spin_lock_irqsave(&host_set->lock, flags); 3670 ata_flush_pio_tasks(ap);
3671 ap->hsm_task_state = HSM_ST_IDLE;
3451 3672
3452 /* hack alert! We cannot use the supplied completion 3673 spin_lock_irqsave(&host_set->lock, flags);
3453 * function from inside the ->eh_strategy_handler() thread.
3454 * libata is the only user of ->eh_strategy_handler() in
3455 * any kernel, so the default scsi_done() assumes it is
3456 * not being called from the SCSI EH.
3457 */
3458 qc->scsidone = scsi_finish_command;
3459 3674
3460 switch (qc->tf.protocol) { 3675 switch (qc->tf.protocol) {
3461 3676
@@ -3480,12 +3695,13 @@ static void ata_qc_timeout(struct ata_queued_cmd *qc)
3480 3695
3481 /* complete taskfile transaction */ 3696 /* complete taskfile transaction */
3482 qc->err_mask |= ac_err_mask(drv_stat); 3697 qc->err_mask |= ac_err_mask(drv_stat);
3483 ata_qc_complete(qc);
3484 break; 3698 break;
3485 } 3699 }
3486 3700
3487 spin_unlock_irqrestore(&host_set->lock, flags); 3701 spin_unlock_irqrestore(&host_set->lock, flags);
3488 3702
3703 ata_eh_qc_complete(qc);
3704
3489 DPRINTK("EXIT\n"); 3705 DPRINTK("EXIT\n");
3490} 3706}
3491 3707
@@ -3510,20 +3726,10 @@ static void ata_qc_timeout(struct ata_queued_cmd *qc)
3510 3726
3511void ata_eng_timeout(struct ata_port *ap) 3727void ata_eng_timeout(struct ata_port *ap)
3512{ 3728{
3513 struct ata_queued_cmd *qc;
3514
3515 DPRINTK("ENTER\n"); 3729 DPRINTK("ENTER\n");
3516 3730
3517 qc = ata_qc_from_tag(ap, ap->active_tag); 3731 ata_qc_timeout(ata_qc_from_tag(ap, ap->active_tag));
3518 if (qc)
3519 ata_qc_timeout(qc);
3520 else {
3521 printk(KERN_ERR "ata%u: BUG: timeout without command\n",
3522 ap->id);
3523 goto out;
3524 }
3525 3732
3526out:
3527 DPRINTK("EXIT\n"); 3733 DPRINTK("EXIT\n");
3528} 3734}
3529 3735
@@ -3579,21 +3785,6 @@ struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
3579 return qc; 3785 return qc;
3580} 3786}
3581 3787
3582static void __ata_qc_complete(struct ata_queued_cmd *qc)
3583{
3584 struct ata_port *ap = qc->ap;
3585 unsigned int tag;
3586
3587 qc->flags = 0;
3588 tag = qc->tag;
3589 if (likely(ata_tag_valid(tag))) {
3590 if (tag == ap->active_tag)
3591 ap->active_tag = ATA_TAG_POISON;
3592 qc->tag = ATA_TAG_POISON;
3593 clear_bit(tag, &ap->qactive);
3594 }
3595}
3596
3597/** 3788/**
3598 * ata_qc_free - free unused ata_queued_cmd 3789 * ata_qc_free - free unused ata_queued_cmd
3599 * @qc: Command to complete 3790 * @qc: Command to complete
@@ -3606,29 +3797,25 @@ static void __ata_qc_complete(struct ata_queued_cmd *qc)
3606 */ 3797 */
3607void ata_qc_free(struct ata_queued_cmd *qc) 3798void ata_qc_free(struct ata_queued_cmd *qc)
3608{ 3799{
3609 assert(qc != NULL); /* ata_qc_from_tag _might_ return NULL */ 3800 struct ata_port *ap = qc->ap;
3801 unsigned int tag;
3610 3802
3611 __ata_qc_complete(qc); 3803 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
3612}
3613 3804
3614/** 3805 qc->flags = 0;
3615 * ata_qc_complete - Complete an active ATA command 3806 tag = qc->tag;
3616 * @qc: Command to complete 3807 if (likely(ata_tag_valid(tag))) {
3617 * @err_mask: ATA Status register contents 3808 if (tag == ap->active_tag)
3618 * 3809 ap->active_tag = ATA_TAG_POISON;
3619 * Indicate to the mid and upper layers that an ATA 3810 qc->tag = ATA_TAG_POISON;
3620 * command has completed, with either an ok or not-ok status. 3811 clear_bit(tag, &ap->qactive);
3621 * 3812 }
3622 * LOCKING: 3813}
3623 * spin_lock_irqsave(host_set lock)
3624 */
3625 3814
3626void ata_qc_complete(struct ata_queued_cmd *qc) 3815void __ata_qc_complete(struct ata_queued_cmd *qc)
3627{ 3816{
3628 int rc; 3817 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
3629 3818 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
3630 assert(qc != NULL); /* ata_qc_from_tag _might_ return NULL */
3631 assert(qc->flags & ATA_QCFLAG_ACTIVE);
3632 3819
3633 if (likely(qc->flags & ATA_QCFLAG_DMAMAP)) 3820 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
3634 ata_sg_clean(qc); 3821 ata_sg_clean(qc);
@@ -3640,17 +3827,7 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
3640 qc->flags &= ~ATA_QCFLAG_ACTIVE; 3827 qc->flags &= ~ATA_QCFLAG_ACTIVE;
3641 3828
3642 /* call completion callback */ 3829 /* call completion callback */
3643 rc = qc->complete_fn(qc); 3830 qc->complete_fn(qc);
3644
3645 /* if callback indicates not to complete command (non-zero),
3646 * return immediately
3647 */
3648 if (rc != 0)
3649 return;
3650
3651 __ata_qc_complete(qc);
3652
3653 VPRINTK("EXIT\n");
3654} 3831}
3655 3832
3656static inline int ata_should_dma_map(struct ata_queued_cmd *qc) 3833static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
@@ -3690,20 +3867,20 @@ static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
3690 * spin_lock_irqsave(host_set lock) 3867 * spin_lock_irqsave(host_set lock)
3691 * 3868 *
3692 * RETURNS: 3869 * RETURNS:
3693 * Zero on success, negative on error. 3870 * Zero on success, AC_ERR_* mask on failure
3694 */ 3871 */
3695 3872
3696int ata_qc_issue(struct ata_queued_cmd *qc) 3873unsigned int ata_qc_issue(struct ata_queued_cmd *qc)
3697{ 3874{
3698 struct ata_port *ap = qc->ap; 3875 struct ata_port *ap = qc->ap;
3699 3876
3700 if (ata_should_dma_map(qc)) { 3877 if (ata_should_dma_map(qc)) {
3701 if (qc->flags & ATA_QCFLAG_SG) { 3878 if (qc->flags & ATA_QCFLAG_SG) {
3702 if (ata_sg_setup(qc)) 3879 if (ata_sg_setup(qc))
3703 goto err_out; 3880 goto sg_err;
3704 } else if (qc->flags & ATA_QCFLAG_SINGLE) { 3881 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
3705 if (ata_sg_setup_one(qc)) 3882 if (ata_sg_setup_one(qc))
3706 goto err_out; 3883 goto sg_err;
3707 } 3884 }
3708 } else { 3885 } else {
3709 qc->flags &= ~ATA_QCFLAG_DMAMAP; 3886 qc->flags &= ~ATA_QCFLAG_DMAMAP;
@@ -3716,8 +3893,9 @@ int ata_qc_issue(struct ata_queued_cmd *qc)
3716 3893
3717 return ap->ops->qc_issue(qc); 3894 return ap->ops->qc_issue(qc);
3718 3895
3719err_out: 3896sg_err:
3720 return -1; 3897 qc->flags &= ~ATA_QCFLAG_DMAMAP;
3898 return AC_ERR_SYSTEM;
3721} 3899}
3722 3900
3723 3901
@@ -3736,10 +3914,10 @@ err_out:
3736 * spin_lock_irqsave(host_set lock) 3914 * spin_lock_irqsave(host_set lock)
3737 * 3915 *
3738 * RETURNS: 3916 * RETURNS:
3739 * Zero on success, negative on error. 3917 * Zero on success, AC_ERR_* mask on failure
3740 */ 3918 */
3741 3919
3742int ata_qc_issue_prot(struct ata_queued_cmd *qc) 3920unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
3743{ 3921{
3744 struct ata_port *ap = qc->ap; 3922 struct ata_port *ap = qc->ap;
3745 3923
@@ -3760,31 +3938,31 @@ int ata_qc_issue_prot(struct ata_queued_cmd *qc)
3760 ata_qc_set_polling(qc); 3938 ata_qc_set_polling(qc);
3761 ata_tf_to_host(ap, &qc->tf); 3939 ata_tf_to_host(ap, &qc->tf);
3762 ap->hsm_task_state = HSM_ST; 3940 ap->hsm_task_state = HSM_ST;
3763 queue_work(ata_wq, &ap->pio_task); 3941 ata_queue_pio_task(ap);
3764 break; 3942 break;
3765 3943
3766 case ATA_PROT_ATAPI: 3944 case ATA_PROT_ATAPI:
3767 ata_qc_set_polling(qc); 3945 ata_qc_set_polling(qc);
3768 ata_tf_to_host(ap, &qc->tf); 3946 ata_tf_to_host(ap, &qc->tf);
3769 queue_work(ata_wq, &ap->packet_task); 3947 ata_queue_packet_task(ap);
3770 break; 3948 break;
3771 3949
3772 case ATA_PROT_ATAPI_NODATA: 3950 case ATA_PROT_ATAPI_NODATA:
3773 ap->flags |= ATA_FLAG_NOINTR; 3951 ap->flags |= ATA_FLAG_NOINTR;
3774 ata_tf_to_host(ap, &qc->tf); 3952 ata_tf_to_host(ap, &qc->tf);
3775 queue_work(ata_wq, &ap->packet_task); 3953 ata_queue_packet_task(ap);
3776 break; 3954 break;
3777 3955
3778 case ATA_PROT_ATAPI_DMA: 3956 case ATA_PROT_ATAPI_DMA:
3779 ap->flags |= ATA_FLAG_NOINTR; 3957 ap->flags |= ATA_FLAG_NOINTR;
3780 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */ 3958 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
3781 ap->ops->bmdma_setup(qc); /* set up bmdma */ 3959 ap->ops->bmdma_setup(qc); /* set up bmdma */
3782 queue_work(ata_wq, &ap->packet_task); 3960 ata_queue_packet_task(ap);
3783 break; 3961 break;
3784 3962
3785 default: 3963 default:
3786 WARN_ON(1); 3964 WARN_ON(1);
3787 return -1; 3965 return AC_ERR_SYSTEM;
3788 } 3966 }
3789 3967
3790 return 0; 3968 return 0;
@@ -4168,26 +4346,26 @@ static void atapi_packet_task(void *_data)
4168 u8 status; 4346 u8 status;
4169 4347
4170 qc = ata_qc_from_tag(ap, ap->active_tag); 4348 qc = ata_qc_from_tag(ap, ap->active_tag);
4171 assert(qc != NULL); 4349 WARN_ON(qc == NULL);
4172 assert(qc->flags & ATA_QCFLAG_ACTIVE); 4350 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
4173 4351
4174 /* sleep-wait for BSY to clear */ 4352 /* sleep-wait for BSY to clear */
4175 DPRINTK("busy wait\n"); 4353 DPRINTK("busy wait\n");
4176 if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB)) { 4354 if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB)) {
4177 qc->err_mask |= AC_ERR_ATA_BUS; 4355 qc->err_mask |= AC_ERR_TIMEOUT;
4178 goto err_out; 4356 goto err_out;
4179 } 4357 }
4180 4358
4181 /* make sure DRQ is set */ 4359 /* make sure DRQ is set */
4182 status = ata_chk_status(ap); 4360 status = ata_chk_status(ap);
4183 if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ) { 4361 if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ) {
4184 qc->err_mask |= AC_ERR_ATA_BUS; 4362 qc->err_mask |= AC_ERR_HSM;
4185 goto err_out; 4363 goto err_out;
4186 } 4364 }
4187 4365
4188 /* send SCSI cdb */ 4366 /* send SCSI cdb */
4189 DPRINTK("send cdb\n"); 4367 DPRINTK("send cdb\n");
4190 assert(ap->cdb_len >= 12); 4368 WARN_ON(qc->dev->cdb_len < 12);
4191 4369
4192 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA || 4370 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA ||
4193 qc->tf.protocol == ATA_PROT_ATAPI_NODATA) { 4371 qc->tf.protocol == ATA_PROT_ATAPI_NODATA) {
@@ -4201,16 +4379,16 @@ static void atapi_packet_task(void *_data)
4201 */ 4379 */
4202 spin_lock_irqsave(&ap->host_set->lock, flags); 4380 spin_lock_irqsave(&ap->host_set->lock, flags);
4203 ap->flags &= ~ATA_FLAG_NOINTR; 4381 ap->flags &= ~ATA_FLAG_NOINTR;
4204 ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1); 4382 ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);
4205 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA) 4383 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA)
4206 ap->ops->bmdma_start(qc); /* initiate bmdma */ 4384 ap->ops->bmdma_start(qc); /* initiate bmdma */
4207 spin_unlock_irqrestore(&ap->host_set->lock, flags); 4385 spin_unlock_irqrestore(&ap->host_set->lock, flags);
4208 } else { 4386 } else {
4209 ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1); 4387 ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);
4210 4388
4211 /* PIO commands are handled by polling */ 4389 /* PIO commands are handled by polling */
4212 ap->hsm_task_state = HSM_ST; 4390 ap->hsm_task_state = HSM_ST;
4213 queue_work(ata_wq, &ap->pio_task); 4391 ata_queue_pio_task(ap);
4214 } 4392 }
4215 4393
4216 return; 4394 return;
@@ -4220,19 +4398,6 @@ err_out:
4220} 4398}
4221 4399
4222 4400
4223/**
4224 * ata_port_start - Set port up for dma.
4225 * @ap: Port to initialize
4226 *
4227 * Called just after data structures for each port are
4228 * initialized. Allocates space for PRD table.
4229 *
4230 * May be used as the port_start() entry in ata_port_operations.
4231 *
4232 * LOCKING:
4233 * Inherited from caller.
4234 */
4235
4236/* 4401/*
4237 * Execute a 'simple' command, that only consists of the opcode 'cmd' itself, 4402 * Execute a 'simple' command, that only consists of the opcode 'cmd' itself,
4238 * without filling any other registers 4403 * without filling any other registers
@@ -4284,6 +4449,8 @@ static int ata_start_drive(struct ata_port *ap, struct ata_device *dev)
4284 4449
4285/** 4450/**
4286 * ata_device_resume - wakeup a previously suspended devices 4451 * ata_device_resume - wakeup a previously suspended devices
4452 * @ap: port the device is connected to
4453 * @dev: the device to resume
4287 * 4454 *
4288 * Kick the drive back into action, by sending it an idle immediate 4455 * Kick the drive back into action, by sending it an idle immediate
4289 * command and making sure its transfer mode matches between drive 4456 * command and making sure its transfer mode matches between drive
@@ -4306,10 +4473,11 @@ int ata_device_resume(struct ata_port *ap, struct ata_device *dev)
4306 4473
4307/** 4474/**
4308 * ata_device_suspend - prepare a device for suspend 4475 * ata_device_suspend - prepare a device for suspend
4476 * @ap: port the device is connected to
4477 * @dev: the device to suspend
4309 * 4478 *
4310 * Flush the cache on the drive, if appropriate, then issue a 4479 * Flush the cache on the drive, if appropriate, then issue a
4311 * standbynow command. 4480 * standbynow command.
4312 *
4313 */ 4481 */
4314int ata_device_suspend(struct ata_port *ap, struct ata_device *dev) 4482int ata_device_suspend(struct ata_port *ap, struct ata_device *dev)
4315{ 4483{
@@ -4323,6 +4491,19 @@ int ata_device_suspend(struct ata_port *ap, struct ata_device *dev)
4323 return 0; 4491 return 0;
4324} 4492}
4325 4493
4494/**
4495 * ata_port_start - Set port up for dma.
4496 * @ap: Port to initialize
4497 *
4498 * Called just after data structures for each port are
4499 * initialized. Allocates space for PRD table.
4500 *
4501 * May be used as the port_start() entry in ata_port_operations.
4502 *
4503 * LOCKING:
4504 * Inherited from caller.
4505 */
4506
4326int ata_port_start (struct ata_port *ap) 4507int ata_port_start (struct ata_port *ap)
4327{ 4508{
4328 struct device *dev = ap->host_set->dev; 4509 struct device *dev = ap->host_set->dev;
@@ -4438,6 +4619,7 @@ static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
4438 4619
4439 INIT_WORK(&ap->packet_task, atapi_packet_task, ap); 4620 INIT_WORK(&ap->packet_task, atapi_packet_task, ap);
4440 INIT_WORK(&ap->pio_task, ata_pio_task, ap); 4621 INIT_WORK(&ap->pio_task, ata_pio_task, ap);
4622 INIT_LIST_HEAD(&ap->eh_done_q);
4441 4623
4442 for (i = 0; i < ATA_MAX_DEVICES; i++) 4624 for (i = 0; i < ATA_MAX_DEVICES; i++)
4443 ap->device[i].devno = i; 4625 ap->device[i].devno = i;
@@ -4579,9 +4761,9 @@ int ata_device_add(const struct ata_probe_ent *ent)
4579 4761
4580 ap = host_set->ports[i]; 4762 ap = host_set->ports[i];
4581 4763
4582 DPRINTK("ata%u: probe begin\n", ap->id); 4764 DPRINTK("ata%u: bus probe begin\n", ap->id);
4583 rc = ata_bus_probe(ap); 4765 rc = ata_bus_probe(ap);
4584 DPRINTK("ata%u: probe end\n", ap->id); 4766 DPRINTK("ata%u: bus probe end\n", ap->id);
4585 4767
4586 if (rc) { 4768 if (rc) {
4587 /* FIXME: do something useful here? 4769 /* FIXME: do something useful here?
@@ -4605,7 +4787,7 @@ int ata_device_add(const struct ata_probe_ent *ent)
4605 } 4787 }
4606 4788
4607 /* probes are done, now scan each port's disk(s) */ 4789 /* probes are done, now scan each port's disk(s) */
4608 DPRINTK("probe begin\n"); 4790 DPRINTK("host probe begin\n");
4609 for (i = 0; i < count; i++) { 4791 for (i = 0; i < count; i++) {
4610 struct ata_port *ap = host_set->ports[i]; 4792 struct ata_port *ap = host_set->ports[i];
4611 4793
@@ -4691,11 +4873,14 @@ void ata_host_set_remove(struct ata_host_set *host_set)
4691int ata_scsi_release(struct Scsi_Host *host) 4873int ata_scsi_release(struct Scsi_Host *host)
4692{ 4874{
4693 struct ata_port *ap = (struct ata_port *) &host->hostdata[0]; 4875 struct ata_port *ap = (struct ata_port *) &host->hostdata[0];
4876 int i;
4694 4877
4695 DPRINTK("ENTER\n"); 4878 DPRINTK("ENTER\n");
4696 4879
4697 ap->ops->port_disable(ap); 4880 ap->ops->port_disable(ap);
4698 ata_host_remove(ap, 0); 4881 ata_host_remove(ap, 0);
4882 for (i = 0; i < ATA_MAX_DEVICES; i++)
4883 kfree(ap->device[i].id);
4699 4884
4700 DPRINTK("EXIT\n"); 4885 DPRINTK("EXIT\n");
4701 return 1; 4886 return 1;
@@ -4727,32 +4912,6 @@ void ata_std_ports(struct ata_ioports *ioaddr)
4727 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD; 4912 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
4728} 4913}
4729 4914
4730static struct ata_probe_ent *
4731ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
4732{
4733 struct ata_probe_ent *probe_ent;
4734
4735 probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
4736 if (!probe_ent) {
4737 printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
4738 kobject_name(&(dev->kobj)));
4739 return NULL;
4740 }
4741
4742 INIT_LIST_HEAD(&probe_ent->node);
4743 probe_ent->dev = dev;
4744
4745 probe_ent->sht = port->sht;
4746 probe_ent->host_flags = port->host_flags;
4747 probe_ent->pio_mask = port->pio_mask;
4748 probe_ent->mwdma_mask = port->mwdma_mask;
4749 probe_ent->udma_mask = port->udma_mask;
4750 probe_ent->port_ops = port->port_ops;
4751
4752 return probe_ent;
4753}
4754
4755
4756 4915
4757#ifdef CONFIG_PCI 4916#ifdef CONFIG_PCI
4758 4917
@@ -4764,256 +4923,6 @@ void ata_pci_host_stop (struct ata_host_set *host_set)
4764} 4923}
4765 4924
4766/** 4925/**
4767 * ata_pci_init_native_mode - Initialize native-mode driver
4768 * @pdev: pci device to be initialized
4769 * @port: array[2] of pointers to port info structures.
4770 * @ports: bitmap of ports present
4771 *
4772 * Utility function which allocates and initializes an
4773 * ata_probe_ent structure for a standard dual-port
4774 * PIO-based IDE controller. The returned ata_probe_ent
4775 * structure can be passed to ata_device_add(). The returned
4776 * ata_probe_ent structure should then be freed with kfree().
4777 *
4778 * The caller need only pass the address of the primary port, the
4779 * secondary will be deduced automatically. If the device has non
4780 * standard secondary port mappings this function can be called twice,
4781 * once for each interface.
4782 */
4783
4784struct ata_probe_ent *
4785ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int ports)
4786{
4787 struct ata_probe_ent *probe_ent =
4788 ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
4789 int p = 0;
4790
4791 if (!probe_ent)
4792 return NULL;
4793
4794 probe_ent->irq = pdev->irq;
4795 probe_ent->irq_flags = SA_SHIRQ;
4796 probe_ent->private_data = port[0]->private_data;
4797
4798 if (ports & ATA_PORT_PRIMARY) {
4799 probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 0);
4800 probe_ent->port[p].altstatus_addr =
4801 probe_ent->port[p].ctl_addr =
4802 pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS;
4803 probe_ent->port[p].bmdma_addr = pci_resource_start(pdev, 4);
4804 ata_std_ports(&probe_ent->port[p]);
4805 p++;
4806 }
4807
4808 if (ports & ATA_PORT_SECONDARY) {
4809 probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 2);
4810 probe_ent->port[p].altstatus_addr =
4811 probe_ent->port[p].ctl_addr =
4812 pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS;
4813 probe_ent->port[p].bmdma_addr = pci_resource_start(pdev, 4) + 8;
4814 ata_std_ports(&probe_ent->port[p]);
4815 p++;
4816 }
4817
4818 probe_ent->n_ports = p;
4819 return probe_ent;
4820}
4821
4822static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev, struct ata_port_info *port, int port_num)
4823{
4824 struct ata_probe_ent *probe_ent;
4825
4826 probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port);
4827 if (!probe_ent)
4828 return NULL;
4829
4830 probe_ent->legacy_mode = 1;
4831 probe_ent->n_ports = 1;
4832 probe_ent->hard_port_no = port_num;
4833 probe_ent->private_data = port->private_data;
4834
4835 switch(port_num)
4836 {
4837 case 0:
4838 probe_ent->irq = 14;
4839 probe_ent->port[0].cmd_addr = 0x1f0;
4840 probe_ent->port[0].altstatus_addr =
4841 probe_ent->port[0].ctl_addr = 0x3f6;
4842 break;
4843 case 1:
4844 probe_ent->irq = 15;
4845 probe_ent->port[0].cmd_addr = 0x170;
4846 probe_ent->port[0].altstatus_addr =
4847 probe_ent->port[0].ctl_addr = 0x376;
4848 break;
4849 }
4850 probe_ent->port[0].bmdma_addr = pci_resource_start(pdev, 4) + 8 * port_num;
4851 ata_std_ports(&probe_ent->port[0]);
4852 return probe_ent;
4853}
4854
4855/**
4856 * ata_pci_init_one - Initialize/register PCI IDE host controller
4857 * @pdev: Controller to be initialized
4858 * @port_info: Information from low-level host driver
4859 * @n_ports: Number of ports attached to host controller
4860 *
4861 * This is a helper function which can be called from a driver's
4862 * xxx_init_one() probe function if the hardware uses traditional
4863 * IDE taskfile registers.
4864 *
4865 * This function calls pci_enable_device(), reserves its register
4866 * regions, sets the dma mask, enables bus master mode, and calls
4867 * ata_device_add()
4868 *
4869 * LOCKING:
4870 * Inherited from PCI layer (may sleep).
4871 *
4872 * RETURNS:
4873 * Zero on success, negative on errno-based value on error.
4874 */
4875
4876int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
4877 unsigned int n_ports)
4878{
4879 struct ata_probe_ent *probe_ent = NULL, *probe_ent2 = NULL;
4880 struct ata_port_info *port[2];
4881 u8 tmp8, mask;
4882 unsigned int legacy_mode = 0;
4883 int disable_dev_on_err = 1;
4884 int rc;
4885
4886 DPRINTK("ENTER\n");
4887
4888 port[0] = port_info[0];
4889 if (n_ports > 1)
4890 port[1] = port_info[1];
4891 else
4892 port[1] = port[0];
4893
4894 if ((port[0]->host_flags & ATA_FLAG_NO_LEGACY) == 0
4895 && (pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
4896 /* TODO: What if one channel is in native mode ... */
4897 pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
4898 mask = (1 << 2) | (1 << 0);
4899 if ((tmp8 & mask) != mask)
4900 legacy_mode = (1 << 3);
4901 }
4902
4903 /* FIXME... */
4904 if ((!legacy_mode) && (n_ports > 2)) {
4905 printk(KERN_ERR "ata: BUG: native mode, n_ports > 2\n");
4906 n_ports = 2;
4907 /* For now */
4908 }
4909
4910 /* FIXME: Really for ATA it isn't safe because the device may be
4911 multi-purpose and we want to leave it alone if it was already
4912 enabled. Secondly for shared use as Arjan says we want refcounting
4913
4914 Checking dev->is_enabled is insufficient as this is not set at
4915 boot for the primary video which is BIOS enabled
4916 */
4917
4918 rc = pci_enable_device(pdev);
4919 if (rc)
4920 return rc;
4921
4922 rc = pci_request_regions(pdev, DRV_NAME);
4923 if (rc) {
4924 disable_dev_on_err = 0;
4925 goto err_out;
4926 }
4927
4928 /* FIXME: Should use platform specific mappers for legacy port ranges */
4929 if (legacy_mode) {
4930 if (!request_region(0x1f0, 8, "libata")) {
4931 struct resource *conflict, res;
4932 res.start = 0x1f0;
4933 res.end = 0x1f0 + 8 - 1;
4934 conflict = ____request_resource(&ioport_resource, &res);
4935 if (!strcmp(conflict->name, "libata"))
4936 legacy_mode |= (1 << 0);
4937 else {
4938 disable_dev_on_err = 0;
4939 printk(KERN_WARNING "ata: 0x1f0 IDE port busy\n");
4940 }
4941 } else
4942 legacy_mode |= (1 << 0);
4943
4944 if (!request_region(0x170, 8, "libata")) {
4945 struct resource *conflict, res;
4946 res.start = 0x170;
4947 res.end = 0x170 + 8 - 1;
4948 conflict = ____request_resource(&ioport_resource, &res);
4949 if (!strcmp(conflict->name, "libata"))
4950 legacy_mode |= (1 << 1);
4951 else {
4952 disable_dev_on_err = 0;
4953 printk(KERN_WARNING "ata: 0x170 IDE port busy\n");
4954 }
4955 } else
4956 legacy_mode |= (1 << 1);
4957 }
4958
4959 /* we have legacy mode, but all ports are unavailable */
4960 if (legacy_mode == (1 << 3)) {
4961 rc = -EBUSY;
4962 goto err_out_regions;
4963 }
4964
4965 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
4966 if (rc)
4967 goto err_out_regions;
4968 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
4969 if (rc)
4970 goto err_out_regions;
4971
4972 if (legacy_mode) {
4973 if (legacy_mode & (1 << 0))
4974 probe_ent = ata_pci_init_legacy_port(pdev, port[0], 0);
4975 if (legacy_mode & (1 << 1))
4976 probe_ent2 = ata_pci_init_legacy_port(pdev, port[1], 1);
4977 } else {
4978 if (n_ports == 2)
4979 probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
4980 else
4981 probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY);
4982 }
4983 if (!probe_ent && !probe_ent2) {
4984 rc = -ENOMEM;
4985 goto err_out_regions;
4986 }
4987
4988 pci_set_master(pdev);
4989
4990 /* FIXME: check ata_device_add return */
4991 if (legacy_mode) {
4992 if (legacy_mode & (1 << 0))
4993 ata_device_add(probe_ent);
4994 if (legacy_mode & (1 << 1))
4995 ata_device_add(probe_ent2);
4996 } else
4997 ata_device_add(probe_ent);
4998
4999 kfree(probe_ent);
5000 kfree(probe_ent2);
5001
5002 return 0;
5003
5004err_out_regions:
5005 if (legacy_mode & (1 << 0))
5006 release_region(0x1f0, 8);
5007 if (legacy_mode & (1 << 1))
5008 release_region(0x170, 8);
5009 pci_release_regions(pdev);
5010err_out:
5011 if (disable_dev_on_err)
5012 pci_disable_device(pdev);
5013 return rc;
5014}
5015
5016/**
5017 * ata_pci_remove_one - PCI layer callback for device removal 4926 * ata_pci_remove_one - PCI layer callback for device removal
5018 * @pdev: PCI device that was removed 4927 * @pdev: PCI device that was removed
5019 * 4928 *
@@ -5143,7 +5052,7 @@ EXPORT_SYMBOL_GPL(ata_device_add);
5143EXPORT_SYMBOL_GPL(ata_host_set_remove); 5052EXPORT_SYMBOL_GPL(ata_host_set_remove);
5144EXPORT_SYMBOL_GPL(ata_sg_init); 5053EXPORT_SYMBOL_GPL(ata_sg_init);
5145EXPORT_SYMBOL_GPL(ata_sg_init_one); 5054EXPORT_SYMBOL_GPL(ata_sg_init_one);
5146EXPORT_SYMBOL_GPL(ata_qc_complete); 5055EXPORT_SYMBOL_GPL(__ata_qc_complete);
5147EXPORT_SYMBOL_GPL(ata_qc_issue_prot); 5056EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
5148EXPORT_SYMBOL_GPL(ata_eng_timeout); 5057EXPORT_SYMBOL_GPL(ata_eng_timeout);
5149EXPORT_SYMBOL_GPL(ata_tf_load); 5058EXPORT_SYMBOL_GPL(ata_tf_load);
@@ -5169,18 +5078,29 @@ EXPORT_SYMBOL_GPL(ata_port_probe);
5169EXPORT_SYMBOL_GPL(sata_phy_reset); 5078EXPORT_SYMBOL_GPL(sata_phy_reset);
5170EXPORT_SYMBOL_GPL(__sata_phy_reset); 5079EXPORT_SYMBOL_GPL(__sata_phy_reset);
5171EXPORT_SYMBOL_GPL(ata_bus_reset); 5080EXPORT_SYMBOL_GPL(ata_bus_reset);
5081EXPORT_SYMBOL_GPL(ata_std_probeinit);
5082EXPORT_SYMBOL_GPL(ata_std_softreset);
5083EXPORT_SYMBOL_GPL(sata_std_hardreset);
5084EXPORT_SYMBOL_GPL(ata_std_postreset);
5085EXPORT_SYMBOL_GPL(ata_std_probe_reset);
5086EXPORT_SYMBOL_GPL(ata_drive_probe_reset);
5087EXPORT_SYMBOL_GPL(ata_dev_revalidate);
5172EXPORT_SYMBOL_GPL(ata_port_disable); 5088EXPORT_SYMBOL_GPL(ata_port_disable);
5173EXPORT_SYMBOL_GPL(ata_ratelimit); 5089EXPORT_SYMBOL_GPL(ata_ratelimit);
5090EXPORT_SYMBOL_GPL(ata_busy_sleep);
5174EXPORT_SYMBOL_GPL(ata_scsi_ioctl); 5091EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
5175EXPORT_SYMBOL_GPL(ata_scsi_queuecmd); 5092EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
5093EXPORT_SYMBOL_GPL(ata_scsi_timed_out);
5176EXPORT_SYMBOL_GPL(ata_scsi_error); 5094EXPORT_SYMBOL_GPL(ata_scsi_error);
5177EXPORT_SYMBOL_GPL(ata_scsi_slave_config); 5095EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
5178EXPORT_SYMBOL_GPL(ata_scsi_release); 5096EXPORT_SYMBOL_GPL(ata_scsi_release);
5179EXPORT_SYMBOL_GPL(ata_host_intr); 5097EXPORT_SYMBOL_GPL(ata_host_intr);
5180EXPORT_SYMBOL_GPL(ata_dev_classify); 5098EXPORT_SYMBOL_GPL(ata_dev_classify);
5181EXPORT_SYMBOL_GPL(ata_dev_id_string); 5099EXPORT_SYMBOL_GPL(ata_id_string);
5182EXPORT_SYMBOL_GPL(ata_dev_config); 5100EXPORT_SYMBOL_GPL(ata_id_c_string);
5183EXPORT_SYMBOL_GPL(ata_scsi_simulate); 5101EXPORT_SYMBOL_GPL(ata_scsi_simulate);
5102EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
5103EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
5184 5104
5185EXPORT_SYMBOL_GPL(ata_pio_need_iordy); 5105EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
5186EXPORT_SYMBOL_GPL(ata_timing_compute); 5106EXPORT_SYMBOL_GPL(ata_timing_compute);
diff --git a/drivers/scsi/libata-scsi.c b/drivers/scsi/libata-scsi.c
index 59503c9ccac9..d0bd94abb413 100644
--- a/drivers/scsi/libata-scsi.c
+++ b/drivers/scsi/libata-scsi.c
@@ -151,7 +151,7 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
151 struct scsi_sense_hdr sshdr; 151 struct scsi_sense_hdr sshdr;
152 enum dma_data_direction data_dir; 152 enum dma_data_direction data_dir;
153 153
154 if (NULL == (void *)arg) 154 if (arg == NULL)
155 return -EINVAL; 155 return -EINVAL;
156 156
157 if (copy_from_user(args, arg, sizeof(args))) 157 if (copy_from_user(args, arg, sizeof(args)))
@@ -201,7 +201,7 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
201 /* Need code to retrieve data from check condition? */ 201 /* Need code to retrieve data from check condition? */
202 202
203 if ((argbuf) 203 if ((argbuf)
204 && copy_to_user((void *)(arg + sizeof(args)), argbuf, argsize)) 204 && copy_to_user(arg + sizeof(args), argbuf, argsize))
205 rc = -EFAULT; 205 rc = -EFAULT;
206error: 206error:
207 if (argbuf) 207 if (argbuf)
@@ -228,7 +228,7 @@ int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg)
228 u8 args[7]; 228 u8 args[7];
229 struct scsi_sense_hdr sshdr; 229 struct scsi_sense_hdr sshdr;
230 230
231 if (NULL == (void *)arg) 231 if (arg == NULL)
232 return -EINVAL; 232 return -EINVAL;
233 233
234 if (copy_from_user(args, arg, sizeof(args))) 234 if (copy_from_user(args, arg, sizeof(args)))
@@ -553,7 +553,7 @@ void ata_gen_ata_desc_sense(struct ata_queued_cmd *qc)
553 /* 553 /*
554 * Read the controller registers. 554 * Read the controller registers.
555 */ 555 */
556 assert(NULL != qc->ap->ops->tf_read); 556 WARN_ON(qc->ap->ops->tf_read == NULL);
557 qc->ap->ops->tf_read(qc->ap, tf); 557 qc->ap->ops->tf_read(qc->ap, tf);
558 558
559 /* 559 /*
@@ -628,7 +628,7 @@ void ata_gen_fixed_sense(struct ata_queued_cmd *qc)
628 /* 628 /*
629 * Read the controller registers. 629 * Read the controller registers.
630 */ 630 */
631 assert(NULL != qc->ap->ops->tf_read); 631 WARN_ON(qc->ap->ops->tf_read == NULL);
632 qc->ap->ops->tf_read(qc->ap, tf); 632 qc->ap->ops->tf_read(qc->ap, tf);
633 633
634 /* 634 /*
@@ -684,23 +684,23 @@ int ata_scsi_slave_config(struct scsi_device *sdev)
684 if (sdev->id < ATA_MAX_DEVICES) { 684 if (sdev->id < ATA_MAX_DEVICES) {
685 struct ata_port *ap; 685 struct ata_port *ap;
686 struct ata_device *dev; 686 struct ata_device *dev;
687 unsigned int max_sectors;
687 688
688 ap = (struct ata_port *) &sdev->host->hostdata[0]; 689 ap = (struct ata_port *) &sdev->host->hostdata[0];
689 dev = &ap->device[sdev->id]; 690 dev = &ap->device[sdev->id];
690 691
691 /* TODO: 1024 is an arbitrary number, not the 692 /* TODO: 2048 is an arbitrary number, not the
692 * hardware maximum. This should be increased to 693 * hardware maximum. This should be increased to
693 * 65534 when Jens Axboe's patch for dynamically 694 * 65534 when Jens Axboe's patch for dynamically
694 * determining max_sectors is merged. 695 * determining max_sectors is merged.
695 */ 696 */
696 if ((dev->flags & ATA_DFLAG_LBA48) && 697 max_sectors = ATA_MAX_SECTORS;
697 ((dev->flags & ATA_DFLAG_LOCK_SECTORS) == 0)) { 698 if (dev->flags & ATA_DFLAG_LBA48)
698 /* 699 max_sectors = 2048;
699 * do not overwrite sdev->host->max_sectors, since 700 if (dev->max_sectors)
700 * other drives on this host may not support LBA48 701 max_sectors = dev->max_sectors;
701 */ 702
702 blk_queue_max_sectors(sdev->request_queue, 2048); 703 blk_queue_max_sectors(sdev->request_queue, max_sectors);
703 }
704 704
705 /* 705 /*
706 * SATA DMA transfers must be multiples of 4 byte, so 706 * SATA DMA transfers must be multiples of 4 byte, so
@@ -717,6 +717,47 @@ int ata_scsi_slave_config(struct scsi_device *sdev)
717} 717}
718 718
719/** 719/**
720 * ata_scsi_timed_out - SCSI layer time out callback
721 * @cmd: timed out SCSI command
722 *
723 * Handles SCSI layer timeout. We race with normal completion of
724 * the qc for @cmd. If the qc is already gone, we lose and let
725 * the scsi command finish (EH_HANDLED). Otherwise, the qc has
726 * timed out and EH should be invoked. Prevent ata_qc_complete()
727 * from finishing it by setting EH_SCHEDULED and return
728 * EH_NOT_HANDLED.
729 *
730 * LOCKING:
731 * Called from timer context
732 *
733 * RETURNS:
734 * EH_HANDLED or EH_NOT_HANDLED
735 */
736enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
737{
738 struct Scsi_Host *host = cmd->device->host;
739 struct ata_port *ap = (struct ata_port *) &host->hostdata[0];
740 unsigned long flags;
741 struct ata_queued_cmd *qc;
742 enum scsi_eh_timer_return ret = EH_HANDLED;
743
744 DPRINTK("ENTER\n");
745
746 spin_lock_irqsave(&ap->host_set->lock, flags);
747 qc = ata_qc_from_tag(ap, ap->active_tag);
748 if (qc) {
749 WARN_ON(qc->scsicmd != cmd);
750 qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
751 qc->err_mask |= AC_ERR_TIMEOUT;
752 ret = EH_NOT_HANDLED;
753 }
754 spin_unlock_irqrestore(&ap->host_set->lock, flags);
755
756 DPRINTK("EXIT, ret=%d\n", ret);
757 return ret;
758}
759
760/**
720 * ata_scsi_error - SCSI layer error handler callback 761 * ata_scsi_error - SCSI layer error handler callback
721 * @host: SCSI host on which error occurred 762 * @host: SCSI host on which error occurred
722 * 763 *
@@ -732,23 +773,82 @@ int ata_scsi_slave_config(struct scsi_device *sdev)
732int ata_scsi_error(struct Scsi_Host *host) 773int ata_scsi_error(struct Scsi_Host *host)
733{ 774{
734 struct ata_port *ap; 775 struct ata_port *ap;
776 unsigned long flags;
735 777
736 DPRINTK("ENTER\n"); 778 DPRINTK("ENTER\n");
737 779
738 ap = (struct ata_port *) &host->hostdata[0]; 780 ap = (struct ata_port *) &host->hostdata[0];
781
782 spin_lock_irqsave(&ap->host_set->lock, flags);
783 WARN_ON(ap->flags & ATA_FLAG_IN_EH);
784 ap->flags |= ATA_FLAG_IN_EH;
785 WARN_ON(ata_qc_from_tag(ap, ap->active_tag) == NULL);
786 spin_unlock_irqrestore(&ap->host_set->lock, flags);
787
739 ap->ops->eng_timeout(ap); 788 ap->ops->eng_timeout(ap);
740 789
741 /* TODO: this is per-command; when queueing is supported 790 WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q));
742 * this code will either change or move to a more 791
743 * appropriate place 792 scsi_eh_flush_done_q(&ap->eh_done_q);
744 */ 793
745 host->host_failed--; 794 spin_lock_irqsave(&ap->host_set->lock, flags);
746 INIT_LIST_HEAD(&host->eh_cmd_q); 795 ap->flags &= ~ATA_FLAG_IN_EH;
796 spin_unlock_irqrestore(&ap->host_set->lock, flags);
747 797
748 DPRINTK("EXIT\n"); 798 DPRINTK("EXIT\n");
749 return 0; 799 return 0;
750} 800}
751 801
802static void ata_eh_scsidone(struct scsi_cmnd *scmd)
803{
804 /* nada */
805}
806
807static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
808{
809 struct ata_port *ap = qc->ap;
810 struct scsi_cmnd *scmd = qc->scsicmd;
811 unsigned long flags;
812
813 spin_lock_irqsave(&ap->host_set->lock, flags);
814 qc->scsidone = ata_eh_scsidone;
815 __ata_qc_complete(qc);
816 WARN_ON(ata_tag_valid(qc->tag));
817 spin_unlock_irqrestore(&ap->host_set->lock, flags);
818
819 scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
820}
821
822/**
823 * ata_eh_qc_complete - Complete an active ATA command from EH
824 * @qc: Command to complete
825 *
826 * Indicate to the mid and upper layers that an ATA command has
827 * completed. To be used from EH.
828 */
829void ata_eh_qc_complete(struct ata_queued_cmd *qc)
830{
831 struct scsi_cmnd *scmd = qc->scsicmd;
832 scmd->retries = scmd->allowed;
833 __ata_eh_qc_complete(qc);
834}
835
836/**
837 * ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
838 * @qc: Command to retry
839 *
840 * Indicate to the mid and upper layers that an ATA command
841 * should be retried. To be used from EH.
842 *
843 * SCSI midlayer limits the number of retries to scmd->allowed.
844 * This function might need to adjust scmd->retries for commands
845 * which get retried due to unrelated NCQ failures.
846 */
847void ata_eh_qc_retry(struct ata_queued_cmd *qc)
848{
849 __ata_eh_qc_complete(qc);
850}
851
752/** 852/**
753 * ata_scsi_start_stop_xlat - Translate SCSI START STOP UNIT command 853 * ata_scsi_start_stop_xlat - Translate SCSI START STOP UNIT command
754 * @qc: Storage for translated ATA taskfile 854 * @qc: Storage for translated ATA taskfile
@@ -985,9 +1085,13 @@ static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc, const u8 *sc
985 if (dev->flags & ATA_DFLAG_LBA) { 1085 if (dev->flags & ATA_DFLAG_LBA) {
986 tf->flags |= ATA_TFLAG_LBA; 1086 tf->flags |= ATA_TFLAG_LBA;
987 1087
988 if (dev->flags & ATA_DFLAG_LBA48) { 1088 if (lba_28_ok(block, n_block)) {
989 if (n_block > (64 * 1024)) 1089 /* use LBA28 */
990 goto invalid_fld; 1090 tf->command = ATA_CMD_VERIFY;
1091 tf->device |= (block >> 24) & 0xf;
1092 } else if (lba_48_ok(block, n_block)) {
1093 if (!(dev->flags & ATA_DFLAG_LBA48))
1094 goto out_of_range;
991 1095
992 /* use LBA48 */ 1096 /* use LBA48 */
993 tf->flags |= ATA_TFLAG_LBA48; 1097 tf->flags |= ATA_TFLAG_LBA48;
@@ -998,15 +1102,9 @@ static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc, const u8 *sc
998 tf->hob_lbah = (block >> 40) & 0xff; 1102 tf->hob_lbah = (block >> 40) & 0xff;
999 tf->hob_lbam = (block >> 32) & 0xff; 1103 tf->hob_lbam = (block >> 32) & 0xff;
1000 tf->hob_lbal = (block >> 24) & 0xff; 1104 tf->hob_lbal = (block >> 24) & 0xff;
1001 } else { 1105 } else
1002 if (n_block > 256) 1106 /* request too large even for LBA48 */
1003 goto invalid_fld; 1107 goto out_of_range;
1004
1005 /* use LBA28 */
1006 tf->command = ATA_CMD_VERIFY;
1007
1008 tf->device |= (block >> 24) & 0xf;
1009 }
1010 1108
1011 tf->nsect = n_block & 0xff; 1109 tf->nsect = n_block & 0xff;
1012 1110
@@ -1019,8 +1117,8 @@ static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc, const u8 *sc
1019 /* CHS */ 1117 /* CHS */
1020 u32 sect, head, cyl, track; 1118 u32 sect, head, cyl, track;
1021 1119
1022 if (n_block > 256) 1120 if (!lba_28_ok(block, n_block))
1023 goto invalid_fld; 1121 goto out_of_range;
1024 1122
1025 /* Convert LBA to CHS */ 1123 /* Convert LBA to CHS */
1026 track = (u32)block / dev->sectors; 1124 track = (u32)block / dev->sectors;
@@ -1139,9 +1237,11 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, const u8 *scsicm
1139 if (dev->flags & ATA_DFLAG_LBA) { 1237 if (dev->flags & ATA_DFLAG_LBA) {
1140 tf->flags |= ATA_TFLAG_LBA; 1238 tf->flags |= ATA_TFLAG_LBA;
1141 1239
1142 if (dev->flags & ATA_DFLAG_LBA48) { 1240 if (lba_28_ok(block, n_block)) {
1143 /* The request -may- be too large for LBA48. */ 1241 /* use LBA28 */
1144 if ((block >> 48) || (n_block > 65536)) 1242 tf->device |= (block >> 24) & 0xf;
1243 } else if (lba_48_ok(block, n_block)) {
1244 if (!(dev->flags & ATA_DFLAG_LBA48))
1145 goto out_of_range; 1245 goto out_of_range;
1146 1246
1147 /* use LBA48 */ 1247 /* use LBA48 */
@@ -1152,15 +1252,9 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, const u8 *scsicm
1152 tf->hob_lbah = (block >> 40) & 0xff; 1252 tf->hob_lbah = (block >> 40) & 0xff;
1153 tf->hob_lbam = (block >> 32) & 0xff; 1253 tf->hob_lbam = (block >> 32) & 0xff;
1154 tf->hob_lbal = (block >> 24) & 0xff; 1254 tf->hob_lbal = (block >> 24) & 0xff;
1155 } else { 1255 } else
1156 /* use LBA28 */ 1256 /* request too large even for LBA48 */
1157 1257 goto out_of_range;
1158 /* The request -may- be too large for LBA28. */
1159 if ((block >> 28) || (n_block > 256))
1160 goto out_of_range;
1161
1162 tf->device |= (block >> 24) & 0xf;
1163 }
1164 1258
1165 if (unlikely(ata_rwcmd_protocol(qc) < 0)) 1259 if (unlikely(ata_rwcmd_protocol(qc) < 0))
1166 goto invalid_fld; 1260 goto invalid_fld;
@@ -1178,7 +1272,7 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, const u8 *scsicm
1178 u32 sect, head, cyl, track; 1272 u32 sect, head, cyl, track;
1179 1273
1180 /* The request -may- be too large for CHS addressing. */ 1274 /* The request -may- be too large for CHS addressing. */
1181 if ((block >> 28) || (n_block > 256)) 1275 if (!lba_28_ok(block, n_block))
1182 goto out_of_range; 1276 goto out_of_range;
1183 1277
1184 if (unlikely(ata_rwcmd_protocol(qc) < 0)) 1278 if (unlikely(ata_rwcmd_protocol(qc) < 0))
@@ -1225,7 +1319,7 @@ nothing_to_do:
1225 return 1; 1319 return 1;
1226} 1320}
1227 1321
1228static int ata_scsi_qc_complete(struct ata_queued_cmd *qc) 1322static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
1229{ 1323{
1230 struct scsi_cmnd *cmd = qc->scsicmd; 1324 struct scsi_cmnd *cmd = qc->scsicmd;
1231 u8 *cdb = cmd->cmnd; 1325 u8 *cdb = cmd->cmnd;
@@ -1262,7 +1356,7 @@ static int ata_scsi_qc_complete(struct ata_queued_cmd *qc)
1262 1356
1263 qc->scsidone(cmd); 1357 qc->scsidone(cmd);
1264 1358
1265 return 0; 1359 ata_qc_free(qc);
1266} 1360}
1267 1361
1268/** 1362/**
@@ -1328,8 +1422,9 @@ static void ata_scsi_translate(struct ata_port *ap, struct ata_device *dev,
1328 goto early_finish; 1422 goto early_finish;
1329 1423
1330 /* select device, send command to hardware */ 1424 /* select device, send command to hardware */
1331 if (ata_qc_issue(qc)) 1425 qc->err_mask = ata_qc_issue(qc);
1332 goto err_did; 1426 if (qc->err_mask)
1427 ata_qc_complete(qc);
1333 1428
1334 VPRINTK("EXIT\n"); 1429 VPRINTK("EXIT\n");
1335 return; 1430 return;
@@ -1472,8 +1567,8 @@ unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf,
1472 1567
1473 if (buflen > 35) { 1568 if (buflen > 35) {
1474 memcpy(&rbuf[8], "ATA ", 8); 1569 memcpy(&rbuf[8], "ATA ", 8);
1475 ata_dev_id_string(args->id, &rbuf[16], ATA_ID_PROD_OFS, 16); 1570 ata_id_string(args->id, &rbuf[16], ATA_ID_PROD_OFS, 16);
1476 ata_dev_id_string(args->id, &rbuf[32], ATA_ID_FW_REV_OFS, 4); 1571 ata_id_string(args->id, &rbuf[32], ATA_ID_FW_REV_OFS, 4);
1477 if (rbuf[32] == 0 || rbuf[32] == ' ') 1572 if (rbuf[32] == 0 || rbuf[32] == ' ')
1478 memcpy(&rbuf[32], "n/a ", 4); 1573 memcpy(&rbuf[32], "n/a ", 4);
1479 } 1574 }
@@ -1547,8 +1642,8 @@ unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf,
1547 memcpy(rbuf, hdr, sizeof(hdr)); 1642 memcpy(rbuf, hdr, sizeof(hdr));
1548 1643
1549 if (buflen > (ATA_SERNO_LEN + 4 - 1)) 1644 if (buflen > (ATA_SERNO_LEN + 4 - 1))
1550 ata_dev_id_string(args->id, (unsigned char *) &rbuf[4], 1645 ata_id_string(args->id, (unsigned char *) &rbuf[4],
1551 ATA_ID_SERNO_OFS, ATA_SERNO_LEN); 1646 ATA_ID_SERNO_OFS, ATA_SERNO_LEN);
1552 1647
1553 return 0; 1648 return 0;
1554} 1649}
@@ -1713,15 +1808,12 @@ static int ata_dev_supports_fua(u16 *id)
1713 if (!ata_id_has_fua(id)) 1808 if (!ata_id_has_fua(id))
1714 return 0; 1809 return 0;
1715 1810
1716 model[40] = '\0'; 1811 ata_id_c_string(id, model, ATA_ID_PROD_OFS, sizeof(model));
1717 fw[8] = '\0'; 1812 ata_id_c_string(id, fw, ATA_ID_FW_REV_OFS, sizeof(fw));
1718
1719 ata_dev_id_string(id, model, ATA_ID_PROD_OFS, sizeof(model) - 1);
1720 ata_dev_id_string(id, fw, ATA_ID_FW_REV_OFS, sizeof(fw) - 1);
1721 1813
1722 if (strncmp(model, "Maxtor", 6)) 1814 if (strcmp(model, "Maxtor"))
1723 return 1; 1815 return 1;
1724 if (strncmp(fw, "BANC1G10", 8)) 1816 if (strcmp(fw, "BANC1G10"))
1725 return 1; 1817 return 1;
1726 1818
1727 return 0; /* blacklisted */ 1819 return 0; /* blacklisted */
@@ -2015,7 +2107,7 @@ void ata_scsi_badcmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *), u8
2015 done(cmd); 2107 done(cmd);
2016} 2108}
2017 2109
2018static int atapi_sense_complete(struct ata_queued_cmd *qc) 2110static void atapi_sense_complete(struct ata_queued_cmd *qc)
2019{ 2111{
2020 if (qc->err_mask && ((qc->err_mask & AC_ERR_DEV) == 0)) 2112 if (qc->err_mask && ((qc->err_mask & AC_ERR_DEV) == 0))
2021 /* FIXME: not quite right; we don't want the 2113 /* FIXME: not quite right; we don't want the
@@ -2026,7 +2118,7 @@ static int atapi_sense_complete(struct ata_queued_cmd *qc)
2026 ata_gen_ata_desc_sense(qc); 2118 ata_gen_ata_desc_sense(qc);
2027 2119
2028 qc->scsidone(qc->scsicmd); 2120 qc->scsidone(qc->scsicmd);
2029 return 0; 2121 ata_qc_free(qc);
2030} 2122}
2031 2123
2032/* is it pointless to prefer PIO for "safety reasons"? */ 2124/* is it pointless to prefer PIO for "safety reasons"? */
@@ -2056,7 +2148,7 @@ static void atapi_request_sense(struct ata_queued_cmd *qc)
2056 ata_sg_init_one(qc, cmd->sense_buffer, sizeof(cmd->sense_buffer)); 2148 ata_sg_init_one(qc, cmd->sense_buffer, sizeof(cmd->sense_buffer));
2057 qc->dma_dir = DMA_FROM_DEVICE; 2149 qc->dma_dir = DMA_FROM_DEVICE;
2058 2150
2059 memset(&qc->cdb, 0, ap->cdb_len); 2151 memset(&qc->cdb, 0, qc->dev->cdb_len);
2060 qc->cdb[0] = REQUEST_SENSE; 2152 qc->cdb[0] = REQUEST_SENSE;
2061 qc->cdb[4] = SCSI_SENSE_BUFFERSIZE; 2153 qc->cdb[4] = SCSI_SENSE_BUFFERSIZE;
2062 2154
@@ -2075,15 +2167,14 @@ static void atapi_request_sense(struct ata_queued_cmd *qc)
2075 2167
2076 qc->complete_fn = atapi_sense_complete; 2168 qc->complete_fn = atapi_sense_complete;
2077 2169
2078 if (ata_qc_issue(qc)) { 2170 qc->err_mask = ata_qc_issue(qc);
2079 qc->err_mask |= AC_ERR_OTHER; 2171 if (qc->err_mask)
2080 ata_qc_complete(qc); 2172 ata_qc_complete(qc);
2081 }
2082 2173
2083 DPRINTK("EXIT\n"); 2174 DPRINTK("EXIT\n");
2084} 2175}
2085 2176
2086static int atapi_qc_complete(struct ata_queued_cmd *qc) 2177static void atapi_qc_complete(struct ata_queued_cmd *qc)
2087{ 2178{
2088 struct scsi_cmnd *cmd = qc->scsicmd; 2179 struct scsi_cmnd *cmd = qc->scsicmd;
2089 unsigned int err_mask = qc->err_mask; 2180 unsigned int err_mask = qc->err_mask;
@@ -2093,7 +2184,7 @@ static int atapi_qc_complete(struct ata_queued_cmd *qc)
2093 if (unlikely(err_mask & AC_ERR_DEV)) { 2184 if (unlikely(err_mask & AC_ERR_DEV)) {
2094 cmd->result = SAM_STAT_CHECK_CONDITION; 2185 cmd->result = SAM_STAT_CHECK_CONDITION;
2095 atapi_request_sense(qc); 2186 atapi_request_sense(qc);
2096 return 1; 2187 return;
2097 } 2188 }
2098 2189
2099 else if (unlikely(err_mask)) 2190 else if (unlikely(err_mask))
@@ -2133,7 +2224,7 @@ static int atapi_qc_complete(struct ata_queued_cmd *qc)
2133 } 2224 }
2134 2225
2135 qc->scsidone(cmd); 2226 qc->scsidone(cmd);
2136 return 0; 2227 ata_qc_free(qc);
2137} 2228}
2138/** 2229/**
2139 * atapi_xlat - Initialize PACKET taskfile 2230 * atapi_xlat - Initialize PACKET taskfile
@@ -2159,7 +2250,7 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc, const u8 *scsicmd)
2159 if (ata_check_atapi_dma(qc)) 2250 if (ata_check_atapi_dma(qc))
2160 using_pio = 1; 2251 using_pio = 1;
2161 2252
2162 memcpy(&qc->cdb, scsicmd, qc->ap->cdb_len); 2253 memcpy(&qc->cdb, scsicmd, dev->cdb_len);
2163 2254
2164 qc->complete_fn = atapi_qc_complete; 2255 qc->complete_fn = atapi_qc_complete;
2165 2256
@@ -2519,7 +2610,8 @@ out_unlock:
2519 2610
2520/** 2611/**
2521 * ata_scsi_simulate - simulate SCSI command on ATA device 2612 * ata_scsi_simulate - simulate SCSI command on ATA device
2522 * @id: current IDENTIFY data for target device. 2613 * @ap: port the device is connected to
2614 * @dev: the target device
2523 * @cmd: SCSI command being sent to device. 2615 * @cmd: SCSI command being sent to device.
2524 * @done: SCSI command completion function. 2616 * @done: SCSI command completion function.
2525 * 2617 *
diff --git a/drivers/scsi/libata.h b/drivers/scsi/libata.h
index fddaf479a544..d822eba05f3c 100644
--- a/drivers/scsi/libata.h
+++ b/drivers/scsi/libata.h
@@ -46,7 +46,7 @@ extern struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
46 struct ata_device *dev); 46 struct ata_device *dev);
47extern int ata_rwcmd_protocol(struct ata_queued_cmd *qc); 47extern int ata_rwcmd_protocol(struct ata_queued_cmd *qc);
48extern void ata_qc_free(struct ata_queued_cmd *qc); 48extern void ata_qc_free(struct ata_queued_cmd *qc);
49extern int ata_qc_issue(struct ata_queued_cmd *qc); 49extern unsigned int ata_qc_issue(struct ata_queued_cmd *qc);
50extern int ata_check_atapi_dma(struct ata_queued_cmd *qc); 50extern int ata_check_atapi_dma(struct ata_queued_cmd *qc);
51extern void ata_dev_select(struct ata_port *ap, unsigned int device, 51extern void ata_dev_select(struct ata_port *ap, unsigned int device,
52 unsigned int wait, unsigned int can_sleep); 52 unsigned int wait, unsigned int can_sleep);
diff --git a/drivers/scsi/pdc_adma.c b/drivers/scsi/pdc_adma.c
index e8df0c9ec1e6..5f33cc932e70 100644
--- a/drivers/scsi/pdc_adma.c
+++ b/drivers/scsi/pdc_adma.c
@@ -131,7 +131,7 @@ static void adma_host_stop(struct ata_host_set *host_set);
131static void adma_port_stop(struct ata_port *ap); 131static void adma_port_stop(struct ata_port *ap);
132static void adma_phy_reset(struct ata_port *ap); 132static void adma_phy_reset(struct ata_port *ap);
133static void adma_qc_prep(struct ata_queued_cmd *qc); 133static void adma_qc_prep(struct ata_queued_cmd *qc);
134static int adma_qc_issue(struct ata_queued_cmd *qc); 134static unsigned int adma_qc_issue(struct ata_queued_cmd *qc);
135static int adma_check_atapi_dma(struct ata_queued_cmd *qc); 135static int adma_check_atapi_dma(struct ata_queued_cmd *qc);
136static void adma_bmdma_stop(struct ata_queued_cmd *qc); 136static void adma_bmdma_stop(struct ata_queued_cmd *qc);
137static u8 adma_bmdma_status(struct ata_port *ap); 137static u8 adma_bmdma_status(struct ata_port *ap);
@@ -143,11 +143,11 @@ static struct scsi_host_template adma_ata_sht = {
143 .name = DRV_NAME, 143 .name = DRV_NAME,
144 .ioctl = ata_scsi_ioctl, 144 .ioctl = ata_scsi_ioctl,
145 .queuecommand = ata_scsi_queuecmd, 145 .queuecommand = ata_scsi_queuecmd,
146 .eh_timed_out = ata_scsi_timed_out,
146 .eh_strategy_handler = ata_scsi_error, 147 .eh_strategy_handler = ata_scsi_error,
147 .can_queue = ATA_DEF_QUEUE, 148 .can_queue = ATA_DEF_QUEUE,
148 .this_id = ATA_SHT_THIS_ID, 149 .this_id = ATA_SHT_THIS_ID,
149 .sg_tablesize = LIBATA_MAX_PRD, 150 .sg_tablesize = LIBATA_MAX_PRD,
150 .max_sectors = ATA_MAX_SECTORS,
151 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 151 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
152 .emulated = ATA_SHT_EMULATED, 152 .emulated = ATA_SHT_EMULATED,
153 .use_clustering = ENABLE_CLUSTERING, 153 .use_clustering = ENABLE_CLUSTERING,
@@ -419,7 +419,7 @@ static inline void adma_packet_start(struct ata_queued_cmd *qc)
419 writew(aPIOMD4 | aGO, chan + ADMA_CONTROL); 419 writew(aPIOMD4 | aGO, chan + ADMA_CONTROL);
420} 420}
421 421
422static int adma_qc_issue(struct ata_queued_cmd *qc) 422static unsigned int adma_qc_issue(struct ata_queued_cmd *qc)
423{ 423{
424 struct adma_port_priv *pp = qc->ap->private_data; 424 struct adma_port_priv *pp = qc->ap->private_data;
425 425
diff --git a/drivers/scsi/sata_mv.c b/drivers/scsi/sata_mv.c
index 2770005324b4..aceaf56999a5 100644
--- a/drivers/scsi/sata_mv.c
+++ b/drivers/scsi/sata_mv.c
@@ -37,7 +37,7 @@
37#include <asm/io.h> 37#include <asm/io.h>
38 38
39#define DRV_NAME "sata_mv" 39#define DRV_NAME "sata_mv"
40#define DRV_VERSION "0.5" 40#define DRV_VERSION "0.6"
41 41
42enum { 42enum {
43 /* BAR's are enumerated in terms of pci_resource_start() terms */ 43 /* BAR's are enumerated in terms of pci_resource_start() terms */
@@ -228,7 +228,9 @@ enum {
228 MV_HP_ERRATA_50XXB2 = (1 << 2), 228 MV_HP_ERRATA_50XXB2 = (1 << 2),
229 MV_HP_ERRATA_60X1B2 = (1 << 3), 229 MV_HP_ERRATA_60X1B2 = (1 << 3),
230 MV_HP_ERRATA_60X1C0 = (1 << 4), 230 MV_HP_ERRATA_60X1C0 = (1 << 4),
231 MV_HP_50XX = (1 << 5), 231 MV_HP_ERRATA_XX42A0 = (1 << 5),
232 MV_HP_50XX = (1 << 6),
233 MV_HP_GEN_IIE = (1 << 7),
232 234
233 /* Port private flags (pp_flags) */ 235 /* Port private flags (pp_flags) */
234 MV_PP_FLAG_EDMA_EN = (1 << 0), 236 MV_PP_FLAG_EDMA_EN = (1 << 0),
@@ -237,6 +239,9 @@ enum {
237 239
238#define IS_50XX(hpriv) ((hpriv)->hp_flags & MV_HP_50XX) 240#define IS_50XX(hpriv) ((hpriv)->hp_flags & MV_HP_50XX)
239#define IS_60XX(hpriv) (((hpriv)->hp_flags & MV_HP_50XX) == 0) 241#define IS_60XX(hpriv) (((hpriv)->hp_flags & MV_HP_50XX) == 0)
242#define IS_GEN_I(hpriv) IS_50XX(hpriv)
243#define IS_GEN_II(hpriv) IS_60XX(hpriv)
244#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
240 245
241enum { 246enum {
242 /* Our DMA boundary is determined by an ePRD being unable to handle 247 /* Our DMA boundary is determined by an ePRD being unable to handle
@@ -255,6 +260,8 @@ enum chip_type {
255 chip_5080, 260 chip_5080,
256 chip_604x, 261 chip_604x,
257 chip_608x, 262 chip_608x,
263 chip_6042,
264 chip_7042,
258}; 265};
259 266
260/* Command ReQuest Block: 32B */ 267/* Command ReQuest Block: 32B */
@@ -265,6 +272,14 @@ struct mv_crqb {
265 u16 ata_cmd[11]; 272 u16 ata_cmd[11];
266}; 273};
267 274
275struct mv_crqb_iie {
276 u32 addr;
277 u32 addr_hi;
278 u32 flags;
279 u32 len;
280 u32 ata_cmd[4];
281};
282
268/* Command ResPonse Block: 8B */ 283/* Command ResPonse Block: 8B */
269struct mv_crpb { 284struct mv_crpb {
270 u16 id; 285 u16 id;
@@ -328,7 +343,8 @@ static void mv_host_stop(struct ata_host_set *host_set);
328static int mv_port_start(struct ata_port *ap); 343static int mv_port_start(struct ata_port *ap);
329static void mv_port_stop(struct ata_port *ap); 344static void mv_port_stop(struct ata_port *ap);
330static void mv_qc_prep(struct ata_queued_cmd *qc); 345static void mv_qc_prep(struct ata_queued_cmd *qc);
331static int mv_qc_issue(struct ata_queued_cmd *qc); 346static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
347static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
332static irqreturn_t mv_interrupt(int irq, void *dev_instance, 348static irqreturn_t mv_interrupt(int irq, void *dev_instance,
333 struct pt_regs *regs); 349 struct pt_regs *regs);
334static void mv_eng_timeout(struct ata_port *ap); 350static void mv_eng_timeout(struct ata_port *ap);
@@ -362,11 +378,11 @@ static struct scsi_host_template mv_sht = {
362 .name = DRV_NAME, 378 .name = DRV_NAME,
363 .ioctl = ata_scsi_ioctl, 379 .ioctl = ata_scsi_ioctl,
364 .queuecommand = ata_scsi_queuecmd, 380 .queuecommand = ata_scsi_queuecmd,
381 .eh_timed_out = ata_scsi_timed_out,
365 .eh_strategy_handler = ata_scsi_error, 382 .eh_strategy_handler = ata_scsi_error,
366 .can_queue = MV_USE_Q_DEPTH, 383 .can_queue = MV_USE_Q_DEPTH,
367 .this_id = ATA_SHT_THIS_ID, 384 .this_id = ATA_SHT_THIS_ID,
368 .sg_tablesize = MV_MAX_SG_CT / 2, 385 .sg_tablesize = MV_MAX_SG_CT / 2,
369 .max_sectors = ATA_MAX_SECTORS,
370 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 386 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
371 .emulated = ATA_SHT_EMULATED, 387 .emulated = ATA_SHT_EMULATED,
372 .use_clustering = ATA_SHT_USE_CLUSTERING, 388 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -430,6 +446,33 @@ static const struct ata_port_operations mv6_ops = {
430 .host_stop = mv_host_stop, 446 .host_stop = mv_host_stop,
431}; 447};
432 448
449static const struct ata_port_operations mv_iie_ops = {
450 .port_disable = ata_port_disable,
451
452 .tf_load = ata_tf_load,
453 .tf_read = ata_tf_read,
454 .check_status = ata_check_status,
455 .exec_command = ata_exec_command,
456 .dev_select = ata_std_dev_select,
457
458 .phy_reset = mv_phy_reset,
459
460 .qc_prep = mv_qc_prep_iie,
461 .qc_issue = mv_qc_issue,
462
463 .eng_timeout = mv_eng_timeout,
464
465 .irq_handler = mv_interrupt,
466 .irq_clear = mv_irq_clear,
467
468 .scr_read = mv_scr_read,
469 .scr_write = mv_scr_write,
470
471 .port_start = mv_port_start,
472 .port_stop = mv_port_stop,
473 .host_stop = mv_host_stop,
474};
475
433static const struct ata_port_info mv_port_info[] = { 476static const struct ata_port_info mv_port_info[] = {
434 { /* chip_504x */ 477 { /* chip_504x */
435 .sht = &mv_sht, 478 .sht = &mv_sht,
@@ -467,6 +510,21 @@ static const struct ata_port_info mv_port_info[] = {
467 .udma_mask = 0x7f, /* udma0-6 */ 510 .udma_mask = 0x7f, /* udma0-6 */
468 .port_ops = &mv6_ops, 511 .port_ops = &mv6_ops,
469 }, 512 },
513 { /* chip_6042 */
514 .sht = &mv_sht,
515 .host_flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
516 .pio_mask = 0x1f, /* pio0-4 */
517 .udma_mask = 0x7f, /* udma0-6 */
518 .port_ops = &mv_iie_ops,
519 },
520 { /* chip_7042 */
521 .sht = &mv_sht,
522 .host_flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
523 MV_FLAG_DUAL_HC),
524 .pio_mask = 0x1f, /* pio0-4 */
525 .udma_mask = 0x7f, /* udma0-6 */
526 .port_ops = &mv_iie_ops,
527 },
470}; 528};
471 529
472static const struct pci_device_id mv_pci_tbl[] = { 530static const struct pci_device_id mv_pci_tbl[] = {
@@ -477,6 +535,7 @@ static const struct pci_device_id mv_pci_tbl[] = {
477 535
478 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6040), 0, 0, chip_604x}, 536 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6040), 0, 0, chip_604x},
479 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6041), 0, 0, chip_604x}, 537 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6041), 0, 0, chip_604x},
538 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6042), 0, 0, chip_6042},
480 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6080), 0, 0, chip_608x}, 539 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6080), 0, 0, chip_608x},
481 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6081), 0, 0, chip_608x}, 540 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6081), 0, 0, chip_608x},
482 541
@@ -572,8 +631,8 @@ static void mv_irq_clear(struct ata_port *ap)
572 * @base: port base address 631 * @base: port base address
573 * @pp: port private data 632 * @pp: port private data
574 * 633 *
575 * Verify the local cache of the eDMA state is accurate with an 634 * Verify the local cache of the eDMA state is accurate with a
576 * assert. 635 * WARN_ON.
577 * 636 *
578 * LOCKING: 637 * LOCKING:
579 * Inherited from caller. 638 * Inherited from caller.
@@ -584,15 +643,15 @@ static void mv_start_dma(void __iomem *base, struct mv_port_priv *pp)
584 writelfl(EDMA_EN, base + EDMA_CMD_OFS); 643 writelfl(EDMA_EN, base + EDMA_CMD_OFS);
585 pp->pp_flags |= MV_PP_FLAG_EDMA_EN; 644 pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
586 } 645 }
587 assert(EDMA_EN & readl(base + EDMA_CMD_OFS)); 646 WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));
588} 647}
589 648
590/** 649/**
591 * mv_stop_dma - Disable eDMA engine 650 * mv_stop_dma - Disable eDMA engine
592 * @ap: ATA channel to manipulate 651 * @ap: ATA channel to manipulate
593 * 652 *
594 * Verify the local cache of the eDMA state is accurate with an 653 * Verify the local cache of the eDMA state is accurate with a
595 * assert. 654 * WARN_ON.
596 * 655 *
597 * LOCKING: 656 * LOCKING:
598 * Inherited from caller. 657 * Inherited from caller.
@@ -610,7 +669,7 @@ static void mv_stop_dma(struct ata_port *ap)
610 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS); 669 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
611 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; 670 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
612 } else { 671 } else {
613 assert(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS))); 672 WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
614 } 673 }
615 674
616 /* now properly wait for the eDMA to stop */ 675 /* now properly wait for the eDMA to stop */
@@ -773,6 +832,33 @@ static inline void mv_priv_free(struct mv_port_priv *pp, struct device *dev)
773 dma_free_coherent(dev, MV_PORT_PRIV_DMA_SZ, pp->crpb, pp->crpb_dma); 832 dma_free_coherent(dev, MV_PORT_PRIV_DMA_SZ, pp->crpb, pp->crpb_dma);
774} 833}
775 834
835static void mv_edma_cfg(struct mv_host_priv *hpriv, void __iomem *port_mmio)
836{
837 u32 cfg = readl(port_mmio + EDMA_CFG_OFS);
838
839 /* set up non-NCQ EDMA configuration */
840 cfg &= ~0x1f; /* clear queue depth */
841 cfg &= ~EDMA_CFG_NCQ; /* clear NCQ mode */
842 cfg &= ~(1 << 9); /* disable equeue */
843
844 if (IS_GEN_I(hpriv))
845 cfg |= (1 << 8); /* enab config burst size mask */
846
847 else if (IS_GEN_II(hpriv))
848 cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
849
850 else if (IS_GEN_IIE(hpriv)) {
851 cfg |= (1 << 23); /* dis RX PM port mask */
852 cfg &= ~(1 << 16); /* dis FIS-based switching (for now) */
853 cfg &= ~(1 << 19); /* dis 128-entry queue (for now?) */
854 cfg |= (1 << 18); /* enab early completion */
855 cfg |= (1 << 17); /* enab host q cache */
856 cfg |= (1 << 22); /* enab cutthrough */
857 }
858
859 writelfl(cfg, port_mmio + EDMA_CFG_OFS);
860}
861
776/** 862/**
777 * mv_port_start - Port specific init/start routine. 863 * mv_port_start - Port specific init/start routine.
778 * @ap: ATA channel to manipulate 864 * @ap: ATA channel to manipulate
@@ -786,6 +872,7 @@ static inline void mv_priv_free(struct mv_port_priv *pp, struct device *dev)
786static int mv_port_start(struct ata_port *ap) 872static int mv_port_start(struct ata_port *ap)
787{ 873{
788 struct device *dev = ap->host_set->dev; 874 struct device *dev = ap->host_set->dev;
875 struct mv_host_priv *hpriv = ap->host_set->private_data;
789 struct mv_port_priv *pp; 876 struct mv_port_priv *pp;
790 void __iomem *port_mmio = mv_ap_base(ap); 877 void __iomem *port_mmio = mv_ap_base(ap);
791 void *mem; 878 void *mem;
@@ -829,17 +916,26 @@ static int mv_port_start(struct ata_port *ap)
829 pp->sg_tbl = mem; 916 pp->sg_tbl = mem;
830 pp->sg_tbl_dma = mem_dma; 917 pp->sg_tbl_dma = mem_dma;
831 918
832 writelfl(EDMA_CFG_Q_DEPTH | EDMA_CFG_RD_BRST_EXT | 919 mv_edma_cfg(hpriv, port_mmio);
833 EDMA_CFG_WR_BUFF_LEN, port_mmio + EDMA_CFG_OFS);
834 920
835 writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS); 921 writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
836 writelfl(pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK, 922 writelfl(pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK,
837 port_mmio + EDMA_REQ_Q_IN_PTR_OFS); 923 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
838 924
839 writelfl(0, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS); 925 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
840 writelfl(0, port_mmio + EDMA_RSP_Q_IN_PTR_OFS); 926 writelfl(pp->crqb_dma & 0xffffffff,
927 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
928 else
929 writelfl(0, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
841 930
842 writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS); 931 writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
932
933 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
934 writelfl(pp->crpb_dma & 0xffffffff,
935 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
936 else
937 writelfl(0, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
938
843 writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK, 939 writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK,
844 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); 940 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
845 941
@@ -960,21 +1056,19 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
960 struct ata_taskfile *tf; 1056 struct ata_taskfile *tf;
961 u16 flags = 0; 1057 u16 flags = 0;
962 1058
963 if (ATA_PROT_DMA != qc->tf.protocol) { 1059 if (ATA_PROT_DMA != qc->tf.protocol)
964 return; 1060 return;
965 }
966 1061
967 /* the req producer index should be the same as we remember it */ 1062 /* the req producer index should be the same as we remember it */
968 assert(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >> 1063 WARN_ON(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >>
969 EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) == 1064 EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
970 pp->req_producer); 1065 pp->req_producer);
971 1066
972 /* Fill in command request block 1067 /* Fill in command request block
973 */ 1068 */
974 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) { 1069 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
975 flags |= CRQB_FLAG_READ; 1070 flags |= CRQB_FLAG_READ;
976 } 1071 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
977 assert(MV_MAX_Q_DEPTH > qc->tag);
978 flags |= qc->tag << CRQB_TAG_SHIFT; 1072 flags |= qc->tag << CRQB_TAG_SHIFT;
979 1073
980 pp->crqb[pp->req_producer].sg_addr = 1074 pp->crqb[pp->req_producer].sg_addr =
@@ -1029,9 +1123,76 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
1029 mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0); 1123 mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
1030 mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */ 1124 mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */
1031 1125
1032 if (!(qc->flags & ATA_QCFLAG_DMAMAP)) { 1126 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1127 return;
1128 mv_fill_sg(qc);
1129}
1130
1131/**
1132 * mv_qc_prep_iie - Host specific command preparation.
1133 * @qc: queued command to prepare
1134 *
1135 * This routine simply redirects to the general purpose routine
1136 * if command is not DMA. Else, it handles prep of the CRQB
1137 * (command request block), does some sanity checking, and calls
1138 * the SG load routine.
1139 *
1140 * LOCKING:
1141 * Inherited from caller.
1142 */
1143static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
1144{
1145 struct ata_port *ap = qc->ap;
1146 struct mv_port_priv *pp = ap->private_data;
1147 struct mv_crqb_iie *crqb;
1148 struct ata_taskfile *tf;
1149 u32 flags = 0;
1150
1151 if (ATA_PROT_DMA != qc->tf.protocol)
1152 return;
1153
1154 /* the req producer index should be the same as we remember it */
1155 WARN_ON(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >>
1156 EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
1157 pp->req_producer);
1158
1159 /* Fill in Gen IIE command request block
1160 */
1161 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1162 flags |= CRQB_FLAG_READ;
1163
1164 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1165 flags |= qc->tag << CRQB_TAG_SHIFT;
1166
1167 crqb = (struct mv_crqb_iie *) &pp->crqb[pp->req_producer];
1168 crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
1169 crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
1170 crqb->flags = cpu_to_le32(flags);
1171
1172 tf = &qc->tf;
1173 crqb->ata_cmd[0] = cpu_to_le32(
1174 (tf->command << 16) |
1175 (tf->feature << 24)
1176 );
1177 crqb->ata_cmd[1] = cpu_to_le32(
1178 (tf->lbal << 0) |
1179 (tf->lbam << 8) |
1180 (tf->lbah << 16) |
1181 (tf->device << 24)
1182 );
1183 crqb->ata_cmd[2] = cpu_to_le32(
1184 (tf->hob_lbal << 0) |
1185 (tf->hob_lbam << 8) |
1186 (tf->hob_lbah << 16) |
1187 (tf->hob_feature << 24)
1188 );
1189 crqb->ata_cmd[3] = cpu_to_le32(
1190 (tf->nsect << 0) |
1191 (tf->hob_nsect << 8)
1192 );
1193
1194 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1033 return; 1195 return;
1034 }
1035 mv_fill_sg(qc); 1196 mv_fill_sg(qc);
1036} 1197}
1037 1198
@@ -1047,7 +1208,7 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
1047 * LOCKING: 1208 * LOCKING:
1048 * Inherited from caller. 1209 * Inherited from caller.
1049 */ 1210 */
1050static int mv_qc_issue(struct ata_queued_cmd *qc) 1211static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
1051{ 1212{
1052 void __iomem *port_mmio = mv_ap_base(qc->ap); 1213 void __iomem *port_mmio = mv_ap_base(qc->ap);
1053 struct mv_port_priv *pp = qc->ap->private_data; 1214 struct mv_port_priv *pp = qc->ap->private_data;
@@ -1065,12 +1226,12 @@ static int mv_qc_issue(struct ata_queued_cmd *qc)
1065 in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS); 1226 in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
1066 1227
1067 /* the req producer index should be the same as we remember it */ 1228 /* the req producer index should be the same as we remember it */
1068 assert(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) == 1229 WARN_ON(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
1069 pp->req_producer); 1230 pp->req_producer);
1070 /* until we do queuing, the queue should be empty at this point */ 1231 /* until we do queuing, the queue should be empty at this point */
1071 assert(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) == 1232 WARN_ON(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
1072 ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS) >> 1233 ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS) >>
1073 EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK)); 1234 EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
1074 1235
1075 mv_inc_q_index(&pp->req_producer); /* now incr producer index */ 1236 mv_inc_q_index(&pp->req_producer); /* now incr producer index */
1076 1237
@@ -1090,7 +1251,7 @@ static int mv_qc_issue(struct ata_queued_cmd *qc)
1090 * 1251 *
1091 * This routine is for use when the port is in DMA mode, when it 1252 * This routine is for use when the port is in DMA mode, when it
1092 * will be using the CRPB (command response block) method of 1253 * will be using the CRPB (command response block) method of
1093 * returning command completion information. We assert indices 1254 * returning command completion information. We check indices
1094 * are good, grab status, and bump the response consumer index to 1255 * are good, grab status, and bump the response consumer index to
1095 * prove that we're up to date. 1256 * prove that we're up to date.
1096 * 1257 *
@@ -1106,16 +1267,16 @@ static u8 mv_get_crpb_status(struct ata_port *ap)
1106 out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); 1267 out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
1107 1268
1108 /* the response consumer index should be the same as we remember it */ 1269 /* the response consumer index should be the same as we remember it */
1109 assert(((out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) == 1270 WARN_ON(((out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
1110 pp->rsp_consumer); 1271 pp->rsp_consumer);
1111 1272
1112 /* increment our consumer index... */ 1273 /* increment our consumer index... */
1113 pp->rsp_consumer = mv_inc_q_index(&pp->rsp_consumer); 1274 pp->rsp_consumer = mv_inc_q_index(&pp->rsp_consumer);
1114 1275
1115 /* and, until we do NCQ, there should only be 1 CRPB waiting */ 1276 /* and, until we do NCQ, there should only be 1 CRPB waiting */
1116 assert(((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS) >> 1277 WARN_ON(((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS) >>
1117 EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) == 1278 EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
1118 pp->rsp_consumer); 1279 pp->rsp_consumer);
1119 1280
1120 /* write out our inc'd consumer index so EDMA knows we're caught up */ 1281 /* write out our inc'd consumer index so EDMA knows we're caught up */
1121 out_ptr &= EDMA_RSP_Q_BASE_LO_MASK; 1282 out_ptr &= EDMA_RSP_Q_BASE_LO_MASK;
@@ -1681,6 +1842,12 @@ static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1681 m2 |= hpriv->signal[port].pre; 1842 m2 |= hpriv->signal[port].pre;
1682 m2 &= ~(1 << 16); 1843 m2 &= ~(1 << 16);
1683 1844
1845 /* according to mvSata 3.6.1, some IIE values are fixed */
1846 if (IS_GEN_IIE(hpriv)) {
1847 m2 &= ~0xC30FF01F;
1848 m2 |= 0x0000900F;
1849 }
1850
1684 writel(m2, port_mmio + PHY_MODE2); 1851 writel(m2, port_mmio + PHY_MODE2);
1685} 1852}
1686 1853
@@ -1846,7 +2013,6 @@ static void mv_phy_reset(struct ata_port *ap)
1846static void mv_eng_timeout(struct ata_port *ap) 2013static void mv_eng_timeout(struct ata_port *ap)
1847{ 2014{
1848 struct ata_queued_cmd *qc; 2015 struct ata_queued_cmd *qc;
1849 unsigned long flags;
1850 2016
1851 printk(KERN_ERR "ata%u: Entering mv_eng_timeout\n",ap->id); 2017 printk(KERN_ERR "ata%u: Entering mv_eng_timeout\n",ap->id);
1852 DPRINTK("All regs @ start of eng_timeout\n"); 2018 DPRINTK("All regs @ start of eng_timeout\n");
@@ -1861,22 +2027,8 @@ static void mv_eng_timeout(struct ata_port *ap)
1861 mv_err_intr(ap); 2027 mv_err_intr(ap);
1862 mv_stop_and_reset(ap); 2028 mv_stop_and_reset(ap);
1863 2029
1864 if (!qc) { 2030 qc->err_mask |= AC_ERR_TIMEOUT;
1865 printk(KERN_ERR "ata%u: BUG: timeout without command\n", 2031 ata_eh_qc_complete(qc);
1866 ap->id);
1867 } else {
1868 /* hack alert! We cannot use the supplied completion
1869 * function from inside the ->eh_strategy_handler() thread.
1870 * libata is the only user of ->eh_strategy_handler() in
1871 * any kernel, so the default scsi_done() assumes it is
1872 * not being called from the SCSI EH.
1873 */
1874 spin_lock_irqsave(&ap->host_set->lock, flags);
1875 qc->scsidone = scsi_finish_command;
1876 qc->err_mask |= AC_ERR_OTHER;
1877 ata_qc_complete(qc);
1878 spin_unlock_irqrestore(&ap->host_set->lock, flags);
1879 }
1880} 2032}
1881 2033
1882/** 2034/**
@@ -1995,6 +2147,27 @@ static int mv_chip_id(struct pci_dev *pdev, struct mv_host_priv *hpriv,
1995 } 2147 }
1996 break; 2148 break;
1997 2149
2150 case chip_7042:
2151 case chip_6042:
2152 hpriv->ops = &mv6xxx_ops;
2153
2154 hp_flags |= MV_HP_GEN_IIE;
2155
2156 switch (rev_id) {
2157 case 0x0:
2158 hp_flags |= MV_HP_ERRATA_XX42A0;
2159 break;
2160 case 0x1:
2161 hp_flags |= MV_HP_ERRATA_60X1C0;
2162 break;
2163 default:
2164 dev_printk(KERN_WARNING, &pdev->dev,
2165 "Applying 60X1C0 workarounds to unknown rev\n");
2166 hp_flags |= MV_HP_ERRATA_60X1C0;
2167 break;
2168 }
2169 break;
2170
1998 default: 2171 default:
1999 printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n", board_idx); 2172 printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n", board_idx);
2000 return 1; 2173 return 1;
diff --git a/drivers/scsi/sata_nv.c b/drivers/scsi/sata_nv.c
index bbbb55eeb73a..caffadc2e0ae 100644
--- a/drivers/scsi/sata_nv.c
+++ b/drivers/scsi/sata_nv.c
@@ -229,11 +229,11 @@ static struct scsi_host_template nv_sht = {
229 .name = DRV_NAME, 229 .name = DRV_NAME,
230 .ioctl = ata_scsi_ioctl, 230 .ioctl = ata_scsi_ioctl,
231 .queuecommand = ata_scsi_queuecmd, 231 .queuecommand = ata_scsi_queuecmd,
232 .eh_timed_out = ata_scsi_timed_out,
232 .eh_strategy_handler = ata_scsi_error, 233 .eh_strategy_handler = ata_scsi_error,
233 .can_queue = ATA_DEF_QUEUE, 234 .can_queue = ATA_DEF_QUEUE,
234 .this_id = ATA_SHT_THIS_ID, 235 .this_id = ATA_SHT_THIS_ID,
235 .sg_tablesize = LIBATA_MAX_PRD, 236 .sg_tablesize = LIBATA_MAX_PRD,
236 .max_sectors = ATA_MAX_SECTORS,
237 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 237 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
238 .emulated = ATA_SHT_EMULATED, 238 .emulated = ATA_SHT_EMULATED,
239 .use_clustering = ATA_SHT_USE_CLUSTERING, 239 .use_clustering = ATA_SHT_USE_CLUSTERING,
diff --git a/drivers/scsi/sata_promise.c b/drivers/scsi/sata_promise.c
index b0b0a69b3563..84cb3940ad88 100644
--- a/drivers/scsi/sata_promise.c
+++ b/drivers/scsi/sata_promise.c
@@ -46,7 +46,7 @@
46#include "sata_promise.h" 46#include "sata_promise.h"
47 47
48#define DRV_NAME "sata_promise" 48#define DRV_NAME "sata_promise"
49#define DRV_VERSION "1.03" 49#define DRV_VERSION "1.04"
50 50
51 51
52enum { 52enum {
@@ -58,6 +58,7 @@ enum {
58 PDC_GLOBAL_CTL = 0x48, /* Global control/status (per port) */ 58 PDC_GLOBAL_CTL = 0x48, /* Global control/status (per port) */
59 PDC_CTLSTAT = 0x60, /* IDE control and status (per port) */ 59 PDC_CTLSTAT = 0x60, /* IDE control and status (per port) */
60 PDC_SATA_PLUG_CSR = 0x6C, /* SATA Plug control/status reg */ 60 PDC_SATA_PLUG_CSR = 0x6C, /* SATA Plug control/status reg */
61 PDC2_SATA_PLUG_CSR = 0x60, /* SATAII Plug control/status reg */
61 PDC_SLEW_CTL = 0x470, /* slew rate control reg */ 62 PDC_SLEW_CTL = 0x470, /* slew rate control reg */
62 63
63 PDC_ERR_MASK = (1<<19) | (1<<20) | (1<<21) | (1<<22) | 64 PDC_ERR_MASK = (1<<19) | (1<<20) | (1<<21) | (1<<22) |
@@ -67,8 +68,10 @@ enum {
67 board_20319 = 1, /* FastTrak S150 TX4 */ 68 board_20319 = 1, /* FastTrak S150 TX4 */
68 board_20619 = 2, /* FastTrak TX4000 */ 69 board_20619 = 2, /* FastTrak TX4000 */
69 board_20771 = 3, /* FastTrak TX2300 */ 70 board_20771 = 3, /* FastTrak TX2300 */
71 board_2057x = 4, /* SATAII150 Tx2plus */
72 board_40518 = 5, /* SATAII150 Tx4 */
70 73
71 PDC_HAS_PATA = (1 << 1), /* PDC20375 has PATA */ 74 PDC_HAS_PATA = (1 << 1), /* PDC20375/20575 has PATA */
72 75
73 PDC_RESET = (1 << 11), /* HDMA reset */ 76 PDC_RESET = (1 << 11), /* HDMA reset */
74 77
@@ -82,6 +85,10 @@ struct pdc_port_priv {
82 dma_addr_t pkt_dma; 85 dma_addr_t pkt_dma;
83}; 86};
84 87
88struct pdc_host_priv {
89 int hotplug_offset;
90};
91
85static u32 pdc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg); 92static u32 pdc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg);
86static void pdc_sata_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); 93static void pdc_sata_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
87static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); 94static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
@@ -95,7 +102,8 @@ static void pdc_qc_prep(struct ata_queued_cmd *qc);
95static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf); 102static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
96static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf); 103static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
97static void pdc_irq_clear(struct ata_port *ap); 104static void pdc_irq_clear(struct ata_port *ap);
98static int pdc_qc_issue_prot(struct ata_queued_cmd *qc); 105static unsigned int pdc_qc_issue_prot(struct ata_queued_cmd *qc);
106static void pdc_host_stop(struct ata_host_set *host_set);
99 107
100 108
101static struct scsi_host_template pdc_ata_sht = { 109static struct scsi_host_template pdc_ata_sht = {
@@ -103,11 +111,11 @@ static struct scsi_host_template pdc_ata_sht = {
103 .name = DRV_NAME, 111 .name = DRV_NAME,
104 .ioctl = ata_scsi_ioctl, 112 .ioctl = ata_scsi_ioctl,
105 .queuecommand = ata_scsi_queuecmd, 113 .queuecommand = ata_scsi_queuecmd,
114 .eh_timed_out = ata_scsi_timed_out,
106 .eh_strategy_handler = ata_scsi_error, 115 .eh_strategy_handler = ata_scsi_error,
107 .can_queue = ATA_DEF_QUEUE, 116 .can_queue = ATA_DEF_QUEUE,
108 .this_id = ATA_SHT_THIS_ID, 117 .this_id = ATA_SHT_THIS_ID,
109 .sg_tablesize = LIBATA_MAX_PRD, 118 .sg_tablesize = LIBATA_MAX_PRD,
110 .max_sectors = ATA_MAX_SECTORS,
111 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 119 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
112 .emulated = ATA_SHT_EMULATED, 120 .emulated = ATA_SHT_EMULATED,
113 .use_clustering = ATA_SHT_USE_CLUSTERING, 121 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -137,7 +145,7 @@ static const struct ata_port_operations pdc_sata_ops = {
137 .scr_write = pdc_sata_scr_write, 145 .scr_write = pdc_sata_scr_write,
138 .port_start = pdc_port_start, 146 .port_start = pdc_port_start,
139 .port_stop = pdc_port_stop, 147 .port_stop = pdc_port_stop,
140 .host_stop = ata_pci_host_stop, 148 .host_stop = pdc_host_stop,
141}; 149};
142 150
143static const struct ata_port_operations pdc_pata_ops = { 151static const struct ata_port_operations pdc_pata_ops = {
@@ -158,7 +166,7 @@ static const struct ata_port_operations pdc_pata_ops = {
158 166
159 .port_start = pdc_port_start, 167 .port_start = pdc_port_start,
160 .port_stop = pdc_port_stop, 168 .port_stop = pdc_port_stop,
161 .host_stop = ata_pci_host_stop, 169 .host_stop = pdc_host_stop,
162}; 170};
163 171
164static const struct ata_port_info pdc_port_info[] = { 172static const struct ata_port_info pdc_port_info[] = {
@@ -201,6 +209,26 @@ static const struct ata_port_info pdc_port_info[] = {
201 .udma_mask = 0x7f, /* udma0-6 ; FIXME */ 209 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
202 .port_ops = &pdc_sata_ops, 210 .port_ops = &pdc_sata_ops,
203 }, 211 },
212
213 /* board_2057x */
214 {
215 .sht = &pdc_ata_sht,
216 .host_flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA,
217 .pio_mask = 0x1f, /* pio0-4 */
218 .mwdma_mask = 0x07, /* mwdma0-2 */
219 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
220 .port_ops = &pdc_sata_ops,
221 },
222
223 /* board_40518 */
224 {
225 .sht = &pdc_ata_sht,
226 .host_flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA,
227 .pio_mask = 0x1f, /* pio0-4 */
228 .mwdma_mask = 0x07, /* mwdma0-2 */
229 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
230 .port_ops = &pdc_sata_ops,
231 },
204}; 232};
205 233
206static const struct pci_device_id pdc_ata_pci_tbl[] = { 234static const struct pci_device_id pdc_ata_pci_tbl[] = {
@@ -217,9 +245,9 @@ static const struct pci_device_id pdc_ata_pci_tbl[] = {
217 { PCI_VENDOR_ID_PROMISE, 0x3376, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 245 { PCI_VENDOR_ID_PROMISE, 0x3376, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
218 board_2037x }, 246 board_2037x },
219 { PCI_VENDOR_ID_PROMISE, 0x3574, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 247 { PCI_VENDOR_ID_PROMISE, 0x3574, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
220 board_2037x }, 248 board_2057x },
221 { PCI_VENDOR_ID_PROMISE, 0x3d75, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 249 { PCI_VENDOR_ID_PROMISE, 0x3d75, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
222 board_2037x }, 250 board_2057x },
223 { PCI_VENDOR_ID_PROMISE, 0x3d73, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 251 { PCI_VENDOR_ID_PROMISE, 0x3d73, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
224 board_2037x }, 252 board_2037x },
225 253
@@ -227,12 +255,14 @@ static const struct pci_device_id pdc_ata_pci_tbl[] = {
227 board_20319 }, 255 board_20319 },
228 { PCI_VENDOR_ID_PROMISE, 0x3319, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 256 { PCI_VENDOR_ID_PROMISE, 0x3319, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
229 board_20319 }, 257 board_20319 },
258 { PCI_VENDOR_ID_PROMISE, 0x3515, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
259 board_20319 },
230 { PCI_VENDOR_ID_PROMISE, 0x3519, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 260 { PCI_VENDOR_ID_PROMISE, 0x3519, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
231 board_20319 }, 261 board_20319 },
232 { PCI_VENDOR_ID_PROMISE, 0x3d17, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 262 { PCI_VENDOR_ID_PROMISE, 0x3d17, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
233 board_20319 }, 263 board_20319 },
234 { PCI_VENDOR_ID_PROMISE, 0x3d18, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 264 { PCI_VENDOR_ID_PROMISE, 0x3d18, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
235 board_20319 }, 265 board_40518 },
236 266
237 { PCI_VENDOR_ID_PROMISE, 0x6629, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 267 { PCI_VENDOR_ID_PROMISE, 0x6629, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
238 board_20619 }, 268 board_20619 },
@@ -261,12 +291,11 @@ static int pdc_port_start(struct ata_port *ap)
261 if (rc) 291 if (rc)
262 return rc; 292 return rc;
263 293
264 pp = kmalloc(sizeof(*pp), GFP_KERNEL); 294 pp = kzalloc(sizeof(*pp), GFP_KERNEL);
265 if (!pp) { 295 if (!pp) {
266 rc = -ENOMEM; 296 rc = -ENOMEM;
267 goto err_out; 297 goto err_out;
268 } 298 }
269 memset(pp, 0, sizeof(*pp));
270 299
271 pp->pkt = dma_alloc_coherent(dev, 128, &pp->pkt_dma, GFP_KERNEL); 300 pp->pkt = dma_alloc_coherent(dev, 128, &pp->pkt_dma, GFP_KERNEL);
272 if (!pp->pkt) { 301 if (!pp->pkt) {
@@ -298,6 +327,16 @@ static void pdc_port_stop(struct ata_port *ap)
298} 327}
299 328
300 329
330static void pdc_host_stop(struct ata_host_set *host_set)
331{
332 struct pdc_host_priv *hp = host_set->private_data;
333
334 ata_pci_host_stop(host_set);
335
336 kfree(hp);
337}
338
339
301static void pdc_reset_port(struct ata_port *ap) 340static void pdc_reset_port(struct ata_port *ap)
302{ 341{
303 void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr + PDC_CTLSTAT; 342 void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr + PDC_CTLSTAT;
@@ -394,19 +433,6 @@ static void pdc_eng_timeout(struct ata_port *ap)
394 spin_lock_irqsave(&host_set->lock, flags); 433 spin_lock_irqsave(&host_set->lock, flags);
395 434
396 qc = ata_qc_from_tag(ap, ap->active_tag); 435 qc = ata_qc_from_tag(ap, ap->active_tag);
397 if (!qc) {
398 printk(KERN_ERR "ata%u: BUG: timeout without command\n",
399 ap->id);
400 goto out;
401 }
402
403 /* hack alert! We cannot use the supplied completion
404 * function from inside the ->eh_strategy_handler() thread.
405 * libata is the only user of ->eh_strategy_handler() in
406 * any kernel, so the default scsi_done() assumes it is
407 * not being called from the SCSI EH.
408 */
409 qc->scsidone = scsi_finish_command;
410 436
411 switch (qc->tf.protocol) { 437 switch (qc->tf.protocol) {
412 case ATA_PROT_DMA: 438 case ATA_PROT_DMA:
@@ -414,7 +440,6 @@ static void pdc_eng_timeout(struct ata_port *ap)
414 printk(KERN_ERR "ata%u: command timeout\n", ap->id); 440 printk(KERN_ERR "ata%u: command timeout\n", ap->id);
415 drv_stat = ata_wait_idle(ap); 441 drv_stat = ata_wait_idle(ap);
416 qc->err_mask |= __ac_err_mask(drv_stat); 442 qc->err_mask |= __ac_err_mask(drv_stat);
417 ata_qc_complete(qc);
418 break; 443 break;
419 444
420 default: 445 default:
@@ -424,12 +449,11 @@ static void pdc_eng_timeout(struct ata_port *ap)
424 ap->id, qc->tf.command, drv_stat); 449 ap->id, qc->tf.command, drv_stat);
425 450
426 qc->err_mask |= ac_err_mask(drv_stat); 451 qc->err_mask |= ac_err_mask(drv_stat);
427 ata_qc_complete(qc);
428 break; 452 break;
429 } 453 }
430 454
431out:
432 spin_unlock_irqrestore(&host_set->lock, flags); 455 spin_unlock_irqrestore(&host_set->lock, flags);
456 ata_eh_qc_complete(qc);
433 DPRINTK("EXIT\n"); 457 DPRINTK("EXIT\n");
434} 458}
435 459
@@ -495,14 +519,15 @@ static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *r
495 VPRINTK("QUICK EXIT 2\n"); 519 VPRINTK("QUICK EXIT 2\n");
496 return IRQ_NONE; 520 return IRQ_NONE;
497 } 521 }
522
523 spin_lock(&host_set->lock);
524
498 mask &= 0xffff; /* only 16 tags possible */ 525 mask &= 0xffff; /* only 16 tags possible */
499 if (!mask) { 526 if (!mask) {
500 VPRINTK("QUICK EXIT 3\n"); 527 VPRINTK("QUICK EXIT 3\n");
501 return IRQ_NONE; 528 goto done_irq;
502 } 529 }
503 530
504 spin_lock(&host_set->lock);
505
506 writel(mask, mmio_base + PDC_INT_SEQMASK); 531 writel(mask, mmio_base + PDC_INT_SEQMASK);
507 532
508 for (i = 0; i < host_set->n_ports; i++) { 533 for (i = 0; i < host_set->n_ports; i++) {
@@ -519,10 +544,10 @@ static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *r
519 } 544 }
520 } 545 }
521 546
522 spin_unlock(&host_set->lock);
523
524 VPRINTK("EXIT\n"); 547 VPRINTK("EXIT\n");
525 548
549done_irq:
550 spin_unlock(&host_set->lock);
526 return IRQ_RETVAL(handled); 551 return IRQ_RETVAL(handled);
527} 552}
528 553
@@ -544,7 +569,7 @@ static inline void pdc_packet_start(struct ata_queued_cmd *qc)
544 readl((void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); /* flush */ 569 readl((void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); /* flush */
545} 570}
546 571
547static int pdc_qc_issue_prot(struct ata_queued_cmd *qc) 572static unsigned int pdc_qc_issue_prot(struct ata_queued_cmd *qc)
548{ 573{
549 switch (qc->tf.protocol) { 574 switch (qc->tf.protocol) {
550 case ATA_PROT_DMA: 575 case ATA_PROT_DMA:
@@ -600,6 +625,8 @@ static void pdc_ata_setup_port(struct ata_ioports *port, unsigned long base)
600static void pdc_host_init(unsigned int chip_id, struct ata_probe_ent *pe) 625static void pdc_host_init(unsigned int chip_id, struct ata_probe_ent *pe)
601{ 626{
602 void __iomem *mmio = pe->mmio_base; 627 void __iomem *mmio = pe->mmio_base;
628 struct pdc_host_priv *hp = pe->private_data;
629 int hotplug_offset = hp->hotplug_offset;
603 u32 tmp; 630 u32 tmp;
604 631
605 /* 632 /*
@@ -614,12 +641,12 @@ static void pdc_host_init(unsigned int chip_id, struct ata_probe_ent *pe)
614 writel(tmp, mmio + PDC_FLASH_CTL); 641 writel(tmp, mmio + PDC_FLASH_CTL);
615 642
616 /* clear plug/unplug flags for all ports */ 643 /* clear plug/unplug flags for all ports */
617 tmp = readl(mmio + PDC_SATA_PLUG_CSR); 644 tmp = readl(mmio + hotplug_offset);
618 writel(tmp | 0xff, mmio + PDC_SATA_PLUG_CSR); 645 writel(tmp | 0xff, mmio + hotplug_offset);
619 646
620 /* mask plug/unplug ints */ 647 /* mask plug/unplug ints */
621 tmp = readl(mmio + PDC_SATA_PLUG_CSR); 648 tmp = readl(mmio + hotplug_offset);
622 writel(tmp | 0xff0000, mmio + PDC_SATA_PLUG_CSR); 649 writel(tmp | 0xff0000, mmio + hotplug_offset);
623 650
624 /* reduce TBG clock to 133 Mhz. */ 651 /* reduce TBG clock to 133 Mhz. */
625 tmp = readl(mmio + PDC_TBG_MODE); 652 tmp = readl(mmio + PDC_TBG_MODE);
@@ -641,6 +668,7 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
641{ 668{
642 static int printed_version; 669 static int printed_version;
643 struct ata_probe_ent *probe_ent = NULL; 670 struct ata_probe_ent *probe_ent = NULL;
671 struct pdc_host_priv *hp;
644 unsigned long base; 672 unsigned long base;
645 void __iomem *mmio_base; 673 void __iomem *mmio_base;
646 unsigned int board_idx = (unsigned int) ent->driver_data; 674 unsigned int board_idx = (unsigned int) ent->driver_data;
@@ -671,13 +699,12 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
671 if (rc) 699 if (rc)
672 goto err_out_regions; 700 goto err_out_regions;
673 701
674 probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL); 702 probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
675 if (probe_ent == NULL) { 703 if (probe_ent == NULL) {
676 rc = -ENOMEM; 704 rc = -ENOMEM;
677 goto err_out_regions; 705 goto err_out_regions;
678 } 706 }
679 707
680 memset(probe_ent, 0, sizeof(*probe_ent));
681 probe_ent->dev = pci_dev_to_dev(pdev); 708 probe_ent->dev = pci_dev_to_dev(pdev);
682 INIT_LIST_HEAD(&probe_ent->node); 709 INIT_LIST_HEAD(&probe_ent->node);
683 710
@@ -688,6 +715,16 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
688 } 715 }
689 base = (unsigned long) mmio_base; 716 base = (unsigned long) mmio_base;
690 717
718 hp = kzalloc(sizeof(*hp), GFP_KERNEL);
719 if (hp == NULL) {
720 rc = -ENOMEM;
721 goto err_out_free_ent;
722 }
723
724 /* Set default hotplug offset */
725 hp->hotplug_offset = PDC_SATA_PLUG_CSR;
726 probe_ent->private_data = hp;
727
691 probe_ent->sht = pdc_port_info[board_idx].sht; 728 probe_ent->sht = pdc_port_info[board_idx].sht;
692 probe_ent->host_flags = pdc_port_info[board_idx].host_flags; 729 probe_ent->host_flags = pdc_port_info[board_idx].host_flags;
693 probe_ent->pio_mask = pdc_port_info[board_idx].pio_mask; 730 probe_ent->pio_mask = pdc_port_info[board_idx].pio_mask;
@@ -707,6 +744,10 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
707 744
708 /* notice 4-port boards */ 745 /* notice 4-port boards */
709 switch (board_idx) { 746 switch (board_idx) {
747 case board_40518:
748 /* Override hotplug offset for SATAII150 */
749 hp->hotplug_offset = PDC2_SATA_PLUG_CSR;
750 /* Fall through */
710 case board_20319: 751 case board_20319:
711 probe_ent->n_ports = 4; 752 probe_ent->n_ports = 4;
712 753
@@ -716,6 +757,10 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
716 probe_ent->port[2].scr_addr = base + 0x600; 757 probe_ent->port[2].scr_addr = base + 0x600;
717 probe_ent->port[3].scr_addr = base + 0x700; 758 probe_ent->port[3].scr_addr = base + 0x700;
718 break; 759 break;
760 case board_2057x:
761 /* Override hotplug offset for SATAII150 */
762 hp->hotplug_offset = PDC2_SATA_PLUG_CSR;
763 /* Fall through */
719 case board_2037x: 764 case board_2037x:
720 probe_ent->n_ports = 2; 765 probe_ent->n_ports = 2;
721 break; 766 break;
@@ -741,8 +786,10 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
741 /* initialize adapter */ 786 /* initialize adapter */
742 pdc_host_init(board_idx, probe_ent); 787 pdc_host_init(board_idx, probe_ent);
743 788
744 /* FIXME: check ata_device_add return value */ 789 /* FIXME: Need any other frees than hp? */
745 ata_device_add(probe_ent); 790 if (!ata_device_add(probe_ent))
791 kfree(hp);
792
746 kfree(probe_ent); 793 kfree(probe_ent);
747 794
748 return 0; 795 return 0;
diff --git a/drivers/scsi/sata_qstor.c b/drivers/scsi/sata_qstor.c
index 80480f0fb2b8..9602f43a298e 100644
--- a/drivers/scsi/sata_qstor.c
+++ b/drivers/scsi/sata_qstor.c
@@ -120,7 +120,7 @@ static void qs_host_stop(struct ata_host_set *host_set);
120static void qs_port_stop(struct ata_port *ap); 120static void qs_port_stop(struct ata_port *ap);
121static void qs_phy_reset(struct ata_port *ap); 121static void qs_phy_reset(struct ata_port *ap);
122static void qs_qc_prep(struct ata_queued_cmd *qc); 122static void qs_qc_prep(struct ata_queued_cmd *qc);
123static int qs_qc_issue(struct ata_queued_cmd *qc); 123static unsigned int qs_qc_issue(struct ata_queued_cmd *qc);
124static int qs_check_atapi_dma(struct ata_queued_cmd *qc); 124static int qs_check_atapi_dma(struct ata_queued_cmd *qc);
125static void qs_bmdma_stop(struct ata_queued_cmd *qc); 125static void qs_bmdma_stop(struct ata_queued_cmd *qc);
126static u8 qs_bmdma_status(struct ata_port *ap); 126static u8 qs_bmdma_status(struct ata_port *ap);
@@ -132,11 +132,11 @@ static struct scsi_host_template qs_ata_sht = {
132 .name = DRV_NAME, 132 .name = DRV_NAME,
133 .ioctl = ata_scsi_ioctl, 133 .ioctl = ata_scsi_ioctl,
134 .queuecommand = ata_scsi_queuecmd, 134 .queuecommand = ata_scsi_queuecmd,
135 .eh_timed_out = ata_scsi_timed_out,
135 .eh_strategy_handler = ata_scsi_error, 136 .eh_strategy_handler = ata_scsi_error,
136 .can_queue = ATA_DEF_QUEUE, 137 .can_queue = ATA_DEF_QUEUE,
137 .this_id = ATA_SHT_THIS_ID, 138 .this_id = ATA_SHT_THIS_ID,
138 .sg_tablesize = QS_MAX_PRD, 139 .sg_tablesize = QS_MAX_PRD,
139 .max_sectors = ATA_MAX_SECTORS,
140 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 140 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
141 .emulated = ATA_SHT_EMULATED, 141 .emulated = ATA_SHT_EMULATED,
142 //FIXME .use_clustering = ATA_SHT_USE_CLUSTERING, 142 //FIXME .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -276,8 +276,8 @@ static unsigned int qs_fill_sg(struct ata_queued_cmd *qc)
276 unsigned int nelem; 276 unsigned int nelem;
277 u8 *prd = pp->pkt + QS_CPB_BYTES; 277 u8 *prd = pp->pkt + QS_CPB_BYTES;
278 278
279 assert(qc->__sg != NULL); 279 WARN_ON(qc->__sg == NULL);
280 assert(qc->n_elem > 0 || qc->pad_len > 0); 280 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
281 281
282 nelem = 0; 282 nelem = 0;
283 ata_for_each_sg(sg, qc) { 283 ata_for_each_sg(sg, qc) {
@@ -352,7 +352,7 @@ static inline void qs_packet_start(struct ata_queued_cmd *qc)
352 readl(chan + QS_CCT_CFF); /* flush */ 352 readl(chan + QS_CCT_CFF); /* flush */
353} 353}
354 354
355static int qs_qc_issue(struct ata_queued_cmd *qc) 355static unsigned int qs_qc_issue(struct ata_queued_cmd *qc)
356{ 356{
357 struct qs_port_priv *pp = qc->ap->private_data; 357 struct qs_port_priv *pp = qc->ap->private_data;
358 358
diff --git a/drivers/scsi/sata_sil.c b/drivers/scsi/sata_sil.c
index 9face3c6aa21..4f2a67ed39d8 100644
--- a/drivers/scsi/sata_sil.c
+++ b/drivers/scsi/sata_sil.c
@@ -49,24 +49,30 @@
49#define DRV_VERSION "0.9" 49#define DRV_VERSION "0.9"
50 50
51enum { 51enum {
52 /*
53 * host flags
54 */
52 SIL_FLAG_RERR_ON_DMA_ACT = (1 << 29), 55 SIL_FLAG_RERR_ON_DMA_ACT = (1 << 29),
53 SIL_FLAG_MOD15WRITE = (1 << 30), 56 SIL_FLAG_MOD15WRITE = (1 << 30),
57 SIL_DFL_HOST_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
58 ATA_FLAG_MMIO,
54 59
60 /*
61 * Controller IDs
62 */
55 sil_3112 = 0, 63 sil_3112 = 0,
56 sil_3112_m15w = 1, 64 sil_3512 = 1,
57 sil_3512 = 2, 65 sil_3114 = 2,
58 sil_3114 = 3,
59
60 SIL_FIFO_R0 = 0x40,
61 SIL_FIFO_W0 = 0x41,
62 SIL_FIFO_R1 = 0x44,
63 SIL_FIFO_W1 = 0x45,
64 SIL_FIFO_R2 = 0x240,
65 SIL_FIFO_W2 = 0x241,
66 SIL_FIFO_R3 = 0x244,
67 SIL_FIFO_W3 = 0x245,
68 66
67 /*
68 * Register offsets
69 */
69 SIL_SYSCFG = 0x48, 70 SIL_SYSCFG = 0x48,
71
72 /*
73 * Register bits
74 */
75 /* SYSCFG */
70 SIL_MASK_IDE0_INT = (1 << 22), 76 SIL_MASK_IDE0_INT = (1 << 22),
71 SIL_MASK_IDE1_INT = (1 << 23), 77 SIL_MASK_IDE1_INT = (1 << 23),
72 SIL_MASK_IDE2_INT = (1 << 24), 78 SIL_MASK_IDE2_INT = (1 << 24),
@@ -75,9 +81,12 @@ enum {
75 SIL_MASK_4PORT = SIL_MASK_2PORT | 81 SIL_MASK_4PORT = SIL_MASK_2PORT |
76 SIL_MASK_IDE2_INT | SIL_MASK_IDE3_INT, 82 SIL_MASK_IDE2_INT | SIL_MASK_IDE3_INT,
77 83
78 SIL_IDE2_BMDMA = 0x200, 84 /* BMDMA/BMDMA2 */
79
80 SIL_INTR_STEERING = (1 << 1), 85 SIL_INTR_STEERING = (1 << 1),
86
87 /*
88 * Others
89 */
81 SIL_QUIRK_MOD15WRITE = (1 << 0), 90 SIL_QUIRK_MOD15WRITE = (1 << 0),
82 SIL_QUIRK_UDMA5MAX = (1 << 1), 91 SIL_QUIRK_UDMA5MAX = (1 << 1),
83}; 92};
@@ -90,13 +99,13 @@ static void sil_post_set_mode (struct ata_port *ap);
90 99
91 100
92static const struct pci_device_id sil_pci_tbl[] = { 101static const struct pci_device_id sil_pci_tbl[] = {
93 { 0x1095, 0x3112, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_m15w }, 102 { 0x1095, 0x3112, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 },
94 { 0x1095, 0x0240, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_m15w }, 103 { 0x1095, 0x0240, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 },
95 { 0x1095, 0x3512, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3512 }, 104 { 0x1095, 0x3512, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3512 },
96 { 0x1095, 0x3114, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3114 }, 105 { 0x1095, 0x3114, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3114 },
97 { 0x1002, 0x436e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_m15w }, 106 { 0x1002, 0x436e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 },
98 { 0x1002, 0x4379, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_m15w }, 107 { 0x1002, 0x4379, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 },
99 { 0x1002, 0x437a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_m15w }, 108 { 0x1002, 0x437a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 },
100 { } /* terminate list */ 109 { } /* terminate list */
101}; 110};
102 111
@@ -137,11 +146,11 @@ static struct scsi_host_template sil_sht = {
137 .name = DRV_NAME, 146 .name = DRV_NAME,
138 .ioctl = ata_scsi_ioctl, 147 .ioctl = ata_scsi_ioctl,
139 .queuecommand = ata_scsi_queuecmd, 148 .queuecommand = ata_scsi_queuecmd,
149 .eh_timed_out = ata_scsi_timed_out,
140 .eh_strategy_handler = ata_scsi_error, 150 .eh_strategy_handler = ata_scsi_error,
141 .can_queue = ATA_DEF_QUEUE, 151 .can_queue = ATA_DEF_QUEUE,
142 .this_id = ATA_SHT_THIS_ID, 152 .this_id = ATA_SHT_THIS_ID,
143 .sg_tablesize = LIBATA_MAX_PRD, 153 .sg_tablesize = LIBATA_MAX_PRD,
144 .max_sectors = ATA_MAX_SECTORS,
145 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 154 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
146 .emulated = ATA_SHT_EMULATED, 155 .emulated = ATA_SHT_EMULATED,
147 .use_clustering = ATA_SHT_USE_CLUSTERING, 156 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -159,7 +168,7 @@ static const struct ata_port_operations sil_ops = {
159 .check_status = ata_check_status, 168 .check_status = ata_check_status,
160 .exec_command = ata_exec_command, 169 .exec_command = ata_exec_command,
161 .dev_select = ata_std_dev_select, 170 .dev_select = ata_std_dev_select,
162 .phy_reset = sata_phy_reset, 171 .probe_reset = ata_std_probe_reset,
163 .post_set_mode = sil_post_set_mode, 172 .post_set_mode = sil_post_set_mode,
164 .bmdma_setup = ata_bmdma_setup, 173 .bmdma_setup = ata_bmdma_setup,
165 .bmdma_start = ata_bmdma_start, 174 .bmdma_start = ata_bmdma_start,
@@ -181,19 +190,7 @@ static const struct ata_port_info sil_port_info[] = {
181 /* sil_3112 */ 190 /* sil_3112 */
182 { 191 {
183 .sht = &sil_sht, 192 .sht = &sil_sht,
184 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 193 .host_flags = SIL_DFL_HOST_FLAGS | SIL_FLAG_MOD15WRITE,
185 ATA_FLAG_SRST | ATA_FLAG_MMIO,
186 .pio_mask = 0x1f, /* pio0-4 */
187 .mwdma_mask = 0x07, /* mwdma0-2 */
188 .udma_mask = 0x3f, /* udma0-5 */
189 .port_ops = &sil_ops,
190 },
191 /* sil_3112_15w - keep it sync'd w/ sil_3112 */
192 {
193 .sht = &sil_sht,
194 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
195 ATA_FLAG_SRST | ATA_FLAG_MMIO |
196 SIL_FLAG_MOD15WRITE,
197 .pio_mask = 0x1f, /* pio0-4 */ 194 .pio_mask = 0x1f, /* pio0-4 */
198 .mwdma_mask = 0x07, /* mwdma0-2 */ 195 .mwdma_mask = 0x07, /* mwdma0-2 */
199 .udma_mask = 0x3f, /* udma0-5 */ 196 .udma_mask = 0x3f, /* udma0-5 */
@@ -202,9 +199,7 @@ static const struct ata_port_info sil_port_info[] = {
202 /* sil_3512 */ 199 /* sil_3512 */
203 { 200 {
204 .sht = &sil_sht, 201 .sht = &sil_sht,
205 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 202 .host_flags = SIL_DFL_HOST_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT,
206 ATA_FLAG_SRST | ATA_FLAG_MMIO |
207 SIL_FLAG_RERR_ON_DMA_ACT,
208 .pio_mask = 0x1f, /* pio0-4 */ 203 .pio_mask = 0x1f, /* pio0-4 */
209 .mwdma_mask = 0x07, /* mwdma0-2 */ 204 .mwdma_mask = 0x07, /* mwdma0-2 */
210 .udma_mask = 0x3f, /* udma0-5 */ 205 .udma_mask = 0x3f, /* udma0-5 */
@@ -213,9 +208,7 @@ static const struct ata_port_info sil_port_info[] = {
213 /* sil_3114 */ 208 /* sil_3114 */
214 { 209 {
215 .sht = &sil_sht, 210 .sht = &sil_sht,
216 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 211 .host_flags = SIL_DFL_HOST_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT,
217 ATA_FLAG_SRST | ATA_FLAG_MMIO |
218 SIL_FLAG_RERR_ON_DMA_ACT,
219 .pio_mask = 0x1f, /* pio0-4 */ 212 .pio_mask = 0x1f, /* pio0-4 */
220 .mwdma_mask = 0x07, /* mwdma0-2 */ 213 .mwdma_mask = 0x07, /* mwdma0-2 */
221 .udma_mask = 0x3f, /* udma0-5 */ 214 .udma_mask = 0x3f, /* udma0-5 */
@@ -229,16 +222,17 @@ static const struct {
229 unsigned long tf; /* ATA taskfile register block */ 222 unsigned long tf; /* ATA taskfile register block */
230 unsigned long ctl; /* ATA control/altstatus register block */ 223 unsigned long ctl; /* ATA control/altstatus register block */
231 unsigned long bmdma; /* DMA register block */ 224 unsigned long bmdma; /* DMA register block */
225 unsigned long fifo_cfg; /* FIFO Valid Byte Count and Control */
232 unsigned long scr; /* SATA control register block */ 226 unsigned long scr; /* SATA control register block */
233 unsigned long sien; /* SATA Interrupt Enable register */ 227 unsigned long sien; /* SATA Interrupt Enable register */
234 unsigned long xfer_mode;/* data transfer mode register */ 228 unsigned long xfer_mode;/* data transfer mode register */
235 unsigned long sfis_cfg; /* SATA FIS reception config register */ 229 unsigned long sfis_cfg; /* SATA FIS reception config register */
236} sil_port[] = { 230} sil_port[] = {
237 /* port 0 ... */ 231 /* port 0 ... */
238 { 0x80, 0x8A, 0x00, 0x100, 0x148, 0xb4, 0x14c }, 232 { 0x80, 0x8A, 0x00, 0x40, 0x100, 0x148, 0xb4, 0x14c },
239 { 0xC0, 0xCA, 0x08, 0x180, 0x1c8, 0xf4, 0x1cc }, 233 { 0xC0, 0xCA, 0x08, 0x44, 0x180, 0x1c8, 0xf4, 0x1cc },
240 { 0x280, 0x28A, 0x200, 0x300, 0x348, 0x2b4, 0x34c }, 234 { 0x280, 0x28A, 0x200, 0x240, 0x300, 0x348, 0x2b4, 0x34c },
241 { 0x2C0, 0x2CA, 0x208, 0x380, 0x3c8, 0x2f4, 0x3cc }, 235 { 0x2C0, 0x2CA, 0x208, 0x244, 0x380, 0x3c8, 0x2f4, 0x3cc },
242 /* ... port 3 */ 236 /* ... port 3 */
243}; 237};
244 238
@@ -354,22 +348,12 @@ static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
354static void sil_dev_config(struct ata_port *ap, struct ata_device *dev) 348static void sil_dev_config(struct ata_port *ap, struct ata_device *dev)
355{ 349{
356 unsigned int n, quirks = 0; 350 unsigned int n, quirks = 0;
357 unsigned char model_num[40]; 351 unsigned char model_num[41];
358 const char *s;
359 unsigned int len;
360 352
361 ata_dev_id_string(dev->id, model_num, ATA_ID_PROD_OFS, 353 ata_id_c_string(dev->id, model_num, ATA_ID_PROD_OFS, sizeof(model_num));
362 sizeof(model_num));
363 s = &model_num[0];
364 len = strnlen(s, sizeof(model_num));
365
366 /* ATAPI specifies that empty space is blank-filled; remove blanks */
367 while ((len > 0) && (s[len - 1] == ' '))
368 len--;
369 354
370 for (n = 0; sil_blacklist[n].product; n++) 355 for (n = 0; sil_blacklist[n].product; n++)
371 if (!memcmp(sil_blacklist[n].product, s, 356 if (!strcmp(sil_blacklist[n].product, model_num)) {
372 strlen(sil_blacklist[n].product))) {
373 quirks = sil_blacklist[n].quirk; 357 quirks = sil_blacklist[n].quirk;
374 break; 358 break;
375 } 359 }
@@ -380,16 +364,14 @@ static void sil_dev_config(struct ata_port *ap, struct ata_device *dev)
380 (quirks & SIL_QUIRK_MOD15WRITE))) { 364 (quirks & SIL_QUIRK_MOD15WRITE))) {
381 printk(KERN_INFO "ata%u(%u): applying Seagate errata fix (mod15write workaround)\n", 365 printk(KERN_INFO "ata%u(%u): applying Seagate errata fix (mod15write workaround)\n",
382 ap->id, dev->devno); 366 ap->id, dev->devno);
383 ap->host->max_sectors = 15; 367 dev->max_sectors = 15;
384 ap->host->hostt->max_sectors = 15;
385 dev->flags |= ATA_DFLAG_LOCK_SECTORS;
386 return; 368 return;
387 } 369 }
388 370
389 /* limit to udma5 */ 371 /* limit to udma5 */
390 if (quirks & SIL_QUIRK_UDMA5MAX) { 372 if (quirks & SIL_QUIRK_UDMA5MAX) {
391 printk(KERN_INFO "ata%u(%u): applying Maxtor errata fix %s\n", 373 printk(KERN_INFO "ata%u(%u): applying Maxtor errata fix %s\n",
392 ap->id, dev->devno, s); 374 ap->id, dev->devno, model_num);
393 ap->udma_mask &= ATA_UDMA5; 375 ap->udma_mask &= ATA_UDMA5;
394 return; 376 return;
395 } 377 }
@@ -431,13 +413,12 @@ static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
431 if (rc) 413 if (rc)
432 goto err_out_regions; 414 goto err_out_regions;
433 415
434 probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL); 416 probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
435 if (probe_ent == NULL) { 417 if (probe_ent == NULL) {
436 rc = -ENOMEM; 418 rc = -ENOMEM;
437 goto err_out_regions; 419 goto err_out_regions;
438 } 420 }
439 421
440 memset(probe_ent, 0, sizeof(*probe_ent));
441 INIT_LIST_HEAD(&probe_ent->node); 422 INIT_LIST_HEAD(&probe_ent->node);
442 probe_ent->dev = pci_dev_to_dev(pdev); 423 probe_ent->dev = pci_dev_to_dev(pdev);
443 probe_ent->port_ops = sil_port_info[ent->driver_data].port_ops; 424 probe_ent->port_ops = sil_port_info[ent->driver_data].port_ops;
@@ -474,19 +455,12 @@ static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
474 if (cls) { 455 if (cls) {
475 cls >>= 3; 456 cls >>= 3;
476 cls++; /* cls = (line_size/8)+1 */ 457 cls++; /* cls = (line_size/8)+1 */
477 writeb(cls, mmio_base + SIL_FIFO_R0); 458 for (i = 0; i < probe_ent->n_ports; i++)
478 writeb(cls, mmio_base + SIL_FIFO_W0); 459 writew(cls << 8 | cls,
479 writeb(cls, mmio_base + SIL_FIFO_R1); 460 mmio_base + sil_port[i].fifo_cfg);
480 writeb(cls, mmio_base + SIL_FIFO_W1);
481 if (ent->driver_data == sil_3114) {
482 writeb(cls, mmio_base + SIL_FIFO_R2);
483 writeb(cls, mmio_base + SIL_FIFO_W2);
484 writeb(cls, mmio_base + SIL_FIFO_R3);
485 writeb(cls, mmio_base + SIL_FIFO_W3);
486 }
487 } else 461 } else
488 dev_printk(KERN_WARNING, &pdev->dev, 462 dev_printk(KERN_WARNING, &pdev->dev,
489 "cache line size not set. Driver may not function\n"); 463 "cache line size not set. Driver may not function\n");
490 464
491 /* Apply R_ERR on DMA activate FIS errata workaround */ 465 /* Apply R_ERR on DMA activate FIS errata workaround */
492 if (probe_ent->host_flags & SIL_FLAG_RERR_ON_DMA_ACT) { 466 if (probe_ent->host_flags & SIL_FLAG_RERR_ON_DMA_ACT) {
@@ -509,10 +483,10 @@ static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
509 irq_mask = SIL_MASK_4PORT; 483 irq_mask = SIL_MASK_4PORT;
510 484
511 /* flip the magic "make 4 ports work" bit */ 485 /* flip the magic "make 4 ports work" bit */
512 tmp = readl(mmio_base + SIL_IDE2_BMDMA); 486 tmp = readl(mmio_base + sil_port[2].bmdma);
513 if ((tmp & SIL_INTR_STEERING) == 0) 487 if ((tmp & SIL_INTR_STEERING) == 0)
514 writel(tmp | SIL_INTR_STEERING, 488 writel(tmp | SIL_INTR_STEERING,
515 mmio_base + SIL_IDE2_BMDMA); 489 mmio_base + sil_port[2].bmdma);
516 490
517 } else { 491 } else {
518 irq_mask = SIL_MASK_2PORT; 492 irq_mask = SIL_MASK_2PORT;
diff --git a/drivers/scsi/sata_sil24.c b/drivers/scsi/sata_sil24.c
index 923130185a9e..8fb62427be84 100644
--- a/drivers/scsi/sata_sil24.c
+++ b/drivers/scsi/sata_sil24.c
@@ -249,9 +249,9 @@ static u8 sil24_check_status(struct ata_port *ap);
249static u32 sil24_scr_read(struct ata_port *ap, unsigned sc_reg); 249static u32 sil24_scr_read(struct ata_port *ap, unsigned sc_reg);
250static void sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val); 250static void sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val);
251static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf); 251static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
252static void sil24_phy_reset(struct ata_port *ap); 252static int sil24_probe_reset(struct ata_port *ap, unsigned int *classes);
253static void sil24_qc_prep(struct ata_queued_cmd *qc); 253static void sil24_qc_prep(struct ata_queued_cmd *qc);
254static int sil24_qc_issue(struct ata_queued_cmd *qc); 254static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc);
255static void sil24_irq_clear(struct ata_port *ap); 255static void sil24_irq_clear(struct ata_port *ap);
256static void sil24_eng_timeout(struct ata_port *ap); 256static void sil24_eng_timeout(struct ata_port *ap);
257static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *regs); 257static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
@@ -262,6 +262,7 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
262 262
263static const struct pci_device_id sil24_pci_tbl[] = { 263static const struct pci_device_id sil24_pci_tbl[] = {
264 { 0x1095, 0x3124, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3124 }, 264 { 0x1095, 0x3124, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3124 },
265 { 0x8086, 0x3124, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3124 },
265 { 0x1095, 0x3132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3132 }, 266 { 0x1095, 0x3132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3132 },
266 { 0x1095, 0x3131, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3131 }, 267 { 0x1095, 0x3131, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3131 },
267 { 0x1095, 0x3531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3131 }, 268 { 0x1095, 0x3531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3131 },
@@ -280,11 +281,11 @@ static struct scsi_host_template sil24_sht = {
280 .name = DRV_NAME, 281 .name = DRV_NAME,
281 .ioctl = ata_scsi_ioctl, 282 .ioctl = ata_scsi_ioctl,
282 .queuecommand = ata_scsi_queuecmd, 283 .queuecommand = ata_scsi_queuecmd,
284 .eh_timed_out = ata_scsi_timed_out,
283 .eh_strategy_handler = ata_scsi_error, 285 .eh_strategy_handler = ata_scsi_error,
284 .can_queue = ATA_DEF_QUEUE, 286 .can_queue = ATA_DEF_QUEUE,
285 .this_id = ATA_SHT_THIS_ID, 287 .this_id = ATA_SHT_THIS_ID,
286 .sg_tablesize = LIBATA_MAX_PRD, 288 .sg_tablesize = LIBATA_MAX_PRD,
287 .max_sectors = ATA_MAX_SECTORS,
288 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 289 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
289 .emulated = ATA_SHT_EMULATED, 290 .emulated = ATA_SHT_EMULATED,
290 .use_clustering = ATA_SHT_USE_CLUSTERING, 291 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -305,7 +306,7 @@ static const struct ata_port_operations sil24_ops = {
305 306
306 .tf_read = sil24_tf_read, 307 .tf_read = sil24_tf_read,
307 308
308 .phy_reset = sil24_phy_reset, 309 .probe_reset = sil24_probe_reset,
309 310
310 .qc_prep = sil24_qc_prep, 311 .qc_prep = sil24_qc_prep,
311 .qc_issue = sil24_qc_issue, 312 .qc_issue = sil24_qc_issue,
@@ -335,8 +336,8 @@ static struct ata_port_info sil24_port_info[] = {
335 { 336 {
336 .sht = &sil24_sht, 337 .sht = &sil24_sht,
337 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 338 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
338 ATA_FLAG_SRST | ATA_FLAG_MMIO | 339 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
339 ATA_FLAG_PIO_DMA | SIL24_NPORTS2FLAG(4), 340 SIL24_NPORTS2FLAG(4),
340 .pio_mask = 0x1f, /* pio0-4 */ 341 .pio_mask = 0x1f, /* pio0-4 */
341 .mwdma_mask = 0x07, /* mwdma0-2 */ 342 .mwdma_mask = 0x07, /* mwdma0-2 */
342 .udma_mask = 0x3f, /* udma0-5 */ 343 .udma_mask = 0x3f, /* udma0-5 */
@@ -346,8 +347,8 @@ static struct ata_port_info sil24_port_info[] = {
346 { 347 {
347 .sht = &sil24_sht, 348 .sht = &sil24_sht,
348 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 349 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
349 ATA_FLAG_SRST | ATA_FLAG_MMIO | 350 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
350 ATA_FLAG_PIO_DMA | SIL24_NPORTS2FLAG(2), 351 SIL24_NPORTS2FLAG(2),
351 .pio_mask = 0x1f, /* pio0-4 */ 352 .pio_mask = 0x1f, /* pio0-4 */
352 .mwdma_mask = 0x07, /* mwdma0-2 */ 353 .mwdma_mask = 0x07, /* mwdma0-2 */
353 .udma_mask = 0x3f, /* udma0-5 */ 354 .udma_mask = 0x3f, /* udma0-5 */
@@ -357,8 +358,8 @@ static struct ata_port_info sil24_port_info[] = {
357 { 358 {
358 .sht = &sil24_sht, 359 .sht = &sil24_sht,
359 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 360 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
360 ATA_FLAG_SRST | ATA_FLAG_MMIO | 361 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
361 ATA_FLAG_PIO_DMA | SIL24_NPORTS2FLAG(1), 362 SIL24_NPORTS2FLAG(1),
362 .pio_mask = 0x1f, /* pio0-4 */ 363 .pio_mask = 0x1f, /* pio0-4 */
363 .mwdma_mask = 0x07, /* mwdma0-2 */ 364 .mwdma_mask = 0x07, /* mwdma0-2 */
364 .udma_mask = 0x3f, /* udma0-5 */ 365 .udma_mask = 0x3f, /* udma0-5 */
@@ -370,7 +371,7 @@ static void sil24_dev_config(struct ata_port *ap, struct ata_device *dev)
370{ 371{
371 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr; 372 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
372 373
373 if (ap->cdb_len == 16) 374 if (dev->cdb_len == 16)
374 writel(PORT_CS_CDB16, port + PORT_CTRL_STAT); 375 writel(PORT_CS_CDB16, port + PORT_CTRL_STAT);
375 else 376 else
376 writel(PORT_CS_CDB16, port + PORT_CTRL_CLR); 377 writel(PORT_CS_CDB16, port + PORT_CTRL_CLR);
@@ -427,7 +428,8 @@ static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
427 *tf = pp->tf; 428 *tf = pp->tf;
428} 429}
429 430
430static int sil24_issue_SRST(struct ata_port *ap) 431static int sil24_softreset(struct ata_port *ap, int verbose,
432 unsigned int *class)
431{ 433{
432 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr; 434 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
433 struct sil24_port_priv *pp = ap->private_data; 435 struct sil24_port_priv *pp = ap->private_data;
@@ -436,6 +438,8 @@ static int sil24_issue_SRST(struct ata_port *ap)
436 u32 irq_enable, irq_stat; 438 u32 irq_enable, irq_stat;
437 int cnt; 439 int cnt;
438 440
441 DPRINTK("ENTER\n");
442
439 /* temporarily turn off IRQs during SRST */ 443 /* temporarily turn off IRQs during SRST */
440 irq_enable = readl(port + PORT_IRQ_ENABLE_SET); 444 irq_enable = readl(port + PORT_IRQ_ENABLE_SET);
441 writel(irq_enable, port + PORT_IRQ_ENABLE_CLR); 445 writel(irq_enable, port + PORT_IRQ_ENABLE_CLR);
@@ -465,30 +469,36 @@ static int sil24_issue_SRST(struct ata_port *ap)
465 /* restore IRQs */ 469 /* restore IRQs */
466 writel(irq_enable, port + PORT_IRQ_ENABLE_SET); 470 writel(irq_enable, port + PORT_IRQ_ENABLE_SET);
467 471
468 if (!(irq_stat & PORT_IRQ_COMPLETE)) 472 if (sata_dev_present(ap)) {
469 return -1; 473 if (!(irq_stat & PORT_IRQ_COMPLETE)) {
474 DPRINTK("EXIT, srst failed\n");
475 return -EIO;
476 }
470 477
471 /* update TF */ 478 sil24_update_tf(ap);
472 sil24_update_tf(ap); 479 *class = ata_dev_classify(&pp->tf);
480 }
481 if (*class == ATA_DEV_UNKNOWN)
482 *class = ATA_DEV_NONE;
483
484 DPRINTK("EXIT, class=%u\n", *class);
473 return 0; 485 return 0;
474} 486}
475 487
476static void sil24_phy_reset(struct ata_port *ap) 488static int sil24_hardreset(struct ata_port *ap, int verbose,
489 unsigned int *class)
477{ 490{
478 struct sil24_port_priv *pp = ap->private_data; 491 unsigned int dummy_class;
479
480 __sata_phy_reset(ap);
481 if (ap->flags & ATA_FLAG_PORT_DISABLED)
482 return;
483 492
484 if (sil24_issue_SRST(ap) < 0) { 493 /* sil24 doesn't report device signature after hard reset */
485 printk(KERN_ERR DRV_NAME 494 return sata_std_hardreset(ap, verbose, &dummy_class);
486 " ata%u: SRST failed, disabling port\n", ap->id); 495}
487 ap->ops->port_disable(ap);
488 return;
489 }
490 496
491 ap->device->class = ata_dev_classify(&pp->tf); 497static int sil24_probe_reset(struct ata_port *ap, unsigned int *classes)
498{
499 return ata_drive_probe_reset(ap, ata_std_probeinit,
500 sil24_softreset, sil24_hardreset,
501 ata_std_postreset, classes);
492} 502}
493 503
494static inline void sil24_fill_sg(struct ata_queued_cmd *qc, 504static inline void sil24_fill_sg(struct ata_queued_cmd *qc,
@@ -533,7 +543,7 @@ static void sil24_qc_prep(struct ata_queued_cmd *qc)
533 prb = &cb->atapi.prb; 543 prb = &cb->atapi.prb;
534 sge = cb->atapi.sge; 544 sge = cb->atapi.sge;
535 memset(cb->atapi.cdb, 0, 32); 545 memset(cb->atapi.cdb, 0, 32);
536 memcpy(cb->atapi.cdb, qc->cdb, ap->cdb_len); 546 memcpy(cb->atapi.cdb, qc->cdb, qc->dev->cdb_len);
537 547
538 if (qc->tf.protocol != ATA_PROT_ATAPI_NODATA) { 548 if (qc->tf.protocol != ATA_PROT_ATAPI_NODATA) {
539 if (qc->tf.flags & ATA_TFLAG_WRITE) 549 if (qc->tf.flags & ATA_TFLAG_WRITE)
@@ -557,7 +567,7 @@ static void sil24_qc_prep(struct ata_queued_cmd *qc)
557 sil24_fill_sg(qc, sge); 567 sil24_fill_sg(qc, sge);
558} 568}
559 569
560static int sil24_qc_issue(struct ata_queued_cmd *qc) 570static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc)
561{ 571{
562 struct ata_port *ap = qc->ap; 572 struct ata_port *ap = qc->ap;
563 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr; 573 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
@@ -638,23 +648,10 @@ static void sil24_eng_timeout(struct ata_port *ap)
638 struct ata_queued_cmd *qc; 648 struct ata_queued_cmd *qc;
639 649
640 qc = ata_qc_from_tag(ap, ap->active_tag); 650 qc = ata_qc_from_tag(ap, ap->active_tag);
641 if (!qc) {
642 printk(KERN_ERR "ata%u: BUG: timeout without command\n",
643 ap->id);
644 return;
645 }
646 651
647 /*
648 * hack alert! We cannot use the supplied completion
649 * function from inside the ->eh_strategy_handler() thread.
650 * libata is the only user of ->eh_strategy_handler() in
651 * any kernel, so the default scsi_done() assumes it is
652 * not being called from the SCSI EH.
653 */
654 printk(KERN_ERR "ata%u: command timeout\n", ap->id); 652 printk(KERN_ERR "ata%u: command timeout\n", ap->id);
655 qc->scsidone = scsi_finish_command; 653 qc->err_mask |= AC_ERR_TIMEOUT;
656 qc->err_mask |= AC_ERR_OTHER; 654 ata_eh_qc_complete(qc);
657 ata_qc_complete(qc);
658 655
659 sil24_reset_controller(ap); 656 sil24_reset_controller(ap);
660} 657}
@@ -895,6 +892,7 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
895 probe_ent->sht = pinfo->sht; 892 probe_ent->sht = pinfo->sht;
896 probe_ent->host_flags = pinfo->host_flags; 893 probe_ent->host_flags = pinfo->host_flags;
897 probe_ent->pio_mask = pinfo->pio_mask; 894 probe_ent->pio_mask = pinfo->pio_mask;
895 probe_ent->mwdma_mask = pinfo->mwdma_mask;
898 probe_ent->udma_mask = pinfo->udma_mask; 896 probe_ent->udma_mask = pinfo->udma_mask;
899 probe_ent->port_ops = pinfo->port_ops; 897 probe_ent->port_ops = pinfo->port_ops;
900 probe_ent->n_ports = SIL24_FLAG2NPORTS(pinfo->host_flags); 898 probe_ent->n_ports = SIL24_FLAG2NPORTS(pinfo->host_flags);
diff --git a/drivers/scsi/sata_sis.c b/drivers/scsi/sata_sis.c
index 2df8c5632ac3..7fd45f86de99 100644
--- a/drivers/scsi/sata_sis.c
+++ b/drivers/scsi/sata_sis.c
@@ -87,11 +87,11 @@ static struct scsi_host_template sis_sht = {
87 .name = DRV_NAME, 87 .name = DRV_NAME,
88 .ioctl = ata_scsi_ioctl, 88 .ioctl = ata_scsi_ioctl,
89 .queuecommand = ata_scsi_queuecmd, 89 .queuecommand = ata_scsi_queuecmd,
90 .eh_timed_out = ata_scsi_timed_out,
90 .eh_strategy_handler = ata_scsi_error, 91 .eh_strategy_handler = ata_scsi_error,
91 .can_queue = ATA_DEF_QUEUE, 92 .can_queue = ATA_DEF_QUEUE,
92 .this_id = ATA_SHT_THIS_ID, 93 .this_id = ATA_SHT_THIS_ID,
93 .sg_tablesize = ATA_MAX_PRD, 94 .sg_tablesize = ATA_MAX_PRD,
94 .max_sectors = ATA_MAX_SECTORS,
95 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 95 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
96 .emulated = ATA_SHT_EMULATED, 96 .emulated = ATA_SHT_EMULATED,
97 .use_clustering = ATA_SHT_USE_CLUSTERING, 97 .use_clustering = ATA_SHT_USE_CLUSTERING,
diff --git a/drivers/scsi/sata_svw.c b/drivers/scsi/sata_svw.c
index d8472563fde8..4aaccd53e736 100644
--- a/drivers/scsi/sata_svw.c
+++ b/drivers/scsi/sata_svw.c
@@ -288,11 +288,11 @@ static struct scsi_host_template k2_sata_sht = {
288 .name = DRV_NAME, 288 .name = DRV_NAME,
289 .ioctl = ata_scsi_ioctl, 289 .ioctl = ata_scsi_ioctl,
290 .queuecommand = ata_scsi_queuecmd, 290 .queuecommand = ata_scsi_queuecmd,
291 .eh_timed_out = ata_scsi_timed_out,
291 .eh_strategy_handler = ata_scsi_error, 292 .eh_strategy_handler = ata_scsi_error,
292 .can_queue = ATA_DEF_QUEUE, 293 .can_queue = ATA_DEF_QUEUE,
293 .this_id = ATA_SHT_THIS_ID, 294 .this_id = ATA_SHT_THIS_ID,
294 .sg_tablesize = LIBATA_MAX_PRD, 295 .sg_tablesize = LIBATA_MAX_PRD,
295 .max_sectors = ATA_MAX_SECTORS,
296 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 296 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
297 .emulated = ATA_SHT_EMULATED, 297 .emulated = ATA_SHT_EMULATED,
298 .use_clustering = ATA_SHT_USE_CLUSTERING, 298 .use_clustering = ATA_SHT_USE_CLUSTERING,
diff --git a/drivers/scsi/sata_sx4.c b/drivers/scsi/sata_sx4.c
index bc87c16c80d2..9f8a76815402 100644
--- a/drivers/scsi/sata_sx4.c
+++ b/drivers/scsi/sata_sx4.c
@@ -174,7 +174,7 @@ static void pdc20621_get_from_dimm(struct ata_probe_ent *pe,
174static void pdc20621_put_to_dimm(struct ata_probe_ent *pe, 174static void pdc20621_put_to_dimm(struct ata_probe_ent *pe,
175 void *psource, u32 offset, u32 size); 175 void *psource, u32 offset, u32 size);
176static void pdc20621_irq_clear(struct ata_port *ap); 176static void pdc20621_irq_clear(struct ata_port *ap);
177static int pdc20621_qc_issue_prot(struct ata_queued_cmd *qc); 177static unsigned int pdc20621_qc_issue_prot(struct ata_queued_cmd *qc);
178 178
179 179
180static struct scsi_host_template pdc_sata_sht = { 180static struct scsi_host_template pdc_sata_sht = {
@@ -182,11 +182,11 @@ static struct scsi_host_template pdc_sata_sht = {
182 .name = DRV_NAME, 182 .name = DRV_NAME,
183 .ioctl = ata_scsi_ioctl, 183 .ioctl = ata_scsi_ioctl,
184 .queuecommand = ata_scsi_queuecmd, 184 .queuecommand = ata_scsi_queuecmd,
185 .eh_timed_out = ata_scsi_timed_out,
185 .eh_strategy_handler = ata_scsi_error, 186 .eh_strategy_handler = ata_scsi_error,
186 .can_queue = ATA_DEF_QUEUE, 187 .can_queue = ATA_DEF_QUEUE,
187 .this_id = ATA_SHT_THIS_ID, 188 .this_id = ATA_SHT_THIS_ID,
188 .sg_tablesize = LIBATA_MAX_PRD, 189 .sg_tablesize = LIBATA_MAX_PRD,
189 .max_sectors = ATA_MAX_SECTORS,
190 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 190 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
191 .emulated = ATA_SHT_EMULATED, 191 .emulated = ATA_SHT_EMULATED,
192 .use_clustering = ATA_SHT_USE_CLUSTERING, 192 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -460,7 +460,7 @@ static void pdc20621_dma_prep(struct ata_queued_cmd *qc)
460 unsigned int i, idx, total_len = 0, sgt_len; 460 unsigned int i, idx, total_len = 0, sgt_len;
461 u32 *buf = (u32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ]; 461 u32 *buf = (u32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ];
462 462
463 assert(qc->flags & ATA_QCFLAG_DMAMAP); 463 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
464 464
465 VPRINTK("ata%u: ENTER\n", ap->id); 465 VPRINTK("ata%u: ENTER\n", ap->id);
466 466
@@ -678,7 +678,7 @@ static void pdc20621_packet_start(struct ata_queued_cmd *qc)
678 } 678 }
679} 679}
680 680
681static int pdc20621_qc_issue_prot(struct ata_queued_cmd *qc) 681static unsigned int pdc20621_qc_issue_prot(struct ata_queued_cmd *qc)
682{ 682{
683 switch (qc->tf.protocol) { 683 switch (qc->tf.protocol) {
684 case ATA_PROT_DMA: 684 case ATA_PROT_DMA:
@@ -866,26 +866,12 @@ static void pdc_eng_timeout(struct ata_port *ap)
866 spin_lock_irqsave(&host_set->lock, flags); 866 spin_lock_irqsave(&host_set->lock, flags);
867 867
868 qc = ata_qc_from_tag(ap, ap->active_tag); 868 qc = ata_qc_from_tag(ap, ap->active_tag);
869 if (!qc) {
870 printk(KERN_ERR "ata%u: BUG: timeout without command\n",
871 ap->id);
872 goto out;
873 }
874
875 /* hack alert! We cannot use the supplied completion
876 * function from inside the ->eh_strategy_handler() thread.
877 * libata is the only user of ->eh_strategy_handler() in
878 * any kernel, so the default scsi_done() assumes it is
879 * not being called from the SCSI EH.
880 */
881 qc->scsidone = scsi_finish_command;
882 869
883 switch (qc->tf.protocol) { 870 switch (qc->tf.protocol) {
884 case ATA_PROT_DMA: 871 case ATA_PROT_DMA:
885 case ATA_PROT_NODATA: 872 case ATA_PROT_NODATA:
886 printk(KERN_ERR "ata%u: command timeout\n", ap->id); 873 printk(KERN_ERR "ata%u: command timeout\n", ap->id);
887 qc->err_mask |= __ac_err_mask(ata_wait_idle(ap)); 874 qc->err_mask |= __ac_err_mask(ata_wait_idle(ap));
888 ata_qc_complete(qc);
889 break; 875 break;
890 876
891 default: 877 default:
@@ -895,12 +881,11 @@ static void pdc_eng_timeout(struct ata_port *ap)
895 ap->id, qc->tf.command, drv_stat); 881 ap->id, qc->tf.command, drv_stat);
896 882
897 qc->err_mask |= ac_err_mask(drv_stat); 883 qc->err_mask |= ac_err_mask(drv_stat);
898 ata_qc_complete(qc);
899 break; 884 break;
900 } 885 }
901 886
902out:
903 spin_unlock_irqrestore(&host_set->lock, flags); 887 spin_unlock_irqrestore(&host_set->lock, flags);
888 ata_eh_qc_complete(qc);
904 DPRINTK("EXIT\n"); 889 DPRINTK("EXIT\n");
905} 890}
906 891
diff --git a/drivers/scsi/sata_uli.c b/drivers/scsi/sata_uli.c
index 9635ca700977..37a487b7d655 100644
--- a/drivers/scsi/sata_uli.c
+++ b/drivers/scsi/sata_uli.c
@@ -75,11 +75,11 @@ static struct scsi_host_template uli_sht = {
75 .name = DRV_NAME, 75 .name = DRV_NAME,
76 .ioctl = ata_scsi_ioctl, 76 .ioctl = ata_scsi_ioctl,
77 .queuecommand = ata_scsi_queuecmd, 77 .queuecommand = ata_scsi_queuecmd,
78 .eh_timed_out = ata_scsi_timed_out,
78 .eh_strategy_handler = ata_scsi_error, 79 .eh_strategy_handler = ata_scsi_error,
79 .can_queue = ATA_DEF_QUEUE, 80 .can_queue = ATA_DEF_QUEUE,
80 .this_id = ATA_SHT_THIS_ID, 81 .this_id = ATA_SHT_THIS_ID,
81 .sg_tablesize = LIBATA_MAX_PRD, 82 .sg_tablesize = LIBATA_MAX_PRD,
82 .max_sectors = ATA_MAX_SECTORS,
83 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 83 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
84 .emulated = ATA_SHT_EMULATED, 84 .emulated = ATA_SHT_EMULATED,
85 .use_clustering = ATA_SHT_USE_CLUSTERING, 85 .use_clustering = ATA_SHT_USE_CLUSTERING,
diff --git a/drivers/scsi/sata_via.c b/drivers/scsi/sata_via.c
index 6d5b0a794cfd..ff65a0b0457f 100644
--- a/drivers/scsi/sata_via.c
+++ b/drivers/scsi/sata_via.c
@@ -94,11 +94,11 @@ static struct scsi_host_template svia_sht = {
94 .name = DRV_NAME, 94 .name = DRV_NAME,
95 .ioctl = ata_scsi_ioctl, 95 .ioctl = ata_scsi_ioctl,
96 .queuecommand = ata_scsi_queuecmd, 96 .queuecommand = ata_scsi_queuecmd,
97 .eh_timed_out = ata_scsi_timed_out,
97 .eh_strategy_handler = ata_scsi_error, 98 .eh_strategy_handler = ata_scsi_error,
98 .can_queue = ATA_DEF_QUEUE, 99 .can_queue = ATA_DEF_QUEUE,
99 .this_id = ATA_SHT_THIS_ID, 100 .this_id = ATA_SHT_THIS_ID,
100 .sg_tablesize = LIBATA_MAX_PRD, 101 .sg_tablesize = LIBATA_MAX_PRD,
101 .max_sectors = ATA_MAX_SECTORS,
102 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 102 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
103 .emulated = ATA_SHT_EMULATED, 103 .emulated = ATA_SHT_EMULATED,
104 .use_clustering = ATA_SHT_USE_CLUSTERING, 104 .use_clustering = ATA_SHT_USE_CLUSTERING,
diff --git a/drivers/scsi/sata_vsc.c b/drivers/scsi/sata_vsc.c
index e484e8db6810..b574379a7a82 100644
--- a/drivers/scsi/sata_vsc.c
+++ b/drivers/scsi/sata_vsc.c
@@ -251,11 +251,11 @@ static struct scsi_host_template vsc_sata_sht = {
251 .name = DRV_NAME, 251 .name = DRV_NAME,
252 .ioctl = ata_scsi_ioctl, 252 .ioctl = ata_scsi_ioctl,
253 .queuecommand = ata_scsi_queuecmd, 253 .queuecommand = ata_scsi_queuecmd,
254 .eh_timed_out = ata_scsi_timed_out,
254 .eh_strategy_handler = ata_scsi_error, 255 .eh_strategy_handler = ata_scsi_error,
255 .can_queue = ATA_DEF_QUEUE, 256 .can_queue = ATA_DEF_QUEUE,
256 .this_id = ATA_SHT_THIS_ID, 257 .this_id = ATA_SHT_THIS_ID,
257 .sg_tablesize = LIBATA_MAX_PRD, 258 .sg_tablesize = LIBATA_MAX_PRD,
258 .max_sectors = ATA_MAX_SECTORS,
259 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 259 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
260 .emulated = ATA_SHT_EMULATED, 260 .emulated = ATA_SHT_EMULATED,
261 .use_clustering = ATA_SHT_USE_CLUSTERING, 261 .use_clustering = ATA_SHT_USE_CLUSTERING,
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index ff82ccfbb106..5d169a2881b9 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -584,8 +584,7 @@ static int scsi_request_sense(struct scsi_cmnd *scmd)
584 * keep a list of pending commands for final completion, and once we 584 * keep a list of pending commands for final completion, and once we
585 * are ready to leave error handling we handle completion for real. 585 * are ready to leave error handling we handle completion for real.
586 **/ 586 **/
587static void scsi_eh_finish_cmd(struct scsi_cmnd *scmd, 587void scsi_eh_finish_cmd(struct scsi_cmnd *scmd, struct list_head *done_q)
588 struct list_head *done_q)
589{ 588{
590 scmd->device->host->host_failed--; 589 scmd->device->host->host_failed--;
591 scmd->eh_eflags = 0; 590 scmd->eh_eflags = 0;
@@ -597,6 +596,7 @@ static void scsi_eh_finish_cmd(struct scsi_cmnd *scmd,
597 scsi_setup_cmd_retry(scmd); 596 scsi_setup_cmd_retry(scmd);
598 list_move_tail(&scmd->eh_entry, done_q); 597 list_move_tail(&scmd->eh_entry, done_q);
599} 598}
599EXPORT_SYMBOL(scsi_eh_finish_cmd);
600 600
601/** 601/**
602 * scsi_eh_get_sense - Get device sense data. 602 * scsi_eh_get_sense - Get device sense data.
@@ -1425,7 +1425,7 @@ static void scsi_eh_ready_devs(struct Scsi_Host *shost,
1425 * @done_q: list_head of processed commands. 1425 * @done_q: list_head of processed commands.
1426 * 1426 *
1427 **/ 1427 **/
1428static void scsi_eh_flush_done_q(struct list_head *done_q) 1428void scsi_eh_flush_done_q(struct list_head *done_q)
1429{ 1429{
1430 struct scsi_cmnd *scmd, *next; 1430 struct scsi_cmnd *scmd, *next;
1431 1431
@@ -1454,6 +1454,7 @@ static void scsi_eh_flush_done_q(struct list_head *done_q)
1454 } 1454 }
1455 } 1455 }
1456} 1456}
1457EXPORT_SYMBOL(scsi_eh_flush_done_q);
1457 1458
1458/** 1459/**
1459 * scsi_unjam_host - Attempt to fix a host which has a cmd that failed. 1460 * scsi_unjam_host - Attempt to fix a host which has a cmd that failed.
diff --git a/include/linux/ata.h b/include/linux/ata.h
index 94f77cce27fa..b02a16c435e7 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -267,6 +267,16 @@ struct ata_taskfile {
267 ((u64) (id)[(n) + 1] << 16) | \ 267 ((u64) (id)[(n) + 1] << 16) | \
268 ((u64) (id)[(n) + 0]) ) 268 ((u64) (id)[(n) + 0]) )
269 269
270static inline unsigned int ata_id_major_version(const u16 *id)
271{
272 unsigned int mver;
273
274 for (mver = 14; mver >= 1; mver--)
275 if (id[ATA_ID_MAJOR_VER] & (1 << mver))
276 break;
277 return mver;
278}
279
270static inline int ata_id_current_chs_valid(const u16 *id) 280static inline int ata_id_current_chs_valid(const u16 *id)
271{ 281{
272 /* For ATA-1 devices, if the INITIALIZE DEVICE PARAMETERS command 282 /* For ATA-1 devices, if the INITIALIZE DEVICE PARAMETERS command
@@ -302,4 +312,16 @@ static inline int ata_ok(u8 status)
302 == ATA_DRDY); 312 == ATA_DRDY);
303} 313}
304 314
315static inline int lba_28_ok(u64 block, u32 n_block)
316{
317 /* check the ending block number */
318 return ((block + n_block - 1) < ((u64)1 << 28)) && (n_block <= 256);
319}
320
321static inline int lba_48_ok(u64 block, u32 n_block)
322{
323 /* check the ending block number */
324 return ((block + n_block - 1) < ((u64)1 << 48)) && (n_block <= 65536);
325}
326
305#endif /* __LINUX_ATA_H__ */ 327#endif /* __LINUX_ATA_H__ */
diff --git a/include/linux/libata.h b/include/linux/libata.h
index c91be5e64ede..66dce58f1941 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -35,7 +35,8 @@
35#include <linux/workqueue.h> 35#include <linux/workqueue.h>
36 36
37/* 37/*
38 * compile-time options 38 * compile-time options: to be removed as soon as all the drivers are
39 * converted to the new debugging mechanism
39 */ 40 */
40#undef ATA_DEBUG /* debugging output */ 41#undef ATA_DEBUG /* debugging output */
41#undef ATA_VERBOSE_DEBUG /* yet more debugging output */ 42#undef ATA_VERBOSE_DEBUG /* yet more debugging output */
@@ -61,15 +62,37 @@
61 62
62#define BPRINTK(fmt, args...) if (ap->flags & ATA_FLAG_DEBUGMSG) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args) 63#define BPRINTK(fmt, args...) if (ap->flags & ATA_FLAG_DEBUGMSG) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args)
63 64
64#ifdef ATA_NDEBUG 65/* NEW: debug levels */
65#define assert(expr) 66#define HAVE_LIBATA_MSG 1
66#else 67
67#define assert(expr) \ 68enum {
68 if(unlikely(!(expr))) { \ 69 ATA_MSG_DRV = 0x0001,
69 printk(KERN_ERR "Assertion failed! %s,%s,%s,line=%d\n", \ 70 ATA_MSG_INFO = 0x0002,
70 #expr,__FILE__,__FUNCTION__,__LINE__); \ 71 ATA_MSG_PROBE = 0x0004,
71 } 72 ATA_MSG_WARN = 0x0008,
72#endif 73 ATA_MSG_MALLOC = 0x0010,
74 ATA_MSG_CTL = 0x0020,
75 ATA_MSG_INTR = 0x0040,
76 ATA_MSG_ERR = 0x0080,
77};
78
79#define ata_msg_drv(p) ((p)->msg_enable & ATA_MSG_DRV)
80#define ata_msg_info(p) ((p)->msg_enable & ATA_MSG_INFO)
81#define ata_msg_probe(p) ((p)->msg_enable & ATA_MSG_PROBE)
82#define ata_msg_warn(p) ((p)->msg_enable & ATA_MSG_WARN)
83#define ata_msg_malloc(p) ((p)->msg_enable & ATA_MSG_MALLOC)
84#define ata_msg_ctl(p) ((p)->msg_enable & ATA_MSG_CTL)
85#define ata_msg_intr(p) ((p)->msg_enable & ATA_MSG_INTR)
86#define ata_msg_err(p) ((p)->msg_enable & ATA_MSG_ERR)
87
88static inline u32 ata_msg_init(int dval, int default_msg_enable_bits)
89{
90 if (dval < 0 || dval >= (sizeof(u32) * 8))
91 return default_msg_enable_bits; /* should be 0x1 - only driver info msgs */
92 if (!dval)
93 return 0;
94 return (1 << dval) - 1;
95}
73 96
74/* defines only for the constants which don't work well as enums */ 97/* defines only for the constants which don't work well as enums */
75#define ATA_TAG_POISON 0xfafbfcfdU 98#define ATA_TAG_POISON 0xfafbfcfdU
@@ -99,8 +122,7 @@ enum {
99 /* struct ata_device stuff */ 122 /* struct ata_device stuff */
100 ATA_DFLAG_LBA48 = (1 << 0), /* device supports LBA48 */ 123 ATA_DFLAG_LBA48 = (1 << 0), /* device supports LBA48 */
101 ATA_DFLAG_PIO = (1 << 1), /* device currently in PIO mode */ 124 ATA_DFLAG_PIO = (1 << 1), /* device currently in PIO mode */
102 ATA_DFLAG_LOCK_SECTORS = (1 << 2), /* don't adjust max_sectors */ 125 ATA_DFLAG_LBA = (1 << 2), /* device supports LBA */
103 ATA_DFLAG_LBA = (1 << 3), /* device supports LBA */
104 126
105 ATA_DEV_UNKNOWN = 0, /* unknown device */ 127 ATA_DEV_UNKNOWN = 0, /* unknown device */
106 ATA_DEV_ATA = 1, /* ATA device */ 128 ATA_DEV_ATA = 1, /* ATA device */
@@ -115,9 +137,9 @@ enum {
115 ATA_FLAG_PORT_DISABLED = (1 << 2), /* port is disabled, ignore it */ 137 ATA_FLAG_PORT_DISABLED = (1 << 2), /* port is disabled, ignore it */
116 ATA_FLAG_SATA = (1 << 3), 138 ATA_FLAG_SATA = (1 << 3),
117 ATA_FLAG_NO_LEGACY = (1 << 4), /* no legacy mode check */ 139 ATA_FLAG_NO_LEGACY = (1 << 4), /* no legacy mode check */
118 ATA_FLAG_SRST = (1 << 5), /* use ATA SRST, not E.D.D. */ 140 ATA_FLAG_SRST = (1 << 5), /* (obsolete) use ATA SRST, not E.D.D. */
119 ATA_FLAG_MMIO = (1 << 6), /* use MMIO, not PIO */ 141 ATA_FLAG_MMIO = (1 << 6), /* use MMIO, not PIO */
120 ATA_FLAG_SATA_RESET = (1 << 7), /* use COMRESET */ 142 ATA_FLAG_SATA_RESET = (1 << 7), /* (obsolete) use COMRESET */
121 ATA_FLAG_PIO_DMA = (1 << 8), /* PIO cmds via DMA */ 143 ATA_FLAG_PIO_DMA = (1 << 8), /* PIO cmds via DMA */
122 ATA_FLAG_NOINTR = (1 << 9), /* FIXME: Remove this once 144 ATA_FLAG_NOINTR = (1 << 9), /* FIXME: Remove this once
123 * proper HSM is in place. */ 145 * proper HSM is in place. */
@@ -129,10 +151,14 @@ enum {
129 ATA_FLAG_PIO_LBA48 = (1 << 13), /* Host DMA engine is LBA28 only */ 151 ATA_FLAG_PIO_LBA48 = (1 << 13), /* Host DMA engine is LBA28 only */
130 ATA_FLAG_IRQ_MASK = (1 << 14), /* Mask IRQ in PIO xfers */ 152 ATA_FLAG_IRQ_MASK = (1 << 14), /* Mask IRQ in PIO xfers */
131 153
154 ATA_FLAG_FLUSH_PIO_TASK = (1 << 15), /* Flush PIO task */
155 ATA_FLAG_IN_EH = (1 << 16), /* EH in progress */
156
132 ATA_QCFLAG_ACTIVE = (1 << 1), /* cmd not yet ack'd to scsi lyer */ 157 ATA_QCFLAG_ACTIVE = (1 << 1), /* cmd not yet ack'd to scsi lyer */
133 ATA_QCFLAG_SG = (1 << 3), /* have s/g table? */ 158 ATA_QCFLAG_SG = (1 << 3), /* have s/g table? */
134 ATA_QCFLAG_SINGLE = (1 << 4), /* no s/g, just a single buffer */ 159 ATA_QCFLAG_SINGLE = (1 << 4), /* no s/g, just a single buffer */
135 ATA_QCFLAG_DMAMAP = ATA_QCFLAG_SG | ATA_QCFLAG_SINGLE, 160 ATA_QCFLAG_DMAMAP = ATA_QCFLAG_SG | ATA_QCFLAG_SINGLE,
161 ATA_QCFLAG_EH_SCHEDULED = (1 << 5), /* EH scheduled */
136 162
137 /* various lengths of time */ 163 /* various lengths of time */
138 ATA_TMOUT_EDD = 5 * HZ, /* heuristic */ 164 ATA_TMOUT_EDD = 5 * HZ, /* heuristic */
@@ -189,10 +215,15 @@ enum hsm_task_states {
189}; 215};
190 216
191enum ata_completion_errors { 217enum ata_completion_errors {
192 AC_ERR_OTHER = (1 << 0), 218 AC_ERR_DEV = (1 << 0), /* device reported error */
193 AC_ERR_DEV = (1 << 1), 219 AC_ERR_HSM = (1 << 1), /* host state machine violation */
194 AC_ERR_ATA_BUS = (1 << 2), 220 AC_ERR_TIMEOUT = (1 << 2), /* timeout */
195 AC_ERR_HOST_BUS = (1 << 3), 221 AC_ERR_MEDIA = (1 << 3), /* media error */
222 AC_ERR_ATA_BUS = (1 << 4), /* ATA bus error */
223 AC_ERR_HOST_BUS = (1 << 5), /* host bus error */
224 AC_ERR_SYSTEM = (1 << 6), /* system error */
225 AC_ERR_INVALID = (1 << 7), /* invalid argument */
226 AC_ERR_OTHER = (1 << 8), /* unknown */
196}; 227};
197 228
198/* forward declarations */ 229/* forward declarations */
@@ -202,7 +233,10 @@ struct ata_port;
202struct ata_queued_cmd; 233struct ata_queued_cmd;
203 234
204/* typedefs */ 235/* typedefs */
205typedef int (*ata_qc_cb_t) (struct ata_queued_cmd *qc); 236typedef void (*ata_qc_cb_t) (struct ata_queued_cmd *qc);
237typedef void (*ata_probeinit_fn_t)(struct ata_port *);
238typedef int (*ata_reset_fn_t)(struct ata_port *, int, unsigned int *);
239typedef void (*ata_postreset_fn_t)(struct ata_port *ap, unsigned int *);
206 240
207struct ata_ioports { 241struct ata_ioports {
208 unsigned long cmd_addr; 242 unsigned long cmd_addr;
@@ -305,7 +339,7 @@ struct ata_device {
305 unsigned long flags; /* ATA_DFLAG_xxx */ 339 unsigned long flags; /* ATA_DFLAG_xxx */
306 unsigned int class; /* ATA_DEV_xxx */ 340 unsigned int class; /* ATA_DEV_xxx */
307 unsigned int devno; /* 0 or 1 */ 341 unsigned int devno; /* 0 or 1 */
308 u16 id[ATA_ID_WORDS]; /* IDENTIFY xxx DEVICE data */ 342 u16 *id; /* IDENTIFY xxx DEVICE data */
309 u8 pio_mode; 343 u8 pio_mode;
310 u8 dma_mode; 344 u8 dma_mode;
311 u8 xfer_mode; 345 u8 xfer_mode;
@@ -313,6 +347,8 @@ struct ata_device {
313 347
314 unsigned int multi_count; /* sectors count for 348 unsigned int multi_count; /* sectors count for
315 READ/WRITE MULTIPLE */ 349 READ/WRITE MULTIPLE */
350 unsigned int max_sectors; /* per-device max sectors */
351 unsigned int cdb_len;
316 352
317 /* for CHS addressing */ 353 /* for CHS addressing */
318 u16 cylinders; /* Number of cylinders */ 354 u16 cylinders; /* Number of cylinders */
@@ -342,7 +378,6 @@ struct ata_port {
342 unsigned int mwdma_mask; 378 unsigned int mwdma_mask;
343 unsigned int udma_mask; 379 unsigned int udma_mask;
344 unsigned int cbl; /* cable type; ATA_CBL_xxx */ 380 unsigned int cbl; /* cable type; ATA_CBL_xxx */
345 unsigned int cdb_len;
346 381
347 struct ata_device device[ATA_MAX_DEVICES]; 382 struct ata_device device[ATA_MAX_DEVICES];
348 383
@@ -359,6 +394,9 @@ struct ata_port {
359 unsigned int hsm_task_state; 394 unsigned int hsm_task_state;
360 unsigned long pio_task_timeout; 395 unsigned long pio_task_timeout;
361 396
397 u32 msg_enable;
398 struct list_head eh_done_q;
399
362 void *private_data; 400 void *private_data;
363}; 401};
364 402
@@ -378,7 +416,9 @@ struct ata_port_operations {
378 u8 (*check_altstatus)(struct ata_port *ap); 416 u8 (*check_altstatus)(struct ata_port *ap);
379 void (*dev_select)(struct ata_port *ap, unsigned int device); 417 void (*dev_select)(struct ata_port *ap, unsigned int device);
380 418
381 void (*phy_reset) (struct ata_port *ap); 419 void (*phy_reset) (struct ata_port *ap); /* obsolete */
420 int (*probe_reset) (struct ata_port *ap, unsigned int *classes);
421
382 void (*post_set_mode) (struct ata_port *ap); 422 void (*post_set_mode) (struct ata_port *ap);
383 423
384 int (*check_atapi_dma) (struct ata_queued_cmd *qc); 424 int (*check_atapi_dma) (struct ata_queued_cmd *qc);
@@ -387,7 +427,7 @@ struct ata_port_operations {
387 void (*bmdma_start) (struct ata_queued_cmd *qc); 427 void (*bmdma_start) (struct ata_queued_cmd *qc);
388 428
389 void (*qc_prep) (struct ata_queued_cmd *qc); 429 void (*qc_prep) (struct ata_queued_cmd *qc);
390 int (*qc_issue) (struct ata_queued_cmd *qc); 430 unsigned int (*qc_issue) (struct ata_queued_cmd *qc);
391 431
392 void (*eng_timeout) (struct ata_port *ap); 432 void (*eng_timeout) (struct ata_port *ap);
393 433
@@ -435,6 +475,18 @@ extern void ata_port_probe(struct ata_port *);
435extern void __sata_phy_reset(struct ata_port *ap); 475extern void __sata_phy_reset(struct ata_port *ap);
436extern void sata_phy_reset(struct ata_port *ap); 476extern void sata_phy_reset(struct ata_port *ap);
437extern void ata_bus_reset(struct ata_port *ap); 477extern void ata_bus_reset(struct ata_port *ap);
478extern int ata_drive_probe_reset(struct ata_port *ap,
479 ata_probeinit_fn_t probeinit,
480 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
481 ata_postreset_fn_t postreset, unsigned int *classes);
482extern void ata_std_probeinit(struct ata_port *ap);
483extern int ata_std_softreset(struct ata_port *ap, int verbose,
484 unsigned int *classes);
485extern int sata_std_hardreset(struct ata_port *ap, int verbose,
486 unsigned int *class);
487extern void ata_std_postreset(struct ata_port *ap, unsigned int *classes);
488extern int ata_dev_revalidate(struct ata_port *ap, struct ata_device *dev,
489 int post_reset);
438extern void ata_port_disable(struct ata_port *); 490extern void ata_port_disable(struct ata_port *);
439extern void ata_std_ports(struct ata_ioports *ioaddr); 491extern void ata_std_ports(struct ata_ioports *ioaddr);
440#ifdef CONFIG_PCI 492#ifdef CONFIG_PCI
@@ -449,7 +501,10 @@ extern void ata_host_set_remove(struct ata_host_set *host_set);
449extern int ata_scsi_detect(struct scsi_host_template *sht); 501extern int ata_scsi_detect(struct scsi_host_template *sht);
450extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg); 502extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
451extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)); 503extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
504extern enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd);
452extern int ata_scsi_error(struct Scsi_Host *host); 505extern int ata_scsi_error(struct Scsi_Host *host);
506extern void ata_eh_qc_complete(struct ata_queued_cmd *qc);
507extern void ata_eh_qc_retry(struct ata_queued_cmd *qc);
453extern int ata_scsi_release(struct Scsi_Host *host); 508extern int ata_scsi_release(struct Scsi_Host *host);
454extern unsigned int ata_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc); 509extern unsigned int ata_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc);
455extern int ata_scsi_device_resume(struct scsi_device *); 510extern int ata_scsi_device_resume(struct scsi_device *);
@@ -457,6 +512,9 @@ extern int ata_scsi_device_suspend(struct scsi_device *);
457extern int ata_device_resume(struct ata_port *, struct ata_device *); 512extern int ata_device_resume(struct ata_port *, struct ata_device *);
458extern int ata_device_suspend(struct ata_port *, struct ata_device *); 513extern int ata_device_suspend(struct ata_port *, struct ata_device *);
459extern int ata_ratelimit(void); 514extern int ata_ratelimit(void);
515extern unsigned int ata_busy_sleep(struct ata_port *ap,
516 unsigned long timeout_pat,
517 unsigned long timeout);
460 518
461/* 519/*
462 * Default driver ops implementations 520 * Default driver ops implementations
@@ -470,26 +528,28 @@ extern void ata_std_dev_select (struct ata_port *ap, unsigned int device);
470extern u8 ata_check_status(struct ata_port *ap); 528extern u8 ata_check_status(struct ata_port *ap);
471extern u8 ata_altstatus(struct ata_port *ap); 529extern u8 ata_altstatus(struct ata_port *ap);
472extern void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf); 530extern void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf);
531extern int ata_std_probe_reset(struct ata_port *ap, unsigned int *classes);
473extern int ata_port_start (struct ata_port *ap); 532extern int ata_port_start (struct ata_port *ap);
474extern void ata_port_stop (struct ata_port *ap); 533extern void ata_port_stop (struct ata_port *ap);
475extern void ata_host_stop (struct ata_host_set *host_set); 534extern void ata_host_stop (struct ata_host_set *host_set);
476extern irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs); 535extern irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs);
477extern void ata_qc_prep(struct ata_queued_cmd *qc); 536extern void ata_qc_prep(struct ata_queued_cmd *qc);
478extern int ata_qc_issue_prot(struct ata_queued_cmd *qc); 537extern unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc);
479extern void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, 538extern void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf,
480 unsigned int buflen); 539 unsigned int buflen);
481extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg, 540extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
482 unsigned int n_elem); 541 unsigned int n_elem);
483extern unsigned int ata_dev_classify(const struct ata_taskfile *tf); 542extern unsigned int ata_dev_classify(const struct ata_taskfile *tf);
484extern void ata_dev_id_string(const u16 *id, unsigned char *s, 543extern void ata_id_string(const u16 *id, unsigned char *s,
485 unsigned int ofs, unsigned int len); 544 unsigned int ofs, unsigned int len);
486extern void ata_dev_config(struct ata_port *ap, unsigned int i); 545extern void ata_id_c_string(const u16 *id, unsigned char *s,
546 unsigned int ofs, unsigned int len);
487extern void ata_bmdma_setup (struct ata_queued_cmd *qc); 547extern void ata_bmdma_setup (struct ata_queued_cmd *qc);
488extern void ata_bmdma_start (struct ata_queued_cmd *qc); 548extern void ata_bmdma_start (struct ata_queued_cmd *qc);
489extern void ata_bmdma_stop(struct ata_queued_cmd *qc); 549extern void ata_bmdma_stop(struct ata_queued_cmd *qc);
490extern u8 ata_bmdma_status(struct ata_port *ap); 550extern u8 ata_bmdma_status(struct ata_port *ap);
491extern void ata_bmdma_irq_clear(struct ata_port *ap); 551extern void ata_bmdma_irq_clear(struct ata_port *ap);
492extern void ata_qc_complete(struct ata_queued_cmd *qc); 552extern void __ata_qc_complete(struct ata_queued_cmd *qc);
493extern void ata_eng_timeout(struct ata_port *ap); 553extern void ata_eng_timeout(struct ata_port *ap);
494extern void ata_scsi_simulate(struct ata_port *ap, struct ata_device *dev, 554extern void ata_scsi_simulate(struct ata_port *ap, struct ata_device *dev,
495 struct scsi_cmnd *cmd, 555 struct scsi_cmnd *cmd,
@@ -586,10 +646,14 @@ static inline unsigned int ata_tag_valid(unsigned int tag)
586 return (tag < ATA_MAX_QUEUE) ? 1 : 0; 646 return (tag < ATA_MAX_QUEUE) ? 1 : 0;
587} 647}
588 648
649static inline unsigned int ata_class_present(unsigned int class)
650{
651 return class == ATA_DEV_ATA || class == ATA_DEV_ATAPI;
652}
653
589static inline unsigned int ata_dev_present(const struct ata_device *dev) 654static inline unsigned int ata_dev_present(const struct ata_device *dev)
590{ 655{
591 return ((dev->class == ATA_DEV_ATA) || 656 return ata_class_present(dev->class);
592 (dev->class == ATA_DEV_ATAPI));
593} 657}
594 658
595static inline u8 ata_chk_status(struct ata_port *ap) 659static inline u8 ata_chk_status(struct ata_port *ap)
@@ -657,9 +721,9 @@ static inline u8 ata_wait_idle(struct ata_port *ap)
657 721
658 if (status & (ATA_BUSY | ATA_DRQ)) { 722 if (status & (ATA_BUSY | ATA_DRQ)) {
659 unsigned long l = ap->ioaddr.status_addr; 723 unsigned long l = ap->ioaddr.status_addr;
660 printk(KERN_WARNING 724 if (ata_msg_warn(ap))
661 "ATA: abnormal status 0x%X on port 0x%lX\n", 725 printk(KERN_WARNING "ATA: abnormal status 0x%X on port 0x%lX\n",
662 status, l); 726 status, l);
663 } 727 }
664 728
665 return status; 729 return status;
@@ -701,6 +765,24 @@ static inline void ata_qc_reinit(struct ata_queued_cmd *qc)
701 ata_tf_init(qc->ap, &qc->tf, qc->dev->devno); 765 ata_tf_init(qc->ap, &qc->tf, qc->dev->devno);
702} 766}
703 767
768/**
769 * ata_qc_complete - Complete an active ATA command
770 * @qc: Command to complete
771 * @err_mask: ATA Status register contents
772 *
773 * Indicate to the mid and upper layers that an ATA
774 * command has completed, with either an ok or not-ok status.
775 *
776 * LOCKING:
777 * spin_lock_irqsave(host_set lock)
778 */
779static inline void ata_qc_complete(struct ata_queued_cmd *qc)
780{
781 if (unlikely(qc->flags & ATA_QCFLAG_EH_SCHEDULED))
782 return;
783
784 __ata_qc_complete(qc);
785}
704 786
705/** 787/**
706 * ata_irq_on - Enable interrupts on a port. 788 * ata_irq_on - Enable interrupts on a port.
@@ -751,7 +833,8 @@ static inline u8 ata_irq_ack(struct ata_port *ap, unsigned int chk_drq)
751 833
752 status = ata_busy_wait(ap, bits, 1000); 834 status = ata_busy_wait(ap, bits, 1000);
753 if (status & bits) 835 if (status & bits)
754 DPRINTK("abnormal status 0x%X\n", status); 836 if (ata_msg_err(ap))
837 printk(KERN_ERR "abnormal status 0x%X\n", status);
755 838
756 /* get controller status; clear intr, err bits */ 839 /* get controller status; clear intr, err bits */
757 if (ap->flags & ATA_FLAG_MMIO) { 840 if (ap->flags & ATA_FLAG_MMIO) {
@@ -769,8 +852,10 @@ static inline u8 ata_irq_ack(struct ata_port *ap, unsigned int chk_drq)
769 post_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); 852 post_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
770 } 853 }
771 854
772 VPRINTK("irq ack: host_stat 0x%X, new host_stat 0x%X, drv_stat 0x%X\n", 855 if (ata_msg_intr(ap))
773 host_stat, post_stat, status); 856 printk(KERN_INFO "%s: irq ack: host_stat 0x%X, new host_stat 0x%X, drv_stat 0x%X\n",
857 __FUNCTION__,
858 host_stat, post_stat, status);
774 859
775 return status; 860 return status;
776} 861}
@@ -807,7 +892,7 @@ static inline int ata_try_flush_cache(const struct ata_device *dev)
807static inline unsigned int ac_err_mask(u8 status) 892static inline unsigned int ac_err_mask(u8 status)
808{ 893{
809 if (status & ATA_BUSY) 894 if (status & ATA_BUSY)
810 return AC_ERR_ATA_BUS; 895 return AC_ERR_HSM;
811 if (status & (ATA_ERR | ATA_DF)) 896 if (status & (ATA_ERR | ATA_DF))
812 return AC_ERR_DEV; 897 return AC_ERR_DEV;
813 return 0; 898 return 0;
diff --git a/include/scsi/scsi_eh.h b/include/scsi/scsi_eh.h
index fabd879c2f2e..d160880b2a87 100644
--- a/include/scsi/scsi_eh.h
+++ b/include/scsi/scsi_eh.h
@@ -35,6 +35,9 @@ static inline int scsi_sense_valid(struct scsi_sense_hdr *sshdr)
35} 35}
36 36
37 37
38extern void scsi_eh_finish_cmd(struct scsi_cmnd *scmd,
39 struct list_head *done_q);
40extern void scsi_eh_flush_done_q(struct list_head *done_q);
38extern void scsi_report_bus_reset(struct Scsi_Host *, int); 41extern void scsi_report_bus_reset(struct Scsi_Host *, int);
39extern void scsi_report_device_reset(struct Scsi_Host *, int, int); 42extern void scsi_report_device_reset(struct Scsi_Host *, int, int);
40extern int scsi_block_when_processing_errors(struct scsi_device *); 43extern int scsi_block_when_processing_errors(struct scsi_device *);