Diffstat (limited to 'drivers/scsi')
-rw-r--r--  drivers/scsi/Makefile          2
-rw-r--r--  drivers/scsi/ahci.c          196
-rw-r--r--  drivers/scsi/ata_piix.c      392
-rw-r--r--  drivers/scsi/libata-bmdma.c  703
-rw-r--r--  drivers/scsi/libata-core.c  2763
-rw-r--r--  drivers/scsi/libata-scsi.c   240
-rw-r--r--  drivers/scsi/libata.h          3
-rw-r--r--  drivers/scsi/pdc_adma.c        6
-rw-r--r--  drivers/scsi/sata_mv.c       279
-rw-r--r--  drivers/scsi/sata_nv.c         2
-rw-r--r--  drivers/scsi/sata_promise.c  129
-rw-r--r--  drivers/scsi/sata_qstor.c     10
-rw-r--r--  drivers/scsi/sata_sil.c      126
-rw-r--r--  drivers/scsi/sata_sil24.c     90
-rw-r--r--  drivers/scsi/sata_sis.c        2
-rw-r--r--  drivers/scsi/sata_svw.c        2
-rw-r--r--  drivers/scsi/sata_sx4.c       25
-rw-r--r--  drivers/scsi/sata_uli.c        2
-rw-r--r--  drivers/scsi/sata_via.c        2
-rw-r--r--  drivers/scsi/sata_vsc.c        2
-rw-r--r--  drivers/scsi/scsi_error.c      7
21 files changed, 3027 insertions(+), 1956 deletions(-)
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 320e765fa0cd..15dc2e00e1b2 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -163,7 +163,7 @@ ncr53c8xx-flags-$(CONFIG_SCSI_ZALON) \
 CFLAGS_ncr53c8xx.o	:= $(ncr53c8xx-flags-y) $(ncr53c8xx-flags-m)
 zalon7xx-objs		:= zalon.o ncr53c8xx.o
 NCR_Q720_mod-objs	:= NCR_Q720.o ncr53c8xx.o
-libata-objs	:= libata-core.o libata-scsi.o
+libata-objs	:= libata-core.o libata-scsi.o libata-bmdma.o
 oktagon_esp_mod-objs	:= oktagon_esp.o oktagon_io.o

 # Files generated that shall be removed upon make clean
diff --git a/drivers/scsi/ahci.c b/drivers/scsi/ahci.c
index 559ff7aae3f1..00dfdefe2967 100644
--- a/drivers/scsi/ahci.c
+++ b/drivers/scsi/ahci.c
@@ -66,6 +66,8 @@ enum {
 	AHCI_IRQ_ON_SG		= (1 << 31),
 	AHCI_CMD_ATAPI		= (1 << 5),
 	AHCI_CMD_WRITE		= (1 << 6),
+	AHCI_CMD_RESET		= (1 << 8),
+	AHCI_CMD_CLR_BUSY	= (1 << 10),

 	RX_FIS_D2H_REG		= 0x40,	/* offset of D2H Register FIS data */

@@ -85,6 +87,7 @@ enum {

 	/* HOST_CAP bits */
 	HOST_CAP_64		= (1 << 31), /* PCI DAC (64-bit DMA) support */
+	HOST_CAP_CLO		= (1 << 24), /* Command List Override support */

 	/* registers for each SATA port */
 	PORT_LST_ADDR		= 0x00, /* command list DMA addr */
@@ -138,6 +141,7 @@ enum {
 	PORT_CMD_LIST_ON	= (1 << 15), /* cmd list DMA engine running */
 	PORT_CMD_FIS_ON		= (1 << 14), /* FIS DMA engine running */
 	PORT_CMD_FIS_RX		= (1 << 4), /* Enable FIS receive DMA engine */
+	PORT_CMD_CLO		= (1 << 3), /* Command list override */
 	PORT_CMD_POWER_ON	= (1 << 2), /* Power up device */
 	PORT_CMD_SPIN_UP	= (1 << 1), /* Spin up device */
 	PORT_CMD_START		= (1 << 0), /* Enable port DMA engine */
@@ -184,9 +188,9 @@ struct ahci_port_priv {
 static u32 ahci_scr_read (struct ata_port *ap, unsigned int sc_reg);
 static void ahci_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
 static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
-static int ahci_qc_issue(struct ata_queued_cmd *qc);
+static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
 static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *regs);
-static void ahci_phy_reset(struct ata_port *ap);
+static int ahci_probe_reset(struct ata_port *ap, unsigned int *classes);
 static void ahci_irq_clear(struct ata_port *ap);
 static void ahci_eng_timeout(struct ata_port *ap);
 static int ahci_port_start(struct ata_port *ap);
@@ -202,11 +206,11 @@ static struct scsi_host_template ahci_sht = {
 	.name			= DRV_NAME,
 	.ioctl			= ata_scsi_ioctl,
 	.queuecommand		= ata_scsi_queuecmd,
+	.eh_timed_out		= ata_scsi_timed_out,
 	.eh_strategy_handler	= ata_scsi_error,
 	.can_queue		= ATA_DEF_QUEUE,
 	.this_id		= ATA_SHT_THIS_ID,
 	.sg_tablesize		= AHCI_MAX_SG,
-	.max_sectors		= ATA_MAX_SECTORS,
 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
 	.emulated		= ATA_SHT_EMULATED,
 	.use_clustering		= AHCI_USE_CLUSTERING,
@@ -225,7 +229,7 @@ static const struct ata_port_operations ahci_ops = {

 	.tf_read		= ahci_tf_read,

-	.phy_reset		= ahci_phy_reset,
+	.probe_reset		= ahci_probe_reset,

 	.qc_prep		= ahci_qc_prep,
 	.qc_issue		= ahci_qc_issue,
@@ -247,8 +251,7 @@ static const struct ata_port_info ahci_port_info[] = {
 	{
 		.sht		= &ahci_sht,
 		.host_flags	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
-				  ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
-				  ATA_FLAG_PIO_DMA,
+				  ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
 		.pio_mask	= 0x1f, /* pio0-4 */
 		.udma_mask	= 0x7f, /* udma0-6 ; FIXME */
 		.port_ops	= &ahci_ops,
@@ -450,17 +453,48 @@ static void ahci_scr_write (struct ata_port *ap, unsigned int sc_reg_in,
 	writel(val, (void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
 }

-static void ahci_phy_reset(struct ata_port *ap)
+static int ahci_stop_engine(struct ata_port *ap)
 {
-	void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
-	struct ata_taskfile tf;
-	struct ata_device *dev = &ap->device[0];
-	u32 new_tmp, tmp;
+	void __iomem *mmio = ap->host_set->mmio_base;
+	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
+	int work;
+	u32 tmp;

-	__sata_phy_reset(ap);
+	tmp = readl(port_mmio + PORT_CMD);
+	tmp &= ~PORT_CMD_START;
+	writel(tmp, port_mmio + PORT_CMD);

-	if (ap->flags & ATA_FLAG_PORT_DISABLED)
-		return;
+	/* wait for engine to stop.  TODO: this could be
+	 * as long as 500 msec
+	 */
+	work = 1000;
+	while (work-- > 0) {
+		tmp = readl(port_mmio + PORT_CMD);
+		if ((tmp & PORT_CMD_LIST_ON) == 0)
+			return 0;
+		udelay(10);
+	}
+
+	return -EIO;
+}
+
+static void ahci_start_engine(struct ata_port *ap)
+{
+	void __iomem *mmio = ap->host_set->mmio_base;
+	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
+	u32 tmp;
+
+	tmp = readl(port_mmio + PORT_CMD);
+	tmp |= PORT_CMD_START;
+	writel(tmp, port_mmio + PORT_CMD);
+	readl(port_mmio + PORT_CMD); /* flush */
+}
+
+static unsigned int ahci_dev_classify(struct ata_port *ap)
+{
+	void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
+	struct ata_taskfile tf;
+	u32 tmp;

 	tmp = readl(port_mmio + PORT_SIG);
 	tf.lbah = (tmp >> 24) & 0xff;
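Note: ahci_stop_engine() clears PORT_CMD_START and then polls for PORT_CMD_LIST_ON to drop; per the comment carried over from the old inline code, the engine may legitimately take up to 500 msec to stop while the loop only waits 1000 x udelay(10), roughly 10 msec, hence the TODO. Judging from the hunks that follow, the two helpers are meant to bracket any reset or reprogramming of the port:

	/* usage pattern, condensed from ahci_hardreset() and
	 * ahci_restart_port() below */
	ahci_stop_engine(ap);		/* halt the port DMA engine */
	/* ... reset / reprogram the port ... */
	ahci_start_engine(ap);		/* set PORT_CMD_START again */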
@@ -468,15 +502,46 @@ static void ahci_phy_reset(struct ata_port *ap)
 	tf.lbal = (tmp >> 8) & 0xff;
 	tf.nsect = (tmp) & 0xff;

-	dev->class = ata_dev_classify(&tf);
-	if (!ata_dev_present(dev)) {
-		ata_port_disable(ap);
-		return;
-	}
+	return ata_dev_classify(&tf);
+}
+
+static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, u32 opts)
+{
+	pp->cmd_slot[0].opts = cpu_to_le32(opts);
+	pp->cmd_slot[0].status = 0;
+	pp->cmd_slot[0].tbl_addr = cpu_to_le32(pp->cmd_tbl_dma & 0xffffffff);
+	pp->cmd_slot[0].tbl_addr_hi = cpu_to_le32((pp->cmd_tbl_dma >> 16) >> 16);
+}
+
+static int ahci_hardreset(struct ata_port *ap, int verbose, unsigned int *class)
+{
+	int rc;
+
+	DPRINTK("ENTER\n");
+
+	ahci_stop_engine(ap);
+	rc = sata_std_hardreset(ap, verbose, class);
+	ahci_start_engine(ap);
+
+	if (rc == 0)
+		*class = ahci_dev_classify(ap);
+	if (*class == ATA_DEV_UNKNOWN)
+		*class = ATA_DEV_NONE;
+
+	DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
+	return rc;
+}
+
+static void ahci_postreset(struct ata_port *ap, unsigned int *class)
+{
+	void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
+	u32 new_tmp, tmp;
+
+	ata_std_postreset(ap, class);

 	/* Make sure port's ATAPI bit is set appropriately */
 	new_tmp = tmp = readl(port_mmio + PORT_CMD);
-	if (dev->class == ATA_DEV_ATAPI)
+	if (*class == ATA_DEV_ATAPI)
 		new_tmp |= PORT_CMD_ATAPI;
 	else
 		new_tmp &= ~PORT_CMD_ATAPI;
@@ -486,6 +551,12 @@ static void ahci_phy_reset(struct ata_port *ap)
 	}
 }

+static int ahci_probe_reset(struct ata_port *ap, unsigned int *classes)
+{
+	return ata_drive_probe_reset(ap, NULL, NULL, ahci_hardreset,
+				     ahci_postreset, classes);
+}
+
 static u8 ahci_check_status(struct ata_port *ap)
 {
 	void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr;
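Note: this patch replaces the driver-specific ->phy_reset hook with the new ->probe_reset framework throughout. ata_drive_probe_reset() runs an optional probeinit step, then softreset and/or hardreset, then postreset, and records the resulting device classes. A driver supplies only the steps it implements and passes NULL for the rest; a rough sketch (the xxx_* names are placeholders):

	static int xxx_probe_reset(struct ata_port *ap, unsigned int *classes)
	{
		/* AHCI above passes NULL probeinit and NULL softreset,
		 * relying on hardreset plus its own postreset */
		return ata_drive_probe_reset(ap, xxx_probeinit,
					     xxx_softreset, xxx_hardreset,
					     xxx_postreset, classes);
	}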
@@ -533,42 +604,36 @@ static void ahci_qc_prep(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
 	struct ahci_port_priv *pp = ap->private_data;
+	int is_atapi = is_atapi_taskfile(&qc->tf);
 	u32 opts;
 	const u32 cmd_fis_len = 5; /* five dwords */
 	unsigned int n_elem;

 	/*
-	 * Fill in command slot information (currently only one slot,
-	 * slot 0, is currently since we don't do queueing)
-	 */
-
-	opts = cmd_fis_len;
-	if (qc->tf.flags & ATA_TFLAG_WRITE)
-		opts |= AHCI_CMD_WRITE;
-	if (is_atapi_taskfile(&qc->tf))
-		opts |= AHCI_CMD_ATAPI;
-
-	pp->cmd_slot[0].opts = cpu_to_le32(opts);
-	pp->cmd_slot[0].status = 0;
-	pp->cmd_slot[0].tbl_addr = cpu_to_le32(pp->cmd_tbl_dma & 0xffffffff);
-	pp->cmd_slot[0].tbl_addr_hi = cpu_to_le32((pp->cmd_tbl_dma >> 16) >> 16);
-
-	/*
 	 * Fill in command table information.  First, the header,
 	 * a SATA Register - Host to Device command FIS.
 	 */
 	ata_tf_to_fis(&qc->tf, pp->cmd_tbl, 0);
-	if (opts & AHCI_CMD_ATAPI) {
+	if (is_atapi) {
 		memset(pp->cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
-		memcpy(pp->cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, ap->cdb_len);
+		memcpy(pp->cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb,
+		       qc->dev->cdb_len);
 	}

-	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
-		return;
+	n_elem = 0;
+	if (qc->flags & ATA_QCFLAG_DMAMAP)
+		n_elem = ahci_fill_sg(qc);

-	n_elem = ahci_fill_sg(qc);
+	/*
+	 * Fill in command slot information.
+	 */
+	opts = cmd_fis_len | n_elem << 16;
+	if (qc->tf.flags & ATA_TFLAG_WRITE)
+		opts |= AHCI_CMD_WRITE;
+	if (is_atapi)
+		opts |= AHCI_CMD_ATAPI;

-	pp->cmd_slot[0].opts |= cpu_to_le32(n_elem << 16);
+	ahci_fill_cmd_slot(pp, opts);
 }

 static void ahci_restart_port(struct ata_port *ap, u32 irq_stat)
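Note: the opts value built in ahci_qc_prep() is DW0 of the command-list slot. Matching the constants this patch defines, the layout is: bits 4:0 command-FIS length in dwords (cmd_fis_len = 5), bit 5 ATAPI, bit 6 write direction, bit 8 reset, bit 10 clear-busy, and bits 31:16 the PRD (scatter/gather) entry count, which is why n_elem is shifted left by 16. Reordering the function so opts is computed after the S/G fill lets the slot be written in one ahci_fill_cmd_slot() call instead of a store followed by an OR. An illustrative restatement:

	u32 opts = 5;			/* CFL: command FIS is 5 dwords */
	opts |= AHCI_CMD_WRITE;		/* (1 << 6): host-to-device data */
	opts |= n_elem << 16;		/* PRDTL: S/G table length */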
@@ -576,7 +641,6 @@ static void ahci_restart_port(struct ata_port *ap, u32 irq_stat)
 	void __iomem *mmio = ap->host_set->mmio_base;
 	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
 	u32 tmp;
-	int work;

 	if ((ap->device[0].class != ATA_DEV_ATAPI) ||
 	    ((irq_stat & PORT_IRQ_TF_ERR) == 0))
@@ -592,20 +656,7 @@ static void ahci_restart_port(struct ata_port *ap, u32 irq_stat)
 	       readl(port_mmio + PORT_SCR_ERR));

 	/* stop DMA */
-	tmp = readl(port_mmio + PORT_CMD);
-	tmp &= ~PORT_CMD_START;
-	writel(tmp, port_mmio + PORT_CMD);
-
-	/* wait for engine to stop. TODO: this could be
-	 * as long as 500 msec
-	 */
-	work = 1000;
-	while (work-- > 0) {
-		tmp = readl(port_mmio + PORT_CMD);
-		if ((tmp & PORT_CMD_LIST_ON) == 0)
-			break;
-		udelay(10);
-	}
+	ahci_stop_engine(ap);

 	/* clear SATA phy error, if any */
 	tmp = readl(port_mmio + PORT_SCR_ERR);
@@ -624,10 +675,7 @@ static void ahci_restart_port(struct ata_port *ap, u32 irq_stat)
 	}

 	/* re-start DMA */
-	tmp = readl(port_mmio + PORT_CMD);
-	tmp |= PORT_CMD_START;
-	writel(tmp, port_mmio + PORT_CMD);
-	readl(port_mmio + PORT_CMD); /* flush */
+	ahci_start_engine(ap);
 }

633static void ahci_eng_timeout(struct ata_port *ap) 681static void ahci_eng_timeout(struct ata_port *ap)
@@ -642,25 +690,13 @@ static void ahci_eng_timeout(struct ata_port *ap)
642 690
643 spin_lock_irqsave(&host_set->lock, flags); 691 spin_lock_irqsave(&host_set->lock, flags);
644 692
693 ahci_restart_port(ap, readl(port_mmio + PORT_IRQ_STAT));
645 qc = ata_qc_from_tag(ap, ap->active_tag); 694 qc = ata_qc_from_tag(ap, ap->active_tag);
646 if (!qc) { 695 qc->err_mask |= AC_ERR_TIMEOUT;
647 printk(KERN_ERR "ata%u: BUG: timeout without command\n",
648 ap->id);
649 } else {
650 ahci_restart_port(ap, readl(port_mmio + PORT_IRQ_STAT));
651
652 /* hack alert! We cannot use the supplied completion
653 * function from inside the ->eh_strategy_handler() thread.
654 * libata is the only user of ->eh_strategy_handler() in
655 * any kernel, so the default scsi_done() assumes it is
656 * not being called from the SCSI EH.
657 */
658 qc->scsidone = scsi_finish_command;
659 qc->err_mask |= AC_ERR_OTHER;
660 ata_qc_complete(qc);
661 }
662 696
663 spin_unlock_irqrestore(&host_set->lock, flags); 697 spin_unlock_irqrestore(&host_set->lock, flags);
698
699 ata_eh_qc_complete(qc);
664} 700}
665 701
666static inline int ahci_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc) 702static inline int ahci_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
@@ -678,7 +714,7 @@ static inline int ahci_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
 	ci = readl(port_mmio + PORT_CMD_ISSUE);
 	if (likely((ci & 0x1) == 0)) {
 		if (qc) {
-			assert(qc->err_mask == 0);
+			WARN_ON(qc->err_mask);
 			ata_qc_complete(qc);
 			qc = NULL;
 		}
@@ -697,7 +733,7 @@ static inline int ahci_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
 		ahci_restart_port(ap, status);

 		if (qc) {
-			qc->err_mask |= AC_ERR_OTHER;
+			qc->err_mask |= err_mask;
 			ata_qc_complete(qc);
 		}
 	}
@@ -770,7 +806,7 @@ static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *
 	return IRQ_RETVAL(handled);
 }

-static int ahci_qc_issue(struct ata_queued_cmd *qc)
+static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
 	void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
diff --git a/drivers/scsi/ata_piix.c b/drivers/scsi/ata_piix.c
index fc3ca051ceed..9327b62f97de 100644
--- a/drivers/scsi/ata_piix.c
+++ b/drivers/scsi/ata_piix.c
@@ -101,36 +101,54 @@ enum {
 	ICH5_PCS		= 0x92,	/* port control and status */
 	PIIX_SCC		= 0x0A, /* sub-class code register */

-	PIIX_FLAG_AHCI		= (1 << 28), /* AHCI possible */
-	PIIX_FLAG_CHECKINTR	= (1 << 29), /* make sure PCI INTx enabled */
-	PIIX_FLAG_COMBINED	= (1 << 30), /* combined mode possible */
+	PIIX_FLAG_IGNORE_PCS	= (1 << 25), /* ignore PCS present bits */
+	PIIX_FLAG_SCR		= (1 << 26), /* SCR available */
+	PIIX_FLAG_AHCI		= (1 << 27), /* AHCI possible */
+	PIIX_FLAG_CHECKINTR	= (1 << 28), /* make sure PCI INTx enabled */
+	PIIX_FLAG_COMBINED	= (1 << 29), /* combined mode possible */
+	/* ICH6/7 use different scheme for map value */
+	PIIX_FLAG_COMBINED_ICH6	= PIIX_FLAG_COMBINED | (1 << 30),

 	/* combined mode.  if set, PATA is channel 0.
 	 * if clear, PATA is channel 1.
 	 */
-	PIIX_COMB_PATA_P0	= (1 << 1),
-	PIIX_COMB		= (1 << 2), /* combined mode enabled? */
-
 	PIIX_PORT_ENABLED	= (1 << 0),
 	PIIX_PORT_PRESENT	= (1 << 4),

 	PIIX_80C_PRI		= (1 << 5) | (1 << 4),
 	PIIX_80C_SEC		= (1 << 7) | (1 << 6),

-	ich5_pata		= 0,
-	ich5_sata		= 1,
-	piix4_pata		= 2,
-	ich6_sata		= 3,
-	ich6_sata_ahci		= 4,
+	/* controller IDs */
+	piix4_pata		= 0,
+	ich5_pata		= 1,
+	ich5_sata		= 2,
+	esb_sata		= 3,
+	ich6_sata		= 4,
+	ich6_sata_ahci		= 5,
+	ich6m_sata_ahci		= 6,
+
+	/* constants for mapping table */
+	P0			= 0,  /* port 0 */
+	P1			= 1,  /* port 1 */
+	P2			= 2,  /* port 2 */
+	P3			= 3,  /* port 3 */
+	IDE			= -1, /* IDE */
+	NA			= -2, /* not avaliable */
+	RV			= -3, /* reserved */

 	PIIX_AHCI_DEVICE	= 6,
 };

+struct piix_map_db {
+	const u32 mask;
+	const int map[][4];
+};
+
 static int piix_init_one (struct pci_dev *pdev,
 			  const struct pci_device_id *ent);

-static void piix_pata_phy_reset(struct ata_port *ap);
-static void piix_sata_phy_reset(struct ata_port *ap);
+static int piix_pata_probe_reset(struct ata_port *ap, unsigned int *classes);
+static int piix_sata_probe_reset(struct ata_port *ap, unsigned int *classes);
 static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev);
 static void piix_set_dmamode (struct ata_port *ap, struct ata_device *adev);

@@ -147,19 +165,32 @@ static const struct pci_device_id piix_pci_tbl[] = {
 	 * list in drivers/pci/quirks.c.
 	 */

+	/* 82801EB (ICH5) */
 	{ 0x8086, 0x24d1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata },
+	/* 82801EB (ICH5) */
 	{ 0x8086, 0x24df, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata },
-	{ 0x8086, 0x25a3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata },
-	{ 0x8086, 0x25b0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata },
+	/* 6300ESB (ICH5 variant with broken PCS present bits) */
+	{ 0x8086, 0x25a3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, esb_sata },
+	/* 6300ESB pretending RAID */
+	{ 0x8086, 0x25b0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, esb_sata },
+	/* 82801FB/FW (ICH6/ICH6W) */
 	{ 0x8086, 0x2651, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata },
+	/* 82801FR/FRW (ICH6R/ICH6RW) */
 	{ 0x8086, 0x2652, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
-	{ 0x8086, 0x2653, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
+	/* 82801FBM ICH6M (ICH6R with only port 0 and 2 implemented) */
+	{ 0x8086, 0x2653, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6m_sata_ahci },
+	/* 82801GB/GR/GH (ICH7, identical to ICH6) */
 	{ 0x8086, 0x27c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
-	{ 0x8086, 0x27c4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
+	/* 2801GBM/GHM (ICH7M, identical to ICH6M) */
+	{ 0x8086, 0x27c4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6m_sata_ahci },
+	/* Enterprise Southbridge 2 (where's the datasheet?) */
 	{ 0x8086, 0x2680, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
+	/* SATA Controller 1 IDE (ICH8, no datasheet yet) */
 	{ 0x8086, 0x2820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
+	/* SATA Controller 2 IDE (ICH8, ditto) */
 	{ 0x8086, 0x2825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
-	{ 0x8086, 0x2828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
+	/* Mobile SATA Controller IDE (ICH8M, ditto) */
+	{ 0x8086, 0x2828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6m_sata_ahci },

 	{ }	/* terminate list */
 };
@@ -178,11 +209,11 @@ static struct scsi_host_template piix_sht = {
 	.name			= DRV_NAME,
 	.ioctl			= ata_scsi_ioctl,
 	.queuecommand		= ata_scsi_queuecmd,
+	.eh_timed_out		= ata_scsi_timed_out,
 	.eh_strategy_handler	= ata_scsi_error,
 	.can_queue		= ATA_DEF_QUEUE,
 	.this_id		= ATA_SHT_THIS_ID,
 	.sg_tablesize		= LIBATA_MAX_PRD,
-	.max_sectors		= ATA_MAX_SECTORS,
 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
 	.emulated		= ATA_SHT_EMULATED,
 	.use_clustering		= ATA_SHT_USE_CLUSTERING,
@@ -205,7 +236,7 @@ static const struct ata_port_operations piix_pata_ops = {
 	.exec_command		= ata_exec_command,
 	.dev_select		= ata_std_dev_select,

-	.phy_reset		= piix_pata_phy_reset,
+	.probe_reset		= piix_pata_probe_reset,

 	.bmdma_setup		= ata_bmdma_setup,
 	.bmdma_start		= ata_bmdma_start,
@@ -233,7 +264,7 @@ static const struct ata_port_operations piix_sata_ops = {
 	.exec_command		= ata_exec_command,
 	.dev_select		= ata_std_dev_select,

-	.phy_reset		= piix_sata_phy_reset,
+	.probe_reset		= piix_sata_probe_reset,

 	.bmdma_setup		= ata_bmdma_setup,
 	.bmdma_start		= ata_bmdma_start,
@@ -252,12 +283,62 @@ static const struct ata_port_operations piix_sata_ops = {
 	.host_stop		= ata_host_stop,
 };

+static struct piix_map_db ich5_map_db = {
+	.mask = 0x7,
+	.map = {
+		/* PM   PS   SM   SS       MAP  */
+		{  P0,  NA,  P1,  NA }, /* 000b */
+		{  P1,  NA,  P0,  NA }, /* 001b */
+		{  RV,  RV,  RV,  RV },
+		{  RV,  RV,  RV,  RV },
+		{  P0,  P1, IDE, IDE }, /* 100b */
+		{  P1,  P0, IDE, IDE }, /* 101b */
+		{ IDE, IDE,  P0,  P1 }, /* 110b */
+		{ IDE, IDE,  P1,  P0 }, /* 111b */
+	},
+};
+
+static struct piix_map_db ich6_map_db = {
+	.mask = 0x3,
+	.map = {
+		/* PM   PS   SM   SS       MAP */
+		{  P0,  P1,  P2,  P3 }, /* 00b */
+		{ IDE, IDE,  P1,  P3 }, /* 01b */
+		{  P0,  P2, IDE, IDE }, /* 10b */
+		{  RV,  RV,  RV,  RV },
+	},
+};
+
+static struct piix_map_db ich6m_map_db = {
+	.mask = 0x3,
+	.map = {
+		/* PM   PS   SM   SS       MAP */
+		{  P0,  P1,  P2,  P3 }, /* 00b */
+		{  RV,  RV,  RV,  RV },
+		{  P0,  P2, IDE, IDE }, /* 10b */
+		{  RV,  RV,  RV,  RV },
+	},
+};
+
 static struct ata_port_info piix_port_info[] = {
+	/* piix4_pata */
+	{
+		.sht		= &piix_sht,
+		.host_flags	= ATA_FLAG_SLAVE_POSS,
+		.pio_mask	= 0x1f,	/* pio0-4 */
+#if 0
+		.mwdma_mask	= 0x06, /* mwdma1-2 */
+#else
+		.mwdma_mask	= 0x00, /* mwdma broken */
+#endif
+		.udma_mask	= ATA_UDMA_MASK_40C,
+		.port_ops	= &piix_pata_ops,
+	},
+
 	/* ich5_pata */
 	{
 		.sht		= &piix_sht,
-		.host_flags	= ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST |
-				  PIIX_FLAG_CHECKINTR,
+		.host_flags	= ATA_FLAG_SLAVE_POSS | PIIX_FLAG_CHECKINTR,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 #if 0
 		.mwdma_mask	= 0x06, /* mwdma1-2 */
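Note: each piix_map_db row describes, for one value of the ICH5_PMR map register, what sits behind the four positions primary-master / primary-slave / secondary-master / secondary-slave: a SATA port number (P0..P3), legacy IDE, not available (NA), or a reserved encoding (RV). piix_init_sata_map(), added further down, selects the row like this (condensed from that function):

	u8 map_value;
	pci_read_config_byte(pdev, ICH5_PMR, &map_value);
	map = map_db->map[map_value & map_db->mask];	/* row of 4 entries */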
@@ -271,50 +352,63 @@ static struct ata_port_info piix_port_info[] = {
 	/* ich5_sata */
 	{
 		.sht		= &piix_sht,
-		.host_flags	= ATA_FLAG_SATA | ATA_FLAG_SRST |
-				  PIIX_FLAG_COMBINED | PIIX_FLAG_CHECKINTR,
+		.host_flags	= ATA_FLAG_SATA | PIIX_FLAG_COMBINED |
+				  PIIX_FLAG_CHECKINTR,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.mwdma_mask	= 0x07, /* mwdma0-2 */
 		.udma_mask	= 0x7f,	/* udma0-6 */
 		.port_ops	= &piix_sata_ops,
+		.private_data	= &ich5_map_db,
 	},

-	/* piix4_pata */
+	/* i6300esb_sata */
 	{
 		.sht		= &piix_sht,
-		.host_flags	= ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+		.host_flags	= ATA_FLAG_SATA | PIIX_FLAG_COMBINED |
+				  PIIX_FLAG_CHECKINTR | PIIX_FLAG_IGNORE_PCS,
 		.pio_mask	= 0x1f,	/* pio0-4 */
-#if 0
-		.mwdma_mask	= 0x06, /* mwdma1-2 */
-#else
-		.mwdma_mask	= 0x00, /* mwdma broken */
-#endif
-		.udma_mask	= ATA_UDMA_MASK_40C,
-		.port_ops	= &piix_pata_ops,
+		.mwdma_mask	= 0x07, /* mwdma0-2 */
+		.udma_mask	= 0x7f,	/* udma0-6 */
+		.port_ops	= &piix_sata_ops,
+		.private_data	= &ich5_map_db,
 	},

 	/* ich6_sata */
 	{
 		.sht		= &piix_sht,
-		.host_flags	= ATA_FLAG_SATA | ATA_FLAG_SRST |
-				  PIIX_FLAG_COMBINED | PIIX_FLAG_CHECKINTR |
-				  ATA_FLAG_SLAVE_POSS,
+		.host_flags	= ATA_FLAG_SATA | PIIX_FLAG_COMBINED_ICH6 |
+				  PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.mwdma_mask	= 0x07, /* mwdma0-2 */
 		.udma_mask	= 0x7f,	/* udma0-6 */
 		.port_ops	= &piix_sata_ops,
+		.private_data	= &ich6_map_db,
 	},

 	/* ich6_sata_ahci */
 	{
 		.sht		= &piix_sht,
-		.host_flags	= ATA_FLAG_SATA | ATA_FLAG_SRST |
-				  PIIX_FLAG_COMBINED | PIIX_FLAG_CHECKINTR |
-				  ATA_FLAG_SLAVE_POSS | PIIX_FLAG_AHCI,
+		.host_flags	= ATA_FLAG_SATA | PIIX_FLAG_COMBINED_ICH6 |
+				  PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR |
+				  PIIX_FLAG_AHCI,
+		.pio_mask	= 0x1f,	/* pio0-4 */
+		.mwdma_mask	= 0x07, /* mwdma0-2 */
+		.udma_mask	= 0x7f,	/* udma0-6 */
+		.port_ops	= &piix_sata_ops,
+		.private_data	= &ich6_map_db,
+	},
+
+	/* ich6m_sata_ahci */
+	{
+		.sht		= &piix_sht,
+		.host_flags	= ATA_FLAG_SATA | PIIX_FLAG_COMBINED_ICH6 |
+				  PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR |
+				  PIIX_FLAG_AHCI,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.mwdma_mask	= 0x07, /* mwdma0-2 */
 		.udma_mask	= 0x7f,	/* udma0-6 */
 		.port_ops	= &piix_sata_ops,
+		.private_data	= &ich6m_map_db,
 	},
 };

@@ -363,102 +457,123 @@ cbl40:
 }

 /**
- *	piix_pata_phy_reset - Probe specified port on PATA host controller
- *	@ap: Port to probe
+ *	piix_pata_probeinit - probeinit for PATA host controller
+ *	@ap: Target port
  *
- *	Probe PATA phy.
+ *	Probeinit including cable detection.
  *
  *	LOCKING:
  *	None (inherited from caller).
  */
+static void piix_pata_probeinit(struct ata_port *ap)
+{
+	piix_pata_cbl_detect(ap);
+	ata_std_probeinit(ap);
+}

-static void piix_pata_phy_reset(struct ata_port *ap)
+/**
+ *	piix_pata_probe_reset - Perform reset on PATA port and classify
+ *	@ap: Port to reset
+ *	@classes: Resulting classes of attached devices
+ *
+ *	Reset PATA phy and classify attached devices.
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+static int piix_pata_probe_reset(struct ata_port *ap, unsigned int *classes)
 {
 	struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);

 	if (!pci_test_config_bits(pdev, &piix_enable_bits[ap->hard_port_no])) {
-		ata_port_disable(ap);
 		printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id);
-		return;
+		return 0;
 	}

-	piix_pata_cbl_detect(ap);
-
-	ata_port_probe(ap);
-
-	ata_bus_reset(ap);
+	return ata_drive_probe_reset(ap, piix_pata_probeinit,
+				     ata_std_softreset, NULL,
+				     ata_std_postreset, classes);
 }

 /**
  *	piix_sata_probe - Probe PCI device for present SATA devices
  *	@ap: Port associated with the PCI device we wish to probe
  *
- *	Reads SATA PCI device's PCI config register Port Configuration
- *	and Status (PCS) to determine port and device availability.
+ *	Reads and configures SATA PCI device's PCI config register
+ *	Port Configuration and Status (PCS) to determine port and
+ *	device availability.
  *
  *	LOCKING:
  *	None (inherited from caller).
  *
  *	RETURNS:
- *	Non-zero if port is enabled, it may or may not have a device
- *	attached in that case (PRESENT bit would only be set if BIOS probe
- *	was done). Zero is returned if port is disabled.
+ *	Mask of avaliable devices on the port.
  */
-static int piix_sata_probe (struct ata_port *ap)
+static unsigned int piix_sata_probe (struct ata_port *ap)
 {
 	struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
-	int combined = (ap->flags & ATA_FLAG_SLAVE_POSS);
-	int orig_mask, mask, i;
+	const unsigned int *map = ap->host_set->private_data;
+	int base = 2 * ap->hard_port_no;
+	unsigned int present_mask = 0;
+	int port, i;
 	u8 pcs;

-	mask = (PIIX_PORT_PRESENT << ap->hard_port_no) |
-	       (PIIX_PORT_ENABLED << ap->hard_port_no);
-
 	pci_read_config_byte(pdev, ICH5_PCS, &pcs);
-	orig_mask = (int) pcs & 0xff;
-
-	/* TODO: this is vaguely wrong for ICH6 combined mode,
-	 * where only two of the four SATA ports are mapped
-	 * onto a single ATA channel.  It is also vaguely inaccurate
-	 * for ICH5, which has only two ports.  However, this is ok,
-	 * as further device presence detection code will handle
-	 * any false positives produced here.
-	 */
+	DPRINTK("ata%u: ENTER, pcs=0x%x base=%d\n", ap->id, pcs, base);

-	for (i = 0; i < 4; i++) {
-		mask = (PIIX_PORT_ENABLED << i);
+	/* enable all ports on this ap and wait for them to settle */
+	for (i = 0; i < 2; i++) {
+		port = map[base + i];
+		if (port >= 0)
+			pcs |= 1 << port;
+	}
+
+	pci_write_config_byte(pdev, ICH5_PCS, pcs);
+	msleep(100);

-		if ((orig_mask & mask) == mask)
-			if (combined || (i == ap->hard_port_no))
-				return 1;
+	/* let's see which devices are present */
+	pci_read_config_byte(pdev, ICH5_PCS, &pcs);
+
+	for (i = 0; i < 2; i++) {
+		port = map[base + i];
+		if (port < 0)
+			continue;
+		if (ap->flags & PIIX_FLAG_IGNORE_PCS || pcs & 1 << (4 + port))
+			present_mask |= 1 << i;
+		else
+			pcs &= ~(1 << port);
 	}

-	return 0;
+	/* disable offline ports on non-AHCI controllers */
+	if (!(ap->flags & PIIX_FLAG_AHCI))
+		pci_write_config_byte(pdev, ICH5_PCS, pcs);
+
+	DPRINTK("ata%u: LEAVE, pcs=0x%x present_mask=0x%x\n",
+		ap->id, pcs, present_mask);
+
+	return present_mask;
 }

 /**
- *	piix_sata_phy_reset - Probe specified port on SATA host controller
- *	@ap: Port to probe
+ *	piix_sata_probe_reset - Perform reset on SATA port and classify
+ *	@ap: Port to reset
+ *	@classes: Resulting classes of attached devices
  *
- *	Probe SATA phy.
+ *	Reset SATA phy and classify attached devices.
  *
  *	LOCKING:
  *	None (inherited from caller).
  */
-
-static void piix_sata_phy_reset(struct ata_port *ap)
+static int piix_sata_probe_reset(struct ata_port *ap, unsigned int *classes)
 {
 	if (!piix_sata_probe(ap)) {
-		ata_port_disable(ap);
 		printk(KERN_INFO "ata%u: SATA port has no device.\n", ap->id);
-		return;
+		return 0;
 	}

-	ap->cbl = ATA_CBL_SATA;
-
-	ata_port_probe(ap);
-
-	ata_bus_reset(ap);
+	return ata_drive_probe_reset(ap, ata_std_probeinit,
+				     ata_std_softreset, NULL,
+				     ata_std_postreset, classes);
 }

 /**
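Note: going by the enum values above (PIIX_PORT_ENABLED = 1 << 0, PIIX_PORT_PRESENT = 1 << 4), bit (1 << port) of PCS enables a SATA port and bit (1 << (4 + port)) reports a device present on it. The reworked piix_sata_probe() enables the up-to-two ports mapped onto this ATA channel, sleeps 100 ms so presence detection can settle, re-reads PCS, and returns a mask of populated slots; PIIX_FLAG_IGNORE_PCS exists because the 6300ESB reports the present bits unreliably. In short:

	pcs |= 1 << port;			/* enable the port    */
	present = pcs & (1 << (4 + port));	/* device detected?   */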
@@ -627,6 +742,7 @@ static int piix_disable_ahci(struct pci_dev *pdev)

 /**
  *	piix_check_450nx_errata	-	Check for problem 450NX setup
+ *	@ata_dev: the PCI device to check
  *
  *	Check for the present of 450NX errata #19 and errata #25. If
  *	they are found return an error code so we can turn off DMA
@@ -659,6 +775,54 @@ static int __devinit piix_check_450nx_errata(struct pci_dev *ata_dev)
 	return no_piix_dma;
 }

+static void __devinit piix_init_sata_map(struct pci_dev *pdev,
+					 struct ata_port_info *pinfo)
+{
+	struct piix_map_db *map_db = pinfo[0].private_data;
+	const unsigned int *map;
+	int i, invalid_map = 0;
+	u8 map_value;
+
+	pci_read_config_byte(pdev, ICH5_PMR, &map_value);
+
+	map = map_db->map[map_value & map_db->mask];
+
+	dev_printk(KERN_INFO, &pdev->dev, "MAP [");
+	for (i = 0; i < 4; i++) {
+		switch (map[i]) {
+		case RV:
+			invalid_map = 1;
+			printk(" XX");
+			break;
+
+		case NA:
+			printk(" --");
+			break;
+
+		case IDE:
+			WARN_ON((i & 1) || map[i + 1] != IDE);
+			pinfo[i / 2] = piix_port_info[ich5_pata];
+			i++;
+			printk(" IDE IDE");
+			break;
+
+		default:
+			printk(" P%d", map[i]);
+			if (i & 1)
+				pinfo[i / 2].host_flags |= ATA_FLAG_SLAVE_POSS;
+			break;
+		}
+	}
+	printk(" ]\n");
+
+	if (invalid_map)
+		dev_printk(KERN_ERR, &pdev->dev,
+			   "invalid MAP value %u\n", map_value);
+
+	pinfo[0].private_data = (void *)map;
+	pinfo[1].private_data = (void *)map;
+}
+
 /**
  *	piix_init_one - Register PIIX ATA PCI device with kernel services
  *	@pdev: PCI device to register
@@ -677,9 +841,9 @@ static int __devinit piix_check_450nx_errata(struct pci_dev *ata_dev)
 static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	static int printed_version;
-	struct ata_port_info *port_info[2];
-	unsigned int combined = 0;
-	unsigned int pata_chan = 0, sata_chan = 0;
+	struct ata_port_info port_info[2];
+	struct ata_port_info *ppinfo[2] = { &port_info[0], &port_info[1] };
+	unsigned long host_flags;

 	if (!printed_version++)
 		dev_printk(KERN_DEBUG, &pdev->dev,
@@ -689,10 +853,12 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (!in_module_init)
 		return -ENODEV;

-	port_info[0] = &piix_port_info[ent->driver_data];
-	port_info[1] = &piix_port_info[ent->driver_data];
+	port_info[0] = piix_port_info[ent->driver_data];
+	port_info[1] = piix_port_info[ent->driver_data];
+
+	host_flags = port_info[0].host_flags;

-	if (port_info[0]->host_flags & PIIX_FLAG_AHCI) {
+	if (host_flags & PIIX_FLAG_AHCI) {
 		u8 tmp;
 		pci_read_config_byte(pdev, PIIX_SCC, &tmp);
 		if (tmp == PIIX_AHCI_DEVICE) {
@@ -702,18 +868,9 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 		}
 	}

-	if (port_info[0]->host_flags & PIIX_FLAG_COMBINED) {
-		u8 tmp;
-		pci_read_config_byte(pdev, ICH5_PMR, &tmp);
-
-		if (tmp & PIIX_COMB) {
-			combined = 1;
-			if (tmp & PIIX_COMB_PATA_P0)
-				sata_chan = 1;
-			else
-				pata_chan = 1;
-		}
-	}
+	/* Initialize SATA map */
+	if (host_flags & ATA_FLAG_SATA)
+		piix_init_sata_map(pdev, port_info);

 	/* On ICH5, some BIOSen disable the interrupt using the
 	 * PCI_COMMAND_INTX_DISABLE bit added in PCI 2.3.
@@ -721,28 +878,19 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 	 * MSI is disabled (and it is disabled, as we don't use
 	 * message-signalled interrupts currently).
 	 */
-	if (port_info[0]->host_flags & PIIX_FLAG_CHECKINTR)
+	if (host_flags & PIIX_FLAG_CHECKINTR)
 		pci_intx(pdev, 1);

-	if (combined) {
-		port_info[sata_chan] = &piix_port_info[ent->driver_data];
-		port_info[sata_chan]->host_flags |= ATA_FLAG_SLAVE_POSS;
-		port_info[pata_chan] = &piix_port_info[ich5_pata];
-
-		dev_printk(KERN_WARNING, &pdev->dev,
-			   "combined mode detected (p=%u, s=%u)\n",
-			   pata_chan, sata_chan);
-	}
 	if (piix_check_450nx_errata(pdev)) {
 		/* This writes into the master table but it does not
 		   really matter for this errata as we will apply it to
 		   all the PIIX devices on the board */
-		port_info[0]->mwdma_mask = 0;
-		port_info[0]->udma_mask = 0;
-		port_info[1]->mwdma_mask = 0;
-		port_info[1]->udma_mask = 0;
+		port_info[0].mwdma_mask = 0;
+		port_info[0].udma_mask = 0;
+		port_info[1].mwdma_mask = 0;
+		port_info[1].udma_mask = 0;
 	}
-	return ata_pci_init_one(pdev, port_info, 2);
+	return ata_pci_init_one(pdev, ppinfo, 2);
 }

 static int __init piix_init(void)
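Note: switching piix_init_one() from an array of pointers into the static piix_port_info[] table to by-value stack copies is what makes the map and errata handling safe: the old code mutated the shared static entries (e.g. ORing in ATA_FLAG_SLAVE_POSS, or clearing udma_mask for the 450NX errata), so a quirk applied for one controller leaked into every later probe. With copies, piix_init_sata_map() and the errata path may edit port_info[] freely, and only ppinfo[] (pointers to the copies) is handed to ata_pci_init_one(). Illustration:

	/* before: alias into a shared static table */
	struct ata_port_info *pi = &piix_port_info[id];
	pi->udma_mask = 0;	/* mutates all future probes */

	/* after: private copy per probed device */
	struct ata_port_info pi = piix_port_info[id];
	pi.udma_mask = 0;	/* local to this device */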
diff --git a/drivers/scsi/libata-bmdma.c b/drivers/scsi/libata-bmdma.c
new file mode 100644
index 000000000000..a93336adcd23
--- /dev/null
+++ b/drivers/scsi/libata-bmdma.c
@@ -0,0 +1,703 @@
+/*
+ *  libata-bmdma.c - helper library for PCI IDE BMDMA
+ *
+ *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
+ *		    Please ALWAYS copy linux-ide@vger.kernel.org
+ *		    on emails.
+ *
+ *  Copyright 2003-2006 Red Hat, Inc.  All rights reserved.
+ *  Copyright 2003-2006 Jeff Garzik
+ *
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2, or (at your option)
+ *  any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; see the file COPYING.  If not, write to
+ *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ *  libata documentation is available via 'make {ps|pdf}docs',
+ *  as Documentation/DocBook/libata.*
+ *
+ *  Hardware documentation available from http://www.t13.org/ and
+ *  http://www.sata-io.org/
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/libata.h>
+
+#include "libata.h"
+
+/**
+ *	ata_tf_load_pio - send taskfile registers to host controller
+ *	@ap: Port to which output is sent
+ *	@tf: ATA taskfile register set
+ *
+ *	Outputs ATA taskfile to standard ATA host controller.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+
+static void ata_tf_load_pio(struct ata_port *ap, const struct ata_taskfile *tf)
+{
+	struct ata_ioports *ioaddr = &ap->ioaddr;
+	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
+
+	if (tf->ctl != ap->last_ctl) {
+		outb(tf->ctl, ioaddr->ctl_addr);
+		ap->last_ctl = tf->ctl;
+		ata_wait_idle(ap);
+	}
+
+	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
+		outb(tf->hob_feature, ioaddr->feature_addr);
+		outb(tf->hob_nsect, ioaddr->nsect_addr);
+		outb(tf->hob_lbal, ioaddr->lbal_addr);
+		outb(tf->hob_lbam, ioaddr->lbam_addr);
+		outb(tf->hob_lbah, ioaddr->lbah_addr);
+		VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
+			tf->hob_feature,
+			tf->hob_nsect,
+			tf->hob_lbal,
+			tf->hob_lbam,
+			tf->hob_lbah);
+	}
+
+	if (is_addr) {
+		outb(tf->feature, ioaddr->feature_addr);
+		outb(tf->nsect, ioaddr->nsect_addr);
+		outb(tf->lbal, ioaddr->lbal_addr);
+		outb(tf->lbam, ioaddr->lbam_addr);
+		outb(tf->lbah, ioaddr->lbah_addr);
+		VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
+			tf->feature,
+			tf->nsect,
+			tf->lbal,
+			tf->lbam,
+			tf->lbah);
+	}
+
+	if (tf->flags & ATA_TFLAG_DEVICE) {
+		outb(tf->device, ioaddr->device_addr);
+		VPRINTK("device 0x%X\n", tf->device);
+	}
+
+	ata_wait_idle(ap);
+}
+
+/**
+ *	ata_tf_load_mmio - send taskfile registers to host controller
+ *	@ap: Port to which output is sent
+ *	@tf: ATA taskfile register set
+ *
+ *	Outputs ATA taskfile to standard ATA host controller using MMIO.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+
+static void ata_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
+{
+	struct ata_ioports *ioaddr = &ap->ioaddr;
+	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
+
+	if (tf->ctl != ap->last_ctl) {
+		writeb(tf->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
+		ap->last_ctl = tf->ctl;
+		ata_wait_idle(ap);
+	}
+
+	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
+		writeb(tf->hob_feature, (void __iomem *) ioaddr->feature_addr);
+		writeb(tf->hob_nsect, (void __iomem *) ioaddr->nsect_addr);
+		writeb(tf->hob_lbal, (void __iomem *) ioaddr->lbal_addr);
+		writeb(tf->hob_lbam, (void __iomem *) ioaddr->lbam_addr);
+		writeb(tf->hob_lbah, (void __iomem *) ioaddr->lbah_addr);
+		VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
+			tf->hob_feature,
+			tf->hob_nsect,
+			tf->hob_lbal,
+			tf->hob_lbam,
+			tf->hob_lbah);
+	}
+
+	if (is_addr) {
+		writeb(tf->feature, (void __iomem *) ioaddr->feature_addr);
+		writeb(tf->nsect, (void __iomem *) ioaddr->nsect_addr);
+		writeb(tf->lbal, (void __iomem *) ioaddr->lbal_addr);
+		writeb(tf->lbam, (void __iomem *) ioaddr->lbam_addr);
+		writeb(tf->lbah, (void __iomem *) ioaddr->lbah_addr);
+		VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
+			tf->feature,
+			tf->nsect,
+			tf->lbal,
+			tf->lbam,
+			tf->lbah);
+	}
+
+	if (tf->flags & ATA_TFLAG_DEVICE) {
+		writeb(tf->device, (void __iomem *) ioaddr->device_addr);
+		VPRINTK("device 0x%X\n", tf->device);
+	}
+
+	ata_wait_idle(ap);
+}
+
+
+/**
+ *	ata_tf_load - send taskfile registers to host controller
+ *	@ap: Port to which output is sent
+ *	@tf: ATA taskfile register set
+ *
+ *	Outputs ATA taskfile to standard ATA host controller using MMIO
+ *	or PIO as indicated by the ATA_FLAG_MMIO flag.
+ *	Writes the control, feature, nsect, lbal, lbam, and lbah registers.
+ *	Optionally (ATA_TFLAG_LBA48) writes hob_feature, hob_nsect,
+ *	hob_lbal, hob_lbam, and hob_lbah.
+ *
+ *	This function waits for idle (!BUSY and !DRQ) after writing
+ *	registers.  If the control register has a new value, this
+ *	function also waits for idle after writing control and before
+ *	writing the remaining registers.
+ *
+ *	May be used as the tf_load() entry in ata_port_operations.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
+{
+	if (ap->flags & ATA_FLAG_MMIO)
+		ata_tf_load_mmio(ap, tf);
+	else
+		ata_tf_load_pio(ap, tf);
+}
+
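Note: everything moved into libata-bmdma.c follows the same shape: a _pio variant using inb()/outb(), an _mmio variant using readb()/writeb(), and an exported wrapper that dispatches on ATA_FLAG_MMIO at run time. Drivers plug the wrappers into their ata_port_operations; an illustrative wiring (xxx_ops is a placeholder):

	static const struct ata_port_operations xxx_ops = {
		.tf_load	= ata_tf_load,
		.tf_read	= ata_tf_read,
		.exec_command	= ata_exec_command,
		.check_status	= ata_check_status,
		/* ... */
	};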
+/**
+ *	ata_exec_command_pio - issue ATA command to host controller
+ *	@ap: port to which command is being issued
+ *	@tf: ATA taskfile register set
+ *
+ *	Issues PIO write to ATA command register, with proper
+ *	synchronization with interrupt handler / other threads.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host_set lock)
+ */
+
+static void ata_exec_command_pio(struct ata_port *ap, const struct ata_taskfile *tf)
+{
+	DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
+
+	outb(tf->command, ap->ioaddr.command_addr);
+	ata_pause(ap);
+}
+
+
+/**
+ *	ata_exec_command_mmio - issue ATA command to host controller
+ *	@ap: port to which command is being issued
+ *	@tf: ATA taskfile register set
+ *
+ *	Issues MMIO write to ATA command register, with proper
+ *	synchronization with interrupt handler / other threads.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host_set lock)
+ */
+
+static void ata_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
+{
+	DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
+
+	writeb(tf->command, (void __iomem *) ap->ioaddr.command_addr);
+	ata_pause(ap);
+}
+
+
+/**
+ *	ata_exec_command - issue ATA command to host controller
+ *	@ap: port to which command is being issued
+ *	@tf: ATA taskfile register set
+ *
+ *	Issues PIO/MMIO write to ATA command register, with proper
+ *	synchronization with interrupt handler / other threads.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host_set lock)
+ */
+void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
+{
+	if (ap->flags & ATA_FLAG_MMIO)
+		ata_exec_command_mmio(ap, tf);
+	else
+		ata_exec_command_pio(ap, tf);
+}
+
+/**
+ *	ata_tf_read_pio - input device's ATA taskfile shadow registers
+ *	@ap: Port from which input is read
+ *	@tf: ATA taskfile register set for storing input
+ *
+ *	Reads ATA taskfile registers for currently-selected device
+ *	into @tf.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+
+static void ata_tf_read_pio(struct ata_port *ap, struct ata_taskfile *tf)
+{
+	struct ata_ioports *ioaddr = &ap->ioaddr;
+
+	tf->command = ata_check_status(ap);
+	tf->feature = inb(ioaddr->error_addr);
+	tf->nsect = inb(ioaddr->nsect_addr);
+	tf->lbal = inb(ioaddr->lbal_addr);
+	tf->lbam = inb(ioaddr->lbam_addr);
+	tf->lbah = inb(ioaddr->lbah_addr);
+	tf->device = inb(ioaddr->device_addr);
+
+	if (tf->flags & ATA_TFLAG_LBA48) {
+		outb(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
+		tf->hob_feature = inb(ioaddr->error_addr);
+		tf->hob_nsect = inb(ioaddr->nsect_addr);
+		tf->hob_lbal = inb(ioaddr->lbal_addr);
+		tf->hob_lbam = inb(ioaddr->lbam_addr);
+		tf->hob_lbah = inb(ioaddr->lbah_addr);
+	}
+}
+
+/**
+ *	ata_tf_read_mmio - input device's ATA taskfile shadow registers
+ *	@ap: Port from which input is read
+ *	@tf: ATA taskfile register set for storing input
+ *
+ *	Reads ATA taskfile registers for currently-selected device
+ *	into @tf via MMIO.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+
+static void ata_tf_read_mmio(struct ata_port *ap, struct ata_taskfile *tf)
+{
+	struct ata_ioports *ioaddr = &ap->ioaddr;
+
+	tf->command = ata_check_status(ap);
+	tf->feature = readb((void __iomem *)ioaddr->error_addr);
+	tf->nsect = readb((void __iomem *)ioaddr->nsect_addr);
+	tf->lbal = readb((void __iomem *)ioaddr->lbal_addr);
+	tf->lbam = readb((void __iomem *)ioaddr->lbam_addr);
+	tf->lbah = readb((void __iomem *)ioaddr->lbah_addr);
+	tf->device = readb((void __iomem *)ioaddr->device_addr);
+
+	if (tf->flags & ATA_TFLAG_LBA48) {
+		writeb(tf->ctl | ATA_HOB, (void __iomem *) ap->ioaddr.ctl_addr);
+		tf->hob_feature = readb((void __iomem *)ioaddr->error_addr);
+		tf->hob_nsect = readb((void __iomem *)ioaddr->nsect_addr);
+		tf->hob_lbal = readb((void __iomem *)ioaddr->lbal_addr);
+		tf->hob_lbam = readb((void __iomem *)ioaddr->lbam_addr);
+		tf->hob_lbah = readb((void __iomem *)ioaddr->lbah_addr);
+	}
+}
+
+
+/**
+ *	ata_tf_read - input device's ATA taskfile shadow registers
+ *	@ap: Port from which input is read
+ *	@tf: ATA taskfile register set for storing input
+ *
+ *	Reads ATA taskfile registers for currently-selected device
+ *	into @tf.
+ *
+ *	Reads nsect, lbal, lbam, lbah, and device.  If ATA_TFLAG_LBA48
+ *	is set, also reads the hob registers.
+ *
+ *	May be used as the tf_read() entry in ata_port_operations.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
+{
+	if (ap->flags & ATA_FLAG_MMIO)
+		ata_tf_read_mmio(ap, tf);
+	else
+		ata_tf_read_pio(ap, tf);
+}
+
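Note: the LBA48 branch of the tf_read helpers demonstrates the HOB (high-order byte) access technique: writing the device control register with ATA_HOB set switches the shadow register file to present the previously latched high-order bytes, after which the same taskfile addresses are simply read a second time:

	outb(tf->ctl | ATA_HOB, ioaddr->ctl_addr);	/* select HOB view */
	tf->hob_nsect = inb(ioaddr->nsect_addr);	/* re-read same port */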
+/**
+ *	ata_check_status_pio - Read device status reg & clear interrupt
+ *	@ap: port where the device is
+ *
+ *	Reads ATA taskfile status register for currently-selected device
+ *	and return its value. This also clears pending interrupts
+ *	from this device
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+static u8 ata_check_status_pio(struct ata_port *ap)
+{
+	return inb(ap->ioaddr.status_addr);
+}
+
+/**
+ *	ata_check_status_mmio - Read device status reg & clear interrupt
+ *	@ap: port where the device is
+ *
+ *	Reads ATA taskfile status register for currently-selected device
+ *	via MMIO and return its value. This also clears pending interrupts
+ *	from this device
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+static u8 ata_check_status_mmio(struct ata_port *ap)
+{
+	return readb((void __iomem *) ap->ioaddr.status_addr);
+}
+
+
+/**
+ *	ata_check_status - Read device status reg & clear interrupt
+ *	@ap: port where the device is
+ *
+ *	Reads ATA taskfile status register for currently-selected device
+ *	and return its value. This also clears pending interrupts
+ *	from this device
+ *
+ *	May be used as the check_status() entry in ata_port_operations.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+u8 ata_check_status(struct ata_port *ap)
+{
+	if (ap->flags & ATA_FLAG_MMIO)
+		return ata_check_status_mmio(ap);
+	return ata_check_status_pio(ap);
+}
+
+
+/**
+ *	ata_altstatus - Read device alternate status reg
+ *	@ap: port where the device is
+ *
+ *	Reads ATA taskfile alternate status register for
+ *	currently-selected device and return its value.
+ *
+ *	Note: may NOT be used as the check_altstatus() entry in
+ *	ata_port_operations.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+u8 ata_altstatus(struct ata_port *ap)
+{
+	if (ap->ops->check_altstatus)
+		return ap->ops->check_altstatus(ap);
+
+	if (ap->flags & ATA_FLAG_MMIO)
+		return readb((void __iomem *)ap->ioaddr.altstatus_addr);
+	return inb(ap->ioaddr.altstatus_addr);
+}
+
+#ifdef CONFIG_PCI
+static struct ata_probe_ent *
+ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
+{
+	struct ata_probe_ent *probe_ent;
+
+	probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
+	if (!probe_ent) {
+		printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
+		       kobject_name(&(dev->kobj)));
+		return NULL;
+	}
+
+	INIT_LIST_HEAD(&probe_ent->node);
+	probe_ent->dev = dev;
+
+	probe_ent->sht = port->sht;
+	probe_ent->host_flags = port->host_flags;
+	probe_ent->pio_mask = port->pio_mask;
+	probe_ent->mwdma_mask = port->mwdma_mask;
+	probe_ent->udma_mask = port->udma_mask;
+	probe_ent->port_ops = port->port_ops;
+
+	return probe_ent;
+}
+
+
+/**
+ *	ata_pci_init_native_mode - Initialize native-mode driver
+ *	@pdev:  pci device to be initialized
+ *	@port:  array[2] of pointers to port info structures.
+ *	@ports: bitmap of ports present
+ *
+ *	Utility function which allocates and initializes an
+ *	ata_probe_ent structure for a standard dual-port
+ *	PIO-based IDE controller.  The returned ata_probe_ent
+ *	structure can be passed to ata_device_add().  The returned
+ *	ata_probe_ent structure should then be freed with kfree().
+ *
+ *	The caller need only pass the address of the primary port, the
+ *	secondary will be deduced automatically. If the device has non
+ *	standard secondary port mappings this function can be called twice,
+ *	once for each interface.
+ */
+
+struct ata_probe_ent *
+ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int ports)
+{
+	struct ata_probe_ent *probe_ent =
+		ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
+	int p = 0;
+
+	if (!probe_ent)
+		return NULL;
+
+	probe_ent->irq = pdev->irq;
+	probe_ent->irq_flags = SA_SHIRQ;
+	probe_ent->private_data = port[0]->private_data;
+
+	if (ports & ATA_PORT_PRIMARY) {
+		probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 0);
+		probe_ent->port[p].altstatus_addr =
+		probe_ent->port[p].ctl_addr =
+			pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS;
+		probe_ent->port[p].bmdma_addr = pci_resource_start(pdev, 4);
+		ata_std_ports(&probe_ent->port[p]);
+		p++;
+	}
+
+	if (ports & ATA_PORT_SECONDARY) {
+		probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 2);
+		probe_ent->port[p].altstatus_addr =
+		probe_ent->port[p].ctl_addr =
+			pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS;
+		probe_ent->port[p].bmdma_addr = pci_resource_start(pdev, 4) + 8;
+		ata_std_ports(&probe_ent->port[p]);
+		p++;
+	}
+
+	probe_ent->n_ports = p;
+	return probe_ent;
+}
+
502
503static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev,
504 struct ata_port_info *port, int port_num)
505{
506 struct ata_probe_ent *probe_ent;
507
508 probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port);
509 if (!probe_ent)
510 return NULL;
511
512 probe_ent->legacy_mode = 1;
513 probe_ent->n_ports = 1;
514 probe_ent->hard_port_no = port_num;
515 probe_ent->private_data = port->private_data;
516
517 switch (port_num)
518 {
519 case 0:
520 probe_ent->irq = 14;
521 probe_ent->port[0].cmd_addr = 0x1f0;
522 probe_ent->port[0].altstatus_addr =
523 probe_ent->port[0].ctl_addr = 0x3f6;
524 break;
525 case 1:
526 probe_ent->irq = 15;
527 probe_ent->port[0].cmd_addr = 0x170;
528 probe_ent->port[0].altstatus_addr =
529 probe_ent->port[0].ctl_addr = 0x376;
530 break;
531 }
532
533 probe_ent->port[0].bmdma_addr =
534 pci_resource_start(pdev, 4) + 8 * port_num;
535 ata_std_ports(&probe_ent->port[0]);
536
537 return probe_ent;
538}
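The constants in the switch above are the fixed PC/AT compatibility-mode
resources; the BMDMA registers come from BAR4 at offsets 0 and 8. A small
standalone demo (plain C, not kernel code) tabulating the mapping:

	#include <stdio.h>

	/* Legacy IDE resources hard-wired by the PC/AT platform; the rows
	 * mirror the switch() in ata_pci_init_legacy_port() above. */
	static const struct { unsigned cmd, ctl, irq; } legacy[2] = {
		{ 0x1f0, 0x3f6, 14 },	/* primary channel */
		{ 0x170, 0x376, 15 },	/* secondary channel */
	};

	int main(void)
	{
		for (int i = 0; i < 2; i++)
			printf("port %d: cmd 0x%03x ctl 0x%03x irq %u bmdma BAR4+%d\n",
			       i, legacy[i].cmd, legacy[i].ctl, legacy[i].irq, 8 * i);
		return 0;
	}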
539
540
541/**
542 * ata_pci_init_one - Initialize/register PCI IDE host controller
543 * @pdev: Controller to be initialized
544 * @port_info: Information from low-level host driver
545 * @n_ports: Number of ports attached to host controller
546 *
547 * This is a helper function which can be called from a driver's
548 * xxx_init_one() probe function if the hardware uses traditional
549 * IDE taskfile registers.
550 *
551 * This function calls pci_enable_device(), reserves its register
552 * regions, sets the DMA mask, enables bus-master mode, and calls
553 * ata_device_add().
554 *
555 * LOCKING:
556 * Inherited from PCI layer (may sleep).
557 *
558 * RETURNS:
559 * Zero on success, negative errno-based value on error.
560 */
561
562int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
563 unsigned int n_ports)
564{
565 struct ata_probe_ent *probe_ent = NULL, *probe_ent2 = NULL;
566 struct ata_port_info *port[2];
567 u8 tmp8, mask;
568 unsigned int legacy_mode = 0;
569 int disable_dev_on_err = 1;
570 int rc;
571
572 DPRINTK("ENTER\n");
573
574 port[0] = port_info[0];
575 if (n_ports > 1)
576 port[1] = port_info[1];
577 else
578 port[1] = port[0];
579
580 if ((port[0]->host_flags & ATA_FLAG_NO_LEGACY) == 0
581 && (pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
582 /* TODO: What if one channel is in native mode ... */
583 pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
584 mask = (1 << 2) | (1 << 0);
585 if ((tmp8 & mask) != mask)
586 legacy_mode = (1 << 3);
587 }
588
589 /* FIXME... */
590 if ((!legacy_mode) && (n_ports > 2)) {
591 printk(KERN_ERR "ata: BUG: native mode, n_ports > 2\n");
592 n_ports = 2;
593 /* For now */
594 }
595
596 /* FIXME: Really, for ATA this isn't safe, because the device may be
597 multi-purpose and we want to leave it alone if it was already
598 enabled. Secondly, for shared use, as Arjan says, we want refcounting.
599
600 Checking dev->is_enabled is insufficient, as it is not set at
601 boot for the primary video, which is BIOS-enabled.
602 */
603
604 rc = pci_enable_device(pdev);
605 if (rc)
606 return rc;
607
608 rc = pci_request_regions(pdev, DRV_NAME);
609 if (rc) {
610 disable_dev_on_err = 0;
611 goto err_out;
612 }
613
614 /* FIXME: Should use platform specific mappers for legacy port ranges */
615 if (legacy_mode) {
616 if (!request_region(0x1f0, 8, "libata")) {
617 struct resource *conflict, res;
618 res.start = 0x1f0;
619 res.end = 0x1f0 + 8 - 1;
620 conflict = ____request_resource(&ioport_resource, &res);
621 if (!strcmp(conflict->name, "libata"))
622 legacy_mode |= (1 << 0);
623 else {
624 disable_dev_on_err = 0;
625 printk(KERN_WARNING "ata: 0x1f0 IDE port busy\n");
626 }
627 } else
628 legacy_mode |= (1 << 0);
629
630 if (!request_region(0x170, 8, "libata")) {
631 struct resource *conflict, res;
632 res.start = 0x170;
633 res.end = 0x170 + 8 - 1;
634 conflict = ____request_resource(&ioport_resource, &res);
635 if (!strcmp(conflict->name, "libata"))
636 legacy_mode |= (1 << 1);
637 else {
638 disable_dev_on_err = 0;
639 printk(KERN_WARNING "ata: 0x170 IDE port busy\n");
640 }
641 } else
642 legacy_mode |= (1 << 1);
643 }
644
645 /* we have legacy mode, but all ports are unavailable */
646 if (legacy_mode == (1 << 3)) {
647 rc = -EBUSY;
648 goto err_out_regions;
649 }
650
651 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
652 if (rc)
653 goto err_out_regions;
654 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
655 if (rc)
656 goto err_out_regions;
657
658 if (legacy_mode) {
659 if (legacy_mode & (1 << 0))
660 probe_ent = ata_pci_init_legacy_port(pdev, port[0], 0);
661 if (legacy_mode & (1 << 1))
662 probe_ent2 = ata_pci_init_legacy_port(pdev, port[1], 1);
663 } else {
664 if (n_ports == 2)
665 probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
666 else
667 probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY);
668 }
669 if (!probe_ent && !probe_ent2) {
670 rc = -ENOMEM;
671 goto err_out_regions;
672 }
673
674 pci_set_master(pdev);
675
676 /* FIXME: check ata_device_add return */
677 if (legacy_mode) {
678 if (legacy_mode & (1 << 0))
679 ata_device_add(probe_ent);
680 if (legacy_mode & (1 << 1))
681 ata_device_add(probe_ent2);
682 } else
683 ata_device_add(probe_ent);
684
685 kfree(probe_ent);
686 kfree(probe_ent2);
687
688 return 0;
689
690err_out_regions:
691 if (legacy_mode & (1 << 0))
692 release_region(0x1f0, 8);
693 if (legacy_mode & (1 << 1))
694 release_region(0x170, 8);
695 pci_release_regions(pdev);
696err_out:
697 if (disable_dev_on_err)
698 pci_disable_device(pdev);
699 return rc;
700}
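A hedged sketch of how a driver's xxx_init_one() probe routine of this era
might use the helper; the hypo_* names and the mask values are illustrative,
not part of this patch:

	static struct ata_port_info hypo_port_info = {
		.sht		= &hypo_sht,		/* assumed scsi_host_template */
		.host_flags	= ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
		.pio_mask	= 0x1f,			/* PIO0-4 */
		.mwdma_mask	= 0x07,			/* MWDMA0-2 */
		.udma_mask	= 0x3f,			/* UDMA0-5 */
		.port_ops	= &hypo_port_ops,	/* assumed ata_port_operations */
	};

	static int hypo_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
	{
		struct ata_port_info *port_info[2] = { &hypo_port_info, &hypo_port_info };

		return ata_pci_init_one(pdev, port_info, 2);
	}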
701
702#endif /* CONFIG_PCI */
703
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index 4f91b0dc572b..5a0b67a602df 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -61,19 +61,12 @@
61 61
62#include "libata.h" 62#include "libata.h"
63 63
64static unsigned int ata_busy_sleep (struct ata_port *ap, 64static unsigned int ata_dev_init_params(struct ata_port *ap,
65 unsigned long tmout_pat, 65 struct ata_device *dev);
66 unsigned long tmout);
67static void ata_dev_reread_id(struct ata_port *ap, struct ata_device *dev);
68static void ata_dev_init_params(struct ata_port *ap, struct ata_device *dev);
69static void ata_set_mode(struct ata_port *ap); 66static void ata_set_mode(struct ata_port *ap);
70static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev); 67static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev);
71static unsigned int ata_get_mode_mask(const struct ata_port *ap, int shift); 68static unsigned int ata_dev_xfermask(struct ata_port *ap,
72static int fgb(u32 bitmap); 69 struct ata_device *dev);
73static int ata_choose_xfer_mode(const struct ata_port *ap,
74 u8 *xfer_mode_out,
75 unsigned int *xfer_shift_out);
76static void __ata_qc_complete(struct ata_queued_cmd *qc);
77 70
78static unsigned int ata_unique_id = 1; 71static unsigned int ata_unique_id = 1;
79static struct workqueue_struct *ata_wq; 72static struct workqueue_struct *ata_wq;
@@ -91,403 +84,6 @@ MODULE_DESCRIPTION("Library module for ATA devices");
91MODULE_LICENSE("GPL"); 84MODULE_LICENSE("GPL");
92MODULE_VERSION(DRV_VERSION); 85MODULE_VERSION(DRV_VERSION);
93 86
94/**
95 * ata_tf_load_pio - send taskfile registers to host controller
96 * @ap: Port to which output is sent
97 * @tf: ATA taskfile register set
98 *
99 * Outputs ATA taskfile to standard ATA host controller.
100 *
101 * LOCKING:
102 * Inherited from caller.
103 */
104
105static void ata_tf_load_pio(struct ata_port *ap, const struct ata_taskfile *tf)
106{
107 struct ata_ioports *ioaddr = &ap->ioaddr;
108 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
109
110 if (tf->ctl != ap->last_ctl) {
111 outb(tf->ctl, ioaddr->ctl_addr);
112 ap->last_ctl = tf->ctl;
113 ata_wait_idle(ap);
114 }
115
116 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
117 outb(tf->hob_feature, ioaddr->feature_addr);
118 outb(tf->hob_nsect, ioaddr->nsect_addr);
119 outb(tf->hob_lbal, ioaddr->lbal_addr);
120 outb(tf->hob_lbam, ioaddr->lbam_addr);
121 outb(tf->hob_lbah, ioaddr->lbah_addr);
122 VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
123 tf->hob_feature,
124 tf->hob_nsect,
125 tf->hob_lbal,
126 tf->hob_lbam,
127 tf->hob_lbah);
128 }
129
130 if (is_addr) {
131 outb(tf->feature, ioaddr->feature_addr);
132 outb(tf->nsect, ioaddr->nsect_addr);
133 outb(tf->lbal, ioaddr->lbal_addr);
134 outb(tf->lbam, ioaddr->lbam_addr);
135 outb(tf->lbah, ioaddr->lbah_addr);
136 VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
137 tf->feature,
138 tf->nsect,
139 tf->lbal,
140 tf->lbam,
141 tf->lbah);
142 }
143
144 if (tf->flags & ATA_TFLAG_DEVICE) {
145 outb(tf->device, ioaddr->device_addr);
146 VPRINTK("device 0x%X\n", tf->device);
147 }
148
149 ata_wait_idle(ap);
150}
151
152/**
153 * ata_tf_load_mmio - send taskfile registers to host controller
154 * @ap: Port to which output is sent
155 * @tf: ATA taskfile register set
156 *
157 * Outputs ATA taskfile to standard ATA host controller using MMIO.
158 *
159 * LOCKING:
160 * Inherited from caller.
161 */
162
163static void ata_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
164{
165 struct ata_ioports *ioaddr = &ap->ioaddr;
166 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
167
168 if (tf->ctl != ap->last_ctl) {
169 writeb(tf->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
170 ap->last_ctl = tf->ctl;
171 ata_wait_idle(ap);
172 }
173
174 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
175 writeb(tf->hob_feature, (void __iomem *) ioaddr->feature_addr);
176 writeb(tf->hob_nsect, (void __iomem *) ioaddr->nsect_addr);
177 writeb(tf->hob_lbal, (void __iomem *) ioaddr->lbal_addr);
178 writeb(tf->hob_lbam, (void __iomem *) ioaddr->lbam_addr);
179 writeb(tf->hob_lbah, (void __iomem *) ioaddr->lbah_addr);
180 VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
181 tf->hob_feature,
182 tf->hob_nsect,
183 tf->hob_lbal,
184 tf->hob_lbam,
185 tf->hob_lbah);
186 }
187
188 if (is_addr) {
189 writeb(tf->feature, (void __iomem *) ioaddr->feature_addr);
190 writeb(tf->nsect, (void __iomem *) ioaddr->nsect_addr);
191 writeb(tf->lbal, (void __iomem *) ioaddr->lbal_addr);
192 writeb(tf->lbam, (void __iomem *) ioaddr->lbam_addr);
193 writeb(tf->lbah, (void __iomem *) ioaddr->lbah_addr);
194 VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
195 tf->feature,
196 tf->nsect,
197 tf->lbal,
198 tf->lbam,
199 tf->lbah);
200 }
201
202 if (tf->flags & ATA_TFLAG_DEVICE) {
203 writeb(tf->device, (void __iomem *) ioaddr->device_addr);
204 VPRINTK("device 0x%X\n", tf->device);
205 }
206
207 ata_wait_idle(ap);
208}
209
210
211/**
212 * ata_tf_load - send taskfile registers to host controller
213 * @ap: Port to which output is sent
214 * @tf: ATA taskfile register set
215 *
216 * Outputs ATA taskfile to standard ATA host controller using MMIO
217 * or PIO as indicated by the ATA_FLAG_MMIO flag.
218 * Writes the control, feature, nsect, lbal, lbam, and lbah registers.
219 * Optionally (ATA_TFLAG_LBA48) writes hob_feature, hob_nsect,
220 * hob_lbal, hob_lbam, and hob_lbah.
221 *
222 * This function waits for idle (!BUSY and !DRQ) after writing
223 * registers. If the control register has a new value, this
224 * function also waits for idle after writing control and before
225 * writing the remaining registers.
226 *
227 * May be used as the tf_load() entry in ata_port_operations.
228 *
229 * LOCKING:
230 * Inherited from caller.
231 */
232void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
233{
234 if (ap->flags & ATA_FLAG_MMIO)
235 ata_tf_load_mmio(ap, tf);
236 else
237 ata_tf_load_pio(ap, tf);
238}
239
240/**
241 * ata_exec_command_pio - issue ATA command to host controller
242 * @ap: port to which command is being issued
243 * @tf: ATA taskfile register set
244 *
245 * Issues PIO write to ATA command register, with proper
246 * synchronization with interrupt handler / other threads.
247 *
248 * LOCKING:
249 * spin_lock_irqsave(host_set lock)
250 */
251
252static void ata_exec_command_pio(struct ata_port *ap, const struct ata_taskfile *tf)
253{
254 DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
255
256 outb(tf->command, ap->ioaddr.command_addr);
257 ata_pause(ap);
258}
259
260
261/**
262 * ata_exec_command_mmio - issue ATA command to host controller
263 * @ap: port to which command is being issued
264 * @tf: ATA taskfile register set
265 *
266 * Issues MMIO write to ATA command register, with proper
267 * synchronization with interrupt handler / other threads.
268 *
269 * LOCKING:
270 * spin_lock_irqsave(host_set lock)
271 */
272
273static void ata_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
274{
275 DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
276
277 writeb(tf->command, (void __iomem *) ap->ioaddr.command_addr);
278 ata_pause(ap);
279}
280
281
282/**
283 * ata_exec_command - issue ATA command to host controller
284 * @ap: port to which command is being issued
285 * @tf: ATA taskfile register set
286 *
287 * Issues PIO/MMIO write to ATA command register, with proper
288 * synchronization with interrupt handler / other threads.
289 *
290 * LOCKING:
291 * spin_lock_irqsave(host_set lock)
292 */
293void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
294{
295 if (ap->flags & ATA_FLAG_MMIO)
296 ata_exec_command_mmio(ap, tf);
297 else
298 ata_exec_command_pio(ap, tf);
299}
300
301/**
302 * ata_tf_to_host - issue ATA taskfile to host controller
303 * @ap: port to which command is being issued
304 * @tf: ATA taskfile register set
305 *
306 * Issues ATA taskfile register set to ATA host controller,
307 * with proper synchronization with interrupt handler and
308 * other threads.
309 *
310 * LOCKING:
311 * spin_lock_irqsave(host_set lock)
312 */
313
314static inline void ata_tf_to_host(struct ata_port *ap,
315 const struct ata_taskfile *tf)
316{
317 ap->ops->tf_load(ap, tf);
318 ap->ops->exec_command(ap, tf);
319}
320
321/**
322 * ata_tf_read_pio - input device's ATA taskfile shadow registers
323 * @ap: Port from which input is read
324 * @tf: ATA taskfile register set for storing input
325 *
326 * Reads ATA taskfile registers for currently-selected device
327 * into @tf.
328 *
329 * LOCKING:
330 * Inherited from caller.
331 */
332
333static void ata_tf_read_pio(struct ata_port *ap, struct ata_taskfile *tf)
334{
335 struct ata_ioports *ioaddr = &ap->ioaddr;
336
337 tf->command = ata_check_status(ap);
338 tf->feature = inb(ioaddr->error_addr);
339 tf->nsect = inb(ioaddr->nsect_addr);
340 tf->lbal = inb(ioaddr->lbal_addr);
341 tf->lbam = inb(ioaddr->lbam_addr);
342 tf->lbah = inb(ioaddr->lbah_addr);
343 tf->device = inb(ioaddr->device_addr);
344
345 if (tf->flags & ATA_TFLAG_LBA48) {
346 outb(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
347 tf->hob_feature = inb(ioaddr->error_addr);
348 tf->hob_nsect = inb(ioaddr->nsect_addr);
349 tf->hob_lbal = inb(ioaddr->lbal_addr);
350 tf->hob_lbam = inb(ioaddr->lbam_addr);
351 tf->hob_lbah = inb(ioaddr->lbah_addr);
352 }
353}
354
355/**
356 * ata_tf_read_mmio - input device's ATA taskfile shadow registers
357 * @ap: Port from which input is read
358 * @tf: ATA taskfile register set for storing input
359 *
360 * Reads ATA taskfile registers for currently-selected device
361 * into @tf via MMIO.
362 *
363 * LOCKING:
364 * Inherited from caller.
365 */
366
367static void ata_tf_read_mmio(struct ata_port *ap, struct ata_taskfile *tf)
368{
369 struct ata_ioports *ioaddr = &ap->ioaddr;
370
371 tf->command = ata_check_status(ap);
372 tf->feature = readb((void __iomem *)ioaddr->error_addr);
373 tf->nsect = readb((void __iomem *)ioaddr->nsect_addr);
374 tf->lbal = readb((void __iomem *)ioaddr->lbal_addr);
375 tf->lbam = readb((void __iomem *)ioaddr->lbam_addr);
376 tf->lbah = readb((void __iomem *)ioaddr->lbah_addr);
377 tf->device = readb((void __iomem *)ioaddr->device_addr);
378
379 if (tf->flags & ATA_TFLAG_LBA48) {
380 writeb(tf->ctl | ATA_HOB, (void __iomem *) ap->ioaddr.ctl_addr);
381 tf->hob_feature = readb((void __iomem *)ioaddr->error_addr);
382 tf->hob_nsect = readb((void __iomem *)ioaddr->nsect_addr);
383 tf->hob_lbal = readb((void __iomem *)ioaddr->lbal_addr);
384 tf->hob_lbam = readb((void __iomem *)ioaddr->lbam_addr);
385 tf->hob_lbah = readb((void __iomem *)ioaddr->lbah_addr);
386 }
387}
388
389
390/**
391 * ata_tf_read - input device's ATA taskfile shadow registers
392 * @ap: Port from which input is read
393 * @tf: ATA taskfile register set for storing input
394 *
395 * Reads ATA taskfile registers for currently-selected device
396 * into @tf.
397 *
398 * Reads nsect, lbal, lbam, lbah, and device. If ATA_TFLAG_LBA48
399 * is set, also reads the hob registers.
400 *
401 * May be used as the tf_read() entry in ata_port_operations.
402 *
403 * LOCKING:
404 * Inherited from caller.
405 */
406void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
407{
408 if (ap->flags & ATA_FLAG_MMIO)
409 ata_tf_read_mmio(ap, tf);
410 else
411 ata_tf_read_pio(ap, tf);
412}
413
414/**
415 * ata_check_status_pio - Read device status reg & clear interrupt
416 * @ap: port where the device is
417 *
418 * Reads ATA taskfile status register for currently-selected device
419 * and return its value. This also clears pending interrupts
420 * from this device
421 *
422 * LOCKING:
423 * Inherited from caller.
424 */
425static u8 ata_check_status_pio(struct ata_port *ap)
426{
427 return inb(ap->ioaddr.status_addr);
428}
429
430/**
431 * ata_check_status_mmio - Read device status reg & clear interrupt
432 * @ap: port where the device is
433 *
434 * Reads ATA taskfile status register for currently-selected device
435 * via MMIO and return its value. This also clears pending interrupts
436 * from this device
437 *
438 * LOCKING:
439 * Inherited from caller.
440 */
441static u8 ata_check_status_mmio(struct ata_port *ap)
442{
443 return readb((void __iomem *) ap->ioaddr.status_addr);
444}
445
446
447/**
448 * ata_check_status - Read device status reg & clear interrupt
449 * @ap: port where the device is
450 *
451 * Reads ATA taskfile status register for currently-selected device
452 * and return its value. This also clears pending interrupts
453 * from this device
454 *
455 * May be used as the check_status() entry in ata_port_operations.
456 *
457 * LOCKING:
458 * Inherited from caller.
459 */
460u8 ata_check_status(struct ata_port *ap)
461{
462 if (ap->flags & ATA_FLAG_MMIO)
463 return ata_check_status_mmio(ap);
464 return ata_check_status_pio(ap);
465}
466
467
468/**
469 * ata_altstatus - Read device alternate status reg
470 * @ap: port where the device is
471 *
472 * Reads ATA taskfile alternate status register for
473 * currently-selected device and return its value.
474 *
475 * Note: may NOT be used as the check_altstatus() entry in
476 * ata_port_operations.
477 *
478 * LOCKING:
479 * Inherited from caller.
480 */
481u8 ata_altstatus(struct ata_port *ap)
482{
483 if (ap->ops->check_altstatus)
484 return ap->ops->check_altstatus(ap);
485
486 if (ap->flags & ATA_FLAG_MMIO)
487 return readb((void __iomem *)ap->ioaddr.altstatus_addr);
488 return inb(ap->ioaddr.altstatus_addr);
489}
490
491 87
492/** 88/**
493 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure 89 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
@@ -632,58 +228,148 @@ int ata_rwcmd_protocol(struct ata_queued_cmd *qc)
632 return -1; 228 return -1;
633} 229}
634 230
635static const char * const xfer_mode_str[] = { 231/**
636 "UDMA/16", 232 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
637 "UDMA/25", 233 * @pio_mask: pio_mask
638 "UDMA/33", 234 * @mwdma_mask: mwdma_mask
639 "UDMA/44", 235 * @udma_mask: udma_mask
640 "UDMA/66", 236 *
641 "UDMA/100", 237 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
642 "UDMA/133", 238 * unsigned int xfer_mask.
643 "UDMA7", 239 *
644 "MWDMA0", 240 * LOCKING:
645 "MWDMA1", 241 * None.
646 "MWDMA2", 242 *
647 "PIO0", 243 * RETURNS:
648 "PIO1", 244 * Packed xfer_mask.
649 "PIO2", 245 */
650 "PIO3", 246static unsigned int ata_pack_xfermask(unsigned int pio_mask,
651 "PIO4", 247 unsigned int mwdma_mask,
248 unsigned int udma_mask)
249{
250 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
251 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
252 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
253}
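The packing arithmetic can be exercised in plain C. The bit layout below
(PIO in bits 0-4, MWDMA in 5-7, UDMA in 8-15) is assumed for illustration;
the real ATA_SHIFT_*/ATA_MASK_* constants live in the libata headers:

	#include <stdio.h>

	enum {	/* illustrative stand-ins for the kernel constants */
		SHIFT_PIO   = 0,  MASK_PIO   = 0x1f << SHIFT_PIO,
		SHIFT_MWDMA = 5,  MASK_MWDMA = 0x07 << SHIFT_MWDMA,
		SHIFT_UDMA  = 8,  MASK_UDMA  = 0xff << SHIFT_UDMA,
	};

	static unsigned int pack_xfermask(unsigned int pio, unsigned int mwdma,
					  unsigned int udma)
	{
		return ((pio << SHIFT_PIO) & MASK_PIO) |
		       ((mwdma << SHIFT_MWDMA) & MASK_MWDMA) |
		       ((udma << SHIFT_UDMA) & MASK_UDMA);
	}

	int main(void)
	{
		/* PIO0-4, MWDMA0-2, UDMA0-5 packed into one word: 0x3fff */
		printf("xfer_mask = 0x%04x\n", pack_xfermask(0x1f, 0x07, 0x3f));
		return 0;
	}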
254
255static const struct ata_xfer_ent {
 256 int shift, bits;
257 u8 base;
258} ata_xfer_tbl[] = {
259 { ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
260 { ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
261 { ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
262 { -1, },
652}; 263};
653 264
654/** 265/**
655 * ata_udma_string - convert UDMA bit offset to string 266 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
656 * @mask: mask of bits supported; only highest bit counts. 267 * @xfer_mask: xfer_mask of interest
657 * 268 *
658 * Determine string which represents the highest speed 269 * Return matching XFER_* value for @xfer_mask. Only the highest
659 * (highest bit in @udma_mask). 270 * bit of @xfer_mask is considered.
660 * 271 *
661 * LOCKING: 272 * LOCKING:
662 * None. 273 * None.
663 * 274 *
664 * RETURNS: 275 * RETURNS:
665 * Constant C string representing highest speed listed in 276 * Matching XFER_* value, 0 if no match found.
666 * @udma_mask, or the constant C string "<n/a>".
667 */ 277 */
278static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
279{
280 int highbit = fls(xfer_mask) - 1;
281 const struct ata_xfer_ent *ent;
668 282
669static const char *ata_mode_string(unsigned int mask) 283 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
284 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
285 return ent->base + highbit - ent->shift;
286 return 0;
287}
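The table walk above can be checked the same way. XFER_PIO_0 (0x08),
XFER_MW_DMA_0 (0x20) and XFER_UDMA_0 (0x40) are the standard ATA mode
values; the shift/width columns reuse the illustrative layout from the
previous sketch:

	#include <stdio.h>

	struct xfer_ent { int shift, bits; unsigned char base; };

	static const struct xfer_ent tbl[] = {
		{ 0, 5, 0x08 },		/* PIO:   XFER_PIO_0    */
		{ 5, 3, 0x20 },		/* MWDMA: XFER_MW_DMA_0 */
		{ 8, 8, 0x40 },		/* UDMA:  XFER_UDMA_0   */
		{ -1, },
	};

	static int fls32(unsigned int v)	/* stand-in for the kernel fls() */
	{
		return v ? 32 - __builtin_clz(v) : 0;
	}

	static unsigned char mask2mode(unsigned int xfer_mask)
	{
		int highbit = fls32(xfer_mask) - 1;
		const struct xfer_ent *ent;

		for (ent = tbl; ent->shift >= 0; ent++)
			if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
				return ent->base + highbit - ent->shift;
		return 0;
	}

	int main(void)
	{
		/* highest bit of 0x3fff is UDMA5 here -> 0x45 (XFER_UDMA_5) */
		printf("mode = 0x%02x\n", mask2mode(0x3fff));
		return 0;
	}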
288
289/**
290 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
291 * @xfer_mode: XFER_* of interest
292 *
293 * Return matching xfer_mask for @xfer_mode.
294 *
295 * LOCKING:
296 * None.
297 *
298 * RETURNS:
299 * Matching xfer_mask, 0 if no match found.
300 */
301static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
670{ 302{
671 int i; 303 const struct ata_xfer_ent *ent;
672 304
673 for (i = 7; i >= 0; i--) 305 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
674 if (mask & (1 << i)) 306 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
675 goto out; 307 return 1 << (ent->shift + xfer_mode - ent->base);
676 for (i = ATA_SHIFT_MWDMA + 2; i >= ATA_SHIFT_MWDMA; i--) 308 return 0;
677 if (mask & (1 << i)) 309}
678 goto out;
679 for (i = ATA_SHIFT_PIO + 4; i >= ATA_SHIFT_PIO; i--)
680 if (mask & (1 << i))
681 goto out;
682 310
683 return "<n/a>"; 311/**
312 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
313 * @xfer_mode: XFER_* of interest
314 *
315 * Return matching xfer_shift for @xfer_mode.
316 *
317 * LOCKING:
318 * None.
319 *
320 * RETURNS:
321 * Matching xfer_shift, -1 if no match found.
322 */
323static int ata_xfer_mode2shift(unsigned int xfer_mode)
324{
325 const struct ata_xfer_ent *ent;
684 326
685out: 327 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
686 return xfer_mode_str[i]; 328 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
329 return ent->shift;
330 return -1;
331}
332
333/**
334 * ata_mode_string - convert xfer_mask to string
335 * @xfer_mask: mask of bits supported; only highest bit counts.
336 *
337 * Determine string which represents the highest speed
338 * (highest bit in @modemask).
339 *
340 * LOCKING:
341 * None.
342 *
343 * RETURNS:
344 * Constant C string representing highest speed listed in
345 * @mode_mask, or the constant C string "<n/a>".
346 */
347static const char *ata_mode_string(unsigned int xfer_mask)
348{
349 static const char * const xfer_mode_str[] = {
350 "PIO0",
351 "PIO1",
352 "PIO2",
353 "PIO3",
354 "PIO4",
355 "MWDMA0",
356 "MWDMA1",
357 "MWDMA2",
358 "UDMA/16",
359 "UDMA/25",
360 "UDMA/33",
361 "UDMA/44",
362 "UDMA/66",
363 "UDMA/100",
364 "UDMA/133",
365 "UDMA7",
366 };
367 int highbit;
368
369 highbit = fls(xfer_mask) - 1;
370 if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
371 return xfer_mode_str[highbit];
372 return "<n/a>";
687} 373}
688 374
689/** 375/**
@@ -838,6 +524,7 @@ unsigned int ata_dev_classify(const struct ata_taskfile *tf)
838 * ata_dev_try_classify - Parse returned ATA device signature 524 * ata_dev_try_classify - Parse returned ATA device signature
839 * @ap: ATA channel to examine 525 * @ap: ATA channel to examine
840 * @device: Device to examine (starting at zero) 526 * @device: Device to examine (starting at zero)
527 * @r_err: Value of error register on completion
841 * 528 *
842 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs, 529 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
843 * an ATA/ATAPI-defined set of values is placed in the ATA 530 * an ATA/ATAPI-defined set of values is placed in the ATA
@@ -850,11 +537,14 @@ unsigned int ata_dev_classify(const struct ata_taskfile *tf)
850 * 537 *
851 * LOCKING: 538 * LOCKING:
852 * caller. 539 * caller.
540 *
541 * RETURNS:
542 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
853 */ 543 */
854 544
855static u8 ata_dev_try_classify(struct ata_port *ap, unsigned int device) 545static unsigned int
546ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
856{ 547{
857 struct ata_device *dev = &ap->device[device];
858 struct ata_taskfile tf; 548 struct ata_taskfile tf;
859 unsigned int class; 549 unsigned int class;
860 u8 err; 550 u8 err;
@@ -865,8 +555,8 @@ static u8 ata_dev_try_classify(struct ata_port *ap, unsigned int device)
865 555
866 ap->ops->tf_read(ap, &tf); 556 ap->ops->tf_read(ap, &tf);
867 err = tf.feature; 557 err = tf.feature;
868 558 if (r_err)
869 dev->class = ATA_DEV_NONE; 559 *r_err = err;
870 560
871 /* see if device passed diags */ 561 /* see if device passed diags */
872 if (err == 1) 562 if (err == 1)
@@ -874,22 +564,20 @@ static u8 ata_dev_try_classify(struct ata_port *ap, unsigned int device)
874 else if ((device == 0) && (err == 0x81)) 564 else if ((device == 0) && (err == 0x81))
875 /* do nothing */ ; 565 /* do nothing */ ;
876 else 566 else
877 return err; 567 return ATA_DEV_NONE;
878 568
879 /* determine if device if ATA or ATAPI */ 569 /* determine if device is ATA or ATAPI */
880 class = ata_dev_classify(&tf); 570 class = ata_dev_classify(&tf);
571
881 if (class == ATA_DEV_UNKNOWN) 572 if (class == ATA_DEV_UNKNOWN)
882 return err; 573 return ATA_DEV_NONE;
883 if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0)) 574 if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
884 return err; 575 return ATA_DEV_NONE;
885 576 return class;
886 dev->class = class;
887
888 return err;
889} 577}
890 578
891/** 579/**
892 * ata_dev_id_string - Convert IDENTIFY DEVICE page into string 580 * ata_id_string - Convert IDENTIFY DEVICE page into string
893 * @id: IDENTIFY DEVICE results we will examine 581 * @id: IDENTIFY DEVICE results we will examine
894 * @s: string into which data is output 582 * @s: string into which data is output
895 * @ofs: offset into identify device page 583 * @ofs: offset into identify device page
@@ -903,8 +591,8 @@ static u8 ata_dev_try_classify(struct ata_port *ap, unsigned int device)
903 * caller. 591 * caller.
904 */ 592 */
905 593
906void ata_dev_id_string(const u16 *id, unsigned char *s, 594void ata_id_string(const u16 *id, unsigned char *s,
907 unsigned int ofs, unsigned int len) 595 unsigned int ofs, unsigned int len)
908{ 596{
909 unsigned int c; 597 unsigned int c;
910 598
@@ -922,6 +610,49 @@ void ata_dev_id_string(const u16 *id, unsigned char *s,
922 } 610 }
923} 611}
924 612
613/**
614 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
615 * @id: IDENTIFY DEVICE results we will examine
616 * @s: string into which data is output
617 * @ofs: offset into identify device page
 618 * @len: length of string to return. Must be an odd number.
619 *
620 * This function is identical to ata_id_string except that it
621 * trims trailing spaces and terminates the resulting string with
 622 * null. @len must be the actual maximum length (even number) + 1.
623 *
624 * LOCKING:
625 * caller.
626 */
627void ata_id_c_string(const u16 *id, unsigned char *s,
628 unsigned int ofs, unsigned int len)
629{
630 unsigned char *p;
631
632 WARN_ON(!(len & 1));
633
634 ata_id_string(id, s, ofs, len - 1);
635
636 p = s + strnlen(s, len - 1);
637 while (p > s && p[-1] == ' ')
638 p--;
639 *p = '\0';
640}
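A userspace rendition of the pair, showing the byte order inside IDENTIFY
words (high byte first) and the trailing-space trim; slightly simplified
relative to the kernel versions:

	#include <stdio.h>
	#include <string.h>

	static void id_string(const unsigned short *id, unsigned char *s,
			      unsigned int ofs, unsigned int len)
	{
		while (len > 0) {
			*s++ = id[ofs] >> 8;	/* high byte first */
			*s++ = id[ofs] & 0xff;
			ofs++;
			len -= 2;
		}
	}

	static void id_c_string(const unsigned short *id, unsigned char *s,
				unsigned int ofs, unsigned int len)
	{
		unsigned char *p;

		id_string(id, s, ofs, len - 1);
		p = s + strnlen((const char *)s, len - 1);
		while (p > s && p[-1] == ' ')	/* trim trailing blanks */
			p--;
		*p = '\0';
	}

	int main(void)
	{
		/* "HD  " packed as two words; trimming yields "HD" */
		const unsigned short id[2] = { 'H' << 8 | 'D', ' ' << 8 | ' ' };
		unsigned char buf[5];

		id_c_string(id, buf, 0, sizeof(buf));	/* len 5: odd, as required */
		printf("model = \"%s\"\n", (char *)buf);
		return 0;
	}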
641
642static u64 ata_id_n_sectors(const u16 *id)
643{
644 if (ata_id_has_lba(id)) {
645 if (ata_id_has_lba48(id))
646 return ata_id_u64(id, 100);
647 else
648 return ata_id_u32(id, 60);
649 } else {
650 if (ata_id_current_chs_valid(id))
651 return ata_id_u32(id, 57);
652 else
653 return id[1] * id[3] * id[6];
654 }
655}
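The capacity decision above (LBA48 -> 64-bit word 100, LBA -> 32-bit word 60,
otherwise CHS) can be sketched in plain C; the word/bit positions follow the
ATA IDENTIFY layout, and the validity checks are simplified relative to the
kernel's ata_id_* macros:

	#include <stdio.h>

	static unsigned long long id_u32(const unsigned short *id, int w)
	{
		return id[w] | ((unsigned long long)id[w + 1] << 16);
	}

	static unsigned long long id_u64(const unsigned short *id, int w)
	{
		return id_u32(id, w) | (id_u32(id, w + 2) << 32);
	}

	static unsigned long long n_sectors(const unsigned short *id)
	{
		int has_lba   = id[49] & (1 << 9);	/* capabilities: LBA */
		int has_lba48 = id[83] & (1 << 10);	/* command set 2: 48-bit */
		int chs_valid = id[53] & 1;		/* words 54-58 valid */

		if (has_lba)
			return has_lba48 ? id_u64(id, 100) : id_u32(id, 60);
		return chs_valid ? id_u32(id, 57)
				 : (unsigned long long)id[1] * id[3] * id[6];
	}

	int main(void)
	{
		unsigned short id[256] = { 0 };

		id[49] = 1 << 9;	/* LBA-capable, no LBA48 */
		id[60] = 0x1000;	/* 4096 sectors, low word */
		printf("%llu sectors\n", n_sectors(id));
		return 0;
	}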
925 656
926/** 657/**
927 * ata_noop_dev_select - Select device 0/1 on ATA bus 658 * ata_noop_dev_select - Select device 0/1 on ATA bus
@@ -1011,90 +742,169 @@ void ata_dev_select(struct ata_port *ap, unsigned int device,
1011 742
1012/** 743/**
1013 * ata_dump_id - IDENTIFY DEVICE info debugging output 744 * ata_dump_id - IDENTIFY DEVICE info debugging output
1014 * @dev: Device whose IDENTIFY DEVICE page we will dump 745 * @id: IDENTIFY DEVICE page to dump
1015 * 746 *
1016 * Dump selected 16-bit words from a detected device's 747 * Dump selected 16-bit words from the given IDENTIFY DEVICE
1017 * IDENTIFY PAGE page. 748 * page.
1018 * 749 *
1019 * LOCKING: 750 * LOCKING:
1020 * caller. 751 * caller.
1021 */ 752 */
1022 753
1023static inline void ata_dump_id(const struct ata_device *dev) 754static inline void ata_dump_id(const u16 *id)
1024{ 755{
1025 DPRINTK("49==0x%04x " 756 DPRINTK("49==0x%04x "
1026 "53==0x%04x " 757 "53==0x%04x "
1027 "63==0x%04x " 758 "63==0x%04x "
1028 "64==0x%04x " 759 "64==0x%04x "
1029 "75==0x%04x \n", 760 "75==0x%04x \n",
1030 dev->id[49], 761 id[49],
1031 dev->id[53], 762 id[53],
1032 dev->id[63], 763 id[63],
1033 dev->id[64], 764 id[64],
1034 dev->id[75]); 765 id[75]);
1035 DPRINTK("80==0x%04x " 766 DPRINTK("80==0x%04x "
1036 "81==0x%04x " 767 "81==0x%04x "
1037 "82==0x%04x " 768 "82==0x%04x "
1038 "83==0x%04x " 769 "83==0x%04x "
1039 "84==0x%04x \n", 770 "84==0x%04x \n",
1040 dev->id[80], 771 id[80],
1041 dev->id[81], 772 id[81],
1042 dev->id[82], 773 id[82],
1043 dev->id[83], 774 id[83],
1044 dev->id[84]); 775 id[84]);
1045 DPRINTK("88==0x%04x " 776 DPRINTK("88==0x%04x "
1046 "93==0x%04x\n", 777 "93==0x%04x\n",
1047 dev->id[88], 778 id[88],
1048 dev->id[93]); 779 id[93]);
1049} 780}
1050 781
1051/* 782/**
1052 * Compute the PIO modes available for this device. This is not as 783 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1053 * trivial as it seems if we must consider early devices correctly. 784 * @id: IDENTIFY data to compute xfer mask from
785 *
786 * Compute the xfermask for this device. This is not as trivial
787 * as it seems if we must consider early devices correctly.
788 *
 789 * FIXME: pre-IDE drive timing (do we care?).
790 *
791 * LOCKING:
792 * None.
1054 * 793 *
1055 * FIXME: pre IDE drive timing (do we care ?). 794 * RETURNS:
795 * Computed xfermask
1056 */ 796 */
1057 797static unsigned int ata_id_xfermask(const u16 *id)
1058static unsigned int ata_pio_modes(const struct ata_device *adev)
1059{ 798{
1060 u16 modes; 799 unsigned int pio_mask, mwdma_mask, udma_mask;
1061 800
1062 /* Usual case. Word 53 indicates word 64 is valid */ 801 /* Usual case. Word 53 indicates word 64 is valid */
1063 if (adev->id[ATA_ID_FIELD_VALID] & (1 << 1)) { 802 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1064 modes = adev->id[ATA_ID_PIO_MODES] & 0x03; 803 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1065 modes <<= 3; 804 pio_mask <<= 3;
1066 modes |= 0x7; 805 pio_mask |= 0x7;
1067 return modes; 806 } else {
807 /* If word 64 isn't valid then Word 51 high byte holds
808 * the PIO timing number for the maximum. Turn it into
809 * a mask.
810 */
 811 pio_mask = (2 << ((id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF)) - 1;
812
813 /* But wait.. there's more. Design your standards by
814 * committee and you too can get a free iordy field to
 815 * process. However, it's the speeds, not the modes, that
 816 * are supported... Note that drivers using the timing API
 817 * will get this right anyway.
818 */
1068 } 819 }
1069 820
1070 /* If word 64 isn't valid then Word 51 high byte holds the PIO timing 821 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
1071 number for the maximum. Turn it into a mask and return it */ 822 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
1072 modes = (2 << ((adev->id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF)) - 1 ; 823
1073 return modes; 824 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1074 /* But wait.. there's more. Design your standards by committee and
1075 you too can get a free iordy field to process. However its the
1076 speeds not the modes that are supported... Note drivers using the
1077 timing API will get this right anyway */
1078} 825}
1079 826
1080struct ata_exec_internal_arg { 827/**
1081 unsigned int err_mask; 828 * ata_port_queue_task - Queue port_task
1082 struct ata_taskfile *tf; 829 * @ap: The ata_port to queue port_task for
1083 struct completion *waiting; 830 *
1084}; 831 * Schedule @fn(@data) for execution after @delay jiffies using
 832 * port_task. There is one port_task per port, and it is the
 833 * user's (i.e., the low-level driver's) responsibility to make
 834 * sure that only one task is active at any given time.
835 *
 836 * The libata core layer takes care of synchronization between
 837 * port_task and EH; callers of ata_port_queue_task() need not
 838 * handle EH synchronization themselves.
839 *
840 * LOCKING:
841 * Inherited from caller.
842 */
843void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data,
844 unsigned long delay)
845{
846 int rc;
847
848 if (ap->flags & ATA_FLAG_FLUSH_PORT_TASK)
849 return;
1085 850
1086int ata_qc_complete_internal(struct ata_queued_cmd *qc) 851 PREPARE_WORK(&ap->port_task, fn, data);
852
853 if (!delay)
854 rc = queue_work(ata_wq, &ap->port_task);
855 else
856 rc = queue_delayed_work(ata_wq, &ap->port_task, delay);
857
858 /* rc == 0 means that another user is using port task */
859 WARN_ON(rc == 0);
860}
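A hedged sketch of a caller: a hypothetical low-level driver polling routine
that re-queues itself through the helper above. Only ata_port_queue_task()
comes from this patch; the hypo_* names are illustrative:

	static void hypo_pio_poll_task(void *data)
	{
		struct ata_port *ap = data;

		if (ata_chk_status(ap) & ATA_BUSY) {
			/* device still busy: poll again in ~20 ms */
			ata_port_queue_task(ap, hypo_pio_poll_task, ap,
					    msecs_to_jiffies(20));
			return;
		}

		hypo_transfer_data(ap);	/* assumed driver-specific helper */
	}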
861
862/**
863 * ata_port_flush_task - Flush port_task
864 * @ap: The ata_port to flush port_task for
865 *
 866 * After this function completes, port_task is guaranteed not to
867 * be running or scheduled.
868 *
869 * LOCKING:
870 * Kernel thread context (may sleep)
871 */
872void ata_port_flush_task(struct ata_port *ap)
1087{ 873{
1088 struct ata_exec_internal_arg *arg = qc->private_data; 874 unsigned long flags;
1089 struct completion *waiting = arg->waiting;
1090 875
1091 if (!(qc->err_mask & ~AC_ERR_DEV)) 876 DPRINTK("ENTER\n");
1092 qc->ap->ops->tf_read(qc->ap, arg->tf);
1093 arg->err_mask = qc->err_mask;
1094 arg->waiting = NULL;
1095 complete(waiting);
1096 877
1097 return 0; 878 spin_lock_irqsave(&ap->host_set->lock, flags);
879 ap->flags |= ATA_FLAG_FLUSH_PORT_TASK;
880 spin_unlock_irqrestore(&ap->host_set->lock, flags);
881
882 DPRINTK("flush #1\n");
883 flush_workqueue(ata_wq);
884
885 /*
886 * At this point, if a task is running, it's guaranteed to see
887 * the FLUSH flag; thus, it will never queue pio tasks again.
888 * Cancel and flush.
889 */
890 if (!cancel_delayed_work(&ap->port_task)) {
891 DPRINTK("flush #2\n");
892 flush_workqueue(ata_wq);
893 }
894
895 spin_lock_irqsave(&ap->host_set->lock, flags);
896 ap->flags &= ~ATA_FLAG_FLUSH_PORT_TASK;
897 spin_unlock_irqrestore(&ap->host_set->lock, flags);
898
899 DPRINTK("EXIT\n");
900}
901
902void ata_qc_complete_internal(struct ata_queued_cmd *qc)
903{
904 struct completion *waiting = qc->private_data;
905
906 qc->ap->ops->tf_read(qc->ap, &qc->tf);
907 complete(waiting);
1098} 908}
1099 909
1100/** 910/**
@@ -1125,7 +935,7 @@ ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
1125 struct ata_queued_cmd *qc; 935 struct ata_queued_cmd *qc;
1126 DECLARE_COMPLETION(wait); 936 DECLARE_COMPLETION(wait);
1127 unsigned long flags; 937 unsigned long flags;
1128 struct ata_exec_internal_arg arg; 938 unsigned int err_mask;
1129 939
1130 spin_lock_irqsave(&ap->host_set->lock, flags); 940 spin_lock_irqsave(&ap->host_set->lock, flags);
1131 941
@@ -1139,13 +949,12 @@ ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
1139 qc->nsect = buflen / ATA_SECT_SIZE; 949 qc->nsect = buflen / ATA_SECT_SIZE;
1140 } 950 }
1141 951
1142 arg.waiting = &wait; 952 qc->private_data = &wait;
1143 arg.tf = tf;
1144 qc->private_data = &arg;
1145 qc->complete_fn = ata_qc_complete_internal; 953 qc->complete_fn = ata_qc_complete_internal;
1146 954
1147 if (ata_qc_issue(qc)) 955 qc->err_mask = ata_qc_issue(qc);
1148 goto issue_fail; 956 if (qc->err_mask)
957 ata_qc_complete(qc);
1149 958
1150 spin_unlock_irqrestore(&ap->host_set->lock, flags); 959 spin_unlock_irqrestore(&ap->host_set->lock, flags);
1151 960
@@ -1158,8 +967,8 @@ ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
1158 * before the caller cleans up, it will result in a 967 * before the caller cleans up, it will result in a
1159 * spurious interrupt. We can live with that. 968 * spurious interrupt. We can live with that.
1160 */ 969 */
1161 if (arg.waiting) { 970 if (qc->flags & ATA_QCFLAG_ACTIVE) {
1162 qc->err_mask = AC_ERR_OTHER; 971 qc->err_mask = AC_ERR_TIMEOUT;
1163 ata_qc_complete(qc); 972 ata_qc_complete(qc);
1164 printk(KERN_WARNING "ata%u: qc timeout (cmd 0x%x)\n", 973 printk(KERN_WARNING "ata%u: qc timeout (cmd 0x%x)\n",
1165 ap->id, command); 974 ap->id, command);
@@ -1168,12 +977,12 @@ ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
1168 spin_unlock_irqrestore(&ap->host_set->lock, flags); 977 spin_unlock_irqrestore(&ap->host_set->lock, flags);
1169 } 978 }
1170 979
1171 return arg.err_mask; 980 *tf = qc->tf;
981 err_mask = qc->err_mask;
1172 982
1173 issue_fail:
1174 ata_qc_free(qc); 983 ata_qc_free(qc);
1175 spin_unlock_irqrestore(&ap->host_set->lock, flags); 984
1176 return AC_ERR_OTHER; 985 return err_mask;
1177} 986}
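With the reworked completion path, issuing a synchronous internal command
reduces to filling a taskfile and checking the returned error mask. A sketch
mirroring the IDENTIFY issue in ata_dev_read_id() below (hypo_identify()
itself is illustrative):

	static int hypo_identify(struct ata_port *ap, struct ata_device *dev,
				 u16 *id)
	{
		struct ata_taskfile tf;
		unsigned int err_mask;

		ata_tf_init(ap, &tf, dev->devno);
		tf.command = ATA_CMD_ID_ATA;
		tf.protocol = ATA_PROT_PIO;

		err_mask = ata_exec_internal(ap, dev, &tf, DMA_FROM_DEVICE,
					     id, ATA_ID_WORDS * sizeof(id[0]));
		return err_mask ? -EIO : 0;
	}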
1178 987
1179/** 988/**
@@ -1210,73 +1019,78 @@ unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1210} 1019}
1211 1020
1212/** 1021/**
1213 * ata_dev_identify - obtain IDENTIFY x DEVICE page 1022 * ata_dev_read_id - Read ID data from the specified device
1214 * @ap: port on which device we wish to probe resides 1023 * @ap: port on which target device resides
1215 * @device: device bus address, starting at zero 1024 * @dev: target device
1216 * 1025 * @p_class: pointer to class of the target device (may be changed)
1217 * Following bus reset, we issue the IDENTIFY [PACKET] DEVICE 1026 * @post_reset: is this read ID post-reset?
1218 * command, and read back the 512-byte device information page. 1027 * @p_id: read IDENTIFY page (newly allocated)
1219 * The device information page is fed to us via the standard 1028 *
1220 * PIO-IN protocol, but we hand-code it here. (TODO: investigate 1029 * Read ID data from the specified device. ATA_CMD_ID_ATA is
1221 * using standard PIO-IN paths) 1030 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1222 * 1031 * devices. This function also takes care of EDD signature
1223 * After reading the device information page, we use several 1032 * misreporting (to be removed once EDD support is gone) and
1224 * bits of information from it to initialize data structures 1033 * issues ATA_CMD_INIT_DEV_PARAMS for pre-ATA4 drives.
1225 * that will be used during the lifetime of the ata_device.
1226 * Other data from the info page is used to disqualify certain
1227 * older ATA devices we do not wish to support.
1228 * 1034 *
1229 * LOCKING: 1035 * LOCKING:
1230 * Inherited from caller. Some functions called by this function 1036 * Kernel thread context (may sleep)
1231 * obtain the host_set lock. 1037 *
1038 * RETURNS:
1039 * 0 on success, -errno otherwise.
1232 */ 1040 */
1233 1041static int ata_dev_read_id(struct ata_port *ap, struct ata_device *dev,
1234static void ata_dev_identify(struct ata_port *ap, unsigned int device) 1042 unsigned int *p_class, int post_reset, u16 **p_id)
1235{ 1043{
1236 struct ata_device *dev = &ap->device[device]; 1044 unsigned int class = *p_class;
1237 unsigned int major_version;
1238 u16 tmp;
1239 unsigned long xfer_modes;
1240 unsigned int using_edd; 1045 unsigned int using_edd;
1241 struct ata_taskfile tf; 1046 struct ata_taskfile tf;
1242 unsigned int err_mask; 1047 unsigned int err_mask = 0;
1048 u16 *id;
1049 const char *reason;
1243 int rc; 1050 int rc;
1244 1051
1245 if (!ata_dev_present(dev)) { 1052 DPRINTK("ENTER, host %u, dev %u\n", ap->id, dev->devno);
1246 DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n",
1247 ap->id, device);
1248 return;
1249 }
1250 1053
1251 if (ap->flags & (ATA_FLAG_SRST | ATA_FLAG_SATA_RESET)) 1054 if (ap->ops->probe_reset ||
1055 ap->flags & (ATA_FLAG_SRST | ATA_FLAG_SATA_RESET))
1252 using_edd = 0; 1056 using_edd = 0;
1253 else 1057 else
1254 using_edd = 1; 1058 using_edd = 1;
1255 1059
1256 DPRINTK("ENTER, host %u, dev %u\n", ap->id, device); 1060 ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
1257
1258 assert (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ATAPI ||
1259 dev->class == ATA_DEV_NONE);
1260 1061
1261 ata_dev_select(ap, device, 1, 1); /* select device 0/1 */ 1062 id = kmalloc(sizeof(id[0]) * ATA_ID_WORDS, GFP_KERNEL);
1063 if (id == NULL) {
1064 rc = -ENOMEM;
1065 reason = "out of memory";
1066 goto err_out;
1067 }
1262 1068
1263retry: 1069 retry:
1264 ata_tf_init(ap, &tf, device); 1070 ata_tf_init(ap, &tf, dev->devno);
1265 1071
1266 if (dev->class == ATA_DEV_ATA) { 1072 switch (class) {
1073 case ATA_DEV_ATA:
1267 tf.command = ATA_CMD_ID_ATA; 1074 tf.command = ATA_CMD_ID_ATA;
1268 DPRINTK("do ATA identify\n"); 1075 break;
1269 } else { 1076 case ATA_DEV_ATAPI:
1270 tf.command = ATA_CMD_ID_ATAPI; 1077 tf.command = ATA_CMD_ID_ATAPI;
1271 DPRINTK("do ATAPI identify\n"); 1078 break;
1079 default:
1080 rc = -ENODEV;
1081 reason = "unsupported class";
1082 goto err_out;
1272 } 1083 }
1273 1084
1274 tf.protocol = ATA_PROT_PIO; 1085 tf.protocol = ATA_PROT_PIO;
1275 1086
1276 err_mask = ata_exec_internal(ap, dev, &tf, DMA_FROM_DEVICE, 1087 err_mask = ata_exec_internal(ap, dev, &tf, DMA_FROM_DEVICE,
1277 dev->id, sizeof(dev->id)); 1088 id, sizeof(id[0]) * ATA_ID_WORDS);
1278 1089
1279 if (err_mask) { 1090 if (err_mask) {
1091 rc = -EIO;
1092 reason = "I/O error";
1093
1280 if (err_mask & ~AC_ERR_DEV) 1094 if (err_mask & ~AC_ERR_DEV)
1281 goto err_out; 1095 goto err_out;
1282 1096
@@ -1291,56 +1105,32 @@ retry:
1291 * ATA software reset (SRST, the default) does not appear 1105 * ATA software reset (SRST, the default) does not appear
1292 * to have this problem. 1106 * to have this problem.
1293 */ 1107 */
1294 if ((using_edd) && (dev->class == ATA_DEV_ATA)) { 1108 if ((using_edd) && (class == ATA_DEV_ATA)) {
1295 u8 err = tf.feature; 1109 u8 err = tf.feature;
1296 if (err & ATA_ABORTED) { 1110 if (err & ATA_ABORTED) {
1297 dev->class = ATA_DEV_ATAPI; 1111 class = ATA_DEV_ATAPI;
1298 goto retry; 1112 goto retry;
1299 } 1113 }
1300 } 1114 }
1301 goto err_out; 1115 goto err_out;
1302 } 1116 }
1303 1117
1304 swap_buf_le16(dev->id, ATA_ID_WORDS); 1118 swap_buf_le16(id, ATA_ID_WORDS);
1305 1119
1306 /* print device capabilities */ 1120 /* print device capabilities */
1307 printk(KERN_DEBUG "ata%u: dev %u cfg " 1121 printk(KERN_DEBUG "ata%u: dev %u cfg "
1308 "49:%04x 82:%04x 83:%04x 84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n", 1122 "49:%04x 82:%04x 83:%04x 84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n",
1309 ap->id, device, dev->id[49], 1123 ap->id, dev->devno,
1310 dev->id[82], dev->id[83], dev->id[84], 1124 id[49], id[82], id[83], id[84], id[85], id[86], id[87], id[88]);
1311 dev->id[85], dev->id[86], dev->id[87],
1312 dev->id[88]);
1313 1125
1314 /* 1126 /* sanity check */
1315 * common ATA, ATAPI feature tests 1127 if ((class == ATA_DEV_ATA) != ata_id_is_ata(id)) {
1316 */ 1128 rc = -EINVAL;
1317 1129 reason = "device reports illegal type";
1318 /* we require DMA support (bits 8 of word 49) */ 1130 goto err_out;
1319 if (!ata_id_has_dma(dev->id)) {
1320 printk(KERN_DEBUG "ata%u: no dma\n", ap->id);
1321 goto err_out_nosup;
1322 } 1131 }
1323 1132
1324 /* quick-n-dirty find max transfer mode; for printk only */ 1133 if (post_reset && class == ATA_DEV_ATA) {
1325 xfer_modes = dev->id[ATA_ID_UDMA_MODES];
1326 if (!xfer_modes)
1327 xfer_modes = (dev->id[ATA_ID_MWDMA_MODES]) << ATA_SHIFT_MWDMA;
1328 if (!xfer_modes)
1329 xfer_modes = ata_pio_modes(dev);
1330
1331 ata_dump_id(dev);
1332
1333 /* ATA-specific feature tests */
1334 if (dev->class == ATA_DEV_ATA) {
1335 if (!ata_id_is_ata(dev->id)) /* sanity check */
1336 goto err_out_nosup;
1337
1338 /* get major version */
1339 tmp = dev->id[ATA_ID_MAJOR_VER];
1340 for (major_version = 14; major_version >= 1; major_version--)
1341 if (tmp & (1 << major_version))
1342 break;
1343
1344 /* 1134 /*
1345 * The exact sequence expected by certain pre-ATA4 drives is: 1135 * The exact sequence expected by certain pre-ATA4 drives is:
1346 * SRST RESET 1136 * SRST RESET
@@ -1349,122 +1139,187 @@ retry:
1349 * anything else.. 1139 * anything else..
1350 * Some drives were very specific about that exact sequence. 1140 * Some drives were very specific about that exact sequence.
1351 */ 1141 */
1352 if (major_version < 4 || (!ata_id_has_lba(dev->id))) { 1142 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
1353 ata_dev_init_params(ap, dev); 1143 err_mask = ata_dev_init_params(ap, dev);
1144 if (err_mask) {
1145 rc = -EIO;
1146 reason = "INIT_DEV_PARAMS failed";
1147 goto err_out;
1148 }
1354 1149
1355 /* current CHS translation info (id[53-58]) might be 1150 /* current CHS translation info (id[53-58]) might be
1356 * changed. reread the identify device info. 1151 * changed. reread the identify device info.
1357 */ 1152 */
1358 ata_dev_reread_id(ap, dev); 1153 post_reset = 0;
1154 goto retry;
1359 } 1155 }
1156 }
1157
1158 *p_class = class;
1159 *p_id = id;
1160 return 0;
1161
1162 err_out:
1163 printk(KERN_WARNING "ata%u: dev %u failed to IDENTIFY (%s)\n",
1164 ap->id, dev->devno, reason);
1165 kfree(id);
1166 return rc;
1167}
1168
1169static inline u8 ata_dev_knobble(const struct ata_port *ap,
1170 struct ata_device *dev)
1171{
1172 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
1173}
1174
1175/**
1176 * ata_dev_configure - Configure the specified ATA/ATAPI device
1177 * @ap: Port on which target device resides
1178 * @dev: Target device to configure
1179 * @print_info: Enable device info printout
1180 *
1181 * Configure @dev according to @dev->id. Generic and low-level
1182 * driver specific fixups are also applied.
1183 *
1184 * LOCKING:
1185 * Kernel thread context (may sleep)
1186 *
1187 * RETURNS:
1188 * 0 on success, -errno otherwise
1189 */
1190static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
1191 int print_info)
1192{
1193 unsigned int xfer_mask;
1194 int i, rc;
1195
1196 if (!ata_dev_present(dev)) {
1197 DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n",
1198 ap->id, dev->devno);
1199 return 0;
1200 }
1201
1202 DPRINTK("ENTER, host %u, dev %u\n", ap->id, dev->devno);
1203
1204 /* initialize to-be-configured parameters */
1205 dev->flags = 0;
1206 dev->max_sectors = 0;
1207 dev->cdb_len = 0;
1208 dev->n_sectors = 0;
1209 dev->cylinders = 0;
1210 dev->heads = 0;
1211 dev->sectors = 0;
1212
1213 /*
1214 * common ATA, ATAPI feature tests
1215 */
1216
1217 /* we require DMA support (bits 8 of word 49) */
1218 if (!ata_id_has_dma(dev->id)) {
1219 printk(KERN_DEBUG "ata%u: no dma\n", ap->id);
1220 rc = -EINVAL;
1221 goto err_out_nosup;
1222 }
1223
1224 /* find max transfer mode; for printk only */
1225 xfer_mask = ata_id_xfermask(dev->id);
1226
1227 ata_dump_id(dev->id);
1228
1229 /* ATA-specific feature tests */
1230 if (dev->class == ATA_DEV_ATA) {
1231 dev->n_sectors = ata_id_n_sectors(dev->id);
1360 1232
1361 if (ata_id_has_lba(dev->id)) { 1233 if (ata_id_has_lba(dev->id)) {
1362 dev->flags |= ATA_DFLAG_LBA; 1234 const char *lba_desc;
1363 1235
1236 lba_desc = "LBA";
1237 dev->flags |= ATA_DFLAG_LBA;
1364 if (ata_id_has_lba48(dev->id)) { 1238 if (ata_id_has_lba48(dev->id)) {
1365 dev->flags |= ATA_DFLAG_LBA48; 1239 dev->flags |= ATA_DFLAG_LBA48;
1366 dev->n_sectors = ata_id_u64(dev->id, 100); 1240 lba_desc = "LBA48";
1367 } else {
1368 dev->n_sectors = ata_id_u32(dev->id, 60);
1369 } 1241 }
1370 1242
1371 /* print device info to dmesg */ 1243 /* print device info to dmesg */
1372 printk(KERN_INFO "ata%u: dev %u ATA-%d, max %s, %Lu sectors:%s\n", 1244 if (print_info)
1373 ap->id, device, 1245 printk(KERN_INFO "ata%u: dev %u ATA-%d, "
1374 major_version, 1246 "max %s, %Lu sectors: %s\n",
1375 ata_mode_string(xfer_modes), 1247 ap->id, dev->devno,
1376 (unsigned long long)dev->n_sectors, 1248 ata_id_major_version(dev->id),
1377 dev->flags & ATA_DFLAG_LBA48 ? " LBA48" : " LBA"); 1249 ata_mode_string(xfer_mask),
1378 } else { 1250 (unsigned long long)dev->n_sectors,
1251 lba_desc);
1252 } else {
1379 /* CHS */ 1253 /* CHS */
1380 1254
1381 /* Default translation */ 1255 /* Default translation */
1382 dev->cylinders = dev->id[1]; 1256 dev->cylinders = dev->id[1];
1383 dev->heads = dev->id[3]; 1257 dev->heads = dev->id[3];
1384 dev->sectors = dev->id[6]; 1258 dev->sectors = dev->id[6];
1385 dev->n_sectors = dev->cylinders * dev->heads * dev->sectors;
1386 1259
1387 if (ata_id_current_chs_valid(dev->id)) { 1260 if (ata_id_current_chs_valid(dev->id)) {
1388 /* Current CHS translation is valid. */ 1261 /* Current CHS translation is valid. */
1389 dev->cylinders = dev->id[54]; 1262 dev->cylinders = dev->id[54];
1390 dev->heads = dev->id[55]; 1263 dev->heads = dev->id[55];
1391 dev->sectors = dev->id[56]; 1264 dev->sectors = dev->id[56];
1392
1393 dev->n_sectors = ata_id_u32(dev->id, 57);
1394 } 1265 }
1395 1266
1396 /* print device info to dmesg */ 1267 /* print device info to dmesg */
1397 printk(KERN_INFO "ata%u: dev %u ATA-%d, max %s, %Lu sectors: CHS %d/%d/%d\n", 1268 if (print_info)
1398 ap->id, device, 1269 printk(KERN_INFO "ata%u: dev %u ATA-%d, "
1399 major_version, 1270 "max %s, %Lu sectors: CHS %u/%u/%u\n",
1400 ata_mode_string(xfer_modes), 1271 ap->id, dev->devno,
1401 (unsigned long long)dev->n_sectors, 1272 ata_id_major_version(dev->id),
1402 (int)dev->cylinders, (int)dev->heads, (int)dev->sectors); 1273 ata_mode_string(xfer_mask),
1403 1274 (unsigned long long)dev->n_sectors,
1275 dev->cylinders, dev->heads, dev->sectors);
1404 } 1276 }
1405 1277
1406 ap->host->max_cmd_len = 16; 1278 dev->cdb_len = 16;
1407 } 1279 }
1408 1280
1409 /* ATAPI-specific feature tests */ 1281 /* ATAPI-specific feature tests */
1410 else if (dev->class == ATA_DEV_ATAPI) { 1282 else if (dev->class == ATA_DEV_ATAPI) {
1411 if (ata_id_is_ata(dev->id)) /* sanity check */
1412 goto err_out_nosup;
1413
1414 rc = atapi_cdb_len(dev->id); 1283 rc = atapi_cdb_len(dev->id);
1415 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) { 1284 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
1416 printk(KERN_WARNING "ata%u: unsupported CDB len\n", ap->id); 1285 printk(KERN_WARNING "ata%u: unsupported CDB len\n", ap->id);
1286 rc = -EINVAL;
1417 goto err_out_nosup; 1287 goto err_out_nosup;
1418 } 1288 }
1419 ap->cdb_len = (unsigned int) rc; 1289 dev->cdb_len = (unsigned int) rc;
1420 ap->host->max_cmd_len = (unsigned char) ap->cdb_len;
1421 1290
1422 /* print device info to dmesg */ 1291 /* print device info to dmesg */
1423 printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n", 1292 if (print_info)
1424 ap->id, device, 1293 printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n",
1425 ata_mode_string(xfer_modes)); 1294 ap->id, dev->devno, ata_mode_string(xfer_mask));
1426 } 1295 }
1427 1296
1428 DPRINTK("EXIT, drv_stat = 0x%x\n", ata_chk_status(ap)); 1297 ap->host->max_cmd_len = 0;
1429 return; 1298 for (i = 0; i < ATA_MAX_DEVICES; i++)
1430 1299 ap->host->max_cmd_len = max_t(unsigned int,
1431err_out_nosup: 1300 ap->host->max_cmd_len,
1432 printk(KERN_WARNING "ata%u: dev %u not supported, ignoring\n", 1301 ap->device[i].cdb_len);
1433 ap->id, device);
1434err_out:
1435 dev->class++; /* converts ATA_DEV_xxx into ATA_DEV_xxx_UNSUP */
1436 DPRINTK("EXIT, err\n");
1437}
1438
1439
1440static inline u8 ata_dev_knobble(const struct ata_port *ap)
1441{
1442 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(ap->device->id)));
1443}
1444
1445/**
1446 * ata_dev_config - Run device specific handlers and check for
1447 * SATA->PATA bridges
1448 * @ap: Bus
1449 * @i: Device
1450 *
1451 * LOCKING:
1452 */
1453 1302
1454void ata_dev_config(struct ata_port *ap, unsigned int i)
1455{
1456 /* limit bridge transfers to udma5, 200 sectors */ 1303 /* limit bridge transfers to udma5, 200 sectors */
1457 if (ata_dev_knobble(ap)) { 1304 if (ata_dev_knobble(ap, dev)) {
1458 printk(KERN_INFO "ata%u(%u): applying bridge limits\n", 1305 if (print_info)
1459 ap->id, ap->device->devno); 1306 printk(KERN_INFO "ata%u(%u): applying bridge limits\n",
1307 ap->id, dev->devno);
1460 ap->udma_mask &= ATA_UDMA5; 1308 ap->udma_mask &= ATA_UDMA5;
1461 ap->host->max_sectors = ATA_MAX_SECTORS; 1309 dev->max_sectors = ATA_MAX_SECTORS;
1462 ap->host->hostt->max_sectors = ATA_MAX_SECTORS;
1463 ap->device[i].flags |= ATA_DFLAG_LOCK_SECTORS;
1464 } 1310 }
1465 1311
1466 if (ap->ops->dev_config) 1312 if (ap->ops->dev_config)
1467 ap->ops->dev_config(ap, &ap->device[i]); 1313 ap->ops->dev_config(ap, dev);
1314
1315 DPRINTK("EXIT, drv_stat = 0x%x\n", ata_chk_status(ap));
1316 return 0;
1317
1318err_out_nosup:
1319 printk(KERN_WARNING "ata%u: dev %u not supported, ignoring\n",
1320 ap->id, dev->devno);
1321 DPRINTK("EXIT, err\n");
1322 return rc;
1468} 1323}
1469 1324
1470/** 1325/**
@@ -1484,21 +1339,61 @@ void ata_dev_config(struct ata_port *ap, unsigned int i)
1484 1339
1485static int ata_bus_probe(struct ata_port *ap) 1340static int ata_bus_probe(struct ata_port *ap)
1486{ 1341{
1487 unsigned int i, found = 0; 1342 unsigned int classes[ATA_MAX_DEVICES];
1343 unsigned int i, rc, found = 0;
1488 1344
1489 ap->ops->phy_reset(ap); 1345 ata_port_probe(ap);
1490 if (ap->flags & ATA_FLAG_PORT_DISABLED) 1346
1491 goto err_out; 1347 /* reset */
1348 if (ap->ops->probe_reset) {
1349 for (i = 0; i < ATA_MAX_DEVICES; i++)
1350 classes[i] = ATA_DEV_UNKNOWN;
1492 1351
1352 rc = ap->ops->probe_reset(ap, classes);
1353 if (rc) {
1354 printk("ata%u: reset failed (errno=%d)\n", ap->id, rc);
1355 return rc;
1356 }
1357
1358 for (i = 0; i < ATA_MAX_DEVICES; i++)
1359 if (classes[i] == ATA_DEV_UNKNOWN)
1360 classes[i] = ATA_DEV_NONE;
1361 } else {
1362 ap->ops->phy_reset(ap);
1363
1364 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1365 if (!(ap->flags & ATA_FLAG_PORT_DISABLED))
1366 classes[i] = ap->device[i].class;
1367 else
1368 ap->device[i].class = ATA_DEV_UNKNOWN;
1369 }
1370 ata_port_probe(ap);
1371 }
1372
1373 /* read IDENTIFY page and configure devices */
1493 for (i = 0; i < ATA_MAX_DEVICES; i++) { 1374 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1494 ata_dev_identify(ap, i); 1375 struct ata_device *dev = &ap->device[i];
1495 if (ata_dev_present(&ap->device[i])) { 1376
1496 found = 1; 1377 dev->class = classes[i];
1497 ata_dev_config(ap,i); 1378
1379 if (!ata_dev_present(dev))
1380 continue;
1381
1382 WARN_ON(dev->id != NULL);
1383 if (ata_dev_read_id(ap, dev, &dev->class, 1, &dev->id)) {
1384 dev->class = ATA_DEV_NONE;
1385 continue;
1498 } 1386 }
1387
1388 if (ata_dev_configure(ap, dev, 1)) {
1389 dev->class++; /* disable device */
1390 continue;
1391 }
1392
1393 found = 1;
1499 } 1394 }
1500 1395
1501 if ((!found) || (ap->flags & ATA_FLAG_PORT_DISABLED)) 1396 if (!found)
1502 goto err_out_disable; 1397 goto err_out_disable;
1503 1398
1504 ata_set_mode(ap); 1399 ata_set_mode(ap);
@@ -1509,7 +1404,6 @@ static int ata_bus_probe(struct ata_port *ap)
1509 1404
1510err_out_disable: 1405err_out_disable:
1511 ap->ops->port_disable(ap); 1406 ap->ops->port_disable(ap);
1512err_out:
1513 return -1; 1407 return -1;
1514} 1408}
1515 1409
@@ -1530,6 +1424,41 @@ void ata_port_probe(struct ata_port *ap)
1530} 1424}
1531 1425
1532/** 1426/**
1427 * sata_print_link_status - Print SATA link status
1428 * @ap: SATA port to printk link status about
1429 *
1430 * This function prints link speed and status of a SATA link.
1431 *
1432 * LOCKING:
1433 * None.
1434 */
1435static void sata_print_link_status(struct ata_port *ap)
1436{
1437 u32 sstatus, tmp;
1438 const char *speed;
1439
1440 if (!ap->ops->scr_read)
1441 return;
1442
1443 sstatus = scr_read(ap, SCR_STATUS);
1444
1445 if (sata_dev_present(ap)) {
1446 tmp = (sstatus >> 4) & 0xf;
1447 if (tmp & (1 << 0))
1448 speed = "1.5";
1449 else if (tmp & (1 << 1))
1450 speed = "3.0";
1451 else
1452 speed = "<unknown>";
1453 printk(KERN_INFO "ata%u: SATA link up %s Gbps (SStatus %X)\n",
1454 ap->id, speed, sstatus);
1455 } else {
1456 printk(KERN_INFO "ata%u: SATA link down (SStatus %X)\n",
1457 ap->id, sstatus);
1458 }
1459}
1460
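For reference, the SPD nibble tested above occupies bits 7:4 of SStatus; a worked decode with an illustrative register value:

/* illustrative only: SStatus = 0x123
 *   DET (bits 3:0)  = 0x3 -> device present, phy communication established
 *   SPD (bits 7:4)  = 0x2 -> bit 1 set, printed as "3.0" Gbps above
 *   IPM (bits 11:8) = 0x1 -> interface active
 */
u32 sstatus = 0x123;
u32 spd = (sstatus >> 4) & 0xf;	/* 0x2 */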
1461/**
1533 * __sata_phy_reset - Wake/reset a low-level SATA PHY 1462 * __sata_phy_reset - Wake/reset a low-level SATA PHY
1534 * @ap: SATA port associated with target SATA PHY. 1463 * @ap: SATA port associated with target SATA PHY.
1535 * 1464 *
@@ -1563,27 +1492,14 @@ void __sata_phy_reset(struct ata_port *ap)
1563 break; 1492 break;
1564 } while (time_before(jiffies, timeout)); 1493 } while (time_before(jiffies, timeout));
1565 1494
1566 /* TODO: phy layer with polling, timeouts, etc. */ 1495 /* print link status */
1567 sstatus = scr_read(ap, SCR_STATUS); 1496 sata_print_link_status(ap);
1568 if (sata_dev_present(ap)) {
1569 const char *speed;
1570 u32 tmp;
1571 1497
1572 tmp = (sstatus >> 4) & 0xf; 1498 /* TODO: phy layer with polling, timeouts, etc. */
1573 if (tmp & (1 << 0)) 1499 if (sata_dev_present(ap))
1574 speed = "1.5";
1575 else if (tmp & (1 << 1))
1576 speed = "3.0";
1577 else
1578 speed = "<unknown>";
1579 printk(KERN_INFO "ata%u: SATA link up %s Gbps (SStatus %X)\n",
1580 ap->id, speed, sstatus);
1581 ata_port_probe(ap); 1500 ata_port_probe(ap);
1582 } else { 1501 else
1583 printk(KERN_INFO "ata%u: SATA link down (SStatus %X)\n",
1584 ap->id, sstatus);
1585 ata_port_disable(ap); 1502 ata_port_disable(ap);
1586 }
1587 1503
1588 if (ap->flags & ATA_FLAG_PORT_DISABLED) 1504 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1589 return; 1505 return;
@@ -1756,9 +1672,9 @@ int ata_timing_compute(struct ata_device *adev, unsigned short speed,
1756 ata_timing_quantize(t, t, T, UT); 1672 ata_timing_quantize(t, t, T, UT);
1757 1673
1758 /* 1674 /*
1759 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY, S.M.A.R.T 1675 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
1760 * and some other commands. We have to ensure that the DMA cycle timing is 1676 * S.M.A.R.T. and some other commands. We have to ensure that the
1761 * slower/equal than the fastest PIO timing. 1677 * DMA cycle timing is slower/equal than the fastest PIO timing.
1762 */ 1678 */
1763 1679
1764 if (speed > XFER_PIO_4) { 1680 if (speed > XFER_PIO_4) {
@@ -1767,7 +1683,7 @@ int ata_timing_compute(struct ata_device *adev, unsigned short speed,
1767 } 1683 }
1768 1684
1769 /* 1685 /*
1770 * Lenghten active & recovery time so that cycle time is correct. 1686 * Lengthen active & recovery time so that cycle time is correct.
1771 */ 1687 */
1772 1688
1773 if (t->act8b + t->rec8b < t->cyc8b) { 1689 if (t->act8b + t->rec8b < t->cyc8b) {
@@ -1783,31 +1699,8 @@ int ata_timing_compute(struct ata_device *adev, unsigned short speed,
1783 return 0; 1699 return 0;
1784} 1700}
1785 1701
1786static const struct {
1787 unsigned int shift;
1788 u8 base;
1789} xfer_mode_classes[] = {
1790 { ATA_SHIFT_UDMA, XFER_UDMA_0 },
1791 { ATA_SHIFT_MWDMA, XFER_MW_DMA_0 },
1792 { ATA_SHIFT_PIO, XFER_PIO_0 },
1793};
1794
1795static u8 base_from_shift(unsigned int shift)
1796{
1797 int i;
1798
1799 for (i = 0; i < ARRAY_SIZE(xfer_mode_classes); i++)
1800 if (xfer_mode_classes[i].shift == shift)
1801 return xfer_mode_classes[i].base;
1802
1803 return 0xff;
1804}
1805
1806static void ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev) 1702static void ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev)
1807{ 1703{
1808 int ofs, idx;
1809 u8 base;
1810
1811 if (!ata_dev_present(dev) || (ap->flags & ATA_FLAG_PORT_DISABLED)) 1704 if (!ata_dev_present(dev) || (ap->flags & ATA_FLAG_PORT_DISABLED))
1812 return; 1705 return;
1813 1706
@@ -1816,65 +1709,58 @@ static void ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev)
1816 1709
1817 ata_dev_set_xfermode(ap, dev); 1710 ata_dev_set_xfermode(ap, dev);
1818 1711
1819 base = base_from_shift(dev->xfer_shift); 1712 if (ata_dev_revalidate(ap, dev, 0)) {
1820 ofs = dev->xfer_mode - base; 1713 printk(KERN_ERR "ata%u: failed to revalidate after set "
1821 idx = ofs + dev->xfer_shift; 1714 "xfermode, disabled\n", ap->id);
1822 WARN_ON(idx >= ARRAY_SIZE(xfer_mode_str)); 1715 ata_port_disable(ap);
1716 }
1823 1717
1824 DPRINTK("idx=%d xfer_shift=%u, xfer_mode=0x%x, base=0x%x, offset=%d\n", 1718 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
1825 idx, dev->xfer_shift, (int)dev->xfer_mode, (int)base, ofs); 1719 dev->xfer_shift, (int)dev->xfer_mode);
1826 1720
1827 printk(KERN_INFO "ata%u: dev %u configured for %s\n", 1721 printk(KERN_INFO "ata%u: dev %u configured for %s\n",
1828 ap->id, dev->devno, xfer_mode_str[idx]); 1722 ap->id, dev->devno,
1723 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
1829} 1724}
1830 1725
1831static int ata_host_set_pio(struct ata_port *ap) 1726static int ata_host_set_pio(struct ata_port *ap)
1832{ 1727{
1833 unsigned int mask; 1728 int i;
1834 int x, i;
1835 u8 base, xfer_mode;
1836
1837 mask = ata_get_mode_mask(ap, ATA_SHIFT_PIO);
1838 x = fgb(mask);
1839 if (x < 0) {
1840 printk(KERN_WARNING "ata%u: no PIO support\n", ap->id);
1841 return -1;
1842 }
1843
1844 base = base_from_shift(ATA_SHIFT_PIO);
1845 xfer_mode = base + x;
1846
1847 DPRINTK("base 0x%x xfer_mode 0x%x mask 0x%x x %d\n",
1848 (int)base, (int)xfer_mode, mask, x);
1849 1729
1850 for (i = 0; i < ATA_MAX_DEVICES; i++) { 1730 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1851 struct ata_device *dev = &ap->device[i]; 1731 struct ata_device *dev = &ap->device[i];
1852 if (ata_dev_present(dev)) { 1732
1853 dev->pio_mode = xfer_mode; 1733 if (!ata_dev_present(dev))
1854 dev->xfer_mode = xfer_mode; 1734 continue;
1855 dev->xfer_shift = ATA_SHIFT_PIO; 1735
1856 if (ap->ops->set_piomode) 1736 if (!dev->pio_mode) {
1857 ap->ops->set_piomode(ap, dev); 1737 printk(KERN_WARNING "ata%u: no PIO support\n", ap->id);
1738 return -1;
1858 } 1739 }
1740
1741 dev->xfer_mode = dev->pio_mode;
1742 dev->xfer_shift = ATA_SHIFT_PIO;
1743 if (ap->ops->set_piomode)
1744 ap->ops->set_piomode(ap, dev);
1859 } 1745 }
1860 1746
1861 return 0; 1747 return 0;
1862} 1748}
1863 1749
1864static void ata_host_set_dma(struct ata_port *ap, u8 xfer_mode, 1750static void ata_host_set_dma(struct ata_port *ap)
1865 unsigned int xfer_shift)
1866{ 1751{
1867 int i; 1752 int i;
1868 1753
1869 for (i = 0; i < ATA_MAX_DEVICES; i++) { 1754 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1870 struct ata_device *dev = &ap->device[i]; 1755 struct ata_device *dev = &ap->device[i];
1871 if (ata_dev_present(dev)) { 1756
1872 dev->dma_mode = xfer_mode; 1757 if (!ata_dev_present(dev) || !dev->dma_mode)
1873 dev->xfer_mode = xfer_mode; 1758 continue;
1874 dev->xfer_shift = xfer_shift; 1759
1875 if (ap->ops->set_dmamode) 1760 dev->xfer_mode = dev->dma_mode;
1876 ap->ops->set_dmamode(ap, dev); 1761 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
1877 } 1762 if (ap->ops->set_dmamode)
1763 ap->ops->set_dmamode(ap, dev);
1878 } 1764 }
1879} 1765}
1880 1766
@@ -1886,32 +1772,37 @@ static void ata_host_set_dma(struct ata_port *ap, u8 xfer_mode,
1886 * 1772 *
1887 * LOCKING: 1773 * LOCKING:
1888 * PCI/etc. bus probe sem. 1774 * PCI/etc. bus probe sem.
1889 *
1890 */ 1775 */
1891static void ata_set_mode(struct ata_port *ap) 1776static void ata_set_mode(struct ata_port *ap)
1892{ 1777{
1893 unsigned int xfer_shift; 1778 int i, rc;
1894 u8 xfer_mode;
1895 int rc;
1896 1779
1897 /* step 1: always set host PIO timings */ 1780 /* step 1: calculate xfer_mask */
1898 rc = ata_host_set_pio(ap); 1781 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1899 if (rc) 1782 struct ata_device *dev = &ap->device[i];
1900 goto err_out; 1783 unsigned int xfer_mask;
1784
1785 if (!ata_dev_present(dev))
1786 continue;
1901 1787
1902 /* step 2: choose the best data xfer mode */ 1788 xfer_mask = ata_dev_xfermask(ap, dev);
1903 xfer_mode = xfer_shift = 0; 1789
1904 rc = ata_choose_xfer_mode(ap, &xfer_mode, &xfer_shift); 1790 dev->pio_mode = ata_xfer_mask2mode(xfer_mask & ATA_MASK_PIO);
1791 dev->dma_mode = ata_xfer_mask2mode(xfer_mask & (ATA_MASK_MWDMA |
1792 ATA_MASK_UDMA));
1793 }
1794
1795 /* step 2: always set host PIO timings */
1796 rc = ata_host_set_pio(ap);
1905 if (rc) 1797 if (rc)
1906 goto err_out; 1798 goto err_out;
1907 1799
1908 /* step 3: if that xfer mode isn't PIO, set host DMA timings */ 1800 /* step 3: set host DMA timings */
1909 if (xfer_shift != ATA_SHIFT_PIO) 1801 ata_host_set_dma(ap);
1910 ata_host_set_dma(ap, xfer_mode, xfer_shift);
1911 1802
1912 /* step 4: update devices' xfer mode */ 1803 /* step 4: update devices' xfer mode */
1913 ata_dev_set_mode(ap, &ap->device[0]); 1804 for (i = 0; i < ATA_MAX_DEVICES; i++)
1914 ata_dev_set_mode(ap, &ap->device[1]); 1805 ata_dev_set_mode(ap, &ap->device[i]);
1915 1806
1916 if (ap->flags & ATA_FLAG_PORT_DISABLED) 1807 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1917 return; 1808 return;
@@ -1926,6 +1817,26 @@ err_out:
1926} 1817}
1927 1818
1928/** 1819/**
1820 * ata_tf_to_host - issue ATA taskfile to host controller
1821 * @ap: port to which command is being issued
1822 * @tf: ATA taskfile register set
1823 *
1824 * Issues ATA taskfile register set to ATA host controller,
1825 * with proper synchronization with interrupt handler and
1826 * other threads.
1827 *
1828 * LOCKING:
1829 * spin_lock_irqsave(host_set lock)
1830 */
1831
1832static inline void ata_tf_to_host(struct ata_port *ap,
1833 const struct ata_taskfile *tf)
1834{
1835 ap->ops->tf_load(ap, tf);
1836 ap->ops->exec_command(ap, tf);
1837}
1838
1839/**
1929 * ata_busy_sleep - sleep until BSY clears, or timeout 1840 * ata_busy_sleep - sleep until BSY clears, or timeout
1930 * @ap: port containing status register to be polled 1841 * @ap: port containing status register to be polled
1931 * @tmout_pat: impatience timeout 1842 * @tmout_pat: impatience timeout
@@ -1935,12 +1846,10 @@ err_out:
1935 * or a timeout occurs. 1846 * or a timeout occurs.
1936 * 1847 *
1937 * LOCKING: None. 1848 * LOCKING: None.
1938 *
1939 */ 1849 */
1940 1850
1941static unsigned int ata_busy_sleep (struct ata_port *ap, 1851unsigned int ata_busy_sleep (struct ata_port *ap,
1942 unsigned long tmout_pat, 1852 unsigned long tmout_pat, unsigned long tmout)
1943 unsigned long tmout)
1944{ 1853{
1945 unsigned long timer_start, timeout; 1854 unsigned long timer_start, timeout;
1946 u8 status; 1855 u8 status;
@@ -2159,9 +2068,9 @@ void ata_bus_reset(struct ata_port *ap)
2159 /* 2068 /*
2160 * determine by signature whether we have ATA or ATAPI devices 2069 * determine by signature whether we have ATA or ATAPI devices
2161 */ 2070 */
2162 err = ata_dev_try_classify(ap, 0); 2071 ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
2163 if ((slave_possible) && (err != 0x81)) 2072 if ((slave_possible) && (err != 0x81))
2164 ata_dev_try_classify(ap, 1); 2073 ap->device[1].class = ata_dev_try_classify(ap, 1, &err);
2165 2074
2166 /* re-enable interrupts */ 2075 /* re-enable interrupts */
2167 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */ 2076 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
@@ -2196,11 +2105,446 @@ err_out:
2196 DPRINTK("EXIT\n"); 2105 DPRINTK("EXIT\n");
2197} 2106}
2198 2107
2199static void ata_pr_blacklisted(const struct ata_port *ap, 2108static int sata_phy_resume(struct ata_port *ap)
2200 const struct ata_device *dev)
2201{ 2109{
2202 printk(KERN_WARNING "ata%u: dev %u is on DMA blacklist, disabling DMA\n", 2110 unsigned long timeout = jiffies + (HZ * 5);
2203 ap->id, dev->devno); 2111 u32 sstatus;
2112
2113 scr_write_flush(ap, SCR_CONTROL, 0x300);
2114
2115 /* Wait for phy to become ready, if necessary. */
2116 do {
2117 msleep(200);
2118 sstatus = scr_read(ap, SCR_STATUS);
2119 if ((sstatus & 0xf) != 1)
2120 return 0;
2121 } while (time_before(jiffies, timeout));
2122
2123 return -1;
2124}
2125
2126/**
2127 * ata_std_probeinit - initialize probing
2128 * @ap: port to be probed
2129 *
2130 * @ap is about to be probed. Initialize it. This function is
2131 * to be used as standard callback for ata_drive_probe_reset().
2132 *
2133 * NOTE!!! Do not use this function as probeinit if a low level
2134 * driver implements only hardreset. Just pass NULL as probeinit
2135 * in that case. Using this function is probably okay but doing
 2136 * so makes the reset sequence different from the original
2137 * ->phy_reset implementation and Jeff nervous. :-P
2138 */
 2139void ata_std_probeinit(struct ata_port *ap)
2140{
2141 if (ap->flags & ATA_FLAG_SATA && ap->ops->scr_read) {
2142 sata_phy_resume(ap);
2143 if (sata_dev_present(ap))
2144 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2145 }
2146}
2147
2148/**
2149 * ata_std_softreset - reset host port via ATA SRST
2150 * @ap: port to reset
2151 * @verbose: fail verbosely
2152 * @classes: resulting classes of attached devices
2153 *
2154 * Reset host port using ATA SRST. This function is to be used
2155 * as standard callback for ata_drive_*_reset() functions.
2156 *
2157 * LOCKING:
2158 * Kernel thread context (may sleep)
2159 *
2160 * RETURNS:
2161 * 0 on success, -errno otherwise.
2162 */
2163int ata_std_softreset(struct ata_port *ap, int verbose, unsigned int *classes)
2164{
2165 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2166 unsigned int devmask = 0, err_mask;
2167 u8 err;
2168
2169 DPRINTK("ENTER\n");
2170
2171 if (ap->ops->scr_read && !sata_dev_present(ap)) {
2172 classes[0] = ATA_DEV_NONE;
2173 goto out;
2174 }
2175
2176 /* determine if device 0/1 are present */
2177 if (ata_devchk(ap, 0))
2178 devmask |= (1 << 0);
2179 if (slave_possible && ata_devchk(ap, 1))
2180 devmask |= (1 << 1);
2181
2182 /* select device 0 again */
2183 ap->ops->dev_select(ap, 0);
2184
2185 /* issue bus reset */
2186 DPRINTK("about to softreset, devmask=%x\n", devmask);
2187 err_mask = ata_bus_softreset(ap, devmask);
2188 if (err_mask) {
2189 if (verbose)
2190 printk(KERN_ERR "ata%u: SRST failed (err_mask=0x%x)\n",
2191 ap->id, err_mask);
2192 else
2193 DPRINTK("EXIT, softreset failed (err_mask=0x%x)\n",
2194 err_mask);
2195 return -EIO;
2196 }
2197
2198 /* determine by signature whether we have ATA or ATAPI devices */
2199 classes[0] = ata_dev_try_classify(ap, 0, &err);
2200 if (slave_possible && err != 0x81)
2201 classes[1] = ata_dev_try_classify(ap, 1, &err);
2202
2203 out:
2204 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
2205 return 0;
2206}
2207
2208/**
2209 * sata_std_hardreset - reset host port via SATA phy reset
2210 * @ap: port to reset
2211 * @verbose: fail verbosely
2212 * @class: resulting class of attached device
2213 *
2214 * SATA phy-reset host port using DET bits of SControl register.
2215 * This function is to be used as standard callback for
2216 * ata_drive_*_reset().
2217 *
2218 * LOCKING:
2219 * Kernel thread context (may sleep)
2220 *
2221 * RETURNS:
2222 * 0 on success, -errno otherwise.
2223 */
2224int sata_std_hardreset(struct ata_port *ap, int verbose, unsigned int *class)
2225{
2226 DPRINTK("ENTER\n");
2227
2228 /* Issue phy wake/reset */
2229 scr_write_flush(ap, SCR_CONTROL, 0x301);
2230
2231 /*
2232 * Couldn't find anything in SATA I/II specs, but AHCI-1.1
2233 * 10.4.2 says at least 1 ms.
2234 */
2235 msleep(1);
2236
2237 /* Bring phy back */
2238 sata_phy_resume(ap);
2239
2240 /* TODO: phy layer with polling, timeouts, etc. */
2241 if (!sata_dev_present(ap)) {
2242 *class = ATA_DEV_NONE;
2243 DPRINTK("EXIT, link offline\n");
2244 return 0;
2245 }
2246
2247 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
2248 if (verbose)
2249 printk(KERN_ERR "ata%u: COMRESET failed "
2250 "(device not ready)\n", ap->id);
2251 else
2252 DPRINTK("EXIT, device not ready\n");
2253 return -EIO;
2254 }
2255
2256 ap->ops->dev_select(ap, 0); /* probably unnecessary */
2257
2258 *class = ata_dev_try_classify(ap, 0, NULL);
2259
2260 DPRINTK("EXIT, class=%u\n", *class);
2261 return 0;
2262}
2263
2264/**
2265 * ata_std_postreset - standard postreset callback
2266 * @ap: the target ata_port
2267 * @classes: classes of attached devices
2268 *
2269 * This function is invoked after a successful reset. Note that
2270 * the device might have been reset more than once using
2271 * different reset methods before postreset is invoked.
2272 *
2273 * This function is to be used as standard callback for
2274 * ata_drive_*_reset().
2275 *
2276 * LOCKING:
2277 * Kernel thread context (may sleep)
2278 */
2279void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
2280{
2281 DPRINTK("ENTER\n");
2282
2283 /* set cable type if it isn't already set */
2284 if (ap->cbl == ATA_CBL_NONE && ap->flags & ATA_FLAG_SATA)
2285 ap->cbl = ATA_CBL_SATA;
2286
2287 /* print link status */
2288 if (ap->cbl == ATA_CBL_SATA)
2289 sata_print_link_status(ap);
2290
2291 /* re-enable interrupts */
2292 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
2293 ata_irq_on(ap);
2294
2295 /* is double-select really necessary? */
2296 if (classes[0] != ATA_DEV_NONE)
2297 ap->ops->dev_select(ap, 1);
2298 if (classes[1] != ATA_DEV_NONE)
2299 ap->ops->dev_select(ap, 0);
2300
2301 /* bail out if no device is present */
2302 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2303 DPRINTK("EXIT, no device\n");
2304 return;
2305 }
2306
2307 /* set up device control */
2308 if (ap->ioaddr.ctl_addr) {
2309 if (ap->flags & ATA_FLAG_MMIO)
2310 writeb(ap->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
2311 else
2312 outb(ap->ctl, ap->ioaddr.ctl_addr);
2313 }
2314
2315 DPRINTK("EXIT\n");
2316}
2317
2318/**
2319 * ata_std_probe_reset - standard probe reset method
 2320 * @ap: port to perform probe-reset on
2321 * @classes: resulting classes of attached devices
2322 *
2323 * The stock off-the-shelf ->probe_reset method.
2324 *
2325 * LOCKING:
2326 * Kernel thread context (may sleep)
2327 *
2328 * RETURNS:
2329 * 0 on success, -errno otherwise.
2330 */
2331int ata_std_probe_reset(struct ata_port *ap, unsigned int *classes)
2332{
2333 ata_reset_fn_t hardreset;
2334
2335 hardreset = NULL;
2336 if (ap->flags & ATA_FLAG_SATA && ap->ops->scr_read)
2337 hardreset = sata_std_hardreset;
2338
2339 return ata_drive_probe_reset(ap, ata_std_probeinit,
2340 ata_std_softreset, hardreset,
2341 ata_std_postreset, classes);
2342}
2343
2344static int do_probe_reset(struct ata_port *ap, ata_reset_fn_t reset,
2345 ata_postreset_fn_t postreset,
2346 unsigned int *classes)
2347{
2348 int i, rc;
2349
2350 for (i = 0; i < ATA_MAX_DEVICES; i++)
2351 classes[i] = ATA_DEV_UNKNOWN;
2352
2353 rc = reset(ap, 0, classes);
2354 if (rc)
2355 return rc;
2356
2357 /* If any class isn't ATA_DEV_UNKNOWN, consider classification
2358 * is complete and convert all ATA_DEV_UNKNOWN to
2359 * ATA_DEV_NONE.
2360 */
2361 for (i = 0; i < ATA_MAX_DEVICES; i++)
2362 if (classes[i] != ATA_DEV_UNKNOWN)
2363 break;
2364
2365 if (i < ATA_MAX_DEVICES)
2366 for (i = 0; i < ATA_MAX_DEVICES; i++)
2367 if (classes[i] == ATA_DEV_UNKNOWN)
2368 classes[i] = ATA_DEV_NONE;
2369
2370 if (postreset)
2371 postreset(ap, classes);
2372
2373 return classes[0] != ATA_DEV_UNKNOWN ? 0 : -ENODEV;
2374}
2375
2376/**
2377 * ata_drive_probe_reset - Perform probe reset with given methods
2378 * @ap: port to reset
2379 * @probeinit: probeinit method (can be NULL)
2380 * @softreset: softreset method (can be NULL)
2381 * @hardreset: hardreset method (can be NULL)
2382 * @postreset: postreset method (can be NULL)
2383 * @classes: resulting classes of attached devices
2384 *
2385 * Reset the specified port and classify attached devices using
2386 * given methods. This function prefers softreset but tries all
2387 * possible reset sequences to reset and classify devices. This
2388 * function is intended to be used for constructing ->probe_reset
 2389 * function is intended to be used by low-level drivers for
 2390 * constructing their ->probe_reset callback.
2391 * Reset methods should follow the following rules.
 2392 * Reset methods should follow these rules:
 2393 * - Return 0 on success, -errno on failure.
2394 * - If classification is supported, fill classes[] with
2395 * recognized class codes.
2396 * - If classification is not supported, leave classes[] alone.
2397 * - If verbose is non-zero, print error message on failure;
2398 * otherwise, shut up.
2399 *
2400 * LOCKING:
2401 * Kernel thread context (may sleep)
2402 *
2403 * RETURNS:
 2404 * 0 on success, -EINVAL if no reset method is available, -ENODEV
2405 * if classification fails, and any error code from reset
2406 * methods.
2407 */
2408int ata_drive_probe_reset(struct ata_port *ap, ata_probeinit_fn_t probeinit,
2409 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
2410 ata_postreset_fn_t postreset, unsigned int *classes)
2411{
2412 int rc = -EINVAL;
2413
2414 if (probeinit)
2415 probeinit(ap);
2416
2417 if (softreset) {
2418 rc = do_probe_reset(ap, softreset, postreset, classes);
2419 if (rc == 0)
2420 return 0;
2421 }
2422
2423 if (!hardreset)
2424 return rc;
2425
2426 rc = do_probe_reset(ap, hardreset, postreset, classes);
2427 if (rc == 0 || rc != -ENODEV)
2428 return rc;
2429
2430 if (softreset)
2431 rc = do_probe_reset(ap, softreset, postreset, classes);
2432
2433 return rc;
2434}
2435
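A driver that needs a controller-specific hardreset but stock behaviour otherwise can compose its ->probe_reset from these helpers; a sketch (mydrv_hardreset is hypothetical):

static int mydrv_probe_reset(struct ata_port *ap, unsigned int *classes)
{
	/* try SRST first; ata_drive_probe_reset() falls back to the
	 * controller-specific hardreset if softreset cannot classify */
	return ata_drive_probe_reset(ap, ata_std_probeinit,
				     ata_std_softreset, mydrv_hardreset,
				     ata_std_postreset, classes);
}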
2436/**
2437 * ata_dev_same_device - Determine whether new ID matches configured device
2438 * @ap: port on which the device to compare against resides
2439 * @dev: device to compare against
2440 * @new_class: class of the new device
2441 * @new_id: IDENTIFY page of the new device
2442 *
2443 * Compare @new_class and @new_id against @dev and determine
2444 * whether @dev is the device indicated by @new_class and
2445 * @new_id.
2446 *
2447 * LOCKING:
2448 * None.
2449 *
2450 * RETURNS:
2451 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
2452 */
2453static int ata_dev_same_device(struct ata_port *ap, struct ata_device *dev,
2454 unsigned int new_class, const u16 *new_id)
2455{
2456 const u16 *old_id = dev->id;
2457 unsigned char model[2][41], serial[2][21];
2458 u64 new_n_sectors;
2459
2460 if (dev->class != new_class) {
2461 printk(KERN_INFO
2462 "ata%u: dev %u class mismatch %d != %d\n",
2463 ap->id, dev->devno, dev->class, new_class);
2464 return 0;
2465 }
2466
2467 ata_id_c_string(old_id, model[0], ATA_ID_PROD_OFS, sizeof(model[0]));
2468 ata_id_c_string(new_id, model[1], ATA_ID_PROD_OFS, sizeof(model[1]));
2469 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO_OFS, sizeof(serial[0]));
2470 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO_OFS, sizeof(serial[1]));
2471 new_n_sectors = ata_id_n_sectors(new_id);
2472
2473 if (strcmp(model[0], model[1])) {
2474 printk(KERN_INFO
2475 "ata%u: dev %u model number mismatch '%s' != '%s'\n",
2476 ap->id, dev->devno, model[0], model[1]);
2477 return 0;
2478 }
2479
2480 if (strcmp(serial[0], serial[1])) {
2481 printk(KERN_INFO
2482 "ata%u: dev %u serial number mismatch '%s' != '%s'\n",
2483 ap->id, dev->devno, serial[0], serial[1]);
2484 return 0;
2485 }
2486
2487 if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) {
2488 printk(KERN_INFO
2489 "ata%u: dev %u n_sectors mismatch %llu != %llu\n",
2490 ap->id, dev->devno, (unsigned long long)dev->n_sectors,
2491 (unsigned long long)new_n_sectors);
2492 return 0;
2493 }
2494
2495 return 1;
2496}
2497
2498/**
2499 * ata_dev_revalidate - Revalidate ATA device
2500 * @ap: port on which the device to revalidate resides
2501 * @dev: device to revalidate
2502 * @post_reset: is this revalidation after reset?
2503 *
2504 * Re-read IDENTIFY page and make sure @dev is still attached to
2505 * the port.
2506 *
2507 * LOCKING:
2508 * Kernel thread context (may sleep)
2509 *
2510 * RETURNS:
2511 * 0 on success, negative errno otherwise
2512 */
2513int ata_dev_revalidate(struct ata_port *ap, struct ata_device *dev,
2514 int post_reset)
2515{
2516 unsigned int class;
2517 u16 *id;
2518 int rc;
2519
2520 if (!ata_dev_present(dev))
2521 return -ENODEV;
2522
2523 class = dev->class;
2524 id = NULL;
2525
2526 /* allocate & read ID data */
2527 rc = ata_dev_read_id(ap, dev, &class, post_reset, &id);
2528 if (rc)
2529 goto fail;
2530
2531 /* is the device still there? */
2532 if (!ata_dev_same_device(ap, dev, class, id)) {
2533 rc = -ENODEV;
2534 goto fail;
2535 }
2536
2537 kfree(dev->id);
2538 dev->id = id;
2539
2540 /* configure device according to the new ID */
2541 return ata_dev_configure(ap, dev, 0);
2542
2543 fail:
2544 printk(KERN_ERR "ata%u: dev %u revalidation failed (errno=%d)\n",
2545 ap->id, dev->devno, rc);
2546 kfree(id);
2547 return rc;
2204} 2548}
2205 2549
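ata_dev_set_mode() above shows the intended use after a mode change; more generally, any path that may have disturbed the device can guard itself like this (a sketch; the failure policy is the caller's choice):

/* sketch: confirm the same device still answers after a reset */
if (ata_dev_revalidate(ap, dev, 1 /* post_reset */)) {
	/* device vanished or was swapped out; e.g. mark just this
	 * device gone instead of disabling the whole port */
	dev->class = ATA_DEV_NONE;
}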
2206static const char * const ata_dma_blacklist [] = { 2550static const char * const ata_dma_blacklist [] = {
@@ -2237,151 +2581,57 @@ static const char * const ata_dma_blacklist [] = {
2237 2581
2238static int ata_dma_blacklisted(const struct ata_device *dev) 2582static int ata_dma_blacklisted(const struct ata_device *dev)
2239{ 2583{
2240 unsigned char model_num[40]; 2584 unsigned char model_num[41];
2241 char *s;
2242 unsigned int len;
2243 int i; 2585 int i;
2244 2586
2245 ata_dev_id_string(dev->id, model_num, ATA_ID_PROD_OFS, 2587 ata_id_c_string(dev->id, model_num, ATA_ID_PROD_OFS, sizeof(model_num));
2246 sizeof(model_num));
2247 s = &model_num[0];
2248 len = strnlen(s, sizeof(model_num));
2249
2250 /* ATAPI specifies that empty space is blank-filled; remove blanks */
2251 while ((len > 0) && (s[len - 1] == ' ')) {
2252 len--;
2253 s[len] = 0;
2254 }
2255 2588
2256 for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i++) 2589 for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i++)
2257 if (!strncmp(ata_dma_blacklist[i], s, len)) 2590 if (!strcmp(ata_dma_blacklist[i], model_num))
2258 return 1; 2591 return 1;
2259 2592
2260 return 0; 2593 return 0;
2261} 2594}
2262 2595
2263static unsigned int ata_get_mode_mask(const struct ata_port *ap, int shift)
2264{
2265 const struct ata_device *master, *slave;
2266 unsigned int mask;
2267
2268 master = &ap->device[0];
2269 slave = &ap->device[1];
2270
2271 assert (ata_dev_present(master) || ata_dev_present(slave));
2272
2273 if (shift == ATA_SHIFT_UDMA) {
2274 mask = ap->udma_mask;
2275 if (ata_dev_present(master)) {
2276 mask &= (master->id[ATA_ID_UDMA_MODES] & 0xff);
2277 if (ata_dma_blacklisted(master)) {
2278 mask = 0;
2279 ata_pr_blacklisted(ap, master);
2280 }
2281 }
2282 if (ata_dev_present(slave)) {
2283 mask &= (slave->id[ATA_ID_UDMA_MODES] & 0xff);
2284 if (ata_dma_blacklisted(slave)) {
2285 mask = 0;
2286 ata_pr_blacklisted(ap, slave);
2287 }
2288 }
2289 }
2290 else if (shift == ATA_SHIFT_MWDMA) {
2291 mask = ap->mwdma_mask;
2292 if (ata_dev_present(master)) {
2293 mask &= (master->id[ATA_ID_MWDMA_MODES] & 0x07);
2294 if (ata_dma_blacklisted(master)) {
2295 mask = 0;
2296 ata_pr_blacklisted(ap, master);
2297 }
2298 }
2299 if (ata_dev_present(slave)) {
2300 mask &= (slave->id[ATA_ID_MWDMA_MODES] & 0x07);
2301 if (ata_dma_blacklisted(slave)) {
2302 mask = 0;
2303 ata_pr_blacklisted(ap, slave);
2304 }
2305 }
2306 }
2307 else if (shift == ATA_SHIFT_PIO) {
2308 mask = ap->pio_mask;
2309 if (ata_dev_present(master)) {
2310 /* spec doesn't return explicit support for
2311 * PIO0-2, so we fake it
2312 */
2313 u16 tmp_mode = master->id[ATA_ID_PIO_MODES] & 0x03;
2314 tmp_mode <<= 3;
2315 tmp_mode |= 0x7;
2316 mask &= tmp_mode;
2317 }
2318 if (ata_dev_present(slave)) {
2319 /* spec doesn't return explicit support for
2320 * PIO0-2, so we fake it
2321 */
2322 u16 tmp_mode = slave->id[ATA_ID_PIO_MODES] & 0x03;
2323 tmp_mode <<= 3;
2324 tmp_mode |= 0x7;
2325 mask &= tmp_mode;
2326 }
2327 }
2328 else {
2329 mask = 0xffffffff; /* shut up compiler warning */
2330 BUG();
2331 }
2332
2333 return mask;
2334}
2335
2336/* find greatest bit */
2337static int fgb(u32 bitmap)
2338{
2339 unsigned int i;
2340 int x = -1;
2341
2342 for (i = 0; i < 32; i++)
2343 if (bitmap & (1 << i))
2344 x = i;
2345
2346 return x;
2347}
2348
2349/** 2596/**
2350 * ata_choose_xfer_mode - attempt to find best transfer mode 2597 * ata_dev_xfermask - Compute supported xfermask of the given device
2351 * @ap: Port for which an xfer mode will be selected 2598 * @ap: Port on which the device to compute xfermask for resides
2352 * @xfer_mode_out: (output) SET FEATURES - XFER MODE code 2599 * @dev: Device to compute xfermask for
2353 * @xfer_shift_out: (output) bit shift that selects this mode
2354 * 2600 *
2355 * Based on host and device capabilities, determine the 2601 * Compute supported xfermask of @dev. This function is
2356 * maximum transfer mode that is amenable to all. 2602 * responsible for applying all known limits including host
2603 * controller limits, device blacklist, etc...
2357 * 2604 *
2358 * LOCKING: 2605 * LOCKING:
2359 * PCI/etc. bus probe sem. 2606 * None.
2360 * 2607 *
2361 * RETURNS: 2608 * RETURNS:
2362 * Zero on success, negative on error. 2609 * Computed xfermask.
2363 */ 2610 */
2364 2611static unsigned int ata_dev_xfermask(struct ata_port *ap,
2365static int ata_choose_xfer_mode(const struct ata_port *ap, 2612 struct ata_device *dev)
2366 u8 *xfer_mode_out,
2367 unsigned int *xfer_shift_out)
2368{ 2613{
2369 unsigned int mask, shift; 2614 unsigned long xfer_mask;
2370 int x, i; 2615 int i;
2371 2616
2372 for (i = 0; i < ARRAY_SIZE(xfer_mode_classes); i++) { 2617 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
2373 shift = xfer_mode_classes[i].shift; 2618 ap->udma_mask);
2374 mask = ata_get_mode_mask(ap, shift);
2375 2619
2376 x = fgb(mask); 2620 /* use port-wide xfermask for now */
2377 if (x >= 0) { 2621 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2378 *xfer_mode_out = xfer_mode_classes[i].base + x; 2622 struct ata_device *d = &ap->device[i];
2379 *xfer_shift_out = shift; 2623 if (!ata_dev_present(d))
2380 return 0; 2624 continue;
2381 } 2625 xfer_mask &= ata_id_xfermask(d->id);
2626 if (ata_dma_blacklisted(d))
2627 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
2382 } 2628 }
2383 2629
2384 return -1; 2630 if (ata_dma_blacklisted(dev))
2631 printk(KERN_WARNING "ata%u: dev %u is on DMA blacklist, "
2632 "disabling DMA\n", ap->id, dev->devno);
2633
2634 return xfer_mask;
2385} 2635}
2386 2636
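To make the intersection concrete, an illustrative case (mask values assumed, not taken from real hardware):

/* illustrative: the port supports UDMA 0-4 (up to UDMA/66) while the
 * device's IDENTIFY data reports only UDMA 0-2 (up to UDMA/33) */
unsigned int host_udma = 0x1f;			/* UDMA 0-4 */
unsigned int dev_udma  = 0x07;			/* UDMA 0-2 */
unsigned int udma = host_udma & dev_udma;	/* 0x07: tops out at UDMA/33 */
/* a blacklisted device on the channel clears the MWDMA/UDMA bits entirely */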
2387/** 2637/**
@@ -2420,63 +2670,28 @@ static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev)
2420} 2670}
2421 2671
2422/** 2672/**
2423 * ata_dev_reread_id - Reread the device identify device info
2424 * @ap: port where the device is
2425 * @dev: device to reread the identify device info
2426 *
2427 * LOCKING:
2428 */
2429
2430static void ata_dev_reread_id(struct ata_port *ap, struct ata_device *dev)
2431{
2432 struct ata_taskfile tf;
2433
2434 ata_tf_init(ap, &tf, dev->devno);
2435
2436 if (dev->class == ATA_DEV_ATA) {
2437 tf.command = ATA_CMD_ID_ATA;
2438 DPRINTK("do ATA identify\n");
2439 } else {
2440 tf.command = ATA_CMD_ID_ATAPI;
2441 DPRINTK("do ATAPI identify\n");
2442 }
2443
2444 tf.flags |= ATA_TFLAG_DEVICE;
2445 tf.protocol = ATA_PROT_PIO;
2446
2447 if (ata_exec_internal(ap, dev, &tf, DMA_FROM_DEVICE,
2448 dev->id, sizeof(dev->id)))
2449 goto err_out;
2450
2451 swap_buf_le16(dev->id, ATA_ID_WORDS);
2452
2453 ata_dump_id(dev);
2454
2455 DPRINTK("EXIT\n");
2456
2457 return;
2458err_out:
2459 printk(KERN_ERR "ata%u: failed to reread ID, disabled\n", ap->id);
2460 ata_port_disable(ap);
2461}
2462
2463/**
2464 * ata_dev_init_params - Issue INIT DEV PARAMS command 2673 * ata_dev_init_params - Issue INIT DEV PARAMS command
2465 * @ap: Port associated with device @dev 2674 * @ap: Port associated with device @dev
2466 * @dev: Device to which command will be sent 2675 * @dev: Device to which command will be sent
2467 * 2676 *
2468 * LOCKING: 2677 * LOCKING:
2678 * Kernel thread context (may sleep)
2679 *
2680 * RETURNS:
2681 * 0 on success, AC_ERR_* mask otherwise.
2469 */ 2682 */
2470 2683
2471static void ata_dev_init_params(struct ata_port *ap, struct ata_device *dev) 2684static unsigned int ata_dev_init_params(struct ata_port *ap,
2685 struct ata_device *dev)
2472{ 2686{
2473 struct ata_taskfile tf; 2687 struct ata_taskfile tf;
2688 unsigned int err_mask;
2474 u16 sectors = dev->id[6]; 2689 u16 sectors = dev->id[6];
2475 u16 heads = dev->id[3]; 2690 u16 heads = dev->id[3];
2476 2691
2477 /* Number of sectors per track 1-255. Number of heads 1-16 */ 2692 /* Number of sectors per track 1-255. Number of heads 1-16 */
2478 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16) 2693 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
2479 return; 2694 return 0;
2480 2695
2481 /* set up init dev params taskfile */ 2696 /* set up init dev params taskfile */
 2482 DPRINTK("init dev params\n"); 2697 DPRINTK("init dev params\n");
@@ -2488,13 +2703,10 @@ static void ata_dev_init_params(struct ata_port *ap, struct ata_device *dev)
2488 tf.nsect = sectors; 2703 tf.nsect = sectors;
2489 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */ 2704 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
2490 2705
2491 if (ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0)) { 2706 err_mask = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
2492 printk(KERN_ERR "ata%u: failed to init parameters, disabled\n",
2493 ap->id);
2494 ata_port_disable(ap);
2495 }
2496 2707
2497 DPRINTK("EXIT\n"); 2708 DPRINTK("EXIT, err_mask=%x\n", err_mask);
2709 return err_mask;
2498} 2710}
2499 2711
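Because the helper now returns an AC_ERR_* mask instead of disabling the port itself, the caller chooses the failure policy; a sketch of the expected pattern (error handling illustrative):

unsigned int err_mask = ata_dev_init_params(ap, dev);
if (err_mask) {
	printk(KERN_ERR "ata%u: INIT_DEV_PARAMS failed (err_mask=0x%x)\n",
	       ap->id, err_mask);
	/* fail just this device rather than the whole port */
}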
2500/** 2712/**
@@ -2514,11 +2726,11 @@ static void ata_sg_clean(struct ata_queued_cmd *qc)
2514 int dir = qc->dma_dir; 2726 int dir = qc->dma_dir;
2515 void *pad_buf = NULL; 2727 void *pad_buf = NULL;
2516 2728
2517 assert(qc->flags & ATA_QCFLAG_DMAMAP); 2729 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
2518 assert(sg != NULL); 2730 WARN_ON(sg == NULL);
2519 2731
2520 if (qc->flags & ATA_QCFLAG_SINGLE) 2732 if (qc->flags & ATA_QCFLAG_SINGLE)
2521 assert(qc->n_elem <= 1); 2733 WARN_ON(qc->n_elem > 1);
2522 2734
2523 VPRINTK("unmapping %u sg elements\n", qc->n_elem); 2735 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
2524 2736
@@ -2573,8 +2785,8 @@ static void ata_fill_sg(struct ata_queued_cmd *qc)
2573 struct scatterlist *sg; 2785 struct scatterlist *sg;
2574 unsigned int idx; 2786 unsigned int idx;
2575 2787
2576 assert(qc->__sg != NULL); 2788 WARN_ON(qc->__sg == NULL);
2577 assert(qc->n_elem > 0 || qc->pad_len > 0); 2789 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
2578 2790
2579 idx = 0; 2791 idx = 0;
2580 ata_for_each_sg(sg, qc) { 2792 ata_for_each_sg(sg, qc) {
@@ -2727,7 +2939,7 @@ static int ata_sg_setup_one(struct ata_queued_cmd *qc)
2727 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ); 2939 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
2728 struct scatterlist *psg = &qc->pad_sgent; 2940 struct scatterlist *psg = &qc->pad_sgent;
2729 2941
2730 assert(qc->dev->class == ATA_DEV_ATAPI); 2942 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
2731 2943
2732 memset(pad_buf, 0, ATA_DMA_PAD_SZ); 2944 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
2733 2945
@@ -2791,7 +3003,7 @@ static int ata_sg_setup(struct ata_queued_cmd *qc)
2791 int n_elem, pre_n_elem, dir, trim_sg = 0; 3003 int n_elem, pre_n_elem, dir, trim_sg = 0;
2792 3004
2793 VPRINTK("ENTER, ata%u\n", ap->id); 3005 VPRINTK("ENTER, ata%u\n", ap->id);
2794 assert(qc->flags & ATA_QCFLAG_SG); 3006 WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
2795 3007
2796 /* we must lengthen transfers to end on a 32-bit boundary */ 3008 /* we must lengthen transfers to end on a 32-bit boundary */
2797 qc->pad_len = lsg->length & 3; 3009 qc->pad_len = lsg->length & 3;
@@ -2800,7 +3012,7 @@ static int ata_sg_setup(struct ata_queued_cmd *qc)
2800 struct scatterlist *psg = &qc->pad_sgent; 3012 struct scatterlist *psg = &qc->pad_sgent;
2801 unsigned int offset; 3013 unsigned int offset;
2802 3014
2803 assert(qc->dev->class == ATA_DEV_ATAPI); 3015 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
2804 3016
2805 memset(pad_buf, 0, ATA_DMA_PAD_SZ); 3017 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
2806 3018
@@ -2876,7 +3088,7 @@ void ata_poll_qc_complete(struct ata_queued_cmd *qc)
2876} 3088}
2877 3089
2878/** 3090/**
2879 * ata_pio_poll - 3091 * ata_pio_poll - poll using PIO, depending on current state
2880 * @ap: the target ata_port 3092 * @ap: the target ata_port
2881 * 3093 *
2882 * LOCKING: 3094 * LOCKING:
@@ -2894,7 +3106,7 @@ static unsigned long ata_pio_poll(struct ata_port *ap)
2894 unsigned int reg_state = HSM_ST_UNKNOWN; 3106 unsigned int reg_state = HSM_ST_UNKNOWN;
2895 3107
2896 qc = ata_qc_from_tag(ap, ap->active_tag); 3108 qc = ata_qc_from_tag(ap, ap->active_tag);
2897 assert(qc != NULL); 3109 WARN_ON(qc == NULL);
2898 3110
2899 switch (ap->hsm_task_state) { 3111 switch (ap->hsm_task_state) {
2900 case HSM_ST: 3112 case HSM_ST:
@@ -2915,7 +3127,7 @@ static unsigned long ata_pio_poll(struct ata_port *ap)
2915 status = ata_chk_status(ap); 3127 status = ata_chk_status(ap);
2916 if (status & ATA_BUSY) { 3128 if (status & ATA_BUSY) {
2917 if (time_after(jiffies, ap->pio_task_timeout)) { 3129 if (time_after(jiffies, ap->pio_task_timeout)) {
2918 qc->err_mask |= AC_ERR_ATA_BUS; 3130 qc->err_mask |= AC_ERR_TIMEOUT;
2919 ap->hsm_task_state = HSM_ST_TMOUT; 3131 ap->hsm_task_state = HSM_ST_TMOUT;
2920 return 0; 3132 return 0;
2921 } 3133 }
@@ -2962,7 +3174,7 @@ static int ata_pio_complete (struct ata_port *ap)
2962 } 3174 }
2963 3175
2964 qc = ata_qc_from_tag(ap, ap->active_tag); 3176 qc = ata_qc_from_tag(ap, ap->active_tag);
2965 assert(qc != NULL); 3177 WARN_ON(qc == NULL);
2966 3178
2967 drv_stat = ata_wait_idle(ap); 3179 drv_stat = ata_wait_idle(ap);
2968 if (!ata_ok(drv_stat)) { 3180 if (!ata_ok(drv_stat)) {
@@ -2973,7 +3185,7 @@ static int ata_pio_complete (struct ata_port *ap)
2973 3185
2974 ap->hsm_task_state = HSM_ST_IDLE; 3186 ap->hsm_task_state = HSM_ST_IDLE;
2975 3187
2976 assert(qc->err_mask == 0); 3188 WARN_ON(qc->err_mask);
2977 ata_poll_qc_complete(qc); 3189 ata_poll_qc_complete(qc);
2978 3190
2979 /* another command may start at this point */ 3191 /* another command may start at this point */
@@ -2983,7 +3195,7 @@ static int ata_pio_complete (struct ata_port *ap)
2983 3195
2984 3196
2985/** 3197/**
2986 * swap_buf_le16 - swap halves of 16-words in place 3198 * swap_buf_le16 - swap halves of 16-bit words in place
2987 * @buf: Buffer to swap 3199 * @buf: Buffer to swap
2988 * @buf_words: Number of 16-bit words in buffer. 3200 * @buf_words: Number of 16-bit words in buffer.
2989 * 3201 *
@@ -3293,7 +3505,7 @@ static void atapi_pio_bytes(struct ata_queued_cmd *qc)
3293err_out: 3505err_out:
3294 printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n", 3506 printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n",
3295 ap->id, dev->devno); 3507 ap->id, dev->devno);
3296 qc->err_mask |= AC_ERR_ATA_BUS; 3508 qc->err_mask |= AC_ERR_HSM;
3297 ap->hsm_task_state = HSM_ST_ERR; 3509 ap->hsm_task_state = HSM_ST_ERR;
3298} 3510}
3299 3511
@@ -3330,7 +3542,7 @@ static void ata_pio_block(struct ata_port *ap)
3330 } 3542 }
3331 3543
3332 qc = ata_qc_from_tag(ap, ap->active_tag); 3544 qc = ata_qc_from_tag(ap, ap->active_tag);
3333 assert(qc != NULL); 3545 WARN_ON(qc == NULL);
3334 3546
3335 /* check error */ 3547 /* check error */
3336 if (status & (ATA_ERR | ATA_DF)) { 3548 if (status & (ATA_ERR | ATA_DF)) {
@@ -3351,7 +3563,7 @@ static void ata_pio_block(struct ata_port *ap)
3351 } else { 3563 } else {
3352 /* handle BSY=0, DRQ=0 as error */ 3564 /* handle BSY=0, DRQ=0 as error */
3353 if ((status & ATA_DRQ) == 0) { 3565 if ((status & ATA_DRQ) == 0) {
3354 qc->err_mask |= AC_ERR_ATA_BUS; 3566 qc->err_mask |= AC_ERR_HSM;
3355 ap->hsm_task_state = HSM_ST_ERR; 3567 ap->hsm_task_state = HSM_ST_ERR;
3356 return; 3568 return;
3357 } 3569 }
@@ -3365,7 +3577,7 @@ static void ata_pio_error(struct ata_port *ap)
3365 struct ata_queued_cmd *qc; 3577 struct ata_queued_cmd *qc;
3366 3578
3367 qc = ata_qc_from_tag(ap, ap->active_tag); 3579 qc = ata_qc_from_tag(ap, ap->active_tag);
3368 assert(qc != NULL); 3580 WARN_ON(qc == NULL);
3369 3581
3370 if (qc->tf.command != ATA_CMD_PACKET) 3582 if (qc->tf.command != ATA_CMD_PACKET)
3371 printk(KERN_WARNING "ata%u: PIO error\n", ap->id); 3583 printk(KERN_WARNING "ata%u: PIO error\n", ap->id);
@@ -3373,7 +3585,7 @@ static void ata_pio_error(struct ata_port *ap)
3373 /* make sure qc->err_mask is available to 3585 /* make sure qc->err_mask is available to
3374 * know what's wrong and recover 3586 * know what's wrong and recover
3375 */ 3587 */
3376 assert(qc->err_mask); 3588 WARN_ON(qc->err_mask == 0);
3377 3589
3378 ap->hsm_task_state = HSM_ST_IDLE; 3590 ap->hsm_task_state = HSM_ST_IDLE;
3379 3591
@@ -3414,12 +3626,84 @@ fsm_start:
3414 } 3626 }
3415 3627
3416 if (timeout) 3628 if (timeout)
3417 queue_delayed_work(ata_wq, &ap->pio_task, timeout); 3629 ata_port_queue_task(ap, ata_pio_task, ap, timeout);
3418 else if (!qc_completed) 3630 else if (!qc_completed)
3419 goto fsm_start; 3631 goto fsm_start;
3420} 3632}
3421 3633
3422/** 3634/**
3635 * atapi_packet_task - Write CDB bytes to hardware
3636 * @_data: Port to which ATAPI device is attached.
3637 *
 3638 * When the device has indicated its readiness to accept
 3639 * a CDB, this function is called. Send the CDB.
 3640 * If DMA is to be performed, exit immediately.
 3641 * Otherwise, we are in polling mode, so poll
 3642 * status until the operation succeeds or fails.
3643 *
3644 * LOCKING:
3645 * Kernel thread context (may sleep)
3646 */
3647
3648static void atapi_packet_task(void *_data)
3649{
3650 struct ata_port *ap = _data;
3651 struct ata_queued_cmd *qc;
3652 u8 status;
3653
3654 qc = ata_qc_from_tag(ap, ap->active_tag);
3655 WARN_ON(qc == NULL);
3656 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
3657
3658 /* sleep-wait for BSY to clear */
3659 DPRINTK("busy wait\n");
3660 if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB)) {
3661 qc->err_mask |= AC_ERR_TIMEOUT;
3662 goto err_out;
3663 }
3664
3665 /* make sure DRQ is set */
3666 status = ata_chk_status(ap);
3667 if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ) {
3668 qc->err_mask |= AC_ERR_HSM;
3669 goto err_out;
3670 }
3671
3672 /* send SCSI cdb */
3673 DPRINTK("send cdb\n");
3674 WARN_ON(qc->dev->cdb_len < 12);
3675
3676 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA ||
3677 qc->tf.protocol == ATA_PROT_ATAPI_NODATA) {
3678 unsigned long flags;
3679
3680 /* Once we're done issuing command and kicking bmdma,
3681 * irq handler takes over. To not lose irq, we need
3682 * to clear NOINTR flag before sending cdb, but
3683 * interrupt handler shouldn't be invoked before we're
3684 * finished. Hence, the following locking.
3685 */
3686 spin_lock_irqsave(&ap->host_set->lock, flags);
3687 ap->flags &= ~ATA_FLAG_NOINTR;
3688 ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);
3689 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA)
3690 ap->ops->bmdma_start(qc); /* initiate bmdma */
3691 spin_unlock_irqrestore(&ap->host_set->lock, flags);
3692 } else {
3693 ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);
3694
3695 /* PIO commands are handled by polling */
3696 ap->hsm_task_state = HSM_ST;
3697 ata_port_queue_task(ap, ata_pio_task, ap, 0);
3698 }
3699
3700 return;
3701
3702err_out:
3703 ata_poll_qc_complete(qc);
3704}
3705
3706/**
3423 * ata_qc_timeout - Handle timeout of queued command 3707 * ata_qc_timeout - Handle timeout of queued command
3424 * @qc: Command that timed out 3708 * @qc: Command that timed out
3425 * 3709 *
@@ -3447,15 +3731,9 @@ static void ata_qc_timeout(struct ata_queued_cmd *qc)
3447 3731
3448 DPRINTK("ENTER\n"); 3732 DPRINTK("ENTER\n");
3449 3733
3450 spin_lock_irqsave(&host_set->lock, flags); 3734 ap->hsm_task_state = HSM_ST_IDLE;
3451 3735
3452 /* hack alert! We cannot use the supplied completion 3736 spin_lock_irqsave(&host_set->lock, flags);
3453 * function from inside the ->eh_strategy_handler() thread.
3454 * libata is the only user of ->eh_strategy_handler() in
3455 * any kernel, so the default scsi_done() assumes it is
3456 * not being called from the SCSI EH.
3457 */
3458 qc->scsidone = scsi_finish_command;
3459 3737
3460 switch (qc->tf.protocol) { 3738 switch (qc->tf.protocol) {
3461 3739
@@ -3480,12 +3758,13 @@ static void ata_qc_timeout(struct ata_queued_cmd *qc)
3480 3758
3481 /* complete taskfile transaction */ 3759 /* complete taskfile transaction */
3482 qc->err_mask |= ac_err_mask(drv_stat); 3760 qc->err_mask |= ac_err_mask(drv_stat);
3483 ata_qc_complete(qc);
3484 break; 3761 break;
3485 } 3762 }
3486 3763
3487 spin_unlock_irqrestore(&host_set->lock, flags); 3764 spin_unlock_irqrestore(&host_set->lock, flags);
3488 3765
3766 ata_eh_qc_complete(qc);
3767
3489 DPRINTK("EXIT\n"); 3768 DPRINTK("EXIT\n");
3490} 3769}
3491 3770
@@ -3510,20 +3789,10 @@ static void ata_qc_timeout(struct ata_queued_cmd *qc)
3510 3789
3511void ata_eng_timeout(struct ata_port *ap) 3790void ata_eng_timeout(struct ata_port *ap)
3512{ 3791{
3513 struct ata_queued_cmd *qc;
3514
3515 DPRINTK("ENTER\n"); 3792 DPRINTK("ENTER\n");
3516 3793
3517 qc = ata_qc_from_tag(ap, ap->active_tag); 3794 ata_qc_timeout(ata_qc_from_tag(ap, ap->active_tag));
3518 if (qc)
3519 ata_qc_timeout(qc);
3520 else {
3521 printk(KERN_ERR "ata%u: BUG: timeout without command\n",
3522 ap->id);
3523 goto out;
3524 }
3525 3795
3526out:
3527 DPRINTK("EXIT\n"); 3796 DPRINTK("EXIT\n");
3528} 3797}
3529 3798
@@ -3579,21 +3848,6 @@ struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
3579 return qc; 3848 return qc;
3580} 3849}
3581 3850
3582static void __ata_qc_complete(struct ata_queued_cmd *qc)
3583{
3584 struct ata_port *ap = qc->ap;
3585 unsigned int tag;
3586
3587 qc->flags = 0;
3588 tag = qc->tag;
3589 if (likely(ata_tag_valid(tag))) {
3590 if (tag == ap->active_tag)
3591 ap->active_tag = ATA_TAG_POISON;
3592 qc->tag = ATA_TAG_POISON;
3593 clear_bit(tag, &ap->qactive);
3594 }
3595}
3596
3597/** 3851/**
3598 * ata_qc_free - free unused ata_queued_cmd 3852 * ata_qc_free - free unused ata_queued_cmd
3599 * @qc: Command to free 3853 * @qc: Command to free
@@ -3606,29 +3860,25 @@ static void __ata_qc_complete(struct ata_queued_cmd *qc)
3606 */ 3860 */
3607void ata_qc_free(struct ata_queued_cmd *qc) 3861void ata_qc_free(struct ata_queued_cmd *qc)
3608{ 3862{
3609 assert(qc != NULL); /* ata_qc_from_tag _might_ return NULL */ 3863 struct ata_port *ap = qc->ap;
3864 unsigned int tag;
3610 3865
3611 __ata_qc_complete(qc); 3866 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
3612}
3613 3867
3614/** 3868 qc->flags = 0;
3615 * ata_qc_complete - Complete an active ATA command 3869 tag = qc->tag;
3616 * @qc: Command to complete 3870 if (likely(ata_tag_valid(tag))) {
3617 * @err_mask: ATA Status register contents 3871 if (tag == ap->active_tag)
3618 * 3872 ap->active_tag = ATA_TAG_POISON;
3619 * Indicate to the mid and upper layers that an ATA 3873 qc->tag = ATA_TAG_POISON;
3620 * command has completed, with either an ok or not-ok status. 3874 clear_bit(tag, &ap->qactive);
3621 * 3875 }
3622 * LOCKING: 3876}
3623 * spin_lock_irqsave(host_set lock)
3624 */
3625 3877
3626void ata_qc_complete(struct ata_queued_cmd *qc) 3878void __ata_qc_complete(struct ata_queued_cmd *qc)
3627{ 3879{
3628 int rc; 3880 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
3629 3881 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
3630 assert(qc != NULL); /* ata_qc_from_tag _might_ return NULL */
3631 assert(qc->flags & ATA_QCFLAG_ACTIVE);
3632 3882
3633 if (likely(qc->flags & ATA_QCFLAG_DMAMAP)) 3883 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
3634 ata_sg_clean(qc); 3884 ata_sg_clean(qc);
@@ -3640,17 +3890,7 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
3640 qc->flags &= ~ATA_QCFLAG_ACTIVE; 3890 qc->flags &= ~ATA_QCFLAG_ACTIVE;
3641 3891
3642 /* call completion callback */ 3892 /* call completion callback */
3643 rc = qc->complete_fn(qc); 3893 qc->complete_fn(qc);
3644
3645 /* if callback indicates not to complete command (non-zero),
3646 * return immediately
3647 */
3648 if (rc != 0)
3649 return;
3650
3651 __ata_qc_complete(qc);
3652
3653 VPRINTK("EXIT\n");
3654} 3894}
3655 3895
3656static inline int ata_should_dma_map(struct ata_queued_cmd *qc) 3896static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
@@ -3690,20 +3930,20 @@ static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
3690 * spin_lock_irqsave(host_set lock) 3930 * spin_lock_irqsave(host_set lock)
3691 * 3931 *
3692 * RETURNS: 3932 * RETURNS:
3693 * Zero on success, negative on error. 3933 * Zero on success, AC_ERR_* mask on failure
3694 */ 3934 */
3695 3935
3696int ata_qc_issue(struct ata_queued_cmd *qc) 3936unsigned int ata_qc_issue(struct ata_queued_cmd *qc)
3697{ 3937{
3698 struct ata_port *ap = qc->ap; 3938 struct ata_port *ap = qc->ap;
3699 3939
3700 if (ata_should_dma_map(qc)) { 3940 if (ata_should_dma_map(qc)) {
3701 if (qc->flags & ATA_QCFLAG_SG) { 3941 if (qc->flags & ATA_QCFLAG_SG) {
3702 if (ata_sg_setup(qc)) 3942 if (ata_sg_setup(qc))
3703 goto err_out; 3943 goto sg_err;
3704 } else if (qc->flags & ATA_QCFLAG_SINGLE) { 3944 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
3705 if (ata_sg_setup_one(qc)) 3945 if (ata_sg_setup_one(qc))
3706 goto err_out; 3946 goto sg_err;
3707 } 3947 }
3708 } else { 3948 } else {
3709 qc->flags &= ~ATA_QCFLAG_DMAMAP; 3949 qc->flags &= ~ATA_QCFLAG_DMAMAP;
@@ -3716,8 +3956,9 @@ int ata_qc_issue(struct ata_queued_cmd *qc)
3716 3956
3717 return ap->ops->qc_issue(qc); 3957 return ap->ops->qc_issue(qc);
3718 3958
3719err_out: 3959sg_err:
3720 return -1; 3960 qc->flags &= ~ATA_QCFLAG_DMAMAP;
3961 return AC_ERR_SYSTEM;
3721} 3962}
3722 3963
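With the return type changed from a bare int to an AC_ERR_* mask, issuing paths are expected to record the mask on the command and complete it through the normal machinery; a sketch of the calling convention:

/* sketch: how an issuing path consumes the new return value */
qc->err_mask = ata_qc_issue(qc);
if (qc->err_mask)
	ata_qc_complete(qc);	/* fail through the standard completion path */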
3723 3964
@@ -3736,10 +3977,10 @@ err_out:
3736 * spin_lock_irqsave(host_set lock) 3977 * spin_lock_irqsave(host_set lock)
3737 * 3978 *
3738 * RETURNS: 3979 * RETURNS:
3739 * Zero on success, negative on error. 3980 * Zero on success, AC_ERR_* mask on failure
3740 */ 3981 */
3741 3982
3742int ata_qc_issue_prot(struct ata_queued_cmd *qc) 3983unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
3743{ 3984{
3744 struct ata_port *ap = qc->ap; 3985 struct ata_port *ap = qc->ap;
3745 3986
@@ -3760,31 +4001,31 @@ int ata_qc_issue_prot(struct ata_queued_cmd *qc)
3760 ata_qc_set_polling(qc); 4001 ata_qc_set_polling(qc);
3761 ata_tf_to_host(ap, &qc->tf); 4002 ata_tf_to_host(ap, &qc->tf);
3762 ap->hsm_task_state = HSM_ST; 4003 ap->hsm_task_state = HSM_ST;
3763 queue_work(ata_wq, &ap->pio_task); 4004 ata_port_queue_task(ap, ata_pio_task, ap, 0);
3764 break; 4005 break;
3765 4006
3766 case ATA_PROT_ATAPI: 4007 case ATA_PROT_ATAPI:
3767 ata_qc_set_polling(qc); 4008 ata_qc_set_polling(qc);
3768 ata_tf_to_host(ap, &qc->tf); 4009 ata_tf_to_host(ap, &qc->tf);
3769 queue_work(ata_wq, &ap->packet_task); 4010 ata_port_queue_task(ap, atapi_packet_task, ap, 0);
3770 break; 4011 break;
3771 4012
3772 case ATA_PROT_ATAPI_NODATA: 4013 case ATA_PROT_ATAPI_NODATA:
3773 ap->flags |= ATA_FLAG_NOINTR; 4014 ap->flags |= ATA_FLAG_NOINTR;
3774 ata_tf_to_host(ap, &qc->tf); 4015 ata_tf_to_host(ap, &qc->tf);
3775 queue_work(ata_wq, &ap->packet_task); 4016 ata_port_queue_task(ap, atapi_packet_task, ap, 0);
3776 break; 4017 break;
3777 4018
3778 case ATA_PROT_ATAPI_DMA: 4019 case ATA_PROT_ATAPI_DMA:
3779 ap->flags |= ATA_FLAG_NOINTR; 4020 ap->flags |= ATA_FLAG_NOINTR;
3780 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */ 4021 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
3781 ap->ops->bmdma_setup(qc); /* set up bmdma */ 4022 ap->ops->bmdma_setup(qc); /* set up bmdma */
3782 queue_work(ata_wq, &ap->packet_task); 4023 ata_port_queue_task(ap, atapi_packet_task, ap, 0);
3783 break; 4024 break;
3784 4025
3785 default: 4026 default:
3786 WARN_ON(1); 4027 WARN_ON(1);
3787 return -1; 4028 return AC_ERR_SYSTEM;
3788 } 4029 }
3789 4030
3790 return 0; 4031 return 0;
@@ -4147,91 +4388,6 @@ irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
4147 return IRQ_RETVAL(handled); 4388 return IRQ_RETVAL(handled);
4148} 4389}
4149 4390
4150/**
4151 * atapi_packet_task - Write CDB bytes to hardware
4152 * @_data: Port to which ATAPI device is attached.
4153 *
4154 * When device has indicated its readiness to accept
4155 * a CDB, this function is called. Send the CDB.
4156 * If DMA is to be performed, exit immediately.
4157 * Otherwise, we are in polling mode, so poll
4158 * status under operation succeeds or fails.
4159 *
4160 * LOCKING:
4161 * Kernel thread context (may sleep)
4162 */
4163
4164static void atapi_packet_task(void *_data)
4165{
4166 struct ata_port *ap = _data;
4167 struct ata_queued_cmd *qc;
4168 u8 status;
4169
4170 qc = ata_qc_from_tag(ap, ap->active_tag);
4171 assert(qc != NULL);
4172 assert(qc->flags & ATA_QCFLAG_ACTIVE);
4173
4174 /* sleep-wait for BSY to clear */
4175 DPRINTK("busy wait\n");
4176 if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB)) {
4177 qc->err_mask |= AC_ERR_ATA_BUS;
4178 goto err_out;
4179 }
4180
4181 /* make sure DRQ is set */
4182 status = ata_chk_status(ap);
4183 if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ) {
4184 qc->err_mask |= AC_ERR_ATA_BUS;
4185 goto err_out;
4186 }
4187
4188 /* send SCSI cdb */
4189 DPRINTK("send cdb\n");
4190 assert(ap->cdb_len >= 12);
4191
4192 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA ||
4193 qc->tf.protocol == ATA_PROT_ATAPI_NODATA) {
4194 unsigned long flags;
4195
4196 /* Once we're done issuing command and kicking bmdma,
4197 * irq handler takes over. To not lose irq, we need
4198 * to clear NOINTR flag before sending cdb, but
4199 * interrupt handler shouldn't be invoked before we're
4200 * finished. Hence, the following locking.
4201 */
4202 spin_lock_irqsave(&ap->host_set->lock, flags);
4203 ap->flags &= ~ATA_FLAG_NOINTR;
4204 ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1);
4205 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA)
4206 ap->ops->bmdma_start(qc); /* initiate bmdma */
4207 spin_unlock_irqrestore(&ap->host_set->lock, flags);
4208 } else {
4209 ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1);
4210
4211 /* PIO commands are handled by polling */
4212 ap->hsm_task_state = HSM_ST;
4213 queue_work(ata_wq, &ap->pio_task);
4214 }
4215
4216 return;
4217
4218err_out:
4219 ata_poll_qc_complete(qc);
4220}
4221
4222
4223/**
4224 * ata_port_start - Set port up for dma.
4225 * @ap: Port to initialize
4226 *
4227 * Called just after data structures for each port are
4228 * initialized. Allocates space for PRD table.
4229 *
4230 * May be used as the port_start() entry in ata_port_operations.
4231 *
4232 * LOCKING:
4233 * Inherited from caller.
4234 */
4235 4391
4236/* 4392/*
4237 * Execute a 'simple' command, that only consists of the opcode 'cmd' itself, 4393 * Execute a 'simple' command, that only consists of the opcode 'cmd' itself,
@@ -4284,6 +4440,8 @@ static int ata_start_drive(struct ata_port *ap, struct ata_device *dev)
4284 4440
4285/** 4441/**
4462 4286 * ata_device_resume - wake up a previously suspended device 4442 * ata_device_resume - wake up a previously suspended device
4443 * @ap: port the device is connected to
4444 * @dev: the device to resume
4287 * 4445 *
4288 * Kick the drive back into action by sending it an idle immediate 4446 * Kick the drive back into action by sending it an idle immediate
4289 * command and making sure its transfer mode matches between drive 4447 * command and making sure its transfer mode matches between drive
@@ -4306,10 +4464,11 @@ int ata_device_resume(struct ata_port *ap, struct ata_device *dev)
4306 4464
4307/** 4465/**
4308 * ata_device_suspend - prepare a device for suspend 4466 * ata_device_suspend - prepare a device for suspend
4467 * @ap: port the device is connected to
4468 * @dev: the device to suspend
4309 * 4469 *
4310 * Flush the cache on the drive, if appropriate, then issue a 4470 * Flush the cache on the drive, if appropriate, then issue a
4311 * standbynow command. 4471 * standbynow command.
4312 *
4313 */ 4472 */
4314int ata_device_suspend(struct ata_port *ap, struct ata_device *dev) 4473int ata_device_suspend(struct ata_port *ap, struct ata_device *dev)
4315{ 4474{
@@ -4323,6 +4482,19 @@ int ata_device_suspend(struct ata_port *ap, struct ata_device *dev)
4323 return 0; 4482 return 0;
4324} 4483}
4325 4484
4485/**
4486 * ata_port_start - Set port up for dma.
4487 * @ap: Port to initialize
4488 *
4489 * Called just after data structures for each port are
4490 * initialized. Allocates space for PRD table.
4491 *
4492 * May be used as the port_start() entry in ata_port_operations.
4493 *
4494 * LOCKING:
4495 * Inherited from caller.
4496 */
4497
4326int ata_port_start (struct ata_port *ap) 4498int ata_port_start (struct ata_port *ap)
4327{ 4499{
4328 struct device *dev = ap->host_set->dev; 4500 struct device *dev = ap->host_set->dev;
@@ -4436,8 +4608,8 @@ static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
4436 ap->active_tag = ATA_TAG_POISON; 4608 ap->active_tag = ATA_TAG_POISON;
4437 ap->last_ctl = 0xFF; 4609 ap->last_ctl = 0xFF;
4438 4610
4439 INIT_WORK(&ap->packet_task, atapi_packet_task, ap); 4611 INIT_WORK(&ap->port_task, NULL, NULL);
4440 INIT_WORK(&ap->pio_task, ata_pio_task, ap); 4612 INIT_LIST_HEAD(&ap->eh_done_q);
4441 4613
4442 for (i = 0; i < ATA_MAX_DEVICES; i++) 4614 for (i = 0; i < ATA_MAX_DEVICES; i++)
4443 ap->device[i].devno = i; 4615 ap->device[i].devno = i;
@@ -4579,9 +4751,9 @@ int ata_device_add(const struct ata_probe_ent *ent)
4579 4751
4580 ap = host_set->ports[i]; 4752 ap = host_set->ports[i];
4581 4753
4582 DPRINTK("ata%u: probe begin\n", ap->id); 4754 DPRINTK("ata%u: bus probe begin\n", ap->id);
4583 rc = ata_bus_probe(ap); 4755 rc = ata_bus_probe(ap);
4584 DPRINTK("ata%u: probe end\n", ap->id); 4756 DPRINTK("ata%u: bus probe end\n", ap->id);
4585 4757
4586 if (rc) { 4758 if (rc) {
4587 /* FIXME: do something useful here? 4759 /* FIXME: do something useful here?
@@ -4605,7 +4777,7 @@ int ata_device_add(const struct ata_probe_ent *ent)
4605 } 4777 }
4606 4778
4607 /* probes are done, now scan each port's disk(s) */ 4779 /* probes are done, now scan each port's disk(s) */
4608 DPRINTK("probe begin\n"); 4780 DPRINTK("host probe begin\n");
4609 for (i = 0; i < count; i++) { 4781 for (i = 0; i < count; i++) {
4610 struct ata_port *ap = host_set->ports[i]; 4782 struct ata_port *ap = host_set->ports[i];
4611 4783
@@ -4691,11 +4863,14 @@ void ata_host_set_remove(struct ata_host_set *host_set)
4691int ata_scsi_release(struct Scsi_Host *host) 4863int ata_scsi_release(struct Scsi_Host *host)
4692{ 4864{
4693 struct ata_port *ap = (struct ata_port *) &host->hostdata[0]; 4865 struct ata_port *ap = (struct ata_port *) &host->hostdata[0];
4866 int i;
4694 4867
4695 DPRINTK("ENTER\n"); 4868 DPRINTK("ENTER\n");
4696 4869
4697 ap->ops->port_disable(ap); 4870 ap->ops->port_disable(ap);
4698 ata_host_remove(ap, 0); 4871 ata_host_remove(ap, 0);
4872 for (i = 0; i < ATA_MAX_DEVICES; i++)
4873 kfree(ap->device[i].id);
4699 4874
4700 DPRINTK("EXIT\n"); 4875 DPRINTK("EXIT\n");
4701 return 1; 4876 return 1;
@@ -4727,32 +4902,6 @@ void ata_std_ports(struct ata_ioports *ioaddr)
4727 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD; 4902 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
4728} 4903}
4729 4904
4730static struct ata_probe_ent *
4731ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
4732{
4733 struct ata_probe_ent *probe_ent;
4734
4735 probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
4736 if (!probe_ent) {
4737 printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
4738 kobject_name(&(dev->kobj)));
4739 return NULL;
4740 }
4741
4742 INIT_LIST_HEAD(&probe_ent->node);
4743 probe_ent->dev = dev;
4744
4745 probe_ent->sht = port->sht;
4746 probe_ent->host_flags = port->host_flags;
4747 probe_ent->pio_mask = port->pio_mask;
4748 probe_ent->mwdma_mask = port->mwdma_mask;
4749 probe_ent->udma_mask = port->udma_mask;
4750 probe_ent->port_ops = port->port_ops;
4751
4752 return probe_ent;
4753}
4754
4755
4756 4905
4757#ifdef CONFIG_PCI 4906#ifdef CONFIG_PCI
4758 4907
@@ -4764,256 +4913,6 @@ void ata_pci_host_stop (struct ata_host_set *host_set)
4764} 4913}
4765 4914
4766/** 4915/**
4767 * ata_pci_init_native_mode - Initialize native-mode driver
4768 * @pdev: pci device to be initialized
4769 * @port: array[2] of pointers to port info structures.
4770 * @ports: bitmap of ports present
4771 *
4772 * Utility function which allocates and initializes an
4773 * ata_probe_ent structure for a standard dual-port
4774 * PIO-based IDE controller. The returned ata_probe_ent
4775 * structure can be passed to ata_device_add(). The returned
4776 * ata_probe_ent structure should then be freed with kfree().
4777 *
4778 * The caller need only pass the address of the primary port, the
4779 * secondary will be deduced automatically. If the device has non
4780 * standard secondary port mappings this function can be called twice,
4781 * once for each interface.
4782 */
4783
4784struct ata_probe_ent *
4785ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int ports)
4786{
4787 struct ata_probe_ent *probe_ent =
4788 ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
4789 int p = 0;
4790
4791 if (!probe_ent)
4792 return NULL;
4793
4794 probe_ent->irq = pdev->irq;
4795 probe_ent->irq_flags = SA_SHIRQ;
4796 probe_ent->private_data = port[0]->private_data;
4797
4798 if (ports & ATA_PORT_PRIMARY) {
4799 probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 0);
4800 probe_ent->port[p].altstatus_addr =
4801 probe_ent->port[p].ctl_addr =
4802 pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS;
4803 probe_ent->port[p].bmdma_addr = pci_resource_start(pdev, 4);
4804 ata_std_ports(&probe_ent->port[p]);
4805 p++;
4806 }
4807
4808 if (ports & ATA_PORT_SECONDARY) {
4809 probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 2);
4810 probe_ent->port[p].altstatus_addr =
4811 probe_ent->port[p].ctl_addr =
4812 pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS;
4813 probe_ent->port[p].bmdma_addr = pci_resource_start(pdev, 4) + 8;
4814 ata_std_ports(&probe_ent->port[p]);
4815 p++;
4816 }
4817
4818 probe_ent->n_ports = p;
4819 return probe_ent;
4820}
4821
4822static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev, struct ata_port_info *port, int port_num)
4823{
4824 struct ata_probe_ent *probe_ent;
4825
4826 probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port);
4827 if (!probe_ent)
4828 return NULL;
4829
4830 probe_ent->legacy_mode = 1;
4831 probe_ent->n_ports = 1;
4832 probe_ent->hard_port_no = port_num;
4833 probe_ent->private_data = port->private_data;
4834
4835 switch(port_num)
4836 {
4837 case 0:
4838 probe_ent->irq = 14;
4839 probe_ent->port[0].cmd_addr = 0x1f0;
4840 probe_ent->port[0].altstatus_addr =
4841 probe_ent->port[0].ctl_addr = 0x3f6;
4842 break;
4843 case 1:
4844 probe_ent->irq = 15;
4845 probe_ent->port[0].cmd_addr = 0x170;
4846 probe_ent->port[0].altstatus_addr =
4847 probe_ent->port[0].ctl_addr = 0x376;
4848 break;
4849 }
4850 probe_ent->port[0].bmdma_addr = pci_resource_start(pdev, 4) + 8 * port_num;
4851 ata_std_ports(&probe_ent->port[0]);
4852 return probe_ent;
4853}
4854
4855/**
4856 * ata_pci_init_one - Initialize/register PCI IDE host controller
4857 * @pdev: Controller to be initialized
4858 * @port_info: Information from low-level host driver
4859 * @n_ports: Number of ports attached to host controller
4860 *
4861 * This is a helper function which can be called from a driver's
4862 * xxx_init_one() probe function if the hardware uses traditional
4863 * IDE taskfile registers.
4864 *
4865 * This function calls pci_enable_device(), reserves its register
4866 * regions, sets the DMA mask, enables bus master mode, and calls
4867 * ata_device_add().
4868 *
4869 * LOCKING:
4870 * Inherited from PCI layer (may sleep).
4871 *
4872 * RETURNS:
4873 * Zero on success, negative errno-based value on error.
4874 */
4875
4876int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
4877 unsigned int n_ports)
4878{
4879 struct ata_probe_ent *probe_ent = NULL, *probe_ent2 = NULL;
4880 struct ata_port_info *port[2];
4881 u8 tmp8, mask;
4882 unsigned int legacy_mode = 0;
4883 int disable_dev_on_err = 1;
4884 int rc;
4885
4886 DPRINTK("ENTER\n");
4887
4888 port[0] = port_info[0];
4889 if (n_ports > 1)
4890 port[1] = port_info[1];
4891 else
4892 port[1] = port[0];
4893
4894 if ((port[0]->host_flags & ATA_FLAG_NO_LEGACY) == 0
4895 && (pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
4896 /* TODO: What if one channel is in native mode ... */
4897 pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
4898 mask = (1 << 2) | (1 << 0);
4899 if ((tmp8 & mask) != mask)
4900 legacy_mode = (1 << 3);
4901 }
4902
4903 /* FIXME... */
4904 if ((!legacy_mode) && (n_ports > 2)) {
4905 printk(KERN_ERR "ata: BUG: native mode, n_ports > 2\n");
4906 n_ports = 2;
4907 /* For now */
4908 }
4909
4910 /* FIXME: Really, for ATA this isn't safe, because the device may be
4911 multi-purpose and we want to leave it alone if it was already
4912 enabled. Secondly, for shared use, as Arjan says, we want refcounting.
4913
4914 Checking dev->is_enabled is insufficient, as it is not set at
4915 boot for the primary video, which is BIOS-enabled.
4916 */
4917
4918 rc = pci_enable_device(pdev);
4919 if (rc)
4920 return rc;
4921
4922 rc = pci_request_regions(pdev, DRV_NAME);
4923 if (rc) {
4924 disable_dev_on_err = 0;
4925 goto err_out;
4926 }
4927
4928 /* FIXME: Should use platform specific mappers for legacy port ranges */
4929 if (legacy_mode) {
4930 if (!request_region(0x1f0, 8, "libata")) {
4931 struct resource *conflict, res;
4932 res.start = 0x1f0;
4933 res.end = 0x1f0 + 8 - 1;
4934 conflict = ____request_resource(&ioport_resource, &res);
4935 if (!strcmp(conflict->name, "libata"))
4936 legacy_mode |= (1 << 0);
4937 else {
4938 disable_dev_on_err = 0;
4939 printk(KERN_WARNING "ata: 0x1f0 IDE port busy\n");
4940 }
4941 } else
4942 legacy_mode |= (1 << 0);
4943
4944 if (!request_region(0x170, 8, "libata")) {
4945 struct resource *conflict, res;
4946 res.start = 0x170;
4947 res.end = 0x170 + 8 - 1;
4948 conflict = ____request_resource(&ioport_resource, &res);
4949 if (!strcmp(conflict->name, "libata"))
4950 legacy_mode |= (1 << 1);
4951 else {
4952 disable_dev_on_err = 0;
4953 printk(KERN_WARNING "ata: 0x170 IDE port busy\n");
4954 }
4955 } else
4956 legacy_mode |= (1 << 1);
4957 }
4958
4959 /* we have legacy mode, but all ports are unavailable */
4960 if (legacy_mode == (1 << 3)) {
4961 rc = -EBUSY;
4962 goto err_out_regions;
4963 }
4964
4965 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
4966 if (rc)
4967 goto err_out_regions;
4968 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
4969 if (rc)
4970 goto err_out_regions;
4971
4972 if (legacy_mode) {
4973 if (legacy_mode & (1 << 0))
4974 probe_ent = ata_pci_init_legacy_port(pdev, port[0], 0);
4975 if (legacy_mode & (1 << 1))
4976 probe_ent2 = ata_pci_init_legacy_port(pdev, port[1], 1);
4977 } else {
4978 if (n_ports == 2)
4979 probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
4980 else
4981 probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY);
4982 }
4983 if (!probe_ent && !probe_ent2) {
4984 rc = -ENOMEM;
4985 goto err_out_regions;
4986 }
4987
4988 pci_set_master(pdev);
4989
4990 /* FIXME: check ata_device_add return */
4991 if (legacy_mode) {
4992 if (legacy_mode & (1 << 0))
4993 ata_device_add(probe_ent);
4994 if (legacy_mode & (1 << 1))
4995 ata_device_add(probe_ent2);
4996 } else
4997 ata_device_add(probe_ent);
4998
4999 kfree(probe_ent);
5000 kfree(probe_ent2);
5001
5002 return 0;
5003
5004err_out_regions:
5005 if (legacy_mode & (1 << 0))
5006 release_region(0x1f0, 8);
5007 if (legacy_mode & (1 << 1))
5008 release_region(0x170, 8);
5009 pci_release_regions(pdev);
5010err_out:
5011 if (disable_dev_on_err)
5012 pci_disable_device(pdev);
5013 return rc;
5014}
5015
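As a usage sketch (the my_* names are hypothetical and not part of this patch), a driver for a traditional taskfile-register controller needs little more than a thin probe wrapper around this helper:

	static struct ata_port_info my_port_info = {
		.sht		= &my_sht,	/* hypothetical SCSI template */
		.host_flags	= ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
		.pio_mask	= 0x1f,		/* PIO0-4 */
		.udma_mask	= 0x3f,		/* UDMA0-5 */
		.port_ops	= &my_port_ops,	/* hypothetical ops vector */
	};

	static int my_init_one(struct pci_dev *pdev,
			       const struct pci_device_id *ent)
	{
		struct ata_port_info *ppi = &my_port_info;

		return ata_pci_init_one(pdev, &ppi, 1);
	}

The legacy/native probing, region reservation, and error cleanup shown above then all happen inside the library call.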
5016/**
5017 * ata_pci_remove_one - PCI layer callback for device removal 4916 * ata_pci_remove_one - PCI layer callback for device removal
5018 * @pdev: PCI device that was removed 4917 * @pdev: PCI device that was removed
5019 * 4918 *
@@ -5143,7 +5042,7 @@ EXPORT_SYMBOL_GPL(ata_device_add);
5143EXPORT_SYMBOL_GPL(ata_host_set_remove); 5042EXPORT_SYMBOL_GPL(ata_host_set_remove);
5144EXPORT_SYMBOL_GPL(ata_sg_init); 5043EXPORT_SYMBOL_GPL(ata_sg_init);
5145EXPORT_SYMBOL_GPL(ata_sg_init_one); 5044EXPORT_SYMBOL_GPL(ata_sg_init_one);
5146EXPORT_SYMBOL_GPL(ata_qc_complete); 5045EXPORT_SYMBOL_GPL(__ata_qc_complete);
5147EXPORT_SYMBOL_GPL(ata_qc_issue_prot); 5046EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
5148EXPORT_SYMBOL_GPL(ata_eng_timeout); 5047EXPORT_SYMBOL_GPL(ata_eng_timeout);
5149EXPORT_SYMBOL_GPL(ata_tf_load); 5048EXPORT_SYMBOL_GPL(ata_tf_load);
@@ -5169,18 +5068,30 @@ EXPORT_SYMBOL_GPL(ata_port_probe);
5169EXPORT_SYMBOL_GPL(sata_phy_reset); 5068EXPORT_SYMBOL_GPL(sata_phy_reset);
5170EXPORT_SYMBOL_GPL(__sata_phy_reset); 5069EXPORT_SYMBOL_GPL(__sata_phy_reset);
5171EXPORT_SYMBOL_GPL(ata_bus_reset); 5070EXPORT_SYMBOL_GPL(ata_bus_reset);
5071EXPORT_SYMBOL_GPL(ata_std_probeinit);
5072EXPORT_SYMBOL_GPL(ata_std_softreset);
5073EXPORT_SYMBOL_GPL(sata_std_hardreset);
5074EXPORT_SYMBOL_GPL(ata_std_postreset);
5075EXPORT_SYMBOL_GPL(ata_std_probe_reset);
5076EXPORT_SYMBOL_GPL(ata_drive_probe_reset);
5077EXPORT_SYMBOL_GPL(ata_dev_revalidate);
5172EXPORT_SYMBOL_GPL(ata_port_disable); 5078EXPORT_SYMBOL_GPL(ata_port_disable);
5173EXPORT_SYMBOL_GPL(ata_ratelimit); 5079EXPORT_SYMBOL_GPL(ata_ratelimit);
5080EXPORT_SYMBOL_GPL(ata_busy_sleep);
5081EXPORT_SYMBOL_GPL(ata_port_queue_task);
5174EXPORT_SYMBOL_GPL(ata_scsi_ioctl); 5082EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
5175EXPORT_SYMBOL_GPL(ata_scsi_queuecmd); 5083EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
5084EXPORT_SYMBOL_GPL(ata_scsi_timed_out);
5176EXPORT_SYMBOL_GPL(ata_scsi_error); 5085EXPORT_SYMBOL_GPL(ata_scsi_error);
5177EXPORT_SYMBOL_GPL(ata_scsi_slave_config); 5086EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
5178EXPORT_SYMBOL_GPL(ata_scsi_release); 5087EXPORT_SYMBOL_GPL(ata_scsi_release);
5179EXPORT_SYMBOL_GPL(ata_host_intr); 5088EXPORT_SYMBOL_GPL(ata_host_intr);
5180EXPORT_SYMBOL_GPL(ata_dev_classify); 5089EXPORT_SYMBOL_GPL(ata_dev_classify);
5181EXPORT_SYMBOL_GPL(ata_dev_id_string); 5090EXPORT_SYMBOL_GPL(ata_id_string);
5182EXPORT_SYMBOL_GPL(ata_dev_config); 5091EXPORT_SYMBOL_GPL(ata_id_c_string);
5183EXPORT_SYMBOL_GPL(ata_scsi_simulate); 5092EXPORT_SYMBOL_GPL(ata_scsi_simulate);
5093EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
5094EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
5184 5095
5185EXPORT_SYMBOL_GPL(ata_pio_need_iordy); 5096EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
5186EXPORT_SYMBOL_GPL(ata_timing_compute); 5097EXPORT_SYMBOL_GPL(ata_timing_compute);
diff --git a/drivers/scsi/libata-scsi.c b/drivers/scsi/libata-scsi.c
index 59503c9ccac9..ccedb4536977 100644
--- a/drivers/scsi/libata-scsi.c
+++ b/drivers/scsi/libata-scsi.c
@@ -151,7 +151,7 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
151 struct scsi_sense_hdr sshdr; 151 struct scsi_sense_hdr sshdr;
152 enum dma_data_direction data_dir; 152 enum dma_data_direction data_dir;
153 153
154 if (NULL == (void *)arg) 154 if (arg == NULL)
155 return -EINVAL; 155 return -EINVAL;
156 156
157 if (copy_from_user(args, arg, sizeof(args))) 157 if (copy_from_user(args, arg, sizeof(args)))
@@ -201,7 +201,7 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
201 /* Need code to retrieve data from check condition? */ 201 /* Need code to retrieve data from check condition? */
202 202
203 if ((argbuf) 203 if ((argbuf)
204 && copy_to_user((void *)(arg + sizeof(args)), argbuf, argsize)) 204 && copy_to_user(arg + sizeof(args), argbuf, argsize))
205 rc = -EFAULT; 205 rc = -EFAULT;
206error: 206error:
207 if (argbuf) 207 if (argbuf)
@@ -228,7 +228,7 @@ int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg)
228 u8 args[7]; 228 u8 args[7];
229 struct scsi_sense_hdr sshdr; 229 struct scsi_sense_hdr sshdr;
230 230
231 if (NULL == (void *)arg) 231 if (arg == NULL)
232 return -EINVAL; 232 return -EINVAL;
233 233
234 if (copy_from_user(args, arg, sizeof(args))) 234 if (copy_from_user(args, arg, sizeof(args)))
@@ -553,7 +553,7 @@ void ata_gen_ata_desc_sense(struct ata_queued_cmd *qc)
553 /* 553 /*
554 * Read the controller registers. 554 * Read the controller registers.
555 */ 555 */
556 assert(NULL != qc->ap->ops->tf_read); 556 WARN_ON(qc->ap->ops->tf_read == NULL);
557 qc->ap->ops->tf_read(qc->ap, tf); 557 qc->ap->ops->tf_read(qc->ap, tf);
558 558
559 /* 559 /*
@@ -628,7 +628,7 @@ void ata_gen_fixed_sense(struct ata_queued_cmd *qc)
628 /* 628 /*
629 * Read the controller registers. 629 * Read the controller registers.
630 */ 630 */
631 assert(NULL != qc->ap->ops->tf_read); 631 WARN_ON(qc->ap->ops->tf_read == NULL);
632 qc->ap->ops->tf_read(qc->ap, tf); 632 qc->ap->ops->tf_read(qc->ap, tf);
633 633
634 /* 634 /*
@@ -684,23 +684,23 @@ int ata_scsi_slave_config(struct scsi_device *sdev)
684 if (sdev->id < ATA_MAX_DEVICES) { 684 if (sdev->id < ATA_MAX_DEVICES) {
685 struct ata_port *ap; 685 struct ata_port *ap;
686 struct ata_device *dev; 686 struct ata_device *dev;
687 unsigned int max_sectors;
687 688
688 ap = (struct ata_port *) &sdev->host->hostdata[0]; 689 ap = (struct ata_port *) &sdev->host->hostdata[0];
689 dev = &ap->device[sdev->id]; 690 dev = &ap->device[sdev->id];
690 691
691 /* TODO: 1024 is an arbitrary number, not the 692 /* TODO: 2048 is an arbitrary number, not the
692 * hardware maximum. This should be increased to 693 * hardware maximum. This should be increased to
693 * 65534 when Jens Axboe's patch for dynamically 694 * 65534 when Jens Axboe's patch for dynamically
694 * determining max_sectors is merged. 695 * determining max_sectors is merged.
695 */ 696 */
696 if ((dev->flags & ATA_DFLAG_LBA48) && 697 max_sectors = ATA_MAX_SECTORS;
697 ((dev->flags & ATA_DFLAG_LOCK_SECTORS) == 0)) { 698 if (dev->flags & ATA_DFLAG_LBA48)
698 /* 699 max_sectors = 2048;
699 * do not overwrite sdev->host->max_sectors, since 700 if (dev->max_sectors)
700 * other drives on this host may not support LBA48 701 max_sectors = dev->max_sectors;
701 */ 702
702 blk_queue_max_sectors(sdev->request_queue, 2048); 703 blk_queue_max_sectors(sdev->request_queue, max_sectors);
703 }
704 704
705 /* 705 /*
706 * SATA DMA transfers must be multiples of 4 byte, so 706 * SATA DMA transfers must be multiples of 4 byte, so
@@ -717,6 +717,47 @@ int ata_scsi_slave_config(struct scsi_device *sdev)
717} 717}
718 718
719/** 719/**
720 * ata_scsi_timed_out - SCSI layer time out callback
721 * @cmd: timed out SCSI command
722 *
723 * Handles SCSI layer timeout. We race with normal completion of
724 * the qc for @cmd. If the qc is already gone, we lose and let
725 * the scsi command finish (EH_HANDLED). Otherwise, the qc has
726 * timed out and EH should be invoked. Prevent ata_qc_complete()
727 * from finishing it by setting EH_SCHEDULED and return
728 * EH_NOT_HANDLED.
729 *
730 * LOCKING:
731 * Called from timer context
732 *
733 * RETURNS:
734 * EH_HANDLED or EH_NOT_HANDLED
735 */
736enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
737{
738 struct Scsi_Host *host = cmd->device->host;
739 struct ata_port *ap = (struct ata_port *) &host->hostdata[0];
740 unsigned long flags;
741 struct ata_queued_cmd *qc;
742 enum scsi_eh_timer_return ret = EH_HANDLED;
743
744 DPRINTK("ENTER\n");
745
746 spin_lock_irqsave(&ap->host_set->lock, flags);
747 qc = ata_qc_from_tag(ap, ap->active_tag);
748 if (qc) {
749 WARN_ON(qc->scsicmd != cmd);
750 qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
751 qc->err_mask |= AC_ERR_TIMEOUT;
752 ret = EH_NOT_HANDLED;
753 }
754 spin_unlock_irqrestore(&ap->host_set->lock, flags);
755
756 DPRINTK("EXIT, ret=%d\n", ret);
757 return ret;
758}
759
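Drivers opt into this handler through the .eh_timed_out hook of their scsi_host_template; the per-driver template updates later in this patch all follow the pattern sketched below (hypothetical my_sht, using the standard libata defaults):

	static struct scsi_host_template my_sht = {
		.module			= THIS_MODULE,
		.name			= "mydrv",
		.ioctl			= ata_scsi_ioctl,
		.queuecommand		= ata_scsi_queuecmd,
		.eh_timed_out		= ata_scsi_timed_out,
		.eh_strategy_handler	= ata_scsi_error,
		.can_queue		= ATA_DEF_QUEUE,
		.this_id		= ATA_SHT_THIS_ID,
		.sg_tablesize		= LIBATA_MAX_PRD,
		.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
		.emulated		= ATA_SHT_EMULATED,
		.use_clustering		= ATA_SHT_USE_CLUSTERING,
		.proc_name		= "mydrv",
		.dma_boundary		= ATA_DMA_BOUNDARY,
		.slave_configure	= ata_scsi_slave_config,
		.bios_param		= ata_std_bios_param,
	};

Note that the templates also drop their .max_sectors initializer: the transfer-size limit is now set per device in ata_scsi_slave_config() above.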
760/**
720 * ata_scsi_error - SCSI layer error handler callback 761 * ata_scsi_error - SCSI layer error handler callback
721 * @host: SCSI host on which error occurred 762 * @host: SCSI host on which error occurred
722 * 763 *
@@ -732,23 +773,84 @@ int ata_scsi_slave_config(struct scsi_device *sdev)
732int ata_scsi_error(struct Scsi_Host *host) 773int ata_scsi_error(struct Scsi_Host *host)
733{ 774{
734 struct ata_port *ap; 775 struct ata_port *ap;
776 unsigned long flags;
735 777
736 DPRINTK("ENTER\n"); 778 DPRINTK("ENTER\n");
737 779
738 ap = (struct ata_port *) &host->hostdata[0]; 780 ap = (struct ata_port *) &host->hostdata[0];
781
782 spin_lock_irqsave(&ap->host_set->lock, flags);
783 WARN_ON(ap->flags & ATA_FLAG_IN_EH);
784 ap->flags |= ATA_FLAG_IN_EH;
785 WARN_ON(ata_qc_from_tag(ap, ap->active_tag) == NULL);
786 spin_unlock_irqrestore(&ap->host_set->lock, flags);
787
788 ata_port_flush_task(ap);
789
739 ap->ops->eng_timeout(ap); 790 ap->ops->eng_timeout(ap);
740 791
741 /* TODO: this is per-command; when queueing is supported 792 WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q));
742 * this code will either change or move to a more 793
743 * appropriate place 794 scsi_eh_flush_done_q(&ap->eh_done_q);
744 */ 795
745 host->host_failed--; 796 spin_lock_irqsave(&ap->host_set->lock, flags);
746 INIT_LIST_HEAD(&host->eh_cmd_q); 797 ap->flags &= ~ATA_FLAG_IN_EH;
798 spin_unlock_irqrestore(&ap->host_set->lock, flags);
747 799
748 DPRINTK("EXIT\n"); 800 DPRINTK("EXIT\n");
749 return 0; 801 return 0;
750} 802}
751 803
804static void ata_eh_scsidone(struct scsi_cmnd *scmd)
805{
806 /* nada */
807}
808
809static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
810{
811 struct ata_port *ap = qc->ap;
812 struct scsi_cmnd *scmd = qc->scsicmd;
813 unsigned long flags;
814
815 spin_lock_irqsave(&ap->host_set->lock, flags);
816 qc->scsidone = ata_eh_scsidone;
817 __ata_qc_complete(qc);
818 WARN_ON(ata_tag_valid(qc->tag));
819 spin_unlock_irqrestore(&ap->host_set->lock, flags);
820
821 scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
822}
823
824/**
825 * ata_eh_qc_complete - Complete an active ATA command from EH
826 * @qc: Command to complete
827 *
828 * Indicate to the mid and upper layers that an ATA command has
829 * completed. To be used from EH.
830 */
831void ata_eh_qc_complete(struct ata_queued_cmd *qc)
832{
833 struct scsi_cmnd *scmd = qc->scsicmd;
834 scmd->retries = scmd->allowed;
835 __ata_eh_qc_complete(qc);
836}
837
838/**
839 * ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
840 * @qc: Command to retry
841 *
842 * Indicate to the mid and upper layers that an ATA command
843 * should be retried. To be used from EH.
844 *
845 * SCSI midlayer limits the number of retries to scmd->allowed.
846 * This function might need to adjust scmd->retries for commands
847 * which get retried due to unrelated NCQ failures.
848 */
849void ata_eh_qc_retry(struct ata_queued_cmd *qc)
850{
851 __ata_eh_qc_complete(qc);
852}
853
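Taken together, these helpers let a driver's eng_timeout() finish a timed-out command from EH context without the scsidone workarounds they replace; a condensed sketch (mirroring the sata_mv conversion later in this patch, with driver-specific recovery elided):

	static void my_eng_timeout(struct ata_port *ap)
	{
		struct ata_queued_cmd *qc;

		qc = ata_qc_from_tag(ap, ap->active_tag);

		/* ... reset/recover the port hardware here ... */

		qc->err_mask |= AC_ERR_TIMEOUT;
		ata_eh_qc_complete(qc);	/* completes the qc and parks the
					 * scsi_cmnd on ap->eh_done_q until
					 * ata_scsi_error() flushes it */
	}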
752/** 854/**
753 * ata_scsi_start_stop_xlat - Translate SCSI START STOP UNIT command 855 * ata_scsi_start_stop_xlat - Translate SCSI START STOP UNIT command
754 * @qc: Storage for translated ATA taskfile 856 * @qc: Storage for translated ATA taskfile
@@ -985,9 +1087,13 @@ static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc, const u8 *sc
985 if (dev->flags & ATA_DFLAG_LBA) { 1087 if (dev->flags & ATA_DFLAG_LBA) {
986 tf->flags |= ATA_TFLAG_LBA; 1088 tf->flags |= ATA_TFLAG_LBA;
987 1089
988 if (dev->flags & ATA_DFLAG_LBA48) { 1090 if (lba_28_ok(block, n_block)) {
989 if (n_block > (64 * 1024)) 1091 /* use LBA28 */
990 goto invalid_fld; 1092 tf->command = ATA_CMD_VERIFY;
1093 tf->device |= (block >> 24) & 0xf;
1094 } else if (lba_48_ok(block, n_block)) {
1095 if (!(dev->flags & ATA_DFLAG_LBA48))
1096 goto out_of_range;
991 1097
992 /* use LBA48 */ 1098 /* use LBA48 */
993 tf->flags |= ATA_TFLAG_LBA48; 1099 tf->flags |= ATA_TFLAG_LBA48;
@@ -998,15 +1104,9 @@ static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc, const u8 *sc
998 tf->hob_lbah = (block >> 40) & 0xff; 1104 tf->hob_lbah = (block >> 40) & 0xff;
999 tf->hob_lbam = (block >> 32) & 0xff; 1105 tf->hob_lbam = (block >> 32) & 0xff;
1000 tf->hob_lbal = (block >> 24) & 0xff; 1106 tf->hob_lbal = (block >> 24) & 0xff;
1001 } else { 1107 } else
1002 if (n_block > 256) 1108 /* request too large even for LBA48 */
1003 goto invalid_fld; 1109 goto out_of_range;
1004
1005 /* use LBA28 */
1006 tf->command = ATA_CMD_VERIFY;
1007
1008 tf->device |= (block >> 24) & 0xf;
1009 }
1010 1110
1011 tf->nsect = n_block & 0xff; 1111 tf->nsect = n_block & 0xff;
1012 1112
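The lba_28_ok()/lba_48_ok() predicates used above come from <linux/ata.h>; their logic is essentially the following sketch (consult the header for the authoritative version):

	static inline int lba_28_ok(u64 block, u32 n_block)
	{
		/* the last addressed sector must fit in 28 bits, and a
		 * single command can transfer at most 256 sectors */
		return ((block + n_block - 1) < ((u64)1 << 28)) &&
			(n_block <= 256);
	}

	static inline int lba_48_ok(u64 block, u32 n_block)
	{
		/* LBA48: 48-bit addressing, up to 65536 sectors */
		return ((block + n_block - 1) < ((u64)1 << 48)) &&
			(n_block <= 65536);
	}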
@@ -1019,8 +1119,8 @@ static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc, const u8 *sc
1019 /* CHS */ 1119 /* CHS */
1020 u32 sect, head, cyl, track; 1120 u32 sect, head, cyl, track;
1021 1121
1022 if (n_block > 256) 1122 if (!lba_28_ok(block, n_block))
1023 goto invalid_fld; 1123 goto out_of_range;
1024 1124
1025 /* Convert LBA to CHS */ 1125 /* Convert LBA to CHS */
1026 track = (u32)block / dev->sectors; 1126 track = (u32)block / dev->sectors;
@@ -1139,9 +1239,11 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, const u8 *scsicm
1139 if (dev->flags & ATA_DFLAG_LBA) { 1239 if (dev->flags & ATA_DFLAG_LBA) {
1140 tf->flags |= ATA_TFLAG_LBA; 1240 tf->flags |= ATA_TFLAG_LBA;
1141 1241
1142 if (dev->flags & ATA_DFLAG_LBA48) { 1242 if (lba_28_ok(block, n_block)) {
1143 /* The request -may- be too large for LBA48. */ 1243 /* use LBA28 */
1144 if ((block >> 48) || (n_block > 65536)) 1244 tf->device |= (block >> 24) & 0xf;
1245 } else if (lba_48_ok(block, n_block)) {
1246 if (!(dev->flags & ATA_DFLAG_LBA48))
1145 goto out_of_range; 1247 goto out_of_range;
1146 1248
1147 /* use LBA48 */ 1249 /* use LBA48 */
@@ -1152,15 +1254,9 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, const u8 *scsicm
1152 tf->hob_lbah = (block >> 40) & 0xff; 1254 tf->hob_lbah = (block >> 40) & 0xff;
1153 tf->hob_lbam = (block >> 32) & 0xff; 1255 tf->hob_lbam = (block >> 32) & 0xff;
1154 tf->hob_lbal = (block >> 24) & 0xff; 1256 tf->hob_lbal = (block >> 24) & 0xff;
1155 } else { 1257 } else
1156 /* use LBA28 */ 1258 /* request too large even for LBA48 */
1157 1259 goto out_of_range;
1158 /* The request -may- be too large for LBA28. */
1159 if ((block >> 28) || (n_block > 256))
1160 goto out_of_range;
1161
1162 tf->device |= (block >> 24) & 0xf;
1163 }
1164 1260
1165 if (unlikely(ata_rwcmd_protocol(qc) < 0)) 1261 if (unlikely(ata_rwcmd_protocol(qc) < 0))
1166 goto invalid_fld; 1262 goto invalid_fld;
@@ -1178,7 +1274,7 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, const u8 *scsicm
1178 u32 sect, head, cyl, track; 1274 u32 sect, head, cyl, track;
1179 1275
1180 /* The request -may- be too large for CHS addressing. */ 1276 /* The request -may- be too large for CHS addressing. */
1181 if ((block >> 28) || (n_block > 256)) 1277 if (!lba_28_ok(block, n_block))
1182 goto out_of_range; 1278 goto out_of_range;
1183 1279
1184 if (unlikely(ata_rwcmd_protocol(qc) < 0)) 1280 if (unlikely(ata_rwcmd_protocol(qc) < 0))
@@ -1225,7 +1321,7 @@ nothing_to_do:
1225 return 1; 1321 return 1;
1226} 1322}
1227 1323
1228static int ata_scsi_qc_complete(struct ata_queued_cmd *qc) 1324static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
1229{ 1325{
1230 struct scsi_cmnd *cmd = qc->scsicmd; 1326 struct scsi_cmnd *cmd = qc->scsicmd;
1231 u8 *cdb = cmd->cmnd; 1327 u8 *cdb = cmd->cmnd;
@@ -1262,7 +1358,7 @@ static int ata_scsi_qc_complete(struct ata_queued_cmd *qc)
1262 1358
1263 qc->scsidone(cmd); 1359 qc->scsidone(cmd);
1264 1360
1265 return 0; 1361 ata_qc_free(qc);
1266} 1362}
1267 1363
1268/** 1364/**
@@ -1328,8 +1424,9 @@ static void ata_scsi_translate(struct ata_port *ap, struct ata_device *dev,
1328 goto early_finish; 1424 goto early_finish;
1329 1425
1330 /* select device, send command to hardware */ 1426 /* select device, send command to hardware */
1331 if (ata_qc_issue(qc)) 1427 qc->err_mask = ata_qc_issue(qc);
1332 goto err_did; 1428 if (qc->err_mask)
1429 ata_qc_complete(qc);
1333 1430
1334 VPRINTK("EXIT\n"); 1431 VPRINTK("EXIT\n");
1335 return; 1432 return;
@@ -1472,8 +1569,8 @@ unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf,
1472 1569
1473 if (buflen > 35) { 1570 if (buflen > 35) {
1474 memcpy(&rbuf[8], "ATA ", 8); 1571 memcpy(&rbuf[8], "ATA ", 8);
1475 ata_dev_id_string(args->id, &rbuf[16], ATA_ID_PROD_OFS, 16); 1572 ata_id_string(args->id, &rbuf[16], ATA_ID_PROD_OFS, 16);
1476 ata_dev_id_string(args->id, &rbuf[32], ATA_ID_FW_REV_OFS, 4); 1573 ata_id_string(args->id, &rbuf[32], ATA_ID_FW_REV_OFS, 4);
1477 if (rbuf[32] == 0 || rbuf[32] == ' ') 1574 if (rbuf[32] == 0 || rbuf[32] == ' ')
1478 memcpy(&rbuf[32], "n/a ", 4); 1575 memcpy(&rbuf[32], "n/a ", 4);
1479 } 1576 }
@@ -1547,8 +1644,8 @@ unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf,
1547 memcpy(rbuf, hdr, sizeof(hdr)); 1644 memcpy(rbuf, hdr, sizeof(hdr));
1548 1645
1549 if (buflen > (ATA_SERNO_LEN + 4 - 1)) 1646 if (buflen > (ATA_SERNO_LEN + 4 - 1))
1550 ata_dev_id_string(args->id, (unsigned char *) &rbuf[4], 1647 ata_id_string(args->id, (unsigned char *) &rbuf[4],
1551 ATA_ID_SERNO_OFS, ATA_SERNO_LEN); 1648 ATA_ID_SERNO_OFS, ATA_SERNO_LEN);
1552 1649
1553 return 0; 1650 return 0;
1554} 1651}
@@ -1713,15 +1810,12 @@ static int ata_dev_supports_fua(u16 *id)
1713 if (!ata_id_has_fua(id)) 1810 if (!ata_id_has_fua(id))
1714 return 0; 1811 return 0;
1715 1812
1716 model[40] = '\0'; 1813 ata_id_c_string(id, model, ATA_ID_PROD_OFS, sizeof(model));
1717 fw[8] = '\0'; 1814 ata_id_c_string(id, fw, ATA_ID_FW_REV_OFS, sizeof(fw));
1718
1719 ata_dev_id_string(id, model, ATA_ID_PROD_OFS, sizeof(model) - 1);
1720 ata_dev_id_string(id, fw, ATA_ID_FW_REV_OFS, sizeof(fw) - 1);
1721 1815
1722 if (strncmp(model, "Maxtor", 6)) 1816 if (strcmp(model, "Maxtor"))
1723 return 1; 1817 return 1;
1724 if (strncmp(fw, "BANC1G10", 8)) 1818 if (strcmp(fw, "BANC1G10"))
1725 return 1; 1819 return 1;
1726 1820
1727 return 0; /* blacklisted */ 1821 return 0; /* blacklisted */
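The ata_id_c_string() call that replaces the manual termination copies the IDENTIFY string and hands back a trimmed, NUL-terminated C string, roughly as sketched here (not necessarily the exact in-tree body):

	void ata_id_c_string(const u16 *id, unsigned char *s,
			     unsigned int ofs, unsigned int len)
	{
		unsigned char *p;

		ata_id_string(id, s, ofs, len - 1);

		/* trim trailing spaces and terminate */
		p = s + strnlen(s, len - 1);
		while (p > s && p[-1] == ' ')
			p--;
		*p = '\0';
	}

Guaranteed termination is what lets the blacklist test above use plain strcmp() in place of the old strncmp() calls.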
@@ -2015,7 +2109,7 @@ void ata_scsi_badcmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *), u8
2015 done(cmd); 2109 done(cmd);
2016} 2110}
2017 2111
2018static int atapi_sense_complete(struct ata_queued_cmd *qc) 2112static void atapi_sense_complete(struct ata_queued_cmd *qc)
2019{ 2113{
2020 if (qc->err_mask && ((qc->err_mask & AC_ERR_DEV) == 0)) 2114 if (qc->err_mask && ((qc->err_mask & AC_ERR_DEV) == 0))
2021 /* FIXME: not quite right; we don't want the 2115 /* FIXME: not quite right; we don't want the
@@ -2026,7 +2120,7 @@ static int atapi_sense_complete(struct ata_queued_cmd *qc)
2026 ata_gen_ata_desc_sense(qc); 2120 ata_gen_ata_desc_sense(qc);
2027 2121
2028 qc->scsidone(qc->scsicmd); 2122 qc->scsidone(qc->scsicmd);
2029 return 0; 2123 ata_qc_free(qc);
2030} 2124}
2031 2125
2032/* is it pointless to prefer PIO for "safety reasons"? */ 2126/* is it pointless to prefer PIO for "safety reasons"? */
@@ -2056,7 +2150,7 @@ static void atapi_request_sense(struct ata_queued_cmd *qc)
2056 ata_sg_init_one(qc, cmd->sense_buffer, sizeof(cmd->sense_buffer)); 2150 ata_sg_init_one(qc, cmd->sense_buffer, sizeof(cmd->sense_buffer));
2057 qc->dma_dir = DMA_FROM_DEVICE; 2151 qc->dma_dir = DMA_FROM_DEVICE;
2058 2152
2059 memset(&qc->cdb, 0, ap->cdb_len); 2153 memset(&qc->cdb, 0, qc->dev->cdb_len);
2060 qc->cdb[0] = REQUEST_SENSE; 2154 qc->cdb[0] = REQUEST_SENSE;
2061 qc->cdb[4] = SCSI_SENSE_BUFFERSIZE; 2155 qc->cdb[4] = SCSI_SENSE_BUFFERSIZE;
2062 2156
@@ -2075,15 +2169,14 @@ static void atapi_request_sense(struct ata_queued_cmd *qc)
2075 2169
2076 qc->complete_fn = atapi_sense_complete; 2170 qc->complete_fn = atapi_sense_complete;
2077 2171
2078 if (ata_qc_issue(qc)) { 2172 qc->err_mask = ata_qc_issue(qc);
2079 qc->err_mask |= AC_ERR_OTHER; 2173 if (qc->err_mask)
2080 ata_qc_complete(qc); 2174 ata_qc_complete(qc);
2081 }
2082 2175
2083 DPRINTK("EXIT\n"); 2176 DPRINTK("EXIT\n");
2084} 2177}
2085 2178
2086static int atapi_qc_complete(struct ata_queued_cmd *qc) 2179static void atapi_qc_complete(struct ata_queued_cmd *qc)
2087{ 2180{
2088 struct scsi_cmnd *cmd = qc->scsicmd; 2181 struct scsi_cmnd *cmd = qc->scsicmd;
2089 unsigned int err_mask = qc->err_mask; 2182 unsigned int err_mask = qc->err_mask;
@@ -2093,7 +2186,7 @@ static int atapi_qc_complete(struct ata_queued_cmd *qc)
2093 if (unlikely(err_mask & AC_ERR_DEV)) { 2186 if (unlikely(err_mask & AC_ERR_DEV)) {
2094 cmd->result = SAM_STAT_CHECK_CONDITION; 2187 cmd->result = SAM_STAT_CHECK_CONDITION;
2095 atapi_request_sense(qc); 2188 atapi_request_sense(qc);
2096 return 1; 2189 return;
2097 } 2190 }
2098 2191
2099 else if (unlikely(err_mask)) 2192 else if (unlikely(err_mask))
@@ -2133,7 +2226,7 @@ static int atapi_qc_complete(struct ata_queued_cmd *qc)
2133 } 2226 }
2134 2227
2135 qc->scsidone(cmd); 2228 qc->scsidone(cmd);
2136 return 0; 2229 ata_qc_free(qc);
2137} 2230}
2138/** 2231/**
2139 * atapi_xlat - Initialize PACKET taskfile 2232 * atapi_xlat - Initialize PACKET taskfile
@@ -2159,7 +2252,7 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc, const u8 *scsicmd)
2159 if (ata_check_atapi_dma(qc)) 2252 if (ata_check_atapi_dma(qc))
2160 using_pio = 1; 2253 using_pio = 1;
2161 2254
2162 memcpy(&qc->cdb, scsicmd, qc->ap->cdb_len); 2255 memcpy(&qc->cdb, scsicmd, dev->cdb_len);
2163 2256
2164 qc->complete_fn = atapi_qc_complete; 2257 qc->complete_fn = atapi_qc_complete;
2165 2258
@@ -2519,7 +2612,8 @@ out_unlock:
2519 2612
2520/** 2613/**
2521 * ata_scsi_simulate - simulate SCSI command on ATA device 2614 * ata_scsi_simulate - simulate SCSI command on ATA device
2522 * @id: current IDENTIFY data for target device. 2615 * @ap: port the device is connected to
2616 * @dev: the target device
2523 * @cmd: SCSI command being sent to device. 2617 * @cmd: SCSI command being sent to device.
2524 * @done: SCSI command completion function. 2618 * @done: SCSI command completion function.
2525 * 2619 *
diff --git a/drivers/scsi/libata.h b/drivers/scsi/libata.h
index fddaf479a544..f4c48c91b63d 100644
--- a/drivers/scsi/libata.h
+++ b/drivers/scsi/libata.h
@@ -45,8 +45,9 @@ extern int libata_fua;
45extern struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap, 45extern struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
46 struct ata_device *dev); 46 struct ata_device *dev);
47extern int ata_rwcmd_protocol(struct ata_queued_cmd *qc); 47extern int ata_rwcmd_protocol(struct ata_queued_cmd *qc);
48extern void ata_port_flush_task(struct ata_port *ap);
48extern void ata_qc_free(struct ata_queued_cmd *qc); 49extern void ata_qc_free(struct ata_queued_cmd *qc);
49extern int ata_qc_issue(struct ata_queued_cmd *qc); 50extern unsigned int ata_qc_issue(struct ata_queued_cmd *qc);
50extern int ata_check_atapi_dma(struct ata_queued_cmd *qc); 51extern int ata_check_atapi_dma(struct ata_queued_cmd *qc);
51extern void ata_dev_select(struct ata_port *ap, unsigned int device, 52extern void ata_dev_select(struct ata_port *ap, unsigned int device,
52 unsigned int wait, unsigned int can_sleep); 53 unsigned int wait, unsigned int can_sleep);
diff --git a/drivers/scsi/pdc_adma.c b/drivers/scsi/pdc_adma.c
index e8df0c9ec1e6..5f33cc932e70 100644
--- a/drivers/scsi/pdc_adma.c
+++ b/drivers/scsi/pdc_adma.c
@@ -131,7 +131,7 @@ static void adma_host_stop(struct ata_host_set *host_set);
131static void adma_port_stop(struct ata_port *ap); 131static void adma_port_stop(struct ata_port *ap);
132static void adma_phy_reset(struct ata_port *ap); 132static void adma_phy_reset(struct ata_port *ap);
133static void adma_qc_prep(struct ata_queued_cmd *qc); 133static void adma_qc_prep(struct ata_queued_cmd *qc);
134static int adma_qc_issue(struct ata_queued_cmd *qc); 134static unsigned int adma_qc_issue(struct ata_queued_cmd *qc);
135static int adma_check_atapi_dma(struct ata_queued_cmd *qc); 135static int adma_check_atapi_dma(struct ata_queued_cmd *qc);
136static void adma_bmdma_stop(struct ata_queued_cmd *qc); 136static void adma_bmdma_stop(struct ata_queued_cmd *qc);
137static u8 adma_bmdma_status(struct ata_port *ap); 137static u8 adma_bmdma_status(struct ata_port *ap);
@@ -143,11 +143,11 @@ static struct scsi_host_template adma_ata_sht = {
143 .name = DRV_NAME, 143 .name = DRV_NAME,
144 .ioctl = ata_scsi_ioctl, 144 .ioctl = ata_scsi_ioctl,
145 .queuecommand = ata_scsi_queuecmd, 145 .queuecommand = ata_scsi_queuecmd,
146 .eh_timed_out = ata_scsi_timed_out,
146 .eh_strategy_handler = ata_scsi_error, 147 .eh_strategy_handler = ata_scsi_error,
147 .can_queue = ATA_DEF_QUEUE, 148 .can_queue = ATA_DEF_QUEUE,
148 .this_id = ATA_SHT_THIS_ID, 149 .this_id = ATA_SHT_THIS_ID,
149 .sg_tablesize = LIBATA_MAX_PRD, 150 .sg_tablesize = LIBATA_MAX_PRD,
150 .max_sectors = ATA_MAX_SECTORS,
151 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 151 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
152 .emulated = ATA_SHT_EMULATED, 152 .emulated = ATA_SHT_EMULATED,
153 .use_clustering = ENABLE_CLUSTERING, 153 .use_clustering = ENABLE_CLUSTERING,
@@ -419,7 +419,7 @@ static inline void adma_packet_start(struct ata_queued_cmd *qc)
419 writew(aPIOMD4 | aGO, chan + ADMA_CONTROL); 419 writew(aPIOMD4 | aGO, chan + ADMA_CONTROL);
420} 420}
421 421
422static int adma_qc_issue(struct ata_queued_cmd *qc) 422static unsigned int adma_qc_issue(struct ata_queued_cmd *qc)
423{ 423{
424 struct adma_port_priv *pp = qc->ap->private_data; 424 struct adma_port_priv *pp = qc->ap->private_data;
425 425
diff --git a/drivers/scsi/sata_mv.c b/drivers/scsi/sata_mv.c
index 2770005324b4..aceaf56999a5 100644
--- a/drivers/scsi/sata_mv.c
+++ b/drivers/scsi/sata_mv.c
@@ -37,7 +37,7 @@
37#include <asm/io.h> 37#include <asm/io.h>
38 38
39#define DRV_NAME "sata_mv" 39#define DRV_NAME "sata_mv"
40#define DRV_VERSION "0.5" 40#define DRV_VERSION "0.6"
41 41
42enum { 42enum {
43 /* BAR's are enumerated in terms of pci_resource_start() terms */ 43 /* BAR's are enumerated in terms of pci_resource_start() terms */
@@ -228,7 +228,9 @@ enum {
228 MV_HP_ERRATA_50XXB2 = (1 << 2), 228 MV_HP_ERRATA_50XXB2 = (1 << 2),
229 MV_HP_ERRATA_60X1B2 = (1 << 3), 229 MV_HP_ERRATA_60X1B2 = (1 << 3),
230 MV_HP_ERRATA_60X1C0 = (1 << 4), 230 MV_HP_ERRATA_60X1C0 = (1 << 4),
231 MV_HP_50XX = (1 << 5), 231 MV_HP_ERRATA_XX42A0 = (1 << 5),
232 MV_HP_50XX = (1 << 6),
233 MV_HP_GEN_IIE = (1 << 7),
232 234
233 /* Port private flags (pp_flags) */ 235 /* Port private flags (pp_flags) */
234 MV_PP_FLAG_EDMA_EN = (1 << 0), 236 MV_PP_FLAG_EDMA_EN = (1 << 0),
@@ -237,6 +239,9 @@ enum {
237 239
238#define IS_50XX(hpriv) ((hpriv)->hp_flags & MV_HP_50XX) 240#define IS_50XX(hpriv) ((hpriv)->hp_flags & MV_HP_50XX)
239#define IS_60XX(hpriv) (((hpriv)->hp_flags & MV_HP_50XX) == 0) 241#define IS_60XX(hpriv) (((hpriv)->hp_flags & MV_HP_50XX) == 0)
242#define IS_GEN_I(hpriv) IS_50XX(hpriv)
243#define IS_GEN_II(hpriv) IS_60XX(hpriv)
244#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
240 245
241enum { 246enum {
242 /* Our DMA boundary is determined by an ePRD being unable to handle 247 /* Our DMA boundary is determined by an ePRD being unable to handle
@@ -255,6 +260,8 @@ enum chip_type {
255 chip_5080, 260 chip_5080,
256 chip_604x, 261 chip_604x,
257 chip_608x, 262 chip_608x,
263 chip_6042,
264 chip_7042,
258}; 265};
259 266
260/* Command ReQuest Block: 32B */ 267/* Command ReQuest Block: 32B */
@@ -265,6 +272,14 @@ struct mv_crqb {
265 u16 ata_cmd[11]; 272 u16 ata_cmd[11];
266}; 273};
267 274
275struct mv_crqb_iie {
276 u32 addr;
277 u32 addr_hi;
278 u32 flags;
279 u32 len;
280 u32 ata_cmd[4];
281};
282
268/* Command ResPonse Block: 8B */ 283/* Command ResPonse Block: 8B */
269struct mv_crpb { 284struct mv_crpb {
270 u16 id; 285 u16 id;
@@ -328,7 +343,8 @@ static void mv_host_stop(struct ata_host_set *host_set);
328static int mv_port_start(struct ata_port *ap); 343static int mv_port_start(struct ata_port *ap);
329static void mv_port_stop(struct ata_port *ap); 344static void mv_port_stop(struct ata_port *ap);
330static void mv_qc_prep(struct ata_queued_cmd *qc); 345static void mv_qc_prep(struct ata_queued_cmd *qc);
331static int mv_qc_issue(struct ata_queued_cmd *qc); 346static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
347static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
332static irqreturn_t mv_interrupt(int irq, void *dev_instance, 348static irqreturn_t mv_interrupt(int irq, void *dev_instance,
333 struct pt_regs *regs); 349 struct pt_regs *regs);
334static void mv_eng_timeout(struct ata_port *ap); 350static void mv_eng_timeout(struct ata_port *ap);
@@ -362,11 +378,11 @@ static struct scsi_host_template mv_sht = {
362 .name = DRV_NAME, 378 .name = DRV_NAME,
363 .ioctl = ata_scsi_ioctl, 379 .ioctl = ata_scsi_ioctl,
364 .queuecommand = ata_scsi_queuecmd, 380 .queuecommand = ata_scsi_queuecmd,
381 .eh_timed_out = ata_scsi_timed_out,
365 .eh_strategy_handler = ata_scsi_error, 382 .eh_strategy_handler = ata_scsi_error,
366 .can_queue = MV_USE_Q_DEPTH, 383 .can_queue = MV_USE_Q_DEPTH,
367 .this_id = ATA_SHT_THIS_ID, 384 .this_id = ATA_SHT_THIS_ID,
368 .sg_tablesize = MV_MAX_SG_CT / 2, 385 .sg_tablesize = MV_MAX_SG_CT / 2,
369 .max_sectors = ATA_MAX_SECTORS,
370 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 386 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
371 .emulated = ATA_SHT_EMULATED, 387 .emulated = ATA_SHT_EMULATED,
372 .use_clustering = ATA_SHT_USE_CLUSTERING, 388 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -430,6 +446,33 @@ static const struct ata_port_operations mv6_ops = {
430 .host_stop = mv_host_stop, 446 .host_stop = mv_host_stop,
431}; 447};
432 448
449static const struct ata_port_operations mv_iie_ops = {
450 .port_disable = ata_port_disable,
451
452 .tf_load = ata_tf_load,
453 .tf_read = ata_tf_read,
454 .check_status = ata_check_status,
455 .exec_command = ata_exec_command,
456 .dev_select = ata_std_dev_select,
457
458 .phy_reset = mv_phy_reset,
459
460 .qc_prep = mv_qc_prep_iie,
461 .qc_issue = mv_qc_issue,
462
463 .eng_timeout = mv_eng_timeout,
464
465 .irq_handler = mv_interrupt,
466 .irq_clear = mv_irq_clear,
467
468 .scr_read = mv_scr_read,
469 .scr_write = mv_scr_write,
470
471 .port_start = mv_port_start,
472 .port_stop = mv_port_stop,
473 .host_stop = mv_host_stop,
474};
475
433static const struct ata_port_info mv_port_info[] = { 476static const struct ata_port_info mv_port_info[] = {
434 { /* chip_504x */ 477 { /* chip_504x */
435 .sht = &mv_sht, 478 .sht = &mv_sht,
@@ -467,6 +510,21 @@ static const struct ata_port_info mv_port_info[] = {
467 .udma_mask = 0x7f, /* udma0-6 */ 510 .udma_mask = 0x7f, /* udma0-6 */
468 .port_ops = &mv6_ops, 511 .port_ops = &mv6_ops,
469 }, 512 },
513 { /* chip_6042 */
514 .sht = &mv_sht,
515 .host_flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
516 .pio_mask = 0x1f, /* pio0-4 */
517 .udma_mask = 0x7f, /* udma0-6 */
518 .port_ops = &mv_iie_ops,
519 },
520 { /* chip_7042 */
521 .sht = &mv_sht,
522 .host_flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
523 MV_FLAG_DUAL_HC),
524 .pio_mask = 0x1f, /* pio0-4 */
525 .udma_mask = 0x7f, /* udma0-6 */
526 .port_ops = &mv_iie_ops,
527 },
470}; 528};
471 529
472static const struct pci_device_id mv_pci_tbl[] = { 530static const struct pci_device_id mv_pci_tbl[] = {
@@ -477,6 +535,7 @@ static const struct pci_device_id mv_pci_tbl[] = {
477 535
478 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6040), 0, 0, chip_604x}, 536 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6040), 0, 0, chip_604x},
479 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6041), 0, 0, chip_604x}, 537 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6041), 0, 0, chip_604x},
538 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6042), 0, 0, chip_6042},
480 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6080), 0, 0, chip_608x}, 539 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6080), 0, 0, chip_608x},
481 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6081), 0, 0, chip_608x}, 540 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6081), 0, 0, chip_608x},
482 541
@@ -572,8 +631,8 @@ static void mv_irq_clear(struct ata_port *ap)
572 * @base: port base address 631 * @base: port base address
573 * @pp: port private data 632 * @pp: port private data
574 * 633 *
575 * Verify the local cache of the eDMA state is accurate with an 634 * Verify the local cache of the eDMA state is accurate with a
576 * assert. 635 * WARN_ON.
577 * 636 *
578 * LOCKING: 637 * LOCKING:
579 * Inherited from caller. 638 * Inherited from caller.
@@ -584,15 +643,15 @@ static void mv_start_dma(void __iomem *base, struct mv_port_priv *pp)
584 writelfl(EDMA_EN, base + EDMA_CMD_OFS); 643 writelfl(EDMA_EN, base + EDMA_CMD_OFS);
585 pp->pp_flags |= MV_PP_FLAG_EDMA_EN; 644 pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
586 } 645 }
587 assert(EDMA_EN & readl(base + EDMA_CMD_OFS)); 646 WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));
588} 647}
589 648
590/** 649/**
591 * mv_stop_dma - Disable eDMA engine 650 * mv_stop_dma - Disable eDMA engine
592 * @ap: ATA channel to manipulate 651 * @ap: ATA channel to manipulate
593 * 652 *
594 * Verify the local cache of the eDMA state is accurate with an 653 * Verify the local cache of the eDMA state is accurate with a
595 * assert. 654 * WARN_ON.
596 * 655 *
597 * LOCKING: 656 * LOCKING:
598 * Inherited from caller. 657 * Inherited from caller.
@@ -610,7 +669,7 @@ static void mv_stop_dma(struct ata_port *ap)
610 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS); 669 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
611 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; 670 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
612 } else { 671 } else {
613 assert(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS))); 672 WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
614 } 673 }
615 674
616 /* now properly wait for the eDMA to stop */ 675 /* now properly wait for the eDMA to stop */
@@ -773,6 +832,33 @@ static inline void mv_priv_free(struct mv_port_priv *pp, struct device *dev)
773 dma_free_coherent(dev, MV_PORT_PRIV_DMA_SZ, pp->crpb, pp->crpb_dma); 832 dma_free_coherent(dev, MV_PORT_PRIV_DMA_SZ, pp->crpb, pp->crpb_dma);
774} 833}
775 834
835static void mv_edma_cfg(struct mv_host_priv *hpriv, void __iomem *port_mmio)
836{
837 u32 cfg = readl(port_mmio + EDMA_CFG_OFS);
838
839 /* set up non-NCQ EDMA configuration */
840 cfg &= ~0x1f; /* clear queue depth */
841 cfg &= ~EDMA_CFG_NCQ; /* clear NCQ mode */
842 cfg &= ~(1 << 9); /* disable equeue */
843
844 if (IS_GEN_I(hpriv))
845 cfg |= (1 << 8); /* enab config burst size mask */
846
847 else if (IS_GEN_II(hpriv))
848 cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
849
850 else if (IS_GEN_IIE(hpriv)) {
851 cfg |= (1 << 23); /* dis RX PM port mask */
852 cfg &= ~(1 << 16); /* dis FIS-based switching (for now) */
853 cfg &= ~(1 << 19); /* dis 128-entry queue (for now?) */
854 cfg |= (1 << 18); /* enab early completion */
855 cfg |= (1 << 17); /* enab host q cache */
856 cfg |= (1 << 22); /* enab cutthrough */
857 }
858
859 writelfl(cfg, port_mmio + EDMA_CFG_OFS);
860}
861
776/** 862/**
777 * mv_port_start - Port specific init/start routine. 863 * mv_port_start - Port specific init/start routine.
778 * @ap: ATA channel to manipulate 864 * @ap: ATA channel to manipulate
@@ -786,6 +872,7 @@ static inline void mv_priv_free(struct mv_port_priv *pp, struct device *dev)
786static int mv_port_start(struct ata_port *ap) 872static int mv_port_start(struct ata_port *ap)
787{ 873{
788 struct device *dev = ap->host_set->dev; 874 struct device *dev = ap->host_set->dev;
875 struct mv_host_priv *hpriv = ap->host_set->private_data;
789 struct mv_port_priv *pp; 876 struct mv_port_priv *pp;
790 void __iomem *port_mmio = mv_ap_base(ap); 877 void __iomem *port_mmio = mv_ap_base(ap);
791 void *mem; 878 void *mem;
@@ -829,17 +916,26 @@ static int mv_port_start(struct ata_port *ap)
829 pp->sg_tbl = mem; 916 pp->sg_tbl = mem;
830 pp->sg_tbl_dma = mem_dma; 917 pp->sg_tbl_dma = mem_dma;
831 918
832 writelfl(EDMA_CFG_Q_DEPTH | EDMA_CFG_RD_BRST_EXT | 919 mv_edma_cfg(hpriv, port_mmio);
833 EDMA_CFG_WR_BUFF_LEN, port_mmio + EDMA_CFG_OFS);
834 920
835 writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS); 921 writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
836 writelfl(pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK, 922 writelfl(pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK,
837 port_mmio + EDMA_REQ_Q_IN_PTR_OFS); 923 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
838 924
839 writelfl(0, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS); 925 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
840 writelfl(0, port_mmio + EDMA_RSP_Q_IN_PTR_OFS); 926 writelfl(pp->crqb_dma & 0xffffffff,
927 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
928 else
929 writelfl(0, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
841 930
842 writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS); 931 writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
932
933 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
934 writelfl(pp->crpb_dma & 0xffffffff,
935 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
936 else
937 writelfl(0, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
938
843 writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK, 939 writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK,
844 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); 940 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
845 941
@@ -960,21 +1056,19 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
960 struct ata_taskfile *tf; 1056 struct ata_taskfile *tf;
961 u16 flags = 0; 1057 u16 flags = 0;
962 1058
963 if (ATA_PROT_DMA != qc->tf.protocol) { 1059 if (ATA_PROT_DMA != qc->tf.protocol)
964 return; 1060 return;
965 }
966 1061
967 /* the req producer index should be the same as we remember it */ 1062 /* the req producer index should be the same as we remember it */
968 assert(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >> 1063 WARN_ON(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >>
969 EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) == 1064 EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
970 pp->req_producer); 1065 pp->req_producer);
971 1066
972 /* Fill in command request block 1067 /* Fill in command request block
973 */ 1068 */
974 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) { 1069 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
975 flags |= CRQB_FLAG_READ; 1070 flags |= CRQB_FLAG_READ;
976 } 1071 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
977 assert(MV_MAX_Q_DEPTH > qc->tag);
978 flags |= qc->tag << CRQB_TAG_SHIFT; 1072 flags |= qc->tag << CRQB_TAG_SHIFT;
979 1073
980 pp->crqb[pp->req_producer].sg_addr = 1074 pp->crqb[pp->req_producer].sg_addr =
@@ -1029,9 +1123,76 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
1029 mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0); 1123 mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
1030 mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */ 1124 mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */
1031 1125
1032 if (!(qc->flags & ATA_QCFLAG_DMAMAP)) { 1126 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1127 return;
1128 mv_fill_sg(qc);
1129}
1130
1131/**
1132 * mv_qc_prep_iie - Host specific command preparation.
1133 * @qc: queued command to prepare
1134 *
1135 * This routine simply redirects to the general purpose routine
1136 * if the command is not DMA. Otherwise, it handles prep of the CRQB
1137 * (command request block), does some sanity checking, and calls
1138 * the SG load routine.
1139 *
1140 * LOCKING:
1141 * Inherited from caller.
1142 */
1143static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
1144{
1145 struct ata_port *ap = qc->ap;
1146 struct mv_port_priv *pp = ap->private_data;
1147 struct mv_crqb_iie *crqb;
1148 struct ata_taskfile *tf;
1149 u32 flags = 0;
1150
1151 if (ATA_PROT_DMA != qc->tf.protocol)
1152 return;
1153
1154 /* the req producer index should be the same as we remember it */
1155 WARN_ON(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >>
1156 EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
1157 pp->req_producer);
1158
1159 /* Fill in Gen IIE command request block
1160 */
1161 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1162 flags |= CRQB_FLAG_READ;
1163
1164 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1165 flags |= qc->tag << CRQB_TAG_SHIFT;
1166
1167 crqb = (struct mv_crqb_iie *) &pp->crqb[pp->req_producer];
1168 crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
1169 crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
1170 crqb->flags = cpu_to_le32(flags);
1171
1172 tf = &qc->tf;
1173 crqb->ata_cmd[0] = cpu_to_le32(
1174 (tf->command << 16) |
1175 (tf->feature << 24)
1176 );
1177 crqb->ata_cmd[1] = cpu_to_le32(
1178 (tf->lbal << 0) |
1179 (tf->lbam << 8) |
1180 (tf->lbah << 16) |
1181 (tf->device << 24)
1182 );
1183 crqb->ata_cmd[2] = cpu_to_le32(
1184 (tf->hob_lbal << 0) |
1185 (tf->hob_lbam << 8) |
1186 (tf->hob_lbah << 16) |
1187 (tf->hob_feature << 24)
1188 );
1189 crqb->ata_cmd[3] = cpu_to_le32(
1190 (tf->nsect << 0) |
1191 (tf->hob_nsect << 8)
1192 );
1193
1194 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1033 return; 1195 return;
1034 }
1035 mv_fill_sg(qc); 1196 mv_fill_sg(qc);
1036} 1197}
1037 1198
@@ -1047,7 +1208,7 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
1047 * LOCKING: 1208 * LOCKING:
1048 * Inherited from caller. 1209 * Inherited from caller.
1049 */ 1210 */
1050static int mv_qc_issue(struct ata_queued_cmd *qc) 1211static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
1051{ 1212{
1052 void __iomem *port_mmio = mv_ap_base(qc->ap); 1213 void __iomem *port_mmio = mv_ap_base(qc->ap);
1053 struct mv_port_priv *pp = qc->ap->private_data; 1214 struct mv_port_priv *pp = qc->ap->private_data;
@@ -1065,12 +1226,12 @@ static int mv_qc_issue(struct ata_queued_cmd *qc)
1065 in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS); 1226 in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
1066 1227
1067 /* the req producer index should be the same as we remember it */ 1228 /* the req producer index should be the same as we remember it */
1068 assert(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) == 1229 WARN_ON(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
1069 pp->req_producer); 1230 pp->req_producer);
1070 /* until we do queuing, the queue should be empty at this point */ 1231 /* until we do queuing, the queue should be empty at this point */
1071 assert(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) == 1232 WARN_ON(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
1072 ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS) >> 1233 ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS) >>
1073 EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK)); 1234 EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
1074 1235
1075 mv_inc_q_index(&pp->req_producer); /* now incr producer index */ 1236 mv_inc_q_index(&pp->req_producer); /* now incr producer index */
1076 1237
@@ -1090,7 +1251,7 @@ static int mv_qc_issue(struct ata_queued_cmd *qc)
1090 * 1251 *
1091 * This routine is for use when the port is in DMA mode, when it 1252 * This routine is for use when the port is in DMA mode, when it
1092 * will be using the CRPB (command response block) method of 1253 * will be using the CRPB (command response block) method of
1093 * returning command completion information. We assert indices 1254 * returning command completion information. We check indices
1094 * are good, grab status, and bump the response consumer index to 1255 * are good, grab status, and bump the response consumer index to
1095 * prove that we're up to date. 1256 * prove that we're up to date.
1096 * 1257 *
@@ -1106,16 +1267,16 @@ static u8 mv_get_crpb_status(struct ata_port *ap)
1106 out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); 1267 out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
1107 1268
1108 /* the response consumer index should be the same as we remember it */ 1269 /* the response consumer index should be the same as we remember it */
1109 assert(((out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) == 1270 WARN_ON(((out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
1110 pp->rsp_consumer); 1271 pp->rsp_consumer);
1111 1272
1112 /* increment our consumer index... */ 1273 /* increment our consumer index... */
1113 pp->rsp_consumer = mv_inc_q_index(&pp->rsp_consumer); 1274 pp->rsp_consumer = mv_inc_q_index(&pp->rsp_consumer);
1114 1275
1115 /* and, until we do NCQ, there should only be 1 CRPB waiting */ 1276 /* and, until we do NCQ, there should only be 1 CRPB waiting */
1116 assert(((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS) >> 1277 WARN_ON(((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS) >>
1117 EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) == 1278 EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
1118 pp->rsp_consumer); 1279 pp->rsp_consumer);
1119 1280
1120 /* write out our inc'd consumer index so EDMA knows we're caught up */ 1281 /* write out our inc'd consumer index so EDMA knows we're caught up */
1121 out_ptr &= EDMA_RSP_Q_BASE_LO_MASK; 1282 out_ptr &= EDMA_RSP_Q_BASE_LO_MASK;
@@ -1681,6 +1842,12 @@ static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1681 m2 |= hpriv->signal[port].pre; 1842 m2 |= hpriv->signal[port].pre;
1682 m2 &= ~(1 << 16); 1843 m2 &= ~(1 << 16);
1683 1844
1845 /* according to mvSata 3.6.1, some IIE values are fixed */
1846 if (IS_GEN_IIE(hpriv)) {
1847 m2 &= ~0xC30FF01F;
1848 m2 |= 0x0000900F;
1849 }
1850
1684 writel(m2, port_mmio + PHY_MODE2); 1851 writel(m2, port_mmio + PHY_MODE2);
1685} 1852}
1686 1853
@@ -1846,7 +2013,6 @@ static void mv_phy_reset(struct ata_port *ap)
1846static void mv_eng_timeout(struct ata_port *ap) 2013static void mv_eng_timeout(struct ata_port *ap)
1847{ 2014{
1848 struct ata_queued_cmd *qc; 2015 struct ata_queued_cmd *qc;
1849 unsigned long flags;
1850 2016
1851 printk(KERN_ERR "ata%u: Entering mv_eng_timeout\n",ap->id); 2017 printk(KERN_ERR "ata%u: Entering mv_eng_timeout\n",ap->id);
1852 DPRINTK("All regs @ start of eng_timeout\n"); 2018 DPRINTK("All regs @ start of eng_timeout\n");
@@ -1861,22 +2027,8 @@ static void mv_eng_timeout(struct ata_port *ap)
1861 mv_err_intr(ap); 2027 mv_err_intr(ap);
1862 mv_stop_and_reset(ap); 2028 mv_stop_and_reset(ap);
1863 2029
1864 if (!qc) { 2030 qc->err_mask |= AC_ERR_TIMEOUT;
1865 printk(KERN_ERR "ata%u: BUG: timeout without command\n", 2031 ata_eh_qc_complete(qc);
1866 ap->id);
1867 } else {
1868 /* hack alert! We cannot use the supplied completion
1869 * function from inside the ->eh_strategy_handler() thread.
1870 * libata is the only user of ->eh_strategy_handler() in
1871 * any kernel, so the default scsi_done() assumes it is
1872 * not being called from the SCSI EH.
1873 */
1874 spin_lock_irqsave(&ap->host_set->lock, flags);
1875 qc->scsidone = scsi_finish_command;
1876 qc->err_mask |= AC_ERR_OTHER;
1877 ata_qc_complete(qc);
1878 spin_unlock_irqrestore(&ap->host_set->lock, flags);
1879 }
1880} 2032}
1881 2033
1882/** 2034/**
@@ -1995,6 +2147,27 @@ static int mv_chip_id(struct pci_dev *pdev, struct mv_host_priv *hpriv,
1995 } 2147 }
1996 break; 2148 break;
1997 2149
2150 case chip_7042:
2151 case chip_6042:
2152 hpriv->ops = &mv6xxx_ops;
2153
2154 hp_flags |= MV_HP_GEN_IIE;
2155
2156 switch (rev_id) {
2157 case 0x0:
2158 hp_flags |= MV_HP_ERRATA_XX42A0;
2159 break;
2160 case 0x1:
2161 hp_flags |= MV_HP_ERRATA_60X1C0;
2162 break;
2163 default:
2164 dev_printk(KERN_WARNING, &pdev->dev,
2165 "Applying 60X1C0 workarounds to unknown rev\n");
2166 hp_flags |= MV_HP_ERRATA_60X1C0;
2167 break;
2168 }
2169 break;
2170
1998 default: 2171 default:
1999 printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n", board_idx); 2172 printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n", board_idx);
2000 return 1; 2173 return 1;
diff --git a/drivers/scsi/sata_nv.c b/drivers/scsi/sata_nv.c
index bbbb55eeb73a..caffadc2e0ae 100644
--- a/drivers/scsi/sata_nv.c
+++ b/drivers/scsi/sata_nv.c
@@ -229,11 +229,11 @@ static struct scsi_host_template nv_sht = {
229 .name = DRV_NAME, 229 .name = DRV_NAME,
230 .ioctl = ata_scsi_ioctl, 230 .ioctl = ata_scsi_ioctl,
231 .queuecommand = ata_scsi_queuecmd, 231 .queuecommand = ata_scsi_queuecmd,
232 .eh_timed_out = ata_scsi_timed_out,
232 .eh_strategy_handler = ata_scsi_error, 233 .eh_strategy_handler = ata_scsi_error,
233 .can_queue = ATA_DEF_QUEUE, 234 .can_queue = ATA_DEF_QUEUE,
234 .this_id = ATA_SHT_THIS_ID, 235 .this_id = ATA_SHT_THIS_ID,
235 .sg_tablesize = LIBATA_MAX_PRD, 236 .sg_tablesize = LIBATA_MAX_PRD,
236 .max_sectors = ATA_MAX_SECTORS,
237 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 237 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
238 .emulated = ATA_SHT_EMULATED, 238 .emulated = ATA_SHT_EMULATED,
239 .use_clustering = ATA_SHT_USE_CLUSTERING, 239 .use_clustering = ATA_SHT_USE_CLUSTERING,
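
Every host template in this patch gains .eh_timed_out = ata_scsi_timed_out and loses the host-wide .max_sectors cap (sata_sil.c below now applies its mod15write limit per device instead). The midlayer invokes ->eh_timed_out from the command timer, and the return value decides whether the timer is rearmed or the command escalates to the EH thread. A hedged sketch of that contract; command_still_in_flight() is a hypothetical helper:

static enum scsi_eh_timer_return example_timed_out(struct scsi_cmnd *cmd)
{
	if (command_still_in_flight(cmd))	/* hypothetical check */
		return EH_RESET_TIMER;		/* grant the command more time */

	return EH_NOT_HANDLED;			/* hand it to the SCSI EH */
}
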
diff --git a/drivers/scsi/sata_promise.c b/drivers/scsi/sata_promise.c
index b0b0a69b3563..84cb3940ad88 100644
--- a/drivers/scsi/sata_promise.c
+++ b/drivers/scsi/sata_promise.c
@@ -46,7 +46,7 @@
46#include "sata_promise.h" 46#include "sata_promise.h"
47 47
48#define DRV_NAME "sata_promise" 48#define DRV_NAME "sata_promise"
49#define DRV_VERSION "1.03" 49#define DRV_VERSION "1.04"
50 50
51 51
52enum { 52enum {
@@ -58,6 +58,7 @@ enum {
58 PDC_GLOBAL_CTL = 0x48, /* Global control/status (per port) */ 58 PDC_GLOBAL_CTL = 0x48, /* Global control/status (per port) */
59 PDC_CTLSTAT = 0x60, /* IDE control and status (per port) */ 59 PDC_CTLSTAT = 0x60, /* IDE control and status (per port) */
60 PDC_SATA_PLUG_CSR = 0x6C, /* SATA Plug control/status reg */ 60 PDC_SATA_PLUG_CSR = 0x6C, /* SATA Plug control/status reg */
61 PDC2_SATA_PLUG_CSR = 0x60, /* SATAII Plug control/status reg */
61 PDC_SLEW_CTL = 0x470, /* slew rate control reg */ 62 PDC_SLEW_CTL = 0x470, /* slew rate control reg */
62 63
63 PDC_ERR_MASK = (1<<19) | (1<<20) | (1<<21) | (1<<22) | 64 PDC_ERR_MASK = (1<<19) | (1<<20) | (1<<21) | (1<<22) |
@@ -67,8 +68,10 @@ enum {
67 board_20319 = 1, /* FastTrak S150 TX4 */ 68 board_20319 = 1, /* FastTrak S150 TX4 */
68 board_20619 = 2, /* FastTrak TX4000 */ 69 board_20619 = 2, /* FastTrak TX4000 */
69 board_20771 = 3, /* FastTrak TX2300 */ 70 board_20771 = 3, /* FastTrak TX2300 */
71 board_2057x = 4, /* SATAII150 Tx2plus */
72 board_40518 = 5, /* SATAII150 Tx4 */
70 73
71 PDC_HAS_PATA = (1 << 1), /* PDC20375 has PATA */ 74 PDC_HAS_PATA = (1 << 1), /* PDC20375/20575 has PATA */
72 75
73 PDC_RESET = (1 << 11), /* HDMA reset */ 76 PDC_RESET = (1 << 11), /* HDMA reset */
74 77
@@ -82,6 +85,10 @@ struct pdc_port_priv {
82 dma_addr_t pkt_dma; 85 dma_addr_t pkt_dma;
83}; 86};
84 87
88struct pdc_host_priv {
89 int hotplug_offset;
90};
91
85static u32 pdc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg); 92static u32 pdc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg);
86static void pdc_sata_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); 93static void pdc_sata_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
87static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); 94static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
@@ -95,7 +102,8 @@ static void pdc_qc_prep(struct ata_queued_cmd *qc);
95static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf); 102static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
96static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf); 103static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
97static void pdc_irq_clear(struct ata_port *ap); 104static void pdc_irq_clear(struct ata_port *ap);
98static int pdc_qc_issue_prot(struct ata_queued_cmd *qc); 105static unsigned int pdc_qc_issue_prot(struct ata_queued_cmd *qc);
106static void pdc_host_stop(struct ata_host_set *host_set);
99 107
100 108
101static struct scsi_host_template pdc_ata_sht = { 109static struct scsi_host_template pdc_ata_sht = {
@@ -103,11 +111,11 @@ static struct scsi_host_template pdc_ata_sht = {
103 .name = DRV_NAME, 111 .name = DRV_NAME,
104 .ioctl = ata_scsi_ioctl, 112 .ioctl = ata_scsi_ioctl,
105 .queuecommand = ata_scsi_queuecmd, 113 .queuecommand = ata_scsi_queuecmd,
114 .eh_timed_out = ata_scsi_timed_out,
106 .eh_strategy_handler = ata_scsi_error, 115 .eh_strategy_handler = ata_scsi_error,
107 .can_queue = ATA_DEF_QUEUE, 116 .can_queue = ATA_DEF_QUEUE,
108 .this_id = ATA_SHT_THIS_ID, 117 .this_id = ATA_SHT_THIS_ID,
109 .sg_tablesize = LIBATA_MAX_PRD, 118 .sg_tablesize = LIBATA_MAX_PRD,
110 .max_sectors = ATA_MAX_SECTORS,
111 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 119 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
112 .emulated = ATA_SHT_EMULATED, 120 .emulated = ATA_SHT_EMULATED,
113 .use_clustering = ATA_SHT_USE_CLUSTERING, 121 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -137,7 +145,7 @@ static const struct ata_port_operations pdc_sata_ops = {
137 .scr_write = pdc_sata_scr_write, 145 .scr_write = pdc_sata_scr_write,
138 .port_start = pdc_port_start, 146 .port_start = pdc_port_start,
139 .port_stop = pdc_port_stop, 147 .port_stop = pdc_port_stop,
140 .host_stop = ata_pci_host_stop, 148 .host_stop = pdc_host_stop,
141}; 149};
142 150
143static const struct ata_port_operations pdc_pata_ops = { 151static const struct ata_port_operations pdc_pata_ops = {
@@ -158,7 +166,7 @@ static const struct ata_port_operations pdc_pata_ops = {
158 166
159 .port_start = pdc_port_start, 167 .port_start = pdc_port_start,
160 .port_stop = pdc_port_stop, 168 .port_stop = pdc_port_stop,
161 .host_stop = ata_pci_host_stop, 169 .host_stop = pdc_host_stop,
162}; 170};
163 171
164static const struct ata_port_info pdc_port_info[] = { 172static const struct ata_port_info pdc_port_info[] = {
@@ -201,6 +209,26 @@ static const struct ata_port_info pdc_port_info[] = {
201 .udma_mask = 0x7f, /* udma0-6 ; FIXME */ 209 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
202 .port_ops = &pdc_sata_ops, 210 .port_ops = &pdc_sata_ops,
203 }, 211 },
212
213 /* board_2057x */
214 {
215 .sht = &pdc_ata_sht,
216 .host_flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA,
217 .pio_mask = 0x1f, /* pio0-4 */
218 .mwdma_mask = 0x07, /* mwdma0-2 */
219 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
220 .port_ops = &pdc_sata_ops,
221 },
222
223 /* board_40518 */
224 {
225 .sht = &pdc_ata_sht,
226 .host_flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA,
227 .pio_mask = 0x1f, /* pio0-4 */
228 .mwdma_mask = 0x07, /* mwdma0-2 */
229 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
230 .port_ops = &pdc_sata_ops,
231 },
204}; 232};
205 233
206static const struct pci_device_id pdc_ata_pci_tbl[] = { 234static const struct pci_device_id pdc_ata_pci_tbl[] = {
@@ -217,9 +245,9 @@ static const struct pci_device_id pdc_ata_pci_tbl[] = {
217 { PCI_VENDOR_ID_PROMISE, 0x3376, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 245 { PCI_VENDOR_ID_PROMISE, 0x3376, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
218 board_2037x }, 246 board_2037x },
219 { PCI_VENDOR_ID_PROMISE, 0x3574, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 247 { PCI_VENDOR_ID_PROMISE, 0x3574, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
220 board_2037x }, 248 board_2057x },
221 { PCI_VENDOR_ID_PROMISE, 0x3d75, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 249 { PCI_VENDOR_ID_PROMISE, 0x3d75, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
222 board_2037x }, 250 board_2057x },
223 { PCI_VENDOR_ID_PROMISE, 0x3d73, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 251 { PCI_VENDOR_ID_PROMISE, 0x3d73, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
224 board_2037x }, 252 board_2037x },
225 253
@@ -227,12 +255,14 @@ static const struct pci_device_id pdc_ata_pci_tbl[] = {
227 board_20319 }, 255 board_20319 },
228 { PCI_VENDOR_ID_PROMISE, 0x3319, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 256 { PCI_VENDOR_ID_PROMISE, 0x3319, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
229 board_20319 }, 257 board_20319 },
258 { PCI_VENDOR_ID_PROMISE, 0x3515, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
259 board_20319 },
230 { PCI_VENDOR_ID_PROMISE, 0x3519, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 260 { PCI_VENDOR_ID_PROMISE, 0x3519, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
231 board_20319 }, 261 board_20319 },
232 { PCI_VENDOR_ID_PROMISE, 0x3d17, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 262 { PCI_VENDOR_ID_PROMISE, 0x3d17, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
233 board_20319 }, 263 board_20319 },
234 { PCI_VENDOR_ID_PROMISE, 0x3d18, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 264 { PCI_VENDOR_ID_PROMISE, 0x3d18, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
235 board_20319 }, 265 board_40518 },
236 266
237 { PCI_VENDOR_ID_PROMISE, 0x6629, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 267 { PCI_VENDOR_ID_PROMISE, 0x6629, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
238 board_20619 }, 268 board_20619 },
@@ -261,12 +291,11 @@ static int pdc_port_start(struct ata_port *ap)
261 if (rc) 291 if (rc)
262 return rc; 292 return rc;
263 293
264 pp = kmalloc(sizeof(*pp), GFP_KERNEL); 294 pp = kzalloc(sizeof(*pp), GFP_KERNEL);
265 if (!pp) { 295 if (!pp) {
266 rc = -ENOMEM; 296 rc = -ENOMEM;
267 goto err_out; 297 goto err_out;
268 } 298 }
269 memset(pp, 0, sizeof(*pp));
270 299
271 pp->pkt = dma_alloc_coherent(dev, 128, &pp->pkt_dma, GFP_KERNEL); 300 pp->pkt = dma_alloc_coherent(dev, 128, &pp->pkt_dma, GFP_KERNEL);
272 if (!pp->pkt) { 301 if (!pp->pkt) {
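
The kmalloc()+memset() pair above collapses into kzalloc(), as it does again in pdc_ata_init_one() and in sata_sil.c further down; kzalloc() hands back memory that is already zeroed, so the follow-up memset() (and the risk of forgetting it) disappears:

	struct pdc_port_priv *pp = kzalloc(sizeof(*pp), GFP_KERNEL);

	if (!pp)
		return -ENOMEM;	/* on success, *pp is already all zeroes */
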
@@ -298,6 +327,16 @@ static void pdc_port_stop(struct ata_port *ap)
298} 327}
299 328
300 329
330static void pdc_host_stop(struct ata_host_set *host_set)
331{
332 struct pdc_host_priv *hp = host_set->private_data;
333
334 ata_pci_host_stop(host_set);
335
336 kfree(hp);
337}
338
339
301static void pdc_reset_port(struct ata_port *ap) 340static void pdc_reset_port(struct ata_port *ap)
302{ 341{
303 void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr + PDC_CTLSTAT; 342 void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr + PDC_CTLSTAT;
@@ -394,19 +433,6 @@ static void pdc_eng_timeout(struct ata_port *ap)
394 spin_lock_irqsave(&host_set->lock, flags); 433 spin_lock_irqsave(&host_set->lock, flags);
395 434
396 qc = ata_qc_from_tag(ap, ap->active_tag); 435 qc = ata_qc_from_tag(ap, ap->active_tag);
397 if (!qc) {
398 printk(KERN_ERR "ata%u: BUG: timeout without command\n",
399 ap->id);
400 goto out;
401 }
402
403 /* hack alert! We cannot use the supplied completion
404 * function from inside the ->eh_strategy_handler() thread.
405 * libata is the only user of ->eh_strategy_handler() in
406 * any kernel, so the default scsi_done() assumes it is
407 * not being called from the SCSI EH.
408 */
409 qc->scsidone = scsi_finish_command;
410 436
411 switch (qc->tf.protocol) { 437 switch (qc->tf.protocol) {
412 case ATA_PROT_DMA: 438 case ATA_PROT_DMA:
@@ -414,7 +440,6 @@ static void pdc_eng_timeout(struct ata_port *ap)
414 printk(KERN_ERR "ata%u: command timeout\n", ap->id); 440 printk(KERN_ERR "ata%u: command timeout\n", ap->id);
415 drv_stat = ata_wait_idle(ap); 441 drv_stat = ata_wait_idle(ap);
416 qc->err_mask |= __ac_err_mask(drv_stat); 442 qc->err_mask |= __ac_err_mask(drv_stat);
417 ata_qc_complete(qc);
418 break; 443 break;
419 444
420 default: 445 default:
@@ -424,12 +449,11 @@ static void pdc_eng_timeout(struct ata_port *ap)
424 ap->id, qc->tf.command, drv_stat); 449 ap->id, qc->tf.command, drv_stat);
425 450
426 qc->err_mask |= ac_err_mask(drv_stat); 451 qc->err_mask |= ac_err_mask(drv_stat);
427 ata_qc_complete(qc);
428 break; 452 break;
429 } 453 }
430 454
431out:
432 spin_unlock_irqrestore(&host_set->lock, flags); 455 spin_unlock_irqrestore(&host_set->lock, flags);
456 ata_eh_qc_complete(qc);
433 DPRINTK("EXIT\n"); 457 DPRINTK("EXIT\n");
434} 458}
435 459
@@ -495,14 +519,15 @@ static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *r
495 VPRINTK("QUICK EXIT 2\n"); 519 VPRINTK("QUICK EXIT 2\n");
496 return IRQ_NONE; 520 return IRQ_NONE;
497 } 521 }
522
523 spin_lock(&host_set->lock);
524
498 mask &= 0xffff; /* only 16 tags possible */ 525 mask &= 0xffff; /* only 16 tags possible */
499 if (!mask) { 526 if (!mask) {
500 VPRINTK("QUICK EXIT 3\n"); 527 VPRINTK("QUICK EXIT 3\n");
501 return IRQ_NONE; 528 goto done_irq;
502 } 529 }
503 530
504 spin_lock(&host_set->lock);
505
506 writel(mask, mmio_base + PDC_INT_SEQMASK); 531 writel(mask, mmio_base + PDC_INT_SEQMASK);
507 532
508 for (i = 0; i < host_set->n_ports; i++) { 533 for (i = 0; i < host_set->n_ports; i++) {
@@ -519,10 +544,10 @@ static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *r
519 } 544 }
520 } 545 }
521 546
522 spin_unlock(&host_set->lock);
523
524 VPRINTK("EXIT\n"); 547 VPRINTK("EXIT\n");
525 548
549done_irq:
550 spin_unlock(&host_set->lock);
526 return IRQ_RETVAL(handled); 551 return IRQ_RETVAL(handled);
527} 552}
528 553
@@ -544,7 +569,7 @@ static inline void pdc_packet_start(struct ata_queued_cmd *qc)
544 readl((void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); /* flush */ 569 readl((void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); /* flush */
545} 570}
546 571
547static int pdc_qc_issue_prot(struct ata_queued_cmd *qc) 572static unsigned int pdc_qc_issue_prot(struct ata_queued_cmd *qc)
548{ 573{
549 switch (qc->tf.protocol) { 574 switch (qc->tf.protocol) {
550 case ATA_PROT_DMA: 575 case ATA_PROT_DMA:
@@ -600,6 +625,8 @@ static void pdc_ata_setup_port(struct ata_ioports *port, unsigned long base)
600static void pdc_host_init(unsigned int chip_id, struct ata_probe_ent *pe) 625static void pdc_host_init(unsigned int chip_id, struct ata_probe_ent *pe)
601{ 626{
602 void __iomem *mmio = pe->mmio_base; 627 void __iomem *mmio = pe->mmio_base;
628 struct pdc_host_priv *hp = pe->private_data;
629 int hotplug_offset = hp->hotplug_offset;
603 u32 tmp; 630 u32 tmp;
604 631
605 /* 632 /*
@@ -614,12 +641,12 @@ static void pdc_host_init(unsigned int chip_id, struct ata_probe_ent *pe)
614 writel(tmp, mmio + PDC_FLASH_CTL); 641 writel(tmp, mmio + PDC_FLASH_CTL);
615 642
616 /* clear plug/unplug flags for all ports */ 643 /* clear plug/unplug flags for all ports */
617 tmp = readl(mmio + PDC_SATA_PLUG_CSR); 644 tmp = readl(mmio + hotplug_offset);
618 writel(tmp | 0xff, mmio + PDC_SATA_PLUG_CSR); 645 writel(tmp | 0xff, mmio + hotplug_offset);
619 646
620 /* mask plug/unplug ints */ 647 /* mask plug/unplug ints */
621 tmp = readl(mmio + PDC_SATA_PLUG_CSR); 648 tmp = readl(mmio + hotplug_offset);
622 writel(tmp | 0xff0000, mmio + PDC_SATA_PLUG_CSR); 649 writel(tmp | 0xff0000, mmio + hotplug_offset);
623 650
624 /* reduce TBG clock to 133 MHz. */ 651 /* reduce TBG clock to 133 MHz. */
625 tmp = readl(mmio + PDC_TBG_MODE); 652 tmp = readl(mmio + PDC_TBG_MODE);
@@ -641,6 +668,7 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
641{ 668{
642 static int printed_version; 669 static int printed_version;
643 struct ata_probe_ent *probe_ent = NULL; 670 struct ata_probe_ent *probe_ent = NULL;
671 struct pdc_host_priv *hp;
644 unsigned long base; 672 unsigned long base;
645 void __iomem *mmio_base; 673 void __iomem *mmio_base;
646 unsigned int board_idx = (unsigned int) ent->driver_data; 674 unsigned int board_idx = (unsigned int) ent->driver_data;
@@ -671,13 +699,12 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
671 if (rc) 699 if (rc)
672 goto err_out_regions; 700 goto err_out_regions;
673 701
674 probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL); 702 probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
675 if (probe_ent == NULL) { 703 if (probe_ent == NULL) {
676 rc = -ENOMEM; 704 rc = -ENOMEM;
677 goto err_out_regions; 705 goto err_out_regions;
678 } 706 }
679 707
680 memset(probe_ent, 0, sizeof(*probe_ent));
681 probe_ent->dev = pci_dev_to_dev(pdev); 708 probe_ent->dev = pci_dev_to_dev(pdev);
682 INIT_LIST_HEAD(&probe_ent->node); 709 INIT_LIST_HEAD(&probe_ent->node);
683 710
@@ -688,6 +715,16 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
688 } 715 }
689 base = (unsigned long) mmio_base; 716 base = (unsigned long) mmio_base;
690 717
718 hp = kzalloc(sizeof(*hp), GFP_KERNEL);
719 if (hp == NULL) {
720 rc = -ENOMEM;
721 goto err_out_free_ent;
722 }
723
724 /* Set default hotplug offset */
725 hp->hotplug_offset = PDC_SATA_PLUG_CSR;
726 probe_ent->private_data = hp;
727
691 probe_ent->sht = pdc_port_info[board_idx].sht; 728 probe_ent->sht = pdc_port_info[board_idx].sht;
692 probe_ent->host_flags = pdc_port_info[board_idx].host_flags; 729 probe_ent->host_flags = pdc_port_info[board_idx].host_flags;
693 probe_ent->pio_mask = pdc_port_info[board_idx].pio_mask; 730 probe_ent->pio_mask = pdc_port_info[board_idx].pio_mask;
@@ -707,6 +744,10 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
707 744
708 /* notice 4-port boards */ 745 /* notice 4-port boards */
709 switch (board_idx) { 746 switch (board_idx) {
747 case board_40518:
748 /* Override hotplug offset for SATAII150 */
749 hp->hotplug_offset = PDC2_SATA_PLUG_CSR;
750 /* Fall through */
710 case board_20319: 751 case board_20319:
711 probe_ent->n_ports = 4; 752 probe_ent->n_ports = 4;
712 753
@@ -716,6 +757,10 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
716 probe_ent->port[2].scr_addr = base + 0x600; 757 probe_ent->port[2].scr_addr = base + 0x600;
717 probe_ent->port[3].scr_addr = base + 0x700; 758 probe_ent->port[3].scr_addr = base + 0x700;
718 break; 759 break;
760 case board_2057x:
761 /* Override hotplug offset for SATAII150 */
762 hp->hotplug_offset = PDC2_SATA_PLUG_CSR;
763 /* Fall through */
719 case board_2037x: 764 case board_2037x:
720 probe_ent->n_ports = 2; 765 probe_ent->n_ports = 2;
721 break; 766 break;
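
The two new board cases above set the SATAII-specific hotplug register offset and then deliberately fall through, keeping the port-count setup shared with the older boards. A runnable model of that switch; 0x6C and 0x60 are PDC_SATA_PLUG_CSR and PDC2_SATA_PLUG_CSR from this patch:

#include <stdio.h>

enum { board_2037x, board_20319, board_2057x, board_40518 };

int main(void)
{
	int board = board_2057x;
	unsigned int hotplug_offset = 0x6C;	/* default: PDC_SATA_PLUG_CSR */
	int n_ports = 0;

	switch (board) {
	case board_40518:
		hotplug_offset = 0x60;	/* PDC2_SATA_PLUG_CSR */
		/* fall through */
	case board_20319:
		n_ports = 4;
		break;
	case board_2057x:
		hotplug_offset = 0x60;	/* PDC2_SATA_PLUG_CSR */
		/* fall through */
	case board_2037x:
		n_ports = 2;
		break;
	}

	printf("ports=%d hotplug=0x%X\n", n_ports, hotplug_offset);
	return 0;
}
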
@@ -741,8 +786,10 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
741 /* initialize adapter */ 786 /* initialize adapter */
742 pdc_host_init(board_idx, probe_ent); 787 pdc_host_init(board_idx, probe_ent);
743 788
744 /* FIXME: check ata_device_add return value */ 789 /* FIXME: do we need to free anything besides hp? */
745 ata_device_add(probe_ent); 790 if (!ata_device_add(probe_ent))
791 kfree(hp);
792
746 kfree(probe_ent); 793 kfree(probe_ent);
747 794
748 return 0; 795 return 0;
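
Taken together, the sata_promise.c hunks give the driver per-host private data with one clear ownership rule: the probe routine allocates it and publishes it through probe_ent->private_data, pdc_host_stop() frees it on normal teardown, and the probe routine frees it itself when ata_device_add() registers nothing. A condensed sketch of that lifetime (kernel types assumed, not compilable standalone):

static int example_probe(struct ata_probe_ent *probe_ent)
{
	struct pdc_host_priv *hp = kzalloc(sizeof(*hp), GFP_KERNEL);

	if (!hp)
		return -ENOMEM;

	hp->hotplug_offset = PDC_SATA_PLUG_CSR;	/* default; boards override */
	probe_ent->private_data = hp;

	if (!ata_device_add(probe_ent))
		kfree(hp);	/* no ports registered: free it right here */
	kfree(probe_ent);	/* the probe entry is transient either way */
	return 0;
}

static void example_host_stop(struct ata_host_set *host_set)
{
	struct pdc_host_priv *hp = host_set->private_data;

	ata_pci_host_stop(host_set);	/* unmap first, as the driver does */
	kfree(hp);			/* then release the private data */
}
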
diff --git a/drivers/scsi/sata_qstor.c b/drivers/scsi/sata_qstor.c
index 80480f0fb2b8..9602f43a298e 100644
--- a/drivers/scsi/sata_qstor.c
+++ b/drivers/scsi/sata_qstor.c
@@ -120,7 +120,7 @@ static void qs_host_stop(struct ata_host_set *host_set);
120static void qs_port_stop(struct ata_port *ap); 120static void qs_port_stop(struct ata_port *ap);
121static void qs_phy_reset(struct ata_port *ap); 121static void qs_phy_reset(struct ata_port *ap);
122static void qs_qc_prep(struct ata_queued_cmd *qc); 122static void qs_qc_prep(struct ata_queued_cmd *qc);
123static int qs_qc_issue(struct ata_queued_cmd *qc); 123static unsigned int qs_qc_issue(struct ata_queued_cmd *qc);
124static int qs_check_atapi_dma(struct ata_queued_cmd *qc); 124static int qs_check_atapi_dma(struct ata_queued_cmd *qc);
125static void qs_bmdma_stop(struct ata_queued_cmd *qc); 125static void qs_bmdma_stop(struct ata_queued_cmd *qc);
126static u8 qs_bmdma_status(struct ata_port *ap); 126static u8 qs_bmdma_status(struct ata_port *ap);
@@ -132,11 +132,11 @@ static struct scsi_host_template qs_ata_sht = {
132 .name = DRV_NAME, 132 .name = DRV_NAME,
133 .ioctl = ata_scsi_ioctl, 133 .ioctl = ata_scsi_ioctl,
134 .queuecommand = ata_scsi_queuecmd, 134 .queuecommand = ata_scsi_queuecmd,
135 .eh_timed_out = ata_scsi_timed_out,
135 .eh_strategy_handler = ata_scsi_error, 136 .eh_strategy_handler = ata_scsi_error,
136 .can_queue = ATA_DEF_QUEUE, 137 .can_queue = ATA_DEF_QUEUE,
137 .this_id = ATA_SHT_THIS_ID, 138 .this_id = ATA_SHT_THIS_ID,
138 .sg_tablesize = QS_MAX_PRD, 139 .sg_tablesize = QS_MAX_PRD,
139 .max_sectors = ATA_MAX_SECTORS,
140 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 140 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
141 .emulated = ATA_SHT_EMULATED, 141 .emulated = ATA_SHT_EMULATED,
142 //FIXME .use_clustering = ATA_SHT_USE_CLUSTERING, 142 //FIXME .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -276,8 +276,8 @@ static unsigned int qs_fill_sg(struct ata_queued_cmd *qc)
276 unsigned int nelem; 276 unsigned int nelem;
277 u8 *prd = pp->pkt + QS_CPB_BYTES; 277 u8 *prd = pp->pkt + QS_CPB_BYTES;
278 278
279 assert(qc->__sg != NULL); 279 WARN_ON(qc->__sg == NULL);
280 assert(qc->n_elem > 0 || qc->pad_len > 0); 280 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
281 281
282 nelem = 0; 282 nelem = 0;
283 ata_for_each_sg(sg, qc) { 283 ata_for_each_sg(sg, qc) {
@@ -352,7 +352,7 @@ static inline void qs_packet_start(struct ata_queued_cmd *qc)
352 readl(chan + QS_CCT_CFF); /* flush */ 352 readl(chan + QS_CCT_CFF); /* flush */
353} 353}
354 354
355static int qs_qc_issue(struct ata_queued_cmd *qc) 355static unsigned int qs_qc_issue(struct ata_queued_cmd *qc)
356{ 356{
357 struct qs_port_priv *pp = qc->ap->private_data; 357 struct qs_port_priv *pp = qc->ap->private_data;
358 358
diff --git a/drivers/scsi/sata_sil.c b/drivers/scsi/sata_sil.c
index 9face3c6aa21..4f2a67ed39d8 100644
--- a/drivers/scsi/sata_sil.c
+++ b/drivers/scsi/sata_sil.c
@@ -49,24 +49,30 @@
49#define DRV_VERSION "0.9" 49#define DRV_VERSION "0.9"
50 50
51enum { 51enum {
52 /*
53 * host flags
54 */
52 SIL_FLAG_RERR_ON_DMA_ACT = (1 << 29), 55 SIL_FLAG_RERR_ON_DMA_ACT = (1 << 29),
53 SIL_FLAG_MOD15WRITE = (1 << 30), 56 SIL_FLAG_MOD15WRITE = (1 << 30),
57 SIL_DFL_HOST_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
58 ATA_FLAG_MMIO,
54 59
60 /*
61 * Controller IDs
62 */
55 sil_3112 = 0, 63 sil_3112 = 0,
56 sil_3112_m15w = 1, 64 sil_3512 = 1,
57 sil_3512 = 2, 65 sil_3114 = 2,
58 sil_3114 = 3,
59
60 SIL_FIFO_R0 = 0x40,
61 SIL_FIFO_W0 = 0x41,
62 SIL_FIFO_R1 = 0x44,
63 SIL_FIFO_W1 = 0x45,
64 SIL_FIFO_R2 = 0x240,
65 SIL_FIFO_W2 = 0x241,
66 SIL_FIFO_R3 = 0x244,
67 SIL_FIFO_W3 = 0x245,
68 66
67 /*
68 * Register offsets
69 */
69 SIL_SYSCFG = 0x48, 70 SIL_SYSCFG = 0x48,
71
72 /*
73 * Register bits
74 */
75 /* SYSCFG */
70 SIL_MASK_IDE0_INT = (1 << 22), 76 SIL_MASK_IDE0_INT = (1 << 22),
71 SIL_MASK_IDE1_INT = (1 << 23), 77 SIL_MASK_IDE1_INT = (1 << 23),
72 SIL_MASK_IDE2_INT = (1 << 24), 78 SIL_MASK_IDE2_INT = (1 << 24),
@@ -75,9 +81,12 @@ enum {
75 SIL_MASK_4PORT = SIL_MASK_2PORT | 81 SIL_MASK_4PORT = SIL_MASK_2PORT |
76 SIL_MASK_IDE2_INT | SIL_MASK_IDE3_INT, 82 SIL_MASK_IDE2_INT | SIL_MASK_IDE3_INT,
77 83
78 SIL_IDE2_BMDMA = 0x200, 84 /* BMDMA/BMDMA2 */
79
80 SIL_INTR_STEERING = (1 << 1), 85 SIL_INTR_STEERING = (1 << 1),
86
87 /*
88 * Others
89 */
81 SIL_QUIRK_MOD15WRITE = (1 << 0), 90 SIL_QUIRK_MOD15WRITE = (1 << 0),
82 SIL_QUIRK_UDMA5MAX = (1 << 1), 91 SIL_QUIRK_UDMA5MAX = (1 << 1),
83}; 92};
@@ -90,13 +99,13 @@ static void sil_post_set_mode (struct ata_port *ap);
90 99
91 100
92static const struct pci_device_id sil_pci_tbl[] = { 101static const struct pci_device_id sil_pci_tbl[] = {
93 { 0x1095, 0x3112, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_m15w }, 102 { 0x1095, 0x3112, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 },
94 { 0x1095, 0x0240, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_m15w }, 103 { 0x1095, 0x0240, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 },
95 { 0x1095, 0x3512, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3512 }, 104 { 0x1095, 0x3512, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3512 },
96 { 0x1095, 0x3114, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3114 }, 105 { 0x1095, 0x3114, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3114 },
97 { 0x1002, 0x436e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_m15w }, 106 { 0x1002, 0x436e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 },
98 { 0x1002, 0x4379, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_m15w }, 107 { 0x1002, 0x4379, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 },
99 { 0x1002, 0x437a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_m15w }, 108 { 0x1002, 0x437a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 },
100 { } /* terminate list */ 109 { } /* terminate list */
101}; 110};
102 111
@@ -137,11 +146,11 @@ static struct scsi_host_template sil_sht = {
137 .name = DRV_NAME, 146 .name = DRV_NAME,
138 .ioctl = ata_scsi_ioctl, 147 .ioctl = ata_scsi_ioctl,
139 .queuecommand = ata_scsi_queuecmd, 148 .queuecommand = ata_scsi_queuecmd,
149 .eh_timed_out = ata_scsi_timed_out,
140 .eh_strategy_handler = ata_scsi_error, 150 .eh_strategy_handler = ata_scsi_error,
141 .can_queue = ATA_DEF_QUEUE, 151 .can_queue = ATA_DEF_QUEUE,
142 .this_id = ATA_SHT_THIS_ID, 152 .this_id = ATA_SHT_THIS_ID,
143 .sg_tablesize = LIBATA_MAX_PRD, 153 .sg_tablesize = LIBATA_MAX_PRD,
144 .max_sectors = ATA_MAX_SECTORS,
145 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 154 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
146 .emulated = ATA_SHT_EMULATED, 155 .emulated = ATA_SHT_EMULATED,
147 .use_clustering = ATA_SHT_USE_CLUSTERING, 156 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -159,7 +168,7 @@ static const struct ata_port_operations sil_ops = {
159 .check_status = ata_check_status, 168 .check_status = ata_check_status,
160 .exec_command = ata_exec_command, 169 .exec_command = ata_exec_command,
161 .dev_select = ata_std_dev_select, 170 .dev_select = ata_std_dev_select,
162 .phy_reset = sata_phy_reset, 171 .probe_reset = ata_std_probe_reset,
163 .post_set_mode = sil_post_set_mode, 172 .post_set_mode = sil_post_set_mode,
164 .bmdma_setup = ata_bmdma_setup, 173 .bmdma_setup = ata_bmdma_setup,
165 .bmdma_start = ata_bmdma_start, 174 .bmdma_start = ata_bmdma_start,
@@ -181,19 +190,7 @@ static const struct ata_port_info sil_port_info[] = {
181 /* sil_3112 */ 190 /* sil_3112 */
182 { 191 {
183 .sht = &sil_sht, 192 .sht = &sil_sht,
184 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 193 .host_flags = SIL_DFL_HOST_FLAGS | SIL_FLAG_MOD15WRITE,
185 ATA_FLAG_SRST | ATA_FLAG_MMIO,
186 .pio_mask = 0x1f, /* pio0-4 */
187 .mwdma_mask = 0x07, /* mwdma0-2 */
188 .udma_mask = 0x3f, /* udma0-5 */
189 .port_ops = &sil_ops,
190 },
191 /* sil_3112_15w - keep it sync'd w/ sil_3112 */
192 {
193 .sht = &sil_sht,
194 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
195 ATA_FLAG_SRST | ATA_FLAG_MMIO |
196 SIL_FLAG_MOD15WRITE,
197 .pio_mask = 0x1f, /* pio0-4 */ 194 .pio_mask = 0x1f, /* pio0-4 */
198 .mwdma_mask = 0x07, /* mwdma0-2 */ 195 .mwdma_mask = 0x07, /* mwdma0-2 */
199 .udma_mask = 0x3f, /* udma0-5 */ 196 .udma_mask = 0x3f, /* udma0-5 */
@@ -202,9 +199,7 @@ static const struct ata_port_info sil_port_info[] = {
202 /* sil_3512 */ 199 /* sil_3512 */
203 { 200 {
204 .sht = &sil_sht, 201 .sht = &sil_sht,
205 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 202 .host_flags = SIL_DFL_HOST_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT,
206 ATA_FLAG_SRST | ATA_FLAG_MMIO |
207 SIL_FLAG_RERR_ON_DMA_ACT,
208 .pio_mask = 0x1f, /* pio0-4 */ 203 .pio_mask = 0x1f, /* pio0-4 */
209 .mwdma_mask = 0x07, /* mwdma0-2 */ 204 .mwdma_mask = 0x07, /* mwdma0-2 */
210 .udma_mask = 0x3f, /* udma0-5 */ 205 .udma_mask = 0x3f, /* udma0-5 */
@@ -213,9 +208,7 @@ static const struct ata_port_info sil_port_info[] = {
213 /* sil_3114 */ 208 /* sil_3114 */
214 { 209 {
215 .sht = &sil_sht, 210 .sht = &sil_sht,
216 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 211 .host_flags = SIL_DFL_HOST_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT,
217 ATA_FLAG_SRST | ATA_FLAG_MMIO |
218 SIL_FLAG_RERR_ON_DMA_ACT,
219 .pio_mask = 0x1f, /* pio0-4 */ 212 .pio_mask = 0x1f, /* pio0-4 */
220 .mwdma_mask = 0x07, /* mwdma0-2 */ 213 .mwdma_mask = 0x07, /* mwdma0-2 */
221 .udma_mask = 0x3f, /* udma0-5 */ 214 .udma_mask = 0x3f, /* udma0-5 */
@@ -229,16 +222,17 @@ static const struct {
229 unsigned long tf; /* ATA taskfile register block */ 222 unsigned long tf; /* ATA taskfile register block */
230 unsigned long ctl; /* ATA control/altstatus register block */ 223 unsigned long ctl; /* ATA control/altstatus register block */
231 unsigned long bmdma; /* DMA register block */ 224 unsigned long bmdma; /* DMA register block */
225 unsigned long fifo_cfg; /* FIFO Valid Byte Count and Control */
232 unsigned long scr; /* SATA control register block */ 226 unsigned long scr; /* SATA control register block */
233 unsigned long sien; /* SATA Interrupt Enable register */ 227 unsigned long sien; /* SATA Interrupt Enable register */
234 unsigned long xfer_mode;/* data transfer mode register */ 228 unsigned long xfer_mode;/* data transfer mode register */
235 unsigned long sfis_cfg; /* SATA FIS reception config register */ 229 unsigned long sfis_cfg; /* SATA FIS reception config register */
236} sil_port[] = { 230} sil_port[] = {
237 /* port 0 ... */ 231 /* port 0 ... */
238 { 0x80, 0x8A, 0x00, 0x100, 0x148, 0xb4, 0x14c }, 232 { 0x80, 0x8A, 0x00, 0x40, 0x100, 0x148, 0xb4, 0x14c },
239 { 0xC0, 0xCA, 0x08, 0x180, 0x1c8, 0xf4, 0x1cc }, 233 { 0xC0, 0xCA, 0x08, 0x44, 0x180, 0x1c8, 0xf4, 0x1cc },
240 { 0x280, 0x28A, 0x200, 0x300, 0x348, 0x2b4, 0x34c }, 234 { 0x280, 0x28A, 0x200, 0x240, 0x300, 0x348, 0x2b4, 0x34c },
241 { 0x2C0, 0x2CA, 0x208, 0x380, 0x3c8, 0x2f4, 0x3cc }, 235 { 0x2C0, 0x2CA, 0x208, 0x244, 0x380, 0x3c8, 0x2f4, 0x3cc },
242 /* ... port 3 */ 236 /* ... port 3 */
243}; 237};
244 238
@@ -354,22 +348,12 @@ static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
354static void sil_dev_config(struct ata_port *ap, struct ata_device *dev) 348static void sil_dev_config(struct ata_port *ap, struct ata_device *dev)
355{ 349{
356 unsigned int n, quirks = 0; 350 unsigned int n, quirks = 0;
357 unsigned char model_num[40]; 351 unsigned char model_num[41];
358 const char *s;
359 unsigned int len;
360 352
361 ata_dev_id_string(dev->id, model_num, ATA_ID_PROD_OFS, 353 ata_id_c_string(dev->id, model_num, ATA_ID_PROD_OFS, sizeof(model_num));
362 sizeof(model_num));
363 s = &model_num[0];
364 len = strnlen(s, sizeof(model_num));
365
366 /* ATAPI specifies that empty space is blank-filled; remove blanks */
367 while ((len > 0) && (s[len - 1] == ' '))
368 len--;
369 354
370 for (n = 0; sil_blacklist[n].product; n++) 355 for (n = 0; sil_blacklist[n].product; n++)
371 if (!memcmp(sil_blacklist[n].product, s, 356 if (!strcmp(sil_blacklist[n].product, model_num)) {
372 strlen(sil_blacklist[n].product))) {
373 quirks = sil_blacklist[n].quirk; 357 quirks = sil_blacklist[n].quirk;
374 break; 358 break;
375 } 359 }
@@ -380,16 +364,14 @@ static void sil_dev_config(struct ata_port *ap, struct ata_device *dev)
380 (quirks & SIL_QUIRK_MOD15WRITE))) { 364 (quirks & SIL_QUIRK_MOD15WRITE))) {
381 printk(KERN_INFO "ata%u(%u): applying Seagate errata fix (mod15write workaround)\n", 365 printk(KERN_INFO "ata%u(%u): applying Seagate errata fix (mod15write workaround)\n",
382 ap->id, dev->devno); 366 ap->id, dev->devno);
383 ap->host->max_sectors = 15; 367 dev->max_sectors = 15;
384 ap->host->hostt->max_sectors = 15;
385 dev->flags |= ATA_DFLAG_LOCK_SECTORS;
386 return; 368 return;
387 } 369 }
388 370
389 /* limit to udma5 */ 371 /* limit to udma5 */
390 if (quirks & SIL_QUIRK_UDMA5MAX) { 372 if (quirks & SIL_QUIRK_UDMA5MAX) {
391 printk(KERN_INFO "ata%u(%u): applying Maxtor errata fix %s\n", 373 printk(KERN_INFO "ata%u(%u): applying Maxtor errata fix %s\n",
392 ap->id, dev->devno, s); 374 ap->id, dev->devno, model_num);
393 ap->udma_mask &= ATA_UDMA5; 375 ap->udma_mask &= ATA_UDMA5;
394 return; 376 return;
395 } 377 }
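
sil_dev_config() now relies on ata_id_c_string() instead of open-coding the blank-stripping, which also turns the blacklist probe into an exact strcmp() instead of a prefix memcmp(), and the mod15write limit becomes per-device (dev->max_sectors) rather than host-wide. A runnable model of the trimming the helper supplies (assumed behavior, matching the removed code; the real helper also handles the IDENTIFY data's 16-bit word layout, which this model skips):

#include <stdio.h>
#include <string.h>

static void id_c_string(char *dst, const char *src, size_t field, size_t dstsz)
{
	size_t n = (field < dstsz - 1) ? field : dstsz - 1;

	memcpy(dst, src, n);
	dst[n] = '\0';
	while (n && dst[n - 1] == ' ')	/* strip ATA blank padding */
		dst[--n] = '\0';
}

int main(void)
{
	char raw[40];
	char model[41];

	memset(raw, ' ', sizeof(raw));	/* ATA strings are blank-padded */
	memcpy(raw, "ST3120023AS", 11);	/* a drive from sil_blacklist */

	id_c_string(model, raw, sizeof(raw), sizeof(model));
	printf("blacklisted=%d\n", !strcmp(model, "ST3120023AS"));
	return 0;
}
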
@@ -431,13 +413,12 @@ static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
431 if (rc) 413 if (rc)
432 goto err_out_regions; 414 goto err_out_regions;
433 415
434 probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL); 416 probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
435 if (probe_ent == NULL) { 417 if (probe_ent == NULL) {
436 rc = -ENOMEM; 418 rc = -ENOMEM;
437 goto err_out_regions; 419 goto err_out_regions;
438 } 420 }
439 421
440 memset(probe_ent, 0, sizeof(*probe_ent));
441 INIT_LIST_HEAD(&probe_ent->node); 422 INIT_LIST_HEAD(&probe_ent->node);
442 probe_ent->dev = pci_dev_to_dev(pdev); 423 probe_ent->dev = pci_dev_to_dev(pdev);
443 probe_ent->port_ops = sil_port_info[ent->driver_data].port_ops; 424 probe_ent->port_ops = sil_port_info[ent->driver_data].port_ops;
@@ -474,19 +455,12 @@ static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
474 if (cls) { 455 if (cls) {
475 cls >>= 3; 456 cls >>= 3;
476 cls++; /* cls = (line_size/8)+1 */ 457 cls++; /* cls = (line_size/8)+1 */
477 writeb(cls, mmio_base + SIL_FIFO_R0); 458 for (i = 0; i < probe_ent->n_ports; i++)
478 writeb(cls, mmio_base + SIL_FIFO_W0); 459 writew(cls << 8 | cls,
479 writeb(cls, mmio_base + SIL_FIFO_R1); 460 mmio_base + sil_port[i].fifo_cfg);
480 writeb(cls, mmio_base + SIL_FIFO_W1);
481 if (ent->driver_data == sil_3114) {
482 writeb(cls, mmio_base + SIL_FIFO_R2);
483 writeb(cls, mmio_base + SIL_FIFO_W2);
484 writeb(cls, mmio_base + SIL_FIFO_R3);
485 writeb(cls, mmio_base + SIL_FIFO_W3);
486 }
487 } else 461 } else
488 dev_printk(KERN_WARNING, &pdev->dev, 462 dev_printk(KERN_WARNING, &pdev->dev,
489 "cache line size not set. Driver may not function\n"); 463 "cache line size not set. Driver may not function\n");
490 464
491 /* Apply R_ERR on DMA activate FIS errata workaround */ 465 /* Apply R_ERR on DMA activate FIS errata workaround */
492 if (probe_ent->host_flags & SIL_FLAG_RERR_ON_DMA_ACT) { 466 if (probe_ent->host_flags & SIL_FLAG_RERR_ON_DMA_ACT) {
@@ -509,10 +483,10 @@ static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
509 irq_mask = SIL_MASK_4PORT; 483 irq_mask = SIL_MASK_4PORT;
510 484
511 /* flip the magic "make 4 ports work" bit */ 485 /* flip the magic "make 4 ports work" bit */
512 tmp = readl(mmio_base + SIL_IDE2_BMDMA); 486 tmp = readl(mmio_base + sil_port[2].bmdma);
513 if ((tmp & SIL_INTR_STEERING) == 0) 487 if ((tmp & SIL_INTR_STEERING) == 0)
514 writel(tmp | SIL_INTR_STEERING, 488 writel(tmp | SIL_INTR_STEERING,
515 mmio_base + SIL_IDE2_BMDMA); 489 mmio_base + sil_port[2].bmdma);
516 490
517 } else { 491 } else {
518 irq_mask = SIL_MASK_2PORT; 492 irq_mask = SIL_MASK_2PORT;
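
The FIFO setup above retires eight hard-coded SIL_FIFO_* constants in favor of a fifo_cfg column in the per-port sil_port[] table, so programming the cache-line counts becomes a loop, and one 16-bit write covers the adjacent read/write count bytes; the 4-port steering fixup likewise reuses sil_port[2].bmdma instead of a separate SIL_IDE2_BMDMA constant. A runnable model with the offsets from the patch and a printf standing in for writew():

#include <stdio.h>

struct port_regs {
	unsigned long bmdma;	/* DMA register block */
	unsigned long fifo_cfg;	/* FIFO valid byte count and control */
};

static const struct port_regs sil_port[] = {
	{ 0x00,  0x40  },
	{ 0x08,  0x44  },
	{ 0x200, 0x240 },
	{ 0x208, 0x244 },
};

static void writew_mock(unsigned int val, unsigned long off)
{
	printf("writew 0x%04x -> 0x%lx\n", val, off);
}

int main(void)
{
	unsigned int cls = 5;	/* (cache line size / 8) + 1, per the driver */
	unsigned int i;

	/* one 16-bit write programs both the read and write FIFO counts */
	for (i = 0; i < 4; i++)
		writew_mock(cls << 8 | cls, sil_port[i].fifo_cfg);
	return 0;
}
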
diff --git a/drivers/scsi/sata_sil24.c b/drivers/scsi/sata_sil24.c
index 923130185a9e..8fb62427be84 100644
--- a/drivers/scsi/sata_sil24.c
+++ b/drivers/scsi/sata_sil24.c
@@ -249,9 +249,9 @@ static u8 sil24_check_status(struct ata_port *ap);
249static u32 sil24_scr_read(struct ata_port *ap, unsigned sc_reg); 249static u32 sil24_scr_read(struct ata_port *ap, unsigned sc_reg);
250static void sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val); 250static void sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val);
251static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf); 251static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
252static void sil24_phy_reset(struct ata_port *ap); 252static int sil24_probe_reset(struct ata_port *ap, unsigned int *classes);
253static void sil24_qc_prep(struct ata_queued_cmd *qc); 253static void sil24_qc_prep(struct ata_queued_cmd *qc);
254static int sil24_qc_issue(struct ata_queued_cmd *qc); 254static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc);
255static void sil24_irq_clear(struct ata_port *ap); 255static void sil24_irq_clear(struct ata_port *ap);
256static void sil24_eng_timeout(struct ata_port *ap); 256static void sil24_eng_timeout(struct ata_port *ap);
257static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *regs); 257static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
@@ -262,6 +262,7 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
262 262
263static const struct pci_device_id sil24_pci_tbl[] = { 263static const struct pci_device_id sil24_pci_tbl[] = {
264 { 0x1095, 0x3124, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3124 }, 264 { 0x1095, 0x3124, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3124 },
265 { 0x8086, 0x3124, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3124 },
265 { 0x1095, 0x3132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3132 }, 266 { 0x1095, 0x3132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3132 },
266 { 0x1095, 0x3131, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3131 }, 267 { 0x1095, 0x3131, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3131 },
267 { 0x1095, 0x3531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3131 }, 268 { 0x1095, 0x3531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3131 },
@@ -280,11 +281,11 @@ static struct scsi_host_template sil24_sht = {
280 .name = DRV_NAME, 281 .name = DRV_NAME,
281 .ioctl = ata_scsi_ioctl, 282 .ioctl = ata_scsi_ioctl,
282 .queuecommand = ata_scsi_queuecmd, 283 .queuecommand = ata_scsi_queuecmd,
284 .eh_timed_out = ata_scsi_timed_out,
283 .eh_strategy_handler = ata_scsi_error, 285 .eh_strategy_handler = ata_scsi_error,
284 .can_queue = ATA_DEF_QUEUE, 286 .can_queue = ATA_DEF_QUEUE,
285 .this_id = ATA_SHT_THIS_ID, 287 .this_id = ATA_SHT_THIS_ID,
286 .sg_tablesize = LIBATA_MAX_PRD, 288 .sg_tablesize = LIBATA_MAX_PRD,
287 .max_sectors = ATA_MAX_SECTORS,
288 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 289 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
289 .emulated = ATA_SHT_EMULATED, 290 .emulated = ATA_SHT_EMULATED,
290 .use_clustering = ATA_SHT_USE_CLUSTERING, 291 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -305,7 +306,7 @@ static const struct ata_port_operations sil24_ops = {
305 306
306 .tf_read = sil24_tf_read, 307 .tf_read = sil24_tf_read,
307 308
308 .phy_reset = sil24_phy_reset, 309 .probe_reset = sil24_probe_reset,
309 310
310 .qc_prep = sil24_qc_prep, 311 .qc_prep = sil24_qc_prep,
311 .qc_issue = sil24_qc_issue, 312 .qc_issue = sil24_qc_issue,
@@ -335,8 +336,8 @@ static struct ata_port_info sil24_port_info[] = {
335 { 336 {
336 .sht = &sil24_sht, 337 .sht = &sil24_sht,
337 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 338 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
338 ATA_FLAG_SRST | ATA_FLAG_MMIO | 339 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
339 ATA_FLAG_PIO_DMA | SIL24_NPORTS2FLAG(4), 340 SIL24_NPORTS2FLAG(4),
340 .pio_mask = 0x1f, /* pio0-4 */ 341 .pio_mask = 0x1f, /* pio0-4 */
341 .mwdma_mask = 0x07, /* mwdma0-2 */ 342 .mwdma_mask = 0x07, /* mwdma0-2 */
342 .udma_mask = 0x3f, /* udma0-5 */ 343 .udma_mask = 0x3f, /* udma0-5 */
@@ -346,8 +347,8 @@ static struct ata_port_info sil24_port_info[] = {
346 { 347 {
347 .sht = &sil24_sht, 348 .sht = &sil24_sht,
348 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 349 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
349 ATA_FLAG_SRST | ATA_FLAG_MMIO | 350 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
350 ATA_FLAG_PIO_DMA | SIL24_NPORTS2FLAG(2), 351 SIL24_NPORTS2FLAG(2),
351 .pio_mask = 0x1f, /* pio0-4 */ 352 .pio_mask = 0x1f, /* pio0-4 */
352 .mwdma_mask = 0x07, /* mwdma0-2 */ 353 .mwdma_mask = 0x07, /* mwdma0-2 */
353 .udma_mask = 0x3f, /* udma0-5 */ 354 .udma_mask = 0x3f, /* udma0-5 */
@@ -357,8 +358,8 @@ static struct ata_port_info sil24_port_info[] = {
357 { 358 {
358 .sht = &sil24_sht, 359 .sht = &sil24_sht,
359 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 360 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
360 ATA_FLAG_SRST | ATA_FLAG_MMIO | 361 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
361 ATA_FLAG_PIO_DMA | SIL24_NPORTS2FLAG(1), 362 SIL24_NPORTS2FLAG(1),
362 .pio_mask = 0x1f, /* pio0-4 */ 363 .pio_mask = 0x1f, /* pio0-4 */
363 .mwdma_mask = 0x07, /* mwdma0-2 */ 364 .mwdma_mask = 0x07, /* mwdma0-2 */
364 .udma_mask = 0x3f, /* udma0-5 */ 365 .udma_mask = 0x3f, /* udma0-5 */
@@ -370,7 +371,7 @@ static void sil24_dev_config(struct ata_port *ap, struct ata_device *dev)
370{ 371{
371 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr; 372 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
372 373
373 if (ap->cdb_len == 16) 374 if (dev->cdb_len == 16)
374 writel(PORT_CS_CDB16, port + PORT_CTRL_STAT); 375 writel(PORT_CS_CDB16, port + PORT_CTRL_STAT);
375 else 376 else
376 writel(PORT_CS_CDB16, port + PORT_CTRL_CLR); 377 writel(PORT_CS_CDB16, port + PORT_CTRL_CLR);
@@ -427,7 +428,8 @@ static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
427 *tf = pp->tf; 428 *tf = pp->tf;
428} 429}
429 430
430static int sil24_issue_SRST(struct ata_port *ap) 431static int sil24_softreset(struct ata_port *ap, int verbose,
432 unsigned int *class)
431{ 433{
432 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr; 434 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
433 struct sil24_port_priv *pp = ap->private_data; 435 struct sil24_port_priv *pp = ap->private_data;
@@ -436,6 +438,8 @@ static int sil24_issue_SRST(struct ata_port *ap)
436 u32 irq_enable, irq_stat; 438 u32 irq_enable, irq_stat;
437 int cnt; 439 int cnt;
438 440
441 DPRINTK("ENTER\n");
442
439 /* temporarily turn off IRQs during SRST */ 443 /* temporarily turn off IRQs during SRST */
440 irq_enable = readl(port + PORT_IRQ_ENABLE_SET); 444 irq_enable = readl(port + PORT_IRQ_ENABLE_SET);
441 writel(irq_enable, port + PORT_IRQ_ENABLE_CLR); 445 writel(irq_enable, port + PORT_IRQ_ENABLE_CLR);
@@ -465,30 +469,36 @@ static int sil24_issue_SRST(struct ata_port *ap)
465 /* restore IRQs */ 469 /* restore IRQs */
466 writel(irq_enable, port + PORT_IRQ_ENABLE_SET); 470 writel(irq_enable, port + PORT_IRQ_ENABLE_SET);
467 471
468 if (!(irq_stat & PORT_IRQ_COMPLETE)) 472 if (sata_dev_present(ap)) {
469 return -1; 473 if (!(irq_stat & PORT_IRQ_COMPLETE)) {
474 DPRINTK("EXIT, srst failed\n");
475 return -EIO;
476 }
470 477
471 /* update TF */ 478 sil24_update_tf(ap);
472 sil24_update_tf(ap); 479 *class = ata_dev_classify(&pp->tf);
480 }
481 if (*class == ATA_DEV_UNKNOWN)
482 *class = ATA_DEV_NONE;
483
484 DPRINTK("EXIT, class=%u\n", *class);
473 return 0; 485 return 0;
474} 486}
475 487
476static void sil24_phy_reset(struct ata_port *ap) 488static int sil24_hardreset(struct ata_port *ap, int verbose,
489 unsigned int *class)
477{ 490{
478 struct sil24_port_priv *pp = ap->private_data; 491 unsigned int dummy_class;
479
480 __sata_phy_reset(ap);
481 if (ap->flags & ATA_FLAG_PORT_DISABLED)
482 return;
483 492
484 if (sil24_issue_SRST(ap) < 0) { 493 /* sil24 doesn't report device signature after hard reset */
485 printk(KERN_ERR DRV_NAME 494 return sata_std_hardreset(ap, verbose, &dummy_class);
486 " ata%u: SRST failed, disabling port\n", ap->id); 495}
487 ap->ops->port_disable(ap);
488 return;
489 }
490 496
491 ap->device->class = ata_dev_classify(&pp->tf); 497static int sil24_probe_reset(struct ata_port *ap, unsigned int *classes)
498{
499 return ata_drive_probe_reset(ap, ata_std_probeinit,
500 sil24_softreset, sil24_hardreset,
501 ata_std_postreset, classes);
492} 502}
493 503
494static inline void sil24_fill_sg(struct ata_queued_cmd *qc, 504static inline void sil24_fill_sg(struct ata_queued_cmd *qc,
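
sil24_issue_SRST() grows into sil24_softreset(), one of the four callbacks that ata_drive_probe_reset() composes (probeinit, softreset, hardreset, postreset). The contract visible above: fail with -EIO only when a present device does not complete the SRST, classify from the signature when a device is there, and normalize ATA_DEV_UNKNOWN to ATA_DEV_NONE. A hedged sketch of a callback honoring that contract; the lower-case helpers are hypothetical stand-ins:

static int example_softreset(struct ata_port *ap, int verbose,
			     unsigned int *class)
{
	int failed = issue_srst_and_wait(ap);	/* hypothetical helper */

	if (sata_dev_present(ap)) {
		if (failed)
			return -EIO;	/* a present device must answer SRST */
		*class = classify_from_signature(ap);	/* hypothetical */
	}

	if (*class == ATA_DEV_UNKNOWN)	/* nothing identified */
		*class = ATA_DEV_NONE;
	return 0;
}
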
@@ -533,7 +543,7 @@ static void sil24_qc_prep(struct ata_queued_cmd *qc)
533 prb = &cb->atapi.prb; 543 prb = &cb->atapi.prb;
534 sge = cb->atapi.sge; 544 sge = cb->atapi.sge;
535 memset(cb->atapi.cdb, 0, 32); 545 memset(cb->atapi.cdb, 0, 32);
536 memcpy(cb->atapi.cdb, qc->cdb, ap->cdb_len); 546 memcpy(cb->atapi.cdb, qc->cdb, qc->dev->cdb_len);
537 547
538 if (qc->tf.protocol != ATA_PROT_ATAPI_NODATA) { 548 if (qc->tf.protocol != ATA_PROT_ATAPI_NODATA) {
539 if (qc->tf.flags & ATA_TFLAG_WRITE) 549 if (qc->tf.flags & ATA_TFLAG_WRITE)
@@ -557,7 +567,7 @@ static void sil24_qc_prep(struct ata_queued_cmd *qc)
557 sil24_fill_sg(qc, sge); 567 sil24_fill_sg(qc, sge);
558} 568}
559 569
560static int sil24_qc_issue(struct ata_queued_cmd *qc) 570static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc)
561{ 571{
562 struct ata_port *ap = qc->ap; 572 struct ata_port *ap = qc->ap;
563 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr; 573 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
@@ -638,23 +648,10 @@ static void sil24_eng_timeout(struct ata_port *ap)
638 struct ata_queued_cmd *qc; 648 struct ata_queued_cmd *qc;
639 649
640 qc = ata_qc_from_tag(ap, ap->active_tag); 650 qc = ata_qc_from_tag(ap, ap->active_tag);
641 if (!qc) {
642 printk(KERN_ERR "ata%u: BUG: timeout without command\n",
643 ap->id);
644 return;
645 }
646 651
647 /*
648 * hack alert! We cannot use the supplied completion
649 * function from inside the ->eh_strategy_handler() thread.
650 * libata is the only user of ->eh_strategy_handler() in
651 * any kernel, so the default scsi_done() assumes it is
652 * not being called from the SCSI EH.
653 */
654 printk(KERN_ERR "ata%u: command timeout\n", ap->id); 652 printk(KERN_ERR "ata%u: command timeout\n", ap->id);
655 qc->scsidone = scsi_finish_command; 653 qc->err_mask |= AC_ERR_TIMEOUT;
656 qc->err_mask |= AC_ERR_OTHER; 654 ata_eh_qc_complete(qc);
657 ata_qc_complete(qc);
658 655
659 sil24_reset_controller(ap); 656 sil24_reset_controller(ap);
660} 657}
@@ -895,6 +892,7 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
895 probe_ent->sht = pinfo->sht; 892 probe_ent->sht = pinfo->sht;
896 probe_ent->host_flags = pinfo->host_flags; 893 probe_ent->host_flags = pinfo->host_flags;
897 probe_ent->pio_mask = pinfo->pio_mask; 894 probe_ent->pio_mask = pinfo->pio_mask;
895 probe_ent->mwdma_mask = pinfo->mwdma_mask;
898 probe_ent->udma_mask = pinfo->udma_mask; 896 probe_ent->udma_mask = pinfo->udma_mask;
899 probe_ent->port_ops = pinfo->port_ops; 897 probe_ent->port_ops = pinfo->port_ops;
900 probe_ent->n_ports = SIL24_FLAG2NPORTS(pinfo->host_flags); 898 probe_ent->n_ports = SIL24_FLAG2NPORTS(pinfo->host_flags);
diff --git a/drivers/scsi/sata_sis.c b/drivers/scsi/sata_sis.c
index 2df8c5632ac3..7fd45f86de99 100644
--- a/drivers/scsi/sata_sis.c
+++ b/drivers/scsi/sata_sis.c
@@ -87,11 +87,11 @@ static struct scsi_host_template sis_sht = {
87 .name = DRV_NAME, 87 .name = DRV_NAME,
88 .ioctl = ata_scsi_ioctl, 88 .ioctl = ata_scsi_ioctl,
89 .queuecommand = ata_scsi_queuecmd, 89 .queuecommand = ata_scsi_queuecmd,
90 .eh_timed_out = ata_scsi_timed_out,
90 .eh_strategy_handler = ata_scsi_error, 91 .eh_strategy_handler = ata_scsi_error,
91 .can_queue = ATA_DEF_QUEUE, 92 .can_queue = ATA_DEF_QUEUE,
92 .this_id = ATA_SHT_THIS_ID, 93 .this_id = ATA_SHT_THIS_ID,
93 .sg_tablesize = ATA_MAX_PRD, 94 .sg_tablesize = ATA_MAX_PRD,
94 .max_sectors = ATA_MAX_SECTORS,
95 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 95 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
96 .emulated = ATA_SHT_EMULATED, 96 .emulated = ATA_SHT_EMULATED,
97 .use_clustering = ATA_SHT_USE_CLUSTERING, 97 .use_clustering = ATA_SHT_USE_CLUSTERING,
diff --git a/drivers/scsi/sata_svw.c b/drivers/scsi/sata_svw.c
index d8472563fde8..4aaccd53e736 100644
--- a/drivers/scsi/sata_svw.c
+++ b/drivers/scsi/sata_svw.c
@@ -288,11 +288,11 @@ static struct scsi_host_template k2_sata_sht = {
288 .name = DRV_NAME, 288 .name = DRV_NAME,
289 .ioctl = ata_scsi_ioctl, 289 .ioctl = ata_scsi_ioctl,
290 .queuecommand = ata_scsi_queuecmd, 290 .queuecommand = ata_scsi_queuecmd,
291 .eh_timed_out = ata_scsi_timed_out,
291 .eh_strategy_handler = ata_scsi_error, 292 .eh_strategy_handler = ata_scsi_error,
292 .can_queue = ATA_DEF_QUEUE, 293 .can_queue = ATA_DEF_QUEUE,
293 .this_id = ATA_SHT_THIS_ID, 294 .this_id = ATA_SHT_THIS_ID,
294 .sg_tablesize = LIBATA_MAX_PRD, 295 .sg_tablesize = LIBATA_MAX_PRD,
295 .max_sectors = ATA_MAX_SECTORS,
296 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 296 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
297 .emulated = ATA_SHT_EMULATED, 297 .emulated = ATA_SHT_EMULATED,
298 .use_clustering = ATA_SHT_USE_CLUSTERING, 298 .use_clustering = ATA_SHT_USE_CLUSTERING,
diff --git a/drivers/scsi/sata_sx4.c b/drivers/scsi/sata_sx4.c
index bc87c16c80d2..9f8a76815402 100644
--- a/drivers/scsi/sata_sx4.c
+++ b/drivers/scsi/sata_sx4.c
@@ -174,7 +174,7 @@ static void pdc20621_get_from_dimm(struct ata_probe_ent *pe,
174static void pdc20621_put_to_dimm(struct ata_probe_ent *pe, 174static void pdc20621_put_to_dimm(struct ata_probe_ent *pe,
175 void *psource, u32 offset, u32 size); 175 void *psource, u32 offset, u32 size);
176static void pdc20621_irq_clear(struct ata_port *ap); 176static void pdc20621_irq_clear(struct ata_port *ap);
177static int pdc20621_qc_issue_prot(struct ata_queued_cmd *qc); 177static unsigned int pdc20621_qc_issue_prot(struct ata_queued_cmd *qc);
178 178
179 179
180static struct scsi_host_template pdc_sata_sht = { 180static struct scsi_host_template pdc_sata_sht = {
@@ -182,11 +182,11 @@ static struct scsi_host_template pdc_sata_sht = {
182 .name = DRV_NAME, 182 .name = DRV_NAME,
183 .ioctl = ata_scsi_ioctl, 183 .ioctl = ata_scsi_ioctl,
184 .queuecommand = ata_scsi_queuecmd, 184 .queuecommand = ata_scsi_queuecmd,
185 .eh_timed_out = ata_scsi_timed_out,
185 .eh_strategy_handler = ata_scsi_error, 186 .eh_strategy_handler = ata_scsi_error,
186 .can_queue = ATA_DEF_QUEUE, 187 .can_queue = ATA_DEF_QUEUE,
187 .this_id = ATA_SHT_THIS_ID, 188 .this_id = ATA_SHT_THIS_ID,
188 .sg_tablesize = LIBATA_MAX_PRD, 189 .sg_tablesize = LIBATA_MAX_PRD,
189 .max_sectors = ATA_MAX_SECTORS,
190 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 190 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
191 .emulated = ATA_SHT_EMULATED, 191 .emulated = ATA_SHT_EMULATED,
192 .use_clustering = ATA_SHT_USE_CLUSTERING, 192 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -460,7 +460,7 @@ static void pdc20621_dma_prep(struct ata_queued_cmd *qc)
460 unsigned int i, idx, total_len = 0, sgt_len; 460 unsigned int i, idx, total_len = 0, sgt_len;
461 u32 *buf = (u32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ]; 461 u32 *buf = (u32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ];
462 462
463 assert(qc->flags & ATA_QCFLAG_DMAMAP); 463 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
464 464
465 VPRINTK("ata%u: ENTER\n", ap->id); 465 VPRINTK("ata%u: ENTER\n", ap->id);
466 466
@@ -678,7 +678,7 @@ static void pdc20621_packet_start(struct ata_queued_cmd *qc)
678 } 678 }
679} 679}
680 680
681static int pdc20621_qc_issue_prot(struct ata_queued_cmd *qc) 681static unsigned int pdc20621_qc_issue_prot(struct ata_queued_cmd *qc)
682{ 682{
683 switch (qc->tf.protocol) { 683 switch (qc->tf.protocol) {
684 case ATA_PROT_DMA: 684 case ATA_PROT_DMA:
@@ -866,26 +866,12 @@ static void pdc_eng_timeout(struct ata_port *ap)
866 spin_lock_irqsave(&host_set->lock, flags); 866 spin_lock_irqsave(&host_set->lock, flags);
867 867
868 qc = ata_qc_from_tag(ap, ap->active_tag); 868 qc = ata_qc_from_tag(ap, ap->active_tag);
869 if (!qc) {
870 printk(KERN_ERR "ata%u: BUG: timeout without command\n",
871 ap->id);
872 goto out;
873 }
874
875 /* hack alert! We cannot use the supplied completion
876 * function from inside the ->eh_strategy_handler() thread.
877 * libata is the only user of ->eh_strategy_handler() in
878 * any kernel, so the default scsi_done() assumes it is
879 * not being called from the SCSI EH.
880 */
881 qc->scsidone = scsi_finish_command;
882 869
883 switch (qc->tf.protocol) { 870 switch (qc->tf.protocol) {
884 case ATA_PROT_DMA: 871 case ATA_PROT_DMA:
885 case ATA_PROT_NODATA: 872 case ATA_PROT_NODATA:
886 printk(KERN_ERR "ata%u: command timeout\n", ap->id); 873 printk(KERN_ERR "ata%u: command timeout\n", ap->id);
887 qc->err_mask |= __ac_err_mask(ata_wait_idle(ap)); 874 qc->err_mask |= __ac_err_mask(ata_wait_idle(ap));
888 ata_qc_complete(qc);
889 break; 875 break;
890 876
891 default: 877 default:
@@ -895,12 +881,11 @@ static void pdc_eng_timeout(struct ata_port *ap)
895 ap->id, qc->tf.command, drv_stat); 881 ap->id, qc->tf.command, drv_stat);
896 882
897 qc->err_mask |= ac_err_mask(drv_stat); 883 qc->err_mask |= ac_err_mask(drv_stat);
898 ata_qc_complete(qc);
899 break; 884 break;
900 } 885 }
901 886
902out:
903 spin_unlock_irqrestore(&host_set->lock, flags); 887 spin_unlock_irqrestore(&host_set->lock, flags);
888 ata_eh_qc_complete(qc);
904 DPRINTK("EXIT\n"); 889 DPRINTK("EXIT\n");
905} 890}
906 891
diff --git a/drivers/scsi/sata_uli.c b/drivers/scsi/sata_uli.c
index 9635ca700977..37a487b7d655 100644
--- a/drivers/scsi/sata_uli.c
+++ b/drivers/scsi/sata_uli.c
@@ -75,11 +75,11 @@ static struct scsi_host_template uli_sht = {
75 .name = DRV_NAME, 75 .name = DRV_NAME,
76 .ioctl = ata_scsi_ioctl, 76 .ioctl = ata_scsi_ioctl,
77 .queuecommand = ata_scsi_queuecmd, 77 .queuecommand = ata_scsi_queuecmd,
78 .eh_timed_out = ata_scsi_timed_out,
78 .eh_strategy_handler = ata_scsi_error, 79 .eh_strategy_handler = ata_scsi_error,
79 .can_queue = ATA_DEF_QUEUE, 80 .can_queue = ATA_DEF_QUEUE,
80 .this_id = ATA_SHT_THIS_ID, 81 .this_id = ATA_SHT_THIS_ID,
81 .sg_tablesize = LIBATA_MAX_PRD, 82 .sg_tablesize = LIBATA_MAX_PRD,
82 .max_sectors = ATA_MAX_SECTORS,
83 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 83 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
84 .emulated = ATA_SHT_EMULATED, 84 .emulated = ATA_SHT_EMULATED,
85 .use_clustering = ATA_SHT_USE_CLUSTERING, 85 .use_clustering = ATA_SHT_USE_CLUSTERING,
diff --git a/drivers/scsi/sata_via.c b/drivers/scsi/sata_via.c
index 6d5b0a794cfd..ff65a0b0457f 100644
--- a/drivers/scsi/sata_via.c
+++ b/drivers/scsi/sata_via.c
@@ -94,11 +94,11 @@ static struct scsi_host_template svia_sht = {
94 .name = DRV_NAME, 94 .name = DRV_NAME,
95 .ioctl = ata_scsi_ioctl, 95 .ioctl = ata_scsi_ioctl,
96 .queuecommand = ata_scsi_queuecmd, 96 .queuecommand = ata_scsi_queuecmd,
97 .eh_timed_out = ata_scsi_timed_out,
97 .eh_strategy_handler = ata_scsi_error, 98 .eh_strategy_handler = ata_scsi_error,
98 .can_queue = ATA_DEF_QUEUE, 99 .can_queue = ATA_DEF_QUEUE,
99 .this_id = ATA_SHT_THIS_ID, 100 .this_id = ATA_SHT_THIS_ID,
100 .sg_tablesize = LIBATA_MAX_PRD, 101 .sg_tablesize = LIBATA_MAX_PRD,
101 .max_sectors = ATA_MAX_SECTORS,
102 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 102 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
103 .emulated = ATA_SHT_EMULATED, 103 .emulated = ATA_SHT_EMULATED,
104 .use_clustering = ATA_SHT_USE_CLUSTERING, 104 .use_clustering = ATA_SHT_USE_CLUSTERING,
diff --git a/drivers/scsi/sata_vsc.c b/drivers/scsi/sata_vsc.c
index e484e8db6810..b574379a7a82 100644
--- a/drivers/scsi/sata_vsc.c
+++ b/drivers/scsi/sata_vsc.c
@@ -251,11 +251,11 @@ static struct scsi_host_template vsc_sata_sht = {
251 .name = DRV_NAME, 251 .name = DRV_NAME,
252 .ioctl = ata_scsi_ioctl, 252 .ioctl = ata_scsi_ioctl,
253 .queuecommand = ata_scsi_queuecmd, 253 .queuecommand = ata_scsi_queuecmd,
254 .eh_timed_out = ata_scsi_timed_out,
254 .eh_strategy_handler = ata_scsi_error, 255 .eh_strategy_handler = ata_scsi_error,
255 .can_queue = ATA_DEF_QUEUE, 256 .can_queue = ATA_DEF_QUEUE,
256 .this_id = ATA_SHT_THIS_ID, 257 .this_id = ATA_SHT_THIS_ID,
257 .sg_tablesize = LIBATA_MAX_PRD, 258 .sg_tablesize = LIBATA_MAX_PRD,
258 .max_sectors = ATA_MAX_SECTORS,
259 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 259 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
260 .emulated = ATA_SHT_EMULATED, 260 .emulated = ATA_SHT_EMULATED,
261 .use_clustering = ATA_SHT_USE_CLUSTERING, 261 .use_clustering = ATA_SHT_USE_CLUSTERING,
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index ff82ccfbb106..5d169a2881b9 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -584,8 +584,7 @@ static int scsi_request_sense(struct scsi_cmnd *scmd)
584 * keep a list of pending commands for final completion, and once we 584 * keep a list of pending commands for final completion, and once we
585 * are ready to leave error handling we handle completion for real. 585 * are ready to leave error handling we handle completion for real.
586 **/ 586 **/
587static void scsi_eh_finish_cmd(struct scsi_cmnd *scmd, 587void scsi_eh_finish_cmd(struct scsi_cmnd *scmd, struct list_head *done_q)
588 struct list_head *done_q)
589{ 588{
590 scmd->device->host->host_failed--; 589 scmd->device->host->host_failed--;
591 scmd->eh_eflags = 0; 590 scmd->eh_eflags = 0;
@@ -597,6 +596,7 @@ static void scsi_eh_finish_cmd(struct scsi_cmnd *scmd,
597 scsi_setup_cmd_retry(scmd); 596 scsi_setup_cmd_retry(scmd);
598 list_move_tail(&scmd->eh_entry, done_q); 597 list_move_tail(&scmd->eh_entry, done_q);
599} 598}
599EXPORT_SYMBOL(scsi_eh_finish_cmd);
600 600
601/** 601/**
602 * scsi_eh_get_sense - Get device sense data. 602 * scsi_eh_get_sense - Get device sense data.
@@ -1425,7 +1425,7 @@ static void scsi_eh_ready_devs(struct Scsi_Host *shost,
1425 * @done_q: list_head of processed commands. 1425 * @done_q: list_head of processed commands.
1426 * 1426 *
1427 **/ 1427 **/
1428static void scsi_eh_flush_done_q(struct list_head *done_q) 1428void scsi_eh_flush_done_q(struct list_head *done_q)
1429{ 1429{
1430 struct scsi_cmnd *scmd, *next; 1430 struct scsi_cmnd *scmd, *next;
1431 1431
@@ -1454,6 +1454,7 @@ static void scsi_eh_flush_done_q(struct list_head *done_q)
1454 } 1454 }
1455 } 1455 }
1456} 1456}
1457EXPORT_SYMBOL(scsi_eh_flush_done_q);
1457 1458
1458/** 1459/**
1459 * scsi_unjam_host - Attempt to fix a host which has a cmd that failed. 1460 * scsi_unjam_host - Attempt to fix a host which has a cmd that failed.
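
scsi_eh_finish_cmd() and scsi_eh_flush_done_q() are promoted from static helpers to exported symbols, which is what lets libata's reworked error handler reuse the midlayer's two-phase completion: park each recovered command on a local list, then complete the whole list once recovery is over. A hedged sketch of an eh_strategy_handler using the pair; the walk over shost->eh_cmd_q mirrors the midlayer's own pattern, the rest is illustrative:

static int example_eh_strategy(struct Scsi_Host *shost)
{
	struct scsi_cmnd *scmd, *next;
	LIST_HEAD(done_q);

	list_for_each_entry_safe(scmd, next, &shost->eh_cmd_q, eh_entry) {
		/* ... driver-specific recovery for scmd ... */
		scsi_eh_finish_cmd(scmd, &done_q);	/* park for later */
	}

	scsi_eh_flush_done_q(&done_q);	/* now complete everything for real */
	return 0;
}
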