author    James Bottomley <jejb@mulgrave.il.steeleye.com>  2006-03-21 14:05:45 -0500
committer James Bottomley <jejb@mulgrave.il.steeleye.com>  2006-03-21 14:05:45 -0500
commit    d04cdb64212eb5ae6a98026a97dda626e40e8e9a
tree      b6a7dbb21ccfceb915844e9a330b3d3dfcaf3c5b
parent    2f8600dff2b140096a7df781884e918a16aa90e0
parent    ec1248e70edc5cf7b485efcc7b41e44e10f422e5
Merge ../linux-2.6
Diffstat (limited to 'drivers/scsi')
-rw-r--r--  drivers/scsi/Kconfig             |    8
-rw-r--r--  drivers/scsi/Makefile            |    2
-rw-r--r--  drivers/scsi/ahci.c              |  197
-rw-r--r--  drivers/scsi/ata_piix.c          |  392
-rw-r--r--  drivers/scsi/libata-bmdma.c      |  703
-rw-r--r--  drivers/scsi/libata-core.c       | 2794
-rw-r--r--  drivers/scsi/libata-scsi.c       |  240
-rw-r--r--  drivers/scsi/libata.h            |    3
-rw-r--r--  drivers/scsi/pcmcia/Kconfig      |    1
-rw-r--r--  drivers/scsi/pdc_adma.c          |    6
-rw-r--r--  drivers/scsi/sata_mv.c           |  281
-rw-r--r--  drivers/scsi/sata_nv.c           |    2
-rw-r--r--  drivers/scsi/sata_promise.c      |  129
-rw-r--r--  drivers/scsi/sata_qstor.c        |   10
-rw-r--r--  drivers/scsi/sata_sil.c          |  126
-rw-r--r--  drivers/scsi/sata_sil24.c        |  102
-rw-r--r--  drivers/scsi/sata_sis.c          |    2
-rw-r--r--  drivers/scsi/sata_svw.c          |    2
-rw-r--r--  drivers/scsi/sata_sx4.c          |   25
-rw-r--r--  drivers/scsi/sata_uli.c          |    2
-rw-r--r--  drivers/scsi/sata_via.c          |    2
-rw-r--r--  drivers/scsi/sata_vsc.c          |    2
-rw-r--r--  drivers/scsi/scsi_devinfo.c      |    1
-rw-r--r--  drivers/scsi/scsi_error.c        |    7
-rw-r--r--  drivers/scsi/scsi_transport_fc.c |    2
25 files changed, 3066 insertions(+), 1975 deletions(-)
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 3c606cf8c8ca..5c94a5d4efc0 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -379,6 +379,14 @@ config SCSI_AHA1740
 config SCSI_AACRAID
 	tristate "Adaptec AACRAID support"
 	depends on SCSI && PCI
+	help
+	  This driver supports a variety of Dell, HP, Adaptec, IBM and
+	  ICP storage products. For a list of supported products, refer
+	  to <file:Documentation/scsi/aacraid.txt>.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called aacraid.
+
 
 source "drivers/scsi/aic7xxx/Kconfig.aic7xxx"
 
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 320e765fa0cd..15dc2e00e1b2 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -163,7 +163,7 @@ ncr53c8xx-flags-$(CONFIG_SCSI_ZALON) \
 CFLAGS_ncr53c8xx.o	:= $(ncr53c8xx-flags-y) $(ncr53c8xx-flags-m)
 zalon7xx-objs		:= zalon.o ncr53c8xx.o
 NCR_Q720_mod-objs	:= NCR_Q720.o ncr53c8xx.o
-libata-objs		:= libata-core.o libata-scsi.o
+libata-objs		:= libata-core.o libata-scsi.o libata-bmdma.o
 oktagon_esp_mod-objs	:= oktagon_esp.o oktagon_io.o
 
 # Files generated that shall be removed upon make clean
diff --git a/drivers/scsi/ahci.c b/drivers/scsi/ahci.c
index 559ff7aae3f1..e97ab3e6de4d 100644
--- a/drivers/scsi/ahci.c
+++ b/drivers/scsi/ahci.c
@@ -66,6 +66,9 @@ enum {
 	AHCI_IRQ_ON_SG		= (1 << 31),
 	AHCI_CMD_ATAPI		= (1 << 5),
 	AHCI_CMD_WRITE		= (1 << 6),
+	AHCI_CMD_PREFETCH	= (1 << 7),
+	AHCI_CMD_RESET		= (1 << 8),
+	AHCI_CMD_CLR_BUSY	= (1 << 10),
 
 	RX_FIS_D2H_REG		= 0x40,	/* offset of D2H Register FIS data */
 
@@ -85,6 +88,7 @@ enum {
 
 	/* HOST_CAP bits */
 	HOST_CAP_64		= (1 << 31), /* PCI DAC (64-bit DMA) support */
+	HOST_CAP_CLO		= (1 << 24), /* Command List Override support */
 
 	/* registers for each SATA port */
 	PORT_LST_ADDR		= 0x00, /* command list DMA addr */
@@ -138,6 +142,7 @@ enum {
 	PORT_CMD_LIST_ON	= (1 << 15), /* cmd list DMA engine running */
 	PORT_CMD_FIS_ON		= (1 << 14), /* FIS DMA engine running */
 	PORT_CMD_FIS_RX		= (1 << 4), /* Enable FIS receive DMA engine */
+	PORT_CMD_CLO		= (1 << 3), /* Command list override */
 	PORT_CMD_POWER_ON	= (1 << 2), /* Power up device */
 	PORT_CMD_SPIN_UP	= (1 << 1), /* Spin up device */
 	PORT_CMD_START		= (1 << 0), /* Enable port DMA engine */
@@ -184,9 +189,9 @@ struct ahci_port_priv {
 static u32 ahci_scr_read (struct ata_port *ap, unsigned int sc_reg);
 static void ahci_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
 static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
-static int ahci_qc_issue(struct ata_queued_cmd *qc);
+static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
 static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *regs);
-static void ahci_phy_reset(struct ata_port *ap);
+static int ahci_probe_reset(struct ata_port *ap, unsigned int *classes);
 static void ahci_irq_clear(struct ata_port *ap);
 static void ahci_eng_timeout(struct ata_port *ap);
 static int ahci_port_start(struct ata_port *ap);
@@ -202,11 +207,11 @@ static struct scsi_host_template ahci_sht = {
 	.name			= DRV_NAME,
 	.ioctl			= ata_scsi_ioctl,
 	.queuecommand		= ata_scsi_queuecmd,
+	.eh_timed_out		= ata_scsi_timed_out,
 	.eh_strategy_handler	= ata_scsi_error,
 	.can_queue		= ATA_DEF_QUEUE,
 	.this_id		= ATA_SHT_THIS_ID,
 	.sg_tablesize		= AHCI_MAX_SG,
-	.max_sectors		= ATA_MAX_SECTORS,
 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
 	.emulated		= ATA_SHT_EMULATED,
 	.use_clustering		= AHCI_USE_CLUSTERING,
@@ -225,7 +230,7 @@ static const struct ata_port_operations ahci_ops = {
 
 	.tf_read		= ahci_tf_read,
 
-	.phy_reset		= ahci_phy_reset,
+	.probe_reset		= ahci_probe_reset,
 
 	.qc_prep		= ahci_qc_prep,
 	.qc_issue		= ahci_qc_issue,
@@ -247,8 +252,7 @@ static const struct ata_port_info ahci_port_info[] = {
 	{
 		.sht		= &ahci_sht,
 		.host_flags	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
-				  ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
-				  ATA_FLAG_PIO_DMA,
+				  ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
 		.pio_mask	= 0x1f, /* pio0-4 */
 		.udma_mask	= 0x7f, /* udma0-6 ; FIXME */
 		.port_ops	= &ahci_ops,
@@ -450,17 +454,48 @@ static void ahci_scr_write (struct ata_port *ap, unsigned int sc_reg_in,
 	writel(val, (void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
 }
 
-static void ahci_phy_reset(struct ata_port *ap)
+static int ahci_stop_engine(struct ata_port *ap)
 {
-	void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
-	struct ata_taskfile tf;
-	struct ata_device *dev = &ap->device[0];
-	u32 new_tmp, tmp;
+	void __iomem *mmio = ap->host_set->mmio_base;
+	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
+	int work;
+	u32 tmp;
 
-	__sata_phy_reset(ap);
+	tmp = readl(port_mmio + PORT_CMD);
+	tmp &= ~PORT_CMD_START;
+	writel(tmp, port_mmio + PORT_CMD);
 
-	if (ap->flags & ATA_FLAG_PORT_DISABLED)
-		return;
+	/* wait for engine to stop.  TODO: this could be
+	 * as long as 500 msec
+	 */
+	work = 1000;
+	while (work-- > 0) {
+		tmp = readl(port_mmio + PORT_CMD);
+		if ((tmp & PORT_CMD_LIST_ON) == 0)
+			return 0;
+		udelay(10);
+	}
+
+	return -EIO;
+}
+
+static void ahci_start_engine(struct ata_port *ap)
+{
+	void __iomem *mmio = ap->host_set->mmio_base;
+	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
+	u32 tmp;
+
+	tmp = readl(port_mmio + PORT_CMD);
+	tmp |= PORT_CMD_START;
+	writel(tmp, port_mmio + PORT_CMD);
+	readl(port_mmio + PORT_CMD); /* flush */
+}
+
+static unsigned int ahci_dev_classify(struct ata_port *ap)
+{
+	void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
+	struct ata_taskfile tf;
+	u32 tmp;
 
 	tmp = readl(port_mmio + PORT_SIG);
 	tf.lbah = (tmp >> 24) & 0xff;
@@ -468,15 +503,46 @@ static void ahci_phy_reset(struct ata_port *ap)
 	tf.lbal = (tmp >> 8) & 0xff;
 	tf.nsect = (tmp) & 0xff;
 
-	dev->class = ata_dev_classify(&tf);
-	if (!ata_dev_present(dev)) {
-		ata_port_disable(ap);
-		return;
-	}
+	return ata_dev_classify(&tf);
+}
+
+static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, u32 opts)
+{
+	pp->cmd_slot[0].opts = cpu_to_le32(opts);
+	pp->cmd_slot[0].status = 0;
+	pp->cmd_slot[0].tbl_addr = cpu_to_le32(pp->cmd_tbl_dma & 0xffffffff);
+	pp->cmd_slot[0].tbl_addr_hi = cpu_to_le32((pp->cmd_tbl_dma >> 16) >> 16);
+}
+
+static int ahci_hardreset(struct ata_port *ap, int verbose, unsigned int *class)
+{
+	int rc;
+
+	DPRINTK("ENTER\n");
+
+	ahci_stop_engine(ap);
+	rc = sata_std_hardreset(ap, verbose, class);
+	ahci_start_engine(ap);
+
+	if (rc == 0)
+		*class = ahci_dev_classify(ap);
+	if (*class == ATA_DEV_UNKNOWN)
+		*class = ATA_DEV_NONE;
+
+	DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
+	return rc;
+}
+
+static void ahci_postreset(struct ata_port *ap, unsigned int *class)
+{
+	void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
+	u32 new_tmp, tmp;
+
+	ata_std_postreset(ap, class);
 
 	/* Make sure port's ATAPI bit is set appropriately */
 	new_tmp = tmp = readl(port_mmio + PORT_CMD);
-	if (dev->class == ATA_DEV_ATAPI)
+	if (*class == ATA_DEV_ATAPI)
 		new_tmp |= PORT_CMD_ATAPI;
 	else
 		new_tmp &= ~PORT_CMD_ATAPI;
@@ -486,6 +552,12 @@ static void ahci_phy_reset(struct ata_port *ap)
 	}
 }
 
+static int ahci_probe_reset(struct ata_port *ap, unsigned int *classes)
+{
+	return ata_drive_probe_reset(ap, NULL, NULL, ahci_hardreset,
+				     ahci_postreset, classes);
+}
+
 static u8 ahci_check_status(struct ata_port *ap)
 {
 	void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr;
@@ -533,42 +605,36 @@ static void ahci_qc_prep(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
 	struct ahci_port_priv *pp = ap->private_data;
+	int is_atapi = is_atapi_taskfile(&qc->tf);
 	u32 opts;
 	const u32 cmd_fis_len = 5; /* five dwords */
 	unsigned int n_elem;
 
 	/*
-	 * Fill in command slot information (currently only one slot,
-	 * slot 0, is currently since we don't do queueing)
-	 */
-
-	opts = cmd_fis_len;
-	if (qc->tf.flags & ATA_TFLAG_WRITE)
-		opts |= AHCI_CMD_WRITE;
-	if (is_atapi_taskfile(&qc->tf))
-		opts |= AHCI_CMD_ATAPI;
-
-	pp->cmd_slot[0].opts = cpu_to_le32(opts);
-	pp->cmd_slot[0].status = 0;
-	pp->cmd_slot[0].tbl_addr = cpu_to_le32(pp->cmd_tbl_dma & 0xffffffff);
-	pp->cmd_slot[0].tbl_addr_hi = cpu_to_le32((pp->cmd_tbl_dma >> 16) >> 16);
-
-	/*
 	 * Fill in command table information.  First, the header,
 	 * a SATA Register - Host to Device command FIS.
 	 */
 	ata_tf_to_fis(&qc->tf, pp->cmd_tbl, 0);
-	if (opts & AHCI_CMD_ATAPI) {
+	if (is_atapi) {
 		memset(pp->cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
-		memcpy(pp->cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, ap->cdb_len);
+		memcpy(pp->cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb,
+		       qc->dev->cdb_len);
 	}
 
-	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
-		return;
+	n_elem = 0;
+	if (qc->flags & ATA_QCFLAG_DMAMAP)
+		n_elem = ahci_fill_sg(qc);
 
-	n_elem = ahci_fill_sg(qc);
+	/*
+	 * Fill in command slot information.
+	 */
+	opts = cmd_fis_len | n_elem << 16;
+	if (qc->tf.flags & ATA_TFLAG_WRITE)
+		opts |= AHCI_CMD_WRITE;
+	if (is_atapi)
+		opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
 
-	pp->cmd_slot[0].opts |= cpu_to_le32(n_elem << 16);
+	ahci_fill_cmd_slot(pp, opts);
 }
 
 static void ahci_restart_port(struct ata_port *ap, u32 irq_stat)
@@ -576,7 +642,6 @@ static void ahci_restart_port(struct ata_port *ap, u32 irq_stat)
 	void __iomem *mmio = ap->host_set->mmio_base;
 	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
 	u32 tmp;
-	int work;
 
 	if ((ap->device[0].class != ATA_DEV_ATAPI) ||
 	    ((irq_stat & PORT_IRQ_TF_ERR) == 0))
@@ -592,20 +657,7 @@ static void ahci_restart_port(struct ata_port *ap, u32 irq_stat)
 		readl(port_mmio + PORT_SCR_ERR));
 
 	/* stop DMA */
-	tmp = readl(port_mmio + PORT_CMD);
-	tmp &= ~PORT_CMD_START;
-	writel(tmp, port_mmio + PORT_CMD);
-
-	/* wait for engine to stop.  TODO: this could be
-	 * as long as 500 msec
-	 */
-	work = 1000;
-	while (work-- > 0) {
-		tmp = readl(port_mmio + PORT_CMD);
-		if ((tmp & PORT_CMD_LIST_ON) == 0)
-			break;
-		udelay(10);
-	}
+	ahci_stop_engine(ap);
 
 	/* clear SATA phy error, if any */
 	tmp = readl(port_mmio + PORT_SCR_ERR);
@@ -624,10 +676,7 @@ static void ahci_restart_port(struct ata_port *ap, u32 irq_stat)
 	}
 
 	/* re-start DMA */
-	tmp = readl(port_mmio + PORT_CMD);
-	tmp |= PORT_CMD_START;
-	writel(tmp, port_mmio + PORT_CMD);
-	readl(port_mmio + PORT_CMD); /* flush */
+	ahci_start_engine(ap);
 }
 
 static void ahci_eng_timeout(struct ata_port *ap)
@@ -642,25 +691,13 @@ static void ahci_eng_timeout(struct ata_port *ap)
 
 	spin_lock_irqsave(&host_set->lock, flags);
 
+	ahci_restart_port(ap, readl(port_mmio + PORT_IRQ_STAT));
 	qc = ata_qc_from_tag(ap, ap->active_tag);
-	if (!qc) {
-		printk(KERN_ERR "ata%u: BUG: timeout without command\n",
-		       ap->id);
-	} else {
-		ahci_restart_port(ap, readl(port_mmio + PORT_IRQ_STAT));
-
-		/* hack alert!  We cannot use the supplied completion
-		 * function from inside the ->eh_strategy_handler() thread.
-		 * libata is the only user of ->eh_strategy_handler() in
-		 * any kernel, so the default scsi_done() assumes it is
-		 * not being called from the SCSI EH.
-		 */
-		qc->scsidone = scsi_finish_command;
-		qc->err_mask |= AC_ERR_OTHER;
-		ata_qc_complete(qc);
-	}
+	qc->err_mask |= AC_ERR_TIMEOUT;
 
 	spin_unlock_irqrestore(&host_set->lock, flags);
+
+	ata_eh_qc_complete(qc);
 }
 
 static inline int ahci_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
@@ -678,7 +715,7 @@ static inline int ahci_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
 	ci = readl(port_mmio + PORT_CMD_ISSUE);
 	if (likely((ci & 0x1) == 0)) {
 		if (qc) {
-			assert(qc->err_mask == 0);
+			WARN_ON(qc->err_mask);
 			ata_qc_complete(qc);
 			qc = NULL;
 		}
@@ -697,7 +734,7 @@ static inline int ahci_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
 		ahci_restart_port(ap, status);
 
 		if (qc) {
-			qc->err_mask |= AC_ERR_OTHER;
+			qc->err_mask |= err_mask;
 			ata_qc_complete(qc);
 		}
 	}
@@ -770,7 +807,7 @@ static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *
 	return IRQ_RETVAL(handled);
 }
 
-static int ahci_qc_issue(struct ata_queued_cmd *qc)
+static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
 	void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
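
The ahci.c hunks above replace the old phy_reset path with a probe_reset chain, and the shape worth noticing is the bracket in ahci_hardreset(): stop the command-list DMA engine, run the standard SATA hard reset, restart the engine, then classify the attached device from the port signature. A condensed, illustrative restatement of that bracket follows; ahci_hardreset_sketch is a hypothetical name coined for this example, while the helpers it calls are the ones the diff adds or uses.

	/* Illustrative sketch only -- mirrors ahci_hardreset() above. */
	static int ahci_hardreset_sketch(struct ata_port *ap, int verbose,
					 unsigned int *class)
	{
		int rc;

		ahci_stop_engine(ap);		/* halt command-list DMA first */
		rc = sata_std_hardreset(ap, verbose, class);
		ahci_start_engine(ap);		/* restart even if reset failed */

		if (rc == 0)
			*class = ahci_dev_classify(ap);	/* decode PORT_SIG FIS */
		if (*class == ATA_DEV_UNKNOWN)
			*class = ATA_DEV_NONE;

		return rc;			/* 0 or negative errno */
	}
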
diff --git a/drivers/scsi/ata_piix.c b/drivers/scsi/ata_piix.c
index fc3ca051ceed..9327b62f97de 100644
--- a/drivers/scsi/ata_piix.c
+++ b/drivers/scsi/ata_piix.c
@@ -101,36 +101,54 @@ enum {
 	ICH5_PCS		= 0x92,	/* port control and status */
 	PIIX_SCC		= 0x0A, /* sub-class code register */
 
-	PIIX_FLAG_AHCI		= (1 << 28), /* AHCI possible */
-	PIIX_FLAG_CHECKINTR	= (1 << 29), /* make sure PCI INTx enabled */
-	PIIX_FLAG_COMBINED	= (1 << 30), /* combined mode possible */
+	PIIX_FLAG_IGNORE_PCS	= (1 << 25), /* ignore PCS present bits */
+	PIIX_FLAG_SCR		= (1 << 26), /* SCR available */
+	PIIX_FLAG_AHCI		= (1 << 27), /* AHCI possible */
+	PIIX_FLAG_CHECKINTR	= (1 << 28), /* make sure PCI INTx enabled */
+	PIIX_FLAG_COMBINED	= (1 << 29), /* combined mode possible */
+	/* ICH6/7 use different scheme for map value */
+	PIIX_FLAG_COMBINED_ICH6	= PIIX_FLAG_COMBINED | (1 << 30),
 
 	/* combined mode.  if set, PATA is channel 0.
 	 * if clear, PATA is channel 1.
 	 */
-	PIIX_COMB_PATA_P0	= (1 << 1),
-	PIIX_COMB		= (1 << 2), /* combined mode enabled? */
-
 	PIIX_PORT_ENABLED	= (1 << 0),
 	PIIX_PORT_PRESENT	= (1 << 4),
 
 	PIIX_80C_PRI		= (1 << 5) | (1 << 4),
 	PIIX_80C_SEC		= (1 << 7) | (1 << 6),
 
-	ich5_pata		= 0,
-	ich5_sata		= 1,
-	piix4_pata		= 2,
-	ich6_sata		= 3,
-	ich6_sata_ahci		= 4,
+	/* controller IDs */
+	piix4_pata		= 0,
+	ich5_pata		= 1,
+	ich5_sata		= 2,
+	esb_sata		= 3,
+	ich6_sata		= 4,
+	ich6_sata_ahci		= 5,
+	ich6m_sata_ahci		= 6,
+
+	/* constants for mapping table */
+	P0			= 0,  /* port 0 */
+	P1			= 1,  /* port 1 */
+	P2			= 2,  /* port 2 */
+	P3			= 3,  /* port 3 */
+	IDE			= -1, /* IDE */
+	NA			= -2, /* not avaliable */
+	RV			= -3, /* reserved */
 
 	PIIX_AHCI_DEVICE	= 6,
 };
 
+struct piix_map_db {
+	const u32 mask;
+	const int map[][4];
+};
+
 static int piix_init_one (struct pci_dev *pdev,
 			  const struct pci_device_id *ent);
 
-static void piix_pata_phy_reset(struct ata_port *ap);
-static void piix_sata_phy_reset(struct ata_port *ap);
+static int piix_pata_probe_reset(struct ata_port *ap, unsigned int *classes);
+static int piix_sata_probe_reset(struct ata_port *ap, unsigned int *classes);
 static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev);
 static void piix_set_dmamode (struct ata_port *ap, struct ata_device *adev);
 
@@ -147,19 +165,32 @@ static const struct pci_device_id piix_pci_tbl[] = {
 	 * list in drivers/pci/quirks.c.
 	 */
 
+	/* 82801EB (ICH5) */
 	{ 0x8086, 0x24d1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata },
+	/* 82801EB (ICH5) */
 	{ 0x8086, 0x24df, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata },
-	{ 0x8086, 0x25a3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata },
-	{ 0x8086, 0x25b0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata },
+	/* 6300ESB (ICH5 variant with broken PCS present bits) */
+	{ 0x8086, 0x25a3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, esb_sata },
+	/* 6300ESB pretending RAID */
+	{ 0x8086, 0x25b0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, esb_sata },
+	/* 82801FB/FW (ICH6/ICH6W) */
 	{ 0x8086, 0x2651, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata },
+	/* 82801FR/FRW (ICH6R/ICH6RW) */
 	{ 0x8086, 0x2652, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
-	{ 0x8086, 0x2653, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
+	/* 82801FBM ICH6M (ICH6R with only port 0 and 2 implemented) */
+	{ 0x8086, 0x2653, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6m_sata_ahci },
+	/* 82801GB/GR/GH (ICH7, identical to ICH6) */
 	{ 0x8086, 0x27c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
-	{ 0x8086, 0x27c4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
+	/* 2801GBM/GHM (ICH7M, identical to ICH6M) */
+	{ 0x8086, 0x27c4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6m_sata_ahci },
+	/* Enterprise Southbridge 2 (where's the datasheet?) */
 	{ 0x8086, 0x2680, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
+	/* SATA Controller 1 IDE (ICH8, no datasheet yet) */
 	{ 0x8086, 0x2820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
+	/* SATA Controller 2 IDE (ICH8, ditto) */
 	{ 0x8086, 0x2825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
-	{ 0x8086, 0x2828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
+	/* Mobile SATA Controller IDE (ICH8M, ditto) */
+	{ 0x8086, 0x2828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6m_sata_ahci },
 
 	{ }	/* terminate list */
 };
@@ -178,11 +209,11 @@ static struct scsi_host_template piix_sht = {
 	.name			= DRV_NAME,
 	.ioctl			= ata_scsi_ioctl,
 	.queuecommand		= ata_scsi_queuecmd,
+	.eh_timed_out		= ata_scsi_timed_out,
 	.eh_strategy_handler	= ata_scsi_error,
 	.can_queue		= ATA_DEF_QUEUE,
 	.this_id		= ATA_SHT_THIS_ID,
 	.sg_tablesize		= LIBATA_MAX_PRD,
-	.max_sectors		= ATA_MAX_SECTORS,
 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
 	.emulated		= ATA_SHT_EMULATED,
 	.use_clustering		= ATA_SHT_USE_CLUSTERING,
@@ -205,7 +236,7 @@ static const struct ata_port_operations piix_pata_ops = {
 	.exec_command		= ata_exec_command,
 	.dev_select		= ata_std_dev_select,
 
-	.phy_reset		= piix_pata_phy_reset,
+	.probe_reset		= piix_pata_probe_reset,
 
 	.bmdma_setup		= ata_bmdma_setup,
 	.bmdma_start		= ata_bmdma_start,
@@ -233,7 +264,7 @@ static const struct ata_port_operations piix_sata_ops = {
 	.exec_command		= ata_exec_command,
 	.dev_select		= ata_std_dev_select,
 
-	.phy_reset		= piix_sata_phy_reset,
+	.probe_reset		= piix_sata_probe_reset,
 
 	.bmdma_setup		= ata_bmdma_setup,
 	.bmdma_start		= ata_bmdma_start,
@@ -252,12 +283,62 @@ static const struct ata_port_operations piix_sata_ops = {
 	.host_stop		= ata_host_stop,
 };
 
+static struct piix_map_db ich5_map_db = {
+	.mask = 0x7,
+	.map = {
+		/* PM   PS   SM   SS       MAP  */
+		{  P0,  NA,  P1,  NA }, /* 000b */
+		{  P1,  NA,  P0,  NA }, /* 001b */
+		{  RV,  RV,  RV,  RV },
+		{  RV,  RV,  RV,  RV },
+		{  P0,  P1, IDE, IDE }, /* 100b */
+		{  P1,  P0, IDE, IDE }, /* 101b */
+		{ IDE, IDE,  P0,  P1 }, /* 110b */
+		{ IDE, IDE,  P1,  P0 }, /* 111b */
+	},
+};
+
+static struct piix_map_db ich6_map_db = {
+	.mask = 0x3,
+	.map = {
+		/* PM   PS   SM   SS       MAP */
+		{  P0,  P1,  P2,  P3 }, /* 00b */
+		{ IDE, IDE,  P1,  P3 }, /* 01b */
+		{  P0,  P2, IDE, IDE }, /* 10b */
+		{  RV,  RV,  RV,  RV },
+	},
+};
+
+static struct piix_map_db ich6m_map_db = {
+	.mask = 0x3,
+	.map = {
+		/* PM   PS   SM   SS       MAP */
+		{  P0,  P1,  P2,  P3 }, /* 00b */
+		{  RV,  RV,  RV,  RV },
+		{  P0,  P2, IDE, IDE }, /* 10b */
+		{  RV,  RV,  RV,  RV },
+	},
+};
+
 static struct ata_port_info piix_port_info[] = {
+	/* piix4_pata */
+	{
+		.sht		= &piix_sht,
+		.host_flags	= ATA_FLAG_SLAVE_POSS,
+		.pio_mask	= 0x1f,	/* pio0-4 */
+#if 0
+		.mwdma_mask	= 0x06, /* mwdma1-2 */
+#else
+		.mwdma_mask	= 0x00, /* mwdma broken */
+#endif
+		.udma_mask	= ATA_UDMA_MASK_40C,
+		.port_ops	= &piix_pata_ops,
+	},
+
 	/* ich5_pata */
 	{
 		.sht		= &piix_sht,
-		.host_flags	= ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST |
-				  PIIX_FLAG_CHECKINTR,
+		.host_flags	= ATA_FLAG_SLAVE_POSS | PIIX_FLAG_CHECKINTR,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 #if 0
 		.mwdma_mask	= 0x06, /* mwdma1-2 */
@@ -271,50 +352,63 @@ static struct ata_port_info piix_port_info[] = {
 	/* ich5_sata */
 	{
 		.sht		= &piix_sht,
-		.host_flags	= ATA_FLAG_SATA | ATA_FLAG_SRST |
-				  PIIX_FLAG_COMBINED | PIIX_FLAG_CHECKINTR,
+		.host_flags	= ATA_FLAG_SATA | PIIX_FLAG_COMBINED |
+				  PIIX_FLAG_CHECKINTR,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.mwdma_mask	= 0x07, /* mwdma0-2 */
 		.udma_mask	= 0x7f,	/* udma0-6 */
 		.port_ops	= &piix_sata_ops,
+		.private_data	= &ich5_map_db,
 	},
 
-	/* piix4_pata */
+	/* i6300esb_sata */
 	{
 		.sht		= &piix_sht,
-		.host_flags	= ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+		.host_flags	= ATA_FLAG_SATA | PIIX_FLAG_COMBINED |
+				  PIIX_FLAG_CHECKINTR | PIIX_FLAG_IGNORE_PCS,
 		.pio_mask	= 0x1f,	/* pio0-4 */
-#if 0
-		.mwdma_mask	= 0x06, /* mwdma1-2 */
-#else
-		.mwdma_mask	= 0x00, /* mwdma broken */
-#endif
-		.udma_mask	= ATA_UDMA_MASK_40C,
-		.port_ops	= &piix_pata_ops,
+		.mwdma_mask	= 0x07, /* mwdma0-2 */
+		.udma_mask	= 0x7f,	/* udma0-6 */
+		.port_ops	= &piix_sata_ops,
+		.private_data	= &ich5_map_db,
 	},
 
 	/* ich6_sata */
 	{
 		.sht		= &piix_sht,
-		.host_flags	= ATA_FLAG_SATA | ATA_FLAG_SRST |
-				  PIIX_FLAG_COMBINED | PIIX_FLAG_CHECKINTR |
-				  ATA_FLAG_SLAVE_POSS,
+		.host_flags	= ATA_FLAG_SATA | PIIX_FLAG_COMBINED_ICH6 |
+				  PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.mwdma_mask	= 0x07, /* mwdma0-2 */
 		.udma_mask	= 0x7f,	/* udma0-6 */
 		.port_ops	= &piix_sata_ops,
+		.private_data	= &ich6_map_db,
 	},
 
 	/* ich6_sata_ahci */
 	{
 		.sht		= &piix_sht,
-		.host_flags	= ATA_FLAG_SATA | ATA_FLAG_SRST |
-				  PIIX_FLAG_COMBINED | PIIX_FLAG_CHECKINTR |
-				  ATA_FLAG_SLAVE_POSS | PIIX_FLAG_AHCI,
+		.host_flags	= ATA_FLAG_SATA | PIIX_FLAG_COMBINED_ICH6 |
+				  PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR |
+				  PIIX_FLAG_AHCI,
+		.pio_mask	= 0x1f,	/* pio0-4 */
+		.mwdma_mask	= 0x07, /* mwdma0-2 */
+		.udma_mask	= 0x7f,	/* udma0-6 */
+		.port_ops	= &piix_sata_ops,
+		.private_data	= &ich6_map_db,
+	},
+
+	/* ich6m_sata_ahci */
+	{
+		.sht		= &piix_sht,
+		.host_flags	= ATA_FLAG_SATA | PIIX_FLAG_COMBINED_ICH6 |
+				  PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR |
+				  PIIX_FLAG_AHCI,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.mwdma_mask	= 0x07, /* mwdma0-2 */
 		.udma_mask	= 0x7f,	/* udma0-6 */
 		.port_ops	= &piix_sata_ops,
+		.private_data	= &ich6m_map_db,
 	},
 };
 
@@ -363,102 +457,123 @@ cbl40:
 }
 
 /**
- *	piix_pata_phy_reset - Probe specified port on PATA host controller
- *	@ap: Port to probe
+ *	piix_pata_probeinit - probeinit for PATA host controller
+ *	@ap: Target port
  *
- *	Probe PATA phy.
+ *	Probeinit including cable detection.
  *
  *	LOCKING:
  *	None (inherited from caller).
  */
+static void piix_pata_probeinit(struct ata_port *ap)
+{
+	piix_pata_cbl_detect(ap);
+	ata_std_probeinit(ap);
+}
 
-static void piix_pata_phy_reset(struct ata_port *ap)
+/**
+ *	piix_pata_probe_reset - Perform reset on PATA port and classify
+ *	@ap: Port to reset
+ *	@classes: Resulting classes of attached devices
+ *
+ *	Reset PATA phy and classify attached devices.
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+static int piix_pata_probe_reset(struct ata_port *ap, unsigned int *classes)
 {
 	struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
 
 	if (!pci_test_config_bits(pdev, &piix_enable_bits[ap->hard_port_no])) {
-		ata_port_disable(ap);
 		printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id);
-		return;
+		return 0;
 	}
 
-	piix_pata_cbl_detect(ap);
-
-	ata_port_probe(ap);
-
-	ata_bus_reset(ap);
+	return ata_drive_probe_reset(ap, piix_pata_probeinit,
+				     ata_std_softreset, NULL,
+				     ata_std_postreset, classes);
 }
 
 /**
  *	piix_sata_probe - Probe PCI device for present SATA devices
  *	@ap: Port associated with the PCI device we wish to probe
  *
- *	Reads SATA PCI device's PCI config register Port Configuration
- *	and Status (PCS) to determine port and device availability.
+ *	Reads and configures SATA PCI device's PCI config register
+ *	Port Configuration and Status (PCS) to determine port and
+ *	device availability.
  *
  *	LOCKING:
  *	None (inherited from caller).
  *
  *	RETURNS:
- *	Non-zero if port is enabled, it may or may not have a device
- *	attached in that case (PRESENT bit would only be set if BIOS probe
- *	was done). Zero is returned if port is disabled.
+ *	Mask of avaliable devices on the port.
  */
-static int piix_sata_probe (struct ata_port *ap)
+static unsigned int piix_sata_probe (struct ata_port *ap)
 {
 	struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
-	int combined = (ap->flags & ATA_FLAG_SLAVE_POSS);
-	int orig_mask, mask, i;
+	const unsigned int *map = ap->host_set->private_data;
+	int base = 2 * ap->hard_port_no;
+	unsigned int present_mask = 0;
+	int port, i;
 	u8 pcs;
 
-	mask = (PIIX_PORT_PRESENT << ap->hard_port_no) |
-	       (PIIX_PORT_ENABLED << ap->hard_port_no);
-
 	pci_read_config_byte(pdev, ICH5_PCS, &pcs);
-	orig_mask = (int) pcs & 0xff;
-
-	/* TODO: this is vaguely wrong for ICH6 combined mode,
-	 * where only two of the four SATA ports are mapped
-	 * onto a single ATA channel.  It is also vaguely inaccurate
-	 * for ICH5, which has only two ports.  However, this is ok,
-	 * as further device presence detection code will handle
-	 * any false positives produced here.
-	 */
+	DPRINTK("ata%u: ENTER, pcs=0x%x base=%d\n", ap->id, pcs, base);
 
-	for (i = 0; i < 4; i++) {
-		mask = (PIIX_PORT_ENABLED << i);
+	/* enable all ports on this ap and wait for them to settle */
+	for (i = 0; i < 2; i++) {
+		port = map[base + i];
+		if (port >= 0)
+			pcs |= 1 << port;
+	}
+
+	pci_write_config_byte(pdev, ICH5_PCS, pcs);
+	msleep(100);
 
-		if ((orig_mask & mask) == mask)
-			if (combined || (i == ap->hard_port_no))
-				return 1;
+	/* let's see which devices are present */
+	pci_read_config_byte(pdev, ICH5_PCS, &pcs);
+
+	for (i = 0; i < 2; i++) {
+		port = map[base + i];
+		if (port < 0)
+			continue;
+		if (ap->flags & PIIX_FLAG_IGNORE_PCS || pcs & 1 << (4 + port))
+			present_mask |= 1 << i;
+		else
+			pcs &= ~(1 << port);
 	}
 
-	return 0;
+	/* disable offline ports on non-AHCI controllers */
+	if (!(ap->flags & PIIX_FLAG_AHCI))
+		pci_write_config_byte(pdev, ICH5_PCS, pcs);
+
+	DPRINTK("ata%u: LEAVE, pcs=0x%x present_mask=0x%x\n",
+		ap->id, pcs, present_mask);
+
+	return present_mask;
 }
 
 /**
- *	piix_sata_phy_reset - Probe specified port on SATA host controller
- *	@ap: Port to probe
+ *	piix_sata_probe_reset - Perform reset on SATA port and classify
+ *	@ap: Port to reset
+ *	@classes: Resulting classes of attached devices
  *
- *	Probe SATA phy.
+ *	Reset SATA phy and classify attached devices.
  *
  *	LOCKING:
  *	None (inherited from caller).
  */
-
-static void piix_sata_phy_reset(struct ata_port *ap)
+static int piix_sata_probe_reset(struct ata_port *ap, unsigned int *classes)
 {
 	if (!piix_sata_probe(ap)) {
-		ata_port_disable(ap);
 		printk(KERN_INFO "ata%u: SATA port has no device.\n", ap->id);
-		return;
+		return 0;
 	}
 
-	ap->cbl = ATA_CBL_SATA;
-
-	ata_port_probe(ap);
-
-	ata_bus_reset(ap);
+	return ata_drive_probe_reset(ap, ata_std_probeinit,
+				     ata_std_softreset, NULL,
+				     ata_std_postreset, classes);
 }
 
 /**
@@ -627,6 +742,7 @@ static int piix_disable_ahci(struct pci_dev *pdev)
 
 /**
  *	piix_check_450nx_errata	-	Check for problem 450NX setup
+ *	@ata_dev: the PCI device to check
  *
  *	Check for the present of 450NX errata #19 and errata #25. If
  *	they are found return an error code so we can turn off DMA
@@ -659,6 +775,54 @@ static int __devinit piix_check_450nx_errata(struct pci_dev *ata_dev)
 	return no_piix_dma;
 }
 
+static void __devinit piix_init_sata_map(struct pci_dev *pdev,
+					 struct ata_port_info *pinfo)
+{
+	struct piix_map_db *map_db = pinfo[0].private_data;
+	const unsigned int *map;
+	int i, invalid_map = 0;
+	u8 map_value;
+
+	pci_read_config_byte(pdev, ICH5_PMR, &map_value);
+
+	map = map_db->map[map_value & map_db->mask];
+
+	dev_printk(KERN_INFO, &pdev->dev, "MAP [");
+	for (i = 0; i < 4; i++) {
+		switch (map[i]) {
+		case RV:
+			invalid_map = 1;
+			printk(" XX");
+			break;
+
+		case NA:
+			printk(" --");
+			break;
+
+		case IDE:
+			WARN_ON((i & 1) || map[i + 1] != IDE);
+			pinfo[i / 2] = piix_port_info[ich5_pata];
+			i++;
+			printk(" IDE IDE");
+			break;
+
+		default:
+			printk(" P%d", map[i]);
+			if (i & 1)
+				pinfo[i / 2].host_flags |= ATA_FLAG_SLAVE_POSS;
+			break;
+		}
+	}
+	printk(" ]\n");
+
+	if (invalid_map)
+		dev_printk(KERN_ERR, &pdev->dev,
+			   "invalid MAP value %u\n", map_value);
+
+	pinfo[0].private_data = (void *)map;
+	pinfo[1].private_data = (void *)map;
+}
+
 /**
  *	piix_init_one - Register PIIX ATA PCI device with kernel services
  *	@pdev: PCI device to register
@@ -677,9 +841,9 @@ static int __devinit piix_check_450nx_errata(struct pci_dev *ata_dev)
 static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	static int printed_version;
-	struct ata_port_info *port_info[2];
-	unsigned int combined = 0;
-	unsigned int pata_chan = 0, sata_chan = 0;
+	struct ata_port_info port_info[2];
+	struct ata_port_info *ppinfo[2] = { &port_info[0], &port_info[1] };
+	unsigned long host_flags;
 
 	if (!printed_version++)
 		dev_printk(KERN_DEBUG, &pdev->dev,
@@ -689,10 +853,12 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (!in_module_init)
 		return -ENODEV;
 
-	port_info[0] = &piix_port_info[ent->driver_data];
-	port_info[1] = &piix_port_info[ent->driver_data];
+	port_info[0] = piix_port_info[ent->driver_data];
+	port_info[1] = piix_port_info[ent->driver_data];
+
+	host_flags = port_info[0].host_flags;
 
-	if (port_info[0]->host_flags & PIIX_FLAG_AHCI) {
+	if (host_flags & PIIX_FLAG_AHCI) {
 		u8 tmp;
 		pci_read_config_byte(pdev, PIIX_SCC, &tmp);
 		if (tmp == PIIX_AHCI_DEVICE) {
@@ -702,18 +868,9 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 		}
 	}
 
-	if (port_info[0]->host_flags & PIIX_FLAG_COMBINED) {
-		u8 tmp;
-		pci_read_config_byte(pdev, ICH5_PMR, &tmp);
-
-		if (tmp & PIIX_COMB) {
-			combined = 1;
-			if (tmp & PIIX_COMB_PATA_P0)
-				sata_chan = 1;
-			else
-				pata_chan = 1;
-		}
-	}
+	/* Initialize SATA map */
+	if (host_flags & ATA_FLAG_SATA)
+		piix_init_sata_map(pdev, port_info);
 
 	/* On ICH5, some BIOSen disable the interrupt using the
 	 * PCI_COMMAND_INTX_DISABLE bit added in PCI 2.3.
@@ -721,28 +878,19 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 	 * MSI is disabled (and it is disabled, as we don't use
 	 * message-signalled interrupts currently).
 	 */
-	if (port_info[0]->host_flags & PIIX_FLAG_CHECKINTR)
+	if (host_flags & PIIX_FLAG_CHECKINTR)
 		pci_intx(pdev, 1);
 
-	if (combined) {
-		port_info[sata_chan] = &piix_port_info[ent->driver_data];
-		port_info[sata_chan]->host_flags |= ATA_FLAG_SLAVE_POSS;
-		port_info[pata_chan] = &piix_port_info[ich5_pata];
-
-		dev_printk(KERN_WARNING, &pdev->dev,
-			   "combined mode detected (p=%u, s=%u)\n",
-			   pata_chan, sata_chan);
-	}
 	if (piix_check_450nx_errata(pdev)) {
 		/* This writes into the master table but it does not
 		   really matter for this errata as we will apply it to
 		   all the PIIX devices on the board */
-		port_info[0]->mwdma_mask = 0;
-		port_info[0]->udma_mask = 0;
-		port_info[1]->mwdma_mask = 0;
-		port_info[1]->udma_mask = 0;
+		port_info[0].mwdma_mask = 0;
+		port_info[0].udma_mask = 0;
+		port_info[1].mwdma_mask = 0;
+		port_info[1].udma_mask = 0;
 	}
-	return ata_pci_init_one(pdev, port_info, 2);
+	return ata_pci_init_one(pdev, ppinfo, 2);
 }
 
 static int __init piix_init(void)
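
The piix_map_db tables added above are the heart of the new ata_piix.c probe logic: piix_init_sata_map() reads the ICH Port Mapping Register (PMR), masks it, and indexes the controller's table; each row's four slots (primary master/slave, secondary master/slave) name a SATA port (P0..P3), IDE, NA, or RV. A standalone sketch of that lookup follows, assuming the same encoding; decode_pmr() is a hypothetical helper written for this example, and the struct merely mirrors the one in the diff.

	#include <linux/types.h>

	/* Mirrors struct piix_map_db from the hunk above. */
	struct piix_map_db {
		const u32 mask;
		const int map[][4];
	};

	/*
	 * Hypothetical helper, not part of the driver: mask the raw
	 * PMR byte and index the map table, exactly as
	 * piix_init_sata_map() does before walking the four slots.
	 */
	static const int *decode_pmr(const struct piix_map_db *db, u8 pmr_value)
	{
		return db->map[pmr_value & db->mask];
	}
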
diff --git a/drivers/scsi/libata-bmdma.c b/drivers/scsi/libata-bmdma.c
new file mode 100644
index 000000000000..a93336adcd23
--- /dev/null
+++ b/drivers/scsi/libata-bmdma.c
@@ -0,0 +1,703 @@
1/*
2 * libata-bmdma.c - helper library for PCI IDE BMDMA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2006 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2006 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
33 */
34
35#include <linux/config.h>
36#include <linux/kernel.h>
37#include <linux/pci.h>
38#include <linux/libata.h>
39
40#include "libata.h"
41
42/**
43 * ata_tf_load_pio - send taskfile registers to host controller
44 * @ap: Port to which output is sent
45 * @tf: ATA taskfile register set
46 *
47 * Outputs ATA taskfile to standard ATA host controller.
48 *
49 * LOCKING:
50 * Inherited from caller.
51 */
52
53static void ata_tf_load_pio(struct ata_port *ap, const struct ata_taskfile *tf)
54{
55 struct ata_ioports *ioaddr = &ap->ioaddr;
56 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
57
58 if (tf->ctl != ap->last_ctl) {
59 outb(tf->ctl, ioaddr->ctl_addr);
60 ap->last_ctl = tf->ctl;
61 ata_wait_idle(ap);
62 }
63
64 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
65 outb(tf->hob_feature, ioaddr->feature_addr);
66 outb(tf->hob_nsect, ioaddr->nsect_addr);
67 outb(tf->hob_lbal, ioaddr->lbal_addr);
68 outb(tf->hob_lbam, ioaddr->lbam_addr);
69 outb(tf->hob_lbah, ioaddr->lbah_addr);
70 VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
71 tf->hob_feature,
72 tf->hob_nsect,
73 tf->hob_lbal,
74 tf->hob_lbam,
75 tf->hob_lbah);
76 }
77
78 if (is_addr) {
79 outb(tf->feature, ioaddr->feature_addr);
80 outb(tf->nsect, ioaddr->nsect_addr);
81 outb(tf->lbal, ioaddr->lbal_addr);
82 outb(tf->lbam, ioaddr->lbam_addr);
83 outb(tf->lbah, ioaddr->lbah_addr);
84 VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
85 tf->feature,
86 tf->nsect,
87 tf->lbal,
88 tf->lbam,
89 tf->lbah);
90 }
91
92 if (tf->flags & ATA_TFLAG_DEVICE) {
93 outb(tf->device, ioaddr->device_addr);
94 VPRINTK("device 0x%X\n", tf->device);
95 }
96
97 ata_wait_idle(ap);
98}
99
100/**
101 * ata_tf_load_mmio - send taskfile registers to host controller
102 * @ap: Port to which output is sent
103 * @tf: ATA taskfile register set
104 *
105 * Outputs ATA taskfile to standard ATA host controller using MMIO.
106 *
107 * LOCKING:
108 * Inherited from caller.
109 */
110
111static void ata_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
112{
113 struct ata_ioports *ioaddr = &ap->ioaddr;
114 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
115
116 if (tf->ctl != ap->last_ctl) {
117 writeb(tf->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
118 ap->last_ctl = tf->ctl;
119 ata_wait_idle(ap);
120 }
121
122 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
123 writeb(tf->hob_feature, (void __iomem *) ioaddr->feature_addr);
124 writeb(tf->hob_nsect, (void __iomem *) ioaddr->nsect_addr);
125 writeb(tf->hob_lbal, (void __iomem *) ioaddr->lbal_addr);
126 writeb(tf->hob_lbam, (void __iomem *) ioaddr->lbam_addr);
127 writeb(tf->hob_lbah, (void __iomem *) ioaddr->lbah_addr);
128 VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
129 tf->hob_feature,
130 tf->hob_nsect,
131 tf->hob_lbal,
132 tf->hob_lbam,
133 tf->hob_lbah);
134 }
135
136 if (is_addr) {
137 writeb(tf->feature, (void __iomem *) ioaddr->feature_addr);
138 writeb(tf->nsect, (void __iomem *) ioaddr->nsect_addr);
139 writeb(tf->lbal, (void __iomem *) ioaddr->lbal_addr);
140 writeb(tf->lbam, (void __iomem *) ioaddr->lbam_addr);
141 writeb(tf->lbah, (void __iomem *) ioaddr->lbah_addr);
142 VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
143 tf->feature,
144 tf->nsect,
145 tf->lbal,
146 tf->lbam,
147 tf->lbah);
148 }
149
150 if (tf->flags & ATA_TFLAG_DEVICE) {
151 writeb(tf->device, (void __iomem *) ioaddr->device_addr);
152 VPRINTK("device 0x%X\n", tf->device);
153 }
154
155 ata_wait_idle(ap);
156}
157
158
159/**
160 * ata_tf_load - send taskfile registers to host controller
161 * @ap: Port to which output is sent
162 * @tf: ATA taskfile register set
163 *
164 * Outputs ATA taskfile to standard ATA host controller using MMIO
165 * or PIO as indicated by the ATA_FLAG_MMIO flag.
166 * Writes the control, feature, nsect, lbal, lbam, and lbah registers.
167 * Optionally (ATA_TFLAG_LBA48) writes hob_feature, hob_nsect,
168 * hob_lbal, hob_lbam, and hob_lbah.
169 *
170 * This function waits for idle (!BUSY and !DRQ) after writing
171 * registers. If the control register has a new value, this
172 * function also waits for idle after writing control and before
173 * writing the remaining registers.
174 *
175 * May be used as the tf_load() entry in ata_port_operations.
176 *
177 * LOCKING:
178 * Inherited from caller.
179 */
180void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
181{
182 if (ap->flags & ATA_FLAG_MMIO)
183 ata_tf_load_mmio(ap, tf);
184 else
185 ata_tf_load_pio(ap, tf);
186}
187
188/**
189 * ata_exec_command_pio - issue ATA command to host controller
190 * @ap: port to which command is being issued
191 * @tf: ATA taskfile register set
192 *
193 * Issues PIO write to ATA command register, with proper
194 * synchronization with interrupt handler / other threads.
195 *
196 * LOCKING:
197 * spin_lock_irqsave(host_set lock)
198 */
199
200static void ata_exec_command_pio(struct ata_port *ap, const struct ata_taskfile *tf)
201{
202 DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
203
204 outb(tf->command, ap->ioaddr.command_addr);
205 ata_pause(ap);
206}
207
208
209/**
210 * ata_exec_command_mmio - issue ATA command to host controller
211 * @ap: port to which command is being issued
212 * @tf: ATA taskfile register set
213 *
214 * Issues MMIO write to ATA command register, with proper
215 * synchronization with interrupt handler / other threads.
216 *
217 * LOCKING:
218 * spin_lock_irqsave(host_set lock)
219 */
220
221static void ata_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
222{
223 DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
224
225 writeb(tf->command, (void __iomem *) ap->ioaddr.command_addr);
226 ata_pause(ap);
227}
228
229
230/**
231 * ata_exec_command - issue ATA command to host controller
232 * @ap: port to which command is being issued
233 * @tf: ATA taskfile register set
234 *
235 * Issues PIO/MMIO write to ATA command register, with proper
236 * synchronization with interrupt handler / other threads.
237 *
238 * LOCKING:
239 * spin_lock_irqsave(host_set lock)
240 */
241void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
242{
243 if (ap->flags & ATA_FLAG_MMIO)
244 ata_exec_command_mmio(ap, tf);
245 else
246 ata_exec_command_pio(ap, tf);
247}
248
249/**
250 * ata_tf_read_pio - input device's ATA taskfile shadow registers
251 * @ap: Port from which input is read
252 * @tf: ATA taskfile register set for storing input
253 *
254 * Reads ATA taskfile registers for currently-selected device
255 * into @tf.
256 *
257 * LOCKING:
258 * Inherited from caller.
259 */
260
261static void ata_tf_read_pio(struct ata_port *ap, struct ata_taskfile *tf)
262{
263 struct ata_ioports *ioaddr = &ap->ioaddr;
264
265 tf->command = ata_check_status(ap);
266 tf->feature = inb(ioaddr->error_addr);
267 tf->nsect = inb(ioaddr->nsect_addr);
268 tf->lbal = inb(ioaddr->lbal_addr);
269 tf->lbam = inb(ioaddr->lbam_addr);
270 tf->lbah = inb(ioaddr->lbah_addr);
271 tf->device = inb(ioaddr->device_addr);
272
273 if (tf->flags & ATA_TFLAG_LBA48) {
274 outb(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
275 tf->hob_feature = inb(ioaddr->error_addr);
276 tf->hob_nsect = inb(ioaddr->nsect_addr);
277 tf->hob_lbal = inb(ioaddr->lbal_addr);
278 tf->hob_lbam = inb(ioaddr->lbam_addr);
279 tf->hob_lbah = inb(ioaddr->lbah_addr);
280 }
281}
282
283/**
284 * ata_tf_read_mmio - input device's ATA taskfile shadow registers
285 * @ap: Port from which input is read
286 * @tf: ATA taskfile register set for storing input
287 *
288 * Reads ATA taskfile registers for currently-selected device
289 * into @tf via MMIO.
290 *
291 * LOCKING:
292 * Inherited from caller.
293 */
294
295static void ata_tf_read_mmio(struct ata_port *ap, struct ata_taskfile *tf)
296{
297 struct ata_ioports *ioaddr = &ap->ioaddr;
298
299 tf->command = ata_check_status(ap);
300 tf->feature = readb((void __iomem *)ioaddr->error_addr);
301 tf->nsect = readb((void __iomem *)ioaddr->nsect_addr);
302 tf->lbal = readb((void __iomem *)ioaddr->lbal_addr);
303 tf->lbam = readb((void __iomem *)ioaddr->lbam_addr);
304 tf->lbah = readb((void __iomem *)ioaddr->lbah_addr);
305 tf->device = readb((void __iomem *)ioaddr->device_addr);
306
307 if (tf->flags & ATA_TFLAG_LBA48) {
308 writeb(tf->ctl | ATA_HOB, (void __iomem *) ap->ioaddr.ctl_addr);
309 tf->hob_feature = readb((void __iomem *)ioaddr->error_addr);
310 tf->hob_nsect = readb((void __iomem *)ioaddr->nsect_addr);
311 tf->hob_lbal = readb((void __iomem *)ioaddr->lbal_addr);
312 tf->hob_lbam = readb((void __iomem *)ioaddr->lbam_addr);
313 tf->hob_lbah = readb((void __iomem *)ioaddr->lbah_addr);
314 }
315}
316
317
318/**
319 * ata_tf_read - input device's ATA taskfile shadow registers
320 * @ap: Port from which input is read
321 * @tf: ATA taskfile register set for storing input
322 *
323 * Reads ATA taskfile registers for currently-selected device
324 * into @tf.
325 *
326 * Reads nsect, lbal, lbam, lbah, and device. If ATA_TFLAG_LBA48
327 * is set, also reads the hob registers.
328 *
329 * May be used as the tf_read() entry in ata_port_operations.
330 *
331 * LOCKING:
332 * Inherited from caller.
333 */
334void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
335{
336 if (ap->flags & ATA_FLAG_MMIO)
337 ata_tf_read_mmio(ap, tf);
338 else
339 ata_tf_read_pio(ap, tf);
340}
341
342/**
343 * ata_check_status_pio - Read device status reg & clear interrupt
344 * @ap: port where the device is
345 *
346 * Reads ATA taskfile status register for currently-selected device
347 * and return its value. This also clears pending interrupts
348 * from this device
349 *
350 * LOCKING:
351 * Inherited from caller.
352 */
353static u8 ata_check_status_pio(struct ata_port *ap)
354{
355 return inb(ap->ioaddr.status_addr);
356}
357
358/**
359 * ata_check_status_mmio - Read device status reg & clear interrupt
360 * @ap: port where the device is
361 *
362 * Reads ATA taskfile status register for currently-selected device
363 * via MMIO and return its value. This also clears pending interrupts
364 * from this device
365 *
366 * LOCKING:
367 * Inherited from caller.
368 */
369static u8 ata_check_status_mmio(struct ata_port *ap)
370{
371 return readb((void __iomem *) ap->ioaddr.status_addr);
372}
373
374
375/**
376 * ata_check_status - Read device status reg & clear interrupt
377 * @ap: port where the device is
378 *
379 * Reads ATA taskfile status register for currently-selected device
380 * and return its value. This also clears pending interrupts
381 * from this device
382 *
383 * May be used as the check_status() entry in ata_port_operations.
384 *
385 * LOCKING:
386 * Inherited from caller.
387 */
388u8 ata_check_status(struct ata_port *ap)
389{
390 if (ap->flags & ATA_FLAG_MMIO)
391 return ata_check_status_mmio(ap);
392 return ata_check_status_pio(ap);
393}
394
395
396/**
397 * ata_altstatus - Read device alternate status reg
398 * @ap: port where the device is
399 *
400 * Reads ATA taskfile alternate status register for
401 * currently-selected device and return its value.
402 *
403 * Note: may NOT be used as the check_altstatus() entry in
404 * ata_port_operations.
405 *
406 * LOCKING:
407 * Inherited from caller.
408 */
409u8 ata_altstatus(struct ata_port *ap)
410{
411 if (ap->ops->check_altstatus)
412 return ap->ops->check_altstatus(ap);
413
414 if (ap->flags & ATA_FLAG_MMIO)
415 return readb((void __iomem *)ap->ioaddr.altstatus_addr);
416 return inb(ap->ioaddr.altstatus_addr);
417}
418
419#ifdef CONFIG_PCI
420static struct ata_probe_ent *
421ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
422{
423 struct ata_probe_ent *probe_ent;
424
425 probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
426 if (!probe_ent) {
427 printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
428 kobject_name(&(dev->kobj)));
429 return NULL;
430 }
431
432 INIT_LIST_HEAD(&probe_ent->node);
433 probe_ent->dev = dev;
434
435 probe_ent->sht = port->sht;
436 probe_ent->host_flags = port->host_flags;
437 probe_ent->pio_mask = port->pio_mask;
438 probe_ent->mwdma_mask = port->mwdma_mask;
439 probe_ent->udma_mask = port->udma_mask;
440 probe_ent->port_ops = port->port_ops;
441
442 return probe_ent;
443}
444
445
446/**
447 * ata_pci_init_native_mode - Initialize native-mode driver
448 * @pdev: pci device to be initialized
449 * @port: array[2] of pointers to port info structures.
450 * @ports: bitmap of ports present
451 *
452 * Utility function which allocates and initializes an
453 * ata_probe_ent structure for a standard dual-port
454 * PIO-based IDE controller. The returned ata_probe_ent
455 * structure can be passed to ata_device_add(), after which
456 * it should be freed with kfree().
457 *
458 * The caller need only pass the address of the primary port; the
459 * secondary will be deduced automatically. If the device has
460 * non-standard secondary port mappings this function can be called twice,
461 * once for each interface.
462 */
463
464struct ata_probe_ent *
465ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int ports)
466{
467 struct ata_probe_ent *probe_ent =
468 ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
469 int p = 0;
470
471 if (!probe_ent)
472 return NULL;
473
474 probe_ent->irq = pdev->irq;
475 probe_ent->irq_flags = SA_SHIRQ;
476 probe_ent->private_data = port[0]->private_data;
477
478 if (ports & ATA_PORT_PRIMARY) {
479 probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 0);
480 probe_ent->port[p].altstatus_addr =
481 probe_ent->port[p].ctl_addr =
482 pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS;
483 probe_ent->port[p].bmdma_addr = pci_resource_start(pdev, 4);
484 ata_std_ports(&probe_ent->port[p]);
485 p++;
486 }
487
488 if (ports & ATA_PORT_SECONDARY) {
489 probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 2);
490 probe_ent->port[p].altstatus_addr =
491 probe_ent->port[p].ctl_addr =
492 pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS;
493 probe_ent->port[p].bmdma_addr = pci_resource_start(pdev, 4) + 8;
494 ata_std_ports(&probe_ent->port[p]);
495 p++;
496 }
497
498 probe_ent->n_ports = p;
499 return probe_ent;
500}
501
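A sketch of the calling convention just documented: allocate via ata_pci_init_native_mode(), register with ata_device_add(), then free the probe entry. The my_* names are illustrative, not part of this patch:

/* Sketch only; error handling trimmed for brevity. */
static int my_init_native(struct pci_dev *pdev, struct ata_port_info *pinfo)
{
	struct ata_port_info *ppi[2] = { pinfo, pinfo };
	struct ata_probe_ent *probe_ent;

	probe_ent = ata_pci_init_native_mode(pdev, ppi,
			ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
	if (!probe_ent)
		return -ENOMEM;

	ata_device_add(probe_ent);	/* registers both ports */
	kfree(probe_ent);		/* probe entry not needed afterwards */
	return 0;
}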
502
503static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev,
504 struct ata_port_info *port, int port_num)
505{
506 struct ata_probe_ent *probe_ent;
507
508 probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port);
509 if (!probe_ent)
510 return NULL;
511
512 probe_ent->legacy_mode = 1;
513 probe_ent->n_ports = 1;
514 probe_ent->hard_port_no = port_num;
515 probe_ent->private_data = port->private_data;
516
517 switch(port_num)
518 {
519 case 0:
520 probe_ent->irq = 14;
521 probe_ent->port[0].cmd_addr = 0x1f0;
522 probe_ent->port[0].altstatus_addr =
523 probe_ent->port[0].ctl_addr = 0x3f6;
524 break;
525 case 1:
526 probe_ent->irq = 15;
527 probe_ent->port[0].cmd_addr = 0x170;
528 probe_ent->port[0].altstatus_addr =
529 probe_ent->port[0].ctl_addr = 0x376;
530 break;
531 }
532
533 probe_ent->port[0].bmdma_addr =
534 pci_resource_start(pdev, 4) + 8 * port_num;
535 ata_std_ports(&probe_ent->port[0]);
536
537 return probe_ent;
538}
539
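The legacy decode used above is fixed by PC/AT convention; only the bus-master DMA base comes from BAR 4, each channel owning an 8-byte slice of it (hence the "+ 8 * port_num"). A standalone sketch of that address map, with a fabricated BAR value:

#include <stdio.h>

int main(void)
{
	/* fixed legacy resources, as hard-coded above */
	const struct { int irq; unsigned cmd, ctl; } chan[2] = {
		{ 14, 0x1f0, 0x3f6 },	/* primary   */
		{ 15, 0x170, 0x376 },	/* secondary */
	};
	unsigned bar4 = 0xf000;		/* fabricated pci_resource_start(pdev, 4) */
	int i;

	for (i = 0; i < 2; i++)
		printf("port %d: cmd 0x%03x ctl 0x%03x irq %d bmdma 0x%04x\n",
		       i, chan[i].cmd, chan[i].ctl, chan[i].irq,
		       bar4 + 8 * i);
	return 0;
}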
540
541/**
542 * ata_pci_init_one - Initialize/register PCI IDE host controller
543 * @pdev: Controller to be initialized
544 * @port_info: Information from low-level host driver
545 * @n_ports: Number of ports attached to host controller
546 *
547 * This is a helper function which can be called from a driver's
548 * xxx_init_one() probe function if the hardware uses traditional
549 * IDE taskfile registers.
550 *
551 * This function calls pci_enable_device(), reserves its register
552 * regions, sets the dma mask, enables bus master mode, and calls
553 * ata_device_add().
554 *
555 * LOCKING:
556 * Inherited from PCI layer (may sleep).
557 *
558 * RETURNS:
559 * Zero on success, negative errno-based value on error.
560 */
561
562int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
563 unsigned int n_ports)
564{
565 struct ata_probe_ent *probe_ent = NULL, *probe_ent2 = NULL;
566 struct ata_port_info *port[2];
567 u8 tmp8, mask;
568 unsigned int legacy_mode = 0;
569 int disable_dev_on_err = 1;
570 int rc;
571
572 DPRINTK("ENTER\n");
573
574 port[0] = port_info[0];
575 if (n_ports > 1)
576 port[1] = port_info[1];
577 else
578 port[1] = port[0];
579
580 if ((port[0]->host_flags & ATA_FLAG_NO_LEGACY) == 0
581 && (pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
582 /* TODO: What if one channel is in native mode ... */
583 pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
584 mask = (1 << 2) | (1 << 0);
585 if ((tmp8 & mask) != mask)
586 legacy_mode = (1 << 3);
587 }
588
589 /* FIXME... */
590 if ((!legacy_mode) && (n_ports > 2)) {
591 printk(KERN_ERR "ata: BUG: native mode, n_ports > 2\n");
592 n_ports = 2;
593 /* For now */
594 }
595
596 /* FIXME: Really for ATA it isn't safe because the device may be
597 multi-purpose and we want to leave it alone if it was already
598 enabled. Secondly for shared use as Arjan says we want refcounting
599
600 Checking dev->is_enabled is insufficient as this is not set at
601 boot for the primary video which is BIOS enabled
602 */
603
604 rc = pci_enable_device(pdev);
605 if (rc)
606 return rc;
607
608 rc = pci_request_regions(pdev, DRV_NAME);
609 if (rc) {
610 disable_dev_on_err = 0;
611 goto err_out;
612 }
613
614 /* FIXME: Should use platform specific mappers for legacy port ranges */
615 if (legacy_mode) {
616 if (!request_region(0x1f0, 8, "libata")) {
617 struct resource *conflict, res;
618 res.start = 0x1f0;
619 res.end = 0x1f0 + 8 - 1;
620 conflict = ____request_resource(&ioport_resource, &res);
621 if (!strcmp(conflict->name, "libata"))
622 legacy_mode |= (1 << 0);
623 else {
624 disable_dev_on_err = 0;
625 printk(KERN_WARNING "ata: 0x1f0 IDE port busy\n");
626 }
627 } else
628 legacy_mode |= (1 << 0);
629
630 if (!request_region(0x170, 8, "libata")) {
631 struct resource *conflict, res;
632 res.start = 0x170;
633 res.end = 0x170 + 8 - 1;
634 conflict = ____request_resource(&ioport_resource, &res);
635 if (!strcmp(conflict->name, "libata"))
636 legacy_mode |= (1 << 1);
637 else {
638 disable_dev_on_err = 0;
639 printk(KERN_WARNING "ata: 0x170 IDE port busy\n");
640 }
641 } else
642 legacy_mode |= (1 << 1);
643 }
644
645 /* we have legacy mode, but all ports are unavailable */
646 if (legacy_mode == (1 << 3)) {
647 rc = -EBUSY;
648 goto err_out_regions;
649 }
650
651 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
652 if (rc)
653 goto err_out_regions;
654 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
655 if (rc)
656 goto err_out_regions;
657
658 if (legacy_mode) {
659 if (legacy_mode & (1 << 0))
660 probe_ent = ata_pci_init_legacy_port(pdev, port[0], 0);
661 if (legacy_mode & (1 << 1))
662 probe_ent2 = ata_pci_init_legacy_port(pdev, port[1], 1);
663 } else {
664 if (n_ports == 2)
665 probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
666 else
667 probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY);
668 }
669 if (!probe_ent && !probe_ent2) {
670 rc = -ENOMEM;
671 goto err_out_regions;
672 }
673
674 pci_set_master(pdev);
675
676 /* FIXME: check ata_device_add return */
677 if (legacy_mode) {
678 if (legacy_mode & (1 << 0))
679 ata_device_add(probe_ent);
680 if (legacy_mode & (1 << 1))
681 ata_device_add(probe_ent2);
682 } else
683 ata_device_add(probe_ent);
684
685 kfree(probe_ent);
686 kfree(probe_ent2);
687
688 return 0;
689
690err_out_regions:
691 if (legacy_mode & (1 << 0))
692 release_region(0x1f0, 8);
693 if (legacy_mode & (1 << 1))
694 release_region(0x170, 8);
695 pci_release_regions(pdev);
696err_out:
697 if (disable_dev_on_err)
698 pci_disable_device(pdev);
699 return rc;
700}
701
702#endif /* CONFIG_PCI */
703
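With ata_pci_init_one() in place, a simple SFF driver's PCI probe reduces to filling in an ata_port_info and delegating. A hedged sketch in the style of the drivers in this tree; every xyz_* name is hypothetical:

static int xyz_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static struct ata_port_info info = {
		.sht		= &xyz_sht,		/* hypothetical scsi_host_template */
		.host_flags	= ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
		.pio_mask	= 0x1f,			/* PIO0-4 */
		.mwdma_mask	= 0x07,			/* MWDMA0-2 */
		.udma_mask	= 0x3f,			/* UDMA0-5 */
		.port_ops	= &xyz_port_ops,	/* hypothetical ops table */
	};
	struct ata_port_info *ppi = &info;

	return ata_pci_init_one(pdev, &ppi, 1);
}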
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index 4f91b0dc572b..714b42bad935 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -61,24 +61,17 @@
61 61
62#include "libata.h" 62#include "libata.h"
63 63
64static unsigned int ata_busy_sleep (struct ata_port *ap, 64static unsigned int ata_dev_init_params(struct ata_port *ap,
65 unsigned long tmout_pat, 65 struct ata_device *dev);
66 unsigned long tmout);
67static void ata_dev_reread_id(struct ata_port *ap, struct ata_device *dev);
68static void ata_dev_init_params(struct ata_port *ap, struct ata_device *dev);
69static void ata_set_mode(struct ata_port *ap); 66static void ata_set_mode(struct ata_port *ap);
70static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev); 67static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev);
71static unsigned int ata_get_mode_mask(const struct ata_port *ap, int shift); 68static unsigned int ata_dev_xfermask(struct ata_port *ap,
72static int fgb(u32 bitmap); 69 struct ata_device *dev);
73static int ata_choose_xfer_mode(const struct ata_port *ap,
74 u8 *xfer_mode_out,
75 unsigned int *xfer_shift_out);
76static void __ata_qc_complete(struct ata_queued_cmd *qc);
77 70
78static unsigned int ata_unique_id = 1; 71static unsigned int ata_unique_id = 1;
79static struct workqueue_struct *ata_wq; 72static struct workqueue_struct *ata_wq;
80 73
81int atapi_enabled = 0; 74int atapi_enabled = 1;
82module_param(atapi_enabled, int, 0444); 75module_param(atapi_enabled, int, 0444);
83MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)"); 76MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
84 77
@@ -91,403 +84,6 @@ MODULE_DESCRIPTION("Library module for ATA devices");
91MODULE_LICENSE("GPL"); 84MODULE_LICENSE("GPL");
92MODULE_VERSION(DRV_VERSION); 85MODULE_VERSION(DRV_VERSION);
93 86
94/**
95 * ata_tf_load_pio - send taskfile registers to host controller
96 * @ap: Port to which output is sent
97 * @tf: ATA taskfile register set
98 *
99 * Outputs ATA taskfile to standard ATA host controller.
100 *
101 * LOCKING:
102 * Inherited from caller.
103 */
104
105static void ata_tf_load_pio(struct ata_port *ap, const struct ata_taskfile *tf)
106{
107 struct ata_ioports *ioaddr = &ap->ioaddr;
108 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
109
110 if (tf->ctl != ap->last_ctl) {
111 outb(tf->ctl, ioaddr->ctl_addr);
112 ap->last_ctl = tf->ctl;
113 ata_wait_idle(ap);
114 }
115
116 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
117 outb(tf->hob_feature, ioaddr->feature_addr);
118 outb(tf->hob_nsect, ioaddr->nsect_addr);
119 outb(tf->hob_lbal, ioaddr->lbal_addr);
120 outb(tf->hob_lbam, ioaddr->lbam_addr);
121 outb(tf->hob_lbah, ioaddr->lbah_addr);
122 VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
123 tf->hob_feature,
124 tf->hob_nsect,
125 tf->hob_lbal,
126 tf->hob_lbam,
127 tf->hob_lbah);
128 }
129
130 if (is_addr) {
131 outb(tf->feature, ioaddr->feature_addr);
132 outb(tf->nsect, ioaddr->nsect_addr);
133 outb(tf->lbal, ioaddr->lbal_addr);
134 outb(tf->lbam, ioaddr->lbam_addr);
135 outb(tf->lbah, ioaddr->lbah_addr);
136 VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
137 tf->feature,
138 tf->nsect,
139 tf->lbal,
140 tf->lbam,
141 tf->lbah);
142 }
143
144 if (tf->flags & ATA_TFLAG_DEVICE) {
145 outb(tf->device, ioaddr->device_addr);
146 VPRINTK("device 0x%X\n", tf->device);
147 }
148
149 ata_wait_idle(ap);
150}
151
152/**
153 * ata_tf_load_mmio - send taskfile registers to host controller
154 * @ap: Port to which output is sent
155 * @tf: ATA taskfile register set
156 *
157 * Outputs ATA taskfile to standard ATA host controller using MMIO.
158 *
159 * LOCKING:
160 * Inherited from caller.
161 */
162
163static void ata_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
164{
165 struct ata_ioports *ioaddr = &ap->ioaddr;
166 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
167
168 if (tf->ctl != ap->last_ctl) {
169 writeb(tf->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
170 ap->last_ctl = tf->ctl;
171 ata_wait_idle(ap);
172 }
173
174 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
175 writeb(tf->hob_feature, (void __iomem *) ioaddr->feature_addr);
176 writeb(tf->hob_nsect, (void __iomem *) ioaddr->nsect_addr);
177 writeb(tf->hob_lbal, (void __iomem *) ioaddr->lbal_addr);
178 writeb(tf->hob_lbam, (void __iomem *) ioaddr->lbam_addr);
179 writeb(tf->hob_lbah, (void __iomem *) ioaddr->lbah_addr);
180 VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
181 tf->hob_feature,
182 tf->hob_nsect,
183 tf->hob_lbal,
184 tf->hob_lbam,
185 tf->hob_lbah);
186 }
187
188 if (is_addr) {
189 writeb(tf->feature, (void __iomem *) ioaddr->feature_addr);
190 writeb(tf->nsect, (void __iomem *) ioaddr->nsect_addr);
191 writeb(tf->lbal, (void __iomem *) ioaddr->lbal_addr);
192 writeb(tf->lbam, (void __iomem *) ioaddr->lbam_addr);
193 writeb(tf->lbah, (void __iomem *) ioaddr->lbah_addr);
194 VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
195 tf->feature,
196 tf->nsect,
197 tf->lbal,
198 tf->lbam,
199 tf->lbah);
200 }
201
202 if (tf->flags & ATA_TFLAG_DEVICE) {
203 writeb(tf->device, (void __iomem *) ioaddr->device_addr);
204 VPRINTK("device 0x%X\n", tf->device);
205 }
206
207 ata_wait_idle(ap);
208}
209
210
211/**
212 * ata_tf_load - send taskfile registers to host controller
213 * @ap: Port to which output is sent
214 * @tf: ATA taskfile register set
215 *
216 * Outputs ATA taskfile to standard ATA host controller using MMIO
217 * or PIO as indicated by the ATA_FLAG_MMIO flag.
218 * Writes the control, feature, nsect, lbal, lbam, and lbah registers.
219 * Optionally (ATA_TFLAG_LBA48) writes hob_feature, hob_nsect,
220 * hob_lbal, hob_lbam, and hob_lbah.
221 *
222 * This function waits for idle (!BUSY and !DRQ) after writing
223 * registers. If the control register has a new value, this
224 * function also waits for idle after writing control and before
225 * writing the remaining registers.
226 *
227 * May be used as the tf_load() entry in ata_port_operations.
228 *
229 * LOCKING:
230 * Inherited from caller.
231 */
232void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
233{
234 if (ap->flags & ATA_FLAG_MMIO)
235 ata_tf_load_mmio(ap, tf);
236 else
237 ata_tf_load_pio(ap, tf);
238}
239
240/**
241 * ata_exec_command_pio - issue ATA command to host controller
242 * @ap: port to which command is being issued
243 * @tf: ATA taskfile register set
244 *
245 * Issues PIO write to ATA command register, with proper
246 * synchronization with interrupt handler / other threads.
247 *
248 * LOCKING:
249 * spin_lock_irqsave(host_set lock)
250 */
251
252static void ata_exec_command_pio(struct ata_port *ap, const struct ata_taskfile *tf)
253{
254 DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
255
256 outb(tf->command, ap->ioaddr.command_addr);
257 ata_pause(ap);
258}
259
260
261/**
262 * ata_exec_command_mmio - issue ATA command to host controller
263 * @ap: port to which command is being issued
264 * @tf: ATA taskfile register set
265 *
266 * Issues MMIO write to ATA command register, with proper
267 * synchronization with interrupt handler / other threads.
268 *
269 * LOCKING:
270 * spin_lock_irqsave(host_set lock)
271 */
272
273static void ata_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
274{
275 DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
276
277 writeb(tf->command, (void __iomem *) ap->ioaddr.command_addr);
278 ata_pause(ap);
279}
280
281
282/**
283 * ata_exec_command - issue ATA command to host controller
284 * @ap: port to which command is being issued
285 * @tf: ATA taskfile register set
286 *
287 * Issues PIO/MMIO write to ATA command register, with proper
288 * synchronization with interrupt handler / other threads.
289 *
290 * LOCKING:
291 * spin_lock_irqsave(host_set lock)
292 */
293void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
294{
295 if (ap->flags & ATA_FLAG_MMIO)
296 ata_exec_command_mmio(ap, tf);
297 else
298 ata_exec_command_pio(ap, tf);
299}
300
301/**
302 * ata_tf_to_host - issue ATA taskfile to host controller
303 * @ap: port to which command is being issued
304 * @tf: ATA taskfile register set
305 *
306 * Issues ATA taskfile register set to ATA host controller,
307 * with proper synchronization with interrupt handler and
308 * other threads.
309 *
310 * LOCKING:
311 * spin_lock_irqsave(host_set lock)
312 */
313
314static inline void ata_tf_to_host(struct ata_port *ap,
315 const struct ata_taskfile *tf)
316{
317 ap->ops->tf_load(ap, tf);
318 ap->ops->exec_command(ap, tf);
319}
320
321/**
322 * ata_tf_read_pio - input device's ATA taskfile shadow registers
323 * @ap: Port from which input is read
324 * @tf: ATA taskfile register set for storing input
325 *
326 * Reads ATA taskfile registers for currently-selected device
327 * into @tf.
328 *
329 * LOCKING:
330 * Inherited from caller.
331 */
332
333static void ata_tf_read_pio(struct ata_port *ap, struct ata_taskfile *tf)
334{
335 struct ata_ioports *ioaddr = &ap->ioaddr;
336
337 tf->command = ata_check_status(ap);
338 tf->feature = inb(ioaddr->error_addr);
339 tf->nsect = inb(ioaddr->nsect_addr);
340 tf->lbal = inb(ioaddr->lbal_addr);
341 tf->lbam = inb(ioaddr->lbam_addr);
342 tf->lbah = inb(ioaddr->lbah_addr);
343 tf->device = inb(ioaddr->device_addr);
344
345 if (tf->flags & ATA_TFLAG_LBA48) {
346 outb(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
347 tf->hob_feature = inb(ioaddr->error_addr);
348 tf->hob_nsect = inb(ioaddr->nsect_addr);
349 tf->hob_lbal = inb(ioaddr->lbal_addr);
350 tf->hob_lbam = inb(ioaddr->lbam_addr);
351 tf->hob_lbah = inb(ioaddr->lbah_addr);
352 }
353}
354
355/**
356 * ata_tf_read_mmio - input device's ATA taskfile shadow registers
357 * @ap: Port from which input is read
358 * @tf: ATA taskfile register set for storing input
359 *
360 * Reads ATA taskfile registers for currently-selected device
361 * into @tf via MMIO.
362 *
363 * LOCKING:
364 * Inherited from caller.
365 */
366
367static void ata_tf_read_mmio(struct ata_port *ap, struct ata_taskfile *tf)
368{
369 struct ata_ioports *ioaddr = &ap->ioaddr;
370
371 tf->command = ata_check_status(ap);
372 tf->feature = readb((void __iomem *)ioaddr->error_addr);
373 tf->nsect = readb((void __iomem *)ioaddr->nsect_addr);
374 tf->lbal = readb((void __iomem *)ioaddr->lbal_addr);
375 tf->lbam = readb((void __iomem *)ioaddr->lbam_addr);
376 tf->lbah = readb((void __iomem *)ioaddr->lbah_addr);
377 tf->device = readb((void __iomem *)ioaddr->device_addr);
378
379 if (tf->flags & ATA_TFLAG_LBA48) {
380 writeb(tf->ctl | ATA_HOB, (void __iomem *) ap->ioaddr.ctl_addr);
381 tf->hob_feature = readb((void __iomem *)ioaddr->error_addr);
382 tf->hob_nsect = readb((void __iomem *)ioaddr->nsect_addr);
383 tf->hob_lbal = readb((void __iomem *)ioaddr->lbal_addr);
384 tf->hob_lbam = readb((void __iomem *)ioaddr->lbam_addr);
385 tf->hob_lbah = readb((void __iomem *)ioaddr->lbah_addr);
386 }
387}
388
389
390/**
391 * ata_tf_read - input device's ATA taskfile shadow registers
392 * @ap: Port from which input is read
393 * @tf: ATA taskfile register set for storing input
394 *
395 * Reads ATA taskfile registers for currently-selected device
396 * into @tf.
397 *
398 * Reads nsect, lbal, lbam, lbah, and device. If ATA_TFLAG_LBA48
399 * is set, also reads the hob registers.
400 *
401 * May be used as the tf_read() entry in ata_port_operations.
402 *
403 * LOCKING:
404 * Inherited from caller.
405 */
406void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
407{
408 if (ap->flags & ATA_FLAG_MMIO)
409 ata_tf_read_mmio(ap, tf);
410 else
411 ata_tf_read_pio(ap, tf);
412}
413
414/**
415 * ata_check_status_pio - Read device status reg & clear interrupt
416 * @ap: port where the device is
417 *
418 * Reads ATA taskfile status register for currently-selected device
419 * and return its value. This also clears pending interrupts
420 * from this device
421 *
422 * LOCKING:
423 * Inherited from caller.
424 */
425static u8 ata_check_status_pio(struct ata_port *ap)
426{
427 return inb(ap->ioaddr.status_addr);
428}
429
430/**
431 * ata_check_status_mmio - Read device status reg & clear interrupt
432 * @ap: port where the device is
433 *
434 * Reads ATA taskfile status register for currently-selected device
435 * via MMIO and return its value. This also clears pending interrupts
436 * from this device
437 *
438 * LOCKING:
439 * Inherited from caller.
440 */
441static u8 ata_check_status_mmio(struct ata_port *ap)
442{
443 return readb((void __iomem *) ap->ioaddr.status_addr);
444}
445
446
447/**
448 * ata_check_status - Read device status reg & clear interrupt
449 * @ap: port where the device is
450 *
451 * Reads ATA taskfile status register for currently-selected device
452 * and return its value. This also clears pending interrupts
453 * from this device
454 *
455 * May be used as the check_status() entry in ata_port_operations.
456 *
457 * LOCKING:
458 * Inherited from caller.
459 */
460u8 ata_check_status(struct ata_port *ap)
461{
462 if (ap->flags & ATA_FLAG_MMIO)
463 return ata_check_status_mmio(ap);
464 return ata_check_status_pio(ap);
465}
466
467
468/**
469 * ata_altstatus - Read device alternate status reg
470 * @ap: port where the device is
471 *
472 * Reads ATA taskfile alternate status register for
473 * currently-selected device and return its value.
474 *
475 * Note: may NOT be used as the check_altstatus() entry in
476 * ata_port_operations.
477 *
478 * LOCKING:
479 * Inherited from caller.
480 */
481u8 ata_altstatus(struct ata_port *ap)
482{
483 if (ap->ops->check_altstatus)
484 return ap->ops->check_altstatus(ap);
485
486 if (ap->flags & ATA_FLAG_MMIO)
487 return readb((void __iomem *)ap->ioaddr.altstatus_addr);
488 return inb(ap->ioaddr.altstatus_addr);
489}
490
491 87
492/** 88/**
493 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure 89 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
@@ -632,58 +228,148 @@ int ata_rwcmd_protocol(struct ata_queued_cmd *qc)
632 return -1; 228 return -1;
633} 229}
634 230
635static const char * const xfer_mode_str[] = { 231/**
636 "UDMA/16", 232 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
637 "UDMA/25", 233 * @pio_mask: pio_mask
638 "UDMA/33", 234 * @mwdma_mask: mwdma_mask
639 "UDMA/44", 235 * @udma_mask: udma_mask
640 "UDMA/66", 236 *
641 "UDMA/100", 237 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
642 "UDMA/133", 238 * unsigned int xfer_mask.
643 "UDMA7", 239 *
644 "MWDMA0", 240 * LOCKING:
645 "MWDMA1", 241 * None.
646 "MWDMA2", 242 *
647 "PIO0", 243 * RETURNS:
648 "PIO1", 244 * Packed xfer_mask.
649 "PIO2", 245 */
650 "PIO3", 246static unsigned int ata_pack_xfermask(unsigned int pio_mask,
651 "PIO4", 247 unsigned int mwdma_mask,
248 unsigned int udma_mask)
249{
250 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
251 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
252 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
253}
254
255static const struct ata_xfer_ent {
256 unsigned int shift, bits;
257 u8 base;
258} ata_xfer_tbl[] = {
259 { ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
260 { ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
261 { ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
262 { -1, },
652}; 263};
653 264
654/** 265/**
655 * ata_udma_string - convert UDMA bit offset to string 266 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
656 * @mask: mask of bits supported; only highest bit counts. 267 * @xfer_mask: xfer_mask of interest
657 * 268 *
658 * Determine string which represents the highest speed 269 * Return matching XFER_* value for @xfer_mask. Only the highest
659 * (highest bit in @udma_mask). 270 * bit of @xfer_mask is considered.
660 * 271 *
661 * LOCKING: 272 * LOCKING:
662 * None. 273 * None.
663 * 274 *
664 * RETURNS: 275 * RETURNS:
665 * Constant C string representing highest speed listed in 276 * Matching XFER_* value, 0 if no match found.
666 * @udma_mask, or the constant C string "<n/a>".
667 */ 277 */
278static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
279{
280 int highbit = fls(xfer_mask) - 1;
281 const struct ata_xfer_ent *ent;
668 282
669static const char *ata_mode_string(unsigned int mask) 283 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
284 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
285 return ent->base + highbit - ent->shift;
286 return 0;
287}
288
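The table lookup above maps the highest mask bit to an ATA SET FEATURES transfer-mode code. A standalone sketch, assuming the conventional XFER_* base values (XFER_PIO_0 = 0x08, XFER_MW_DMA_0 = 0x20, XFER_UDMA_0 = 0x40) and the shift layout implied by the new mask packing:

#include <stdio.h>

struct xfer_ent { int shift, bits; unsigned char base; };

static const struct xfer_ent tbl[] = {
	{ 0, 5, 0x08 },		/* PIO,   XFER_PIO_0    (assumed base) */
	{ 5, 3, 0x20 },		/* MWDMA, XFER_MW_DMA_0 (assumed base) */
	{ 8, 8, 0x40 },		/* UDMA,  XFER_UDMA_0   (assumed base) */
	{ -1, 0, 0 },
};

int main(void)
{
	unsigned mask = 0x3fff;	/* PIO0-4 | MWDMA0-2 | UDMA0-5 */
	int highbit = 31 - __builtin_clz(mask);	/* fls(mask) - 1 */
	const struct xfer_ent *e;

	for (e = tbl; e->shift >= 0; e++)
		if (highbit >= e->shift && highbit < e->shift + e->bits)
			printf("mode 0x%02x\n", e->base + highbit - e->shift);
	return 0;	/* prints: mode 0x45, i.e. XFER_UDMA_5 */
}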
289/**
290 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
291 * @xfer_mode: XFER_* of interest
292 *
293 * Return matching xfer_mask for @xfer_mode.
294 *
295 * LOCKING:
296 * None.
297 *
298 * RETURNS:
299 * Matching xfer_mask, 0 if no match found.
300 */
301static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
670{ 302{
671 int i; 303 const struct ata_xfer_ent *ent;
672 304
673 for (i = 7; i >= 0; i--) 305 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
674 if (mask & (1 << i)) 306 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
675 goto out; 307 return 1 << (ent->shift + xfer_mode - ent->base);
676 for (i = ATA_SHIFT_MWDMA + 2; i >= ATA_SHIFT_MWDMA; i--) 308 return 0;
677 if (mask & (1 << i)) 309}
678 goto out;
679 for (i = ATA_SHIFT_PIO + 4; i >= ATA_SHIFT_PIO; i--)
680 if (mask & (1 << i))
681 goto out;
682 310
683 return "<n/a>"; 311/**
312 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
313 * @xfer_mode: XFER_* of interest
314 *
315 * Return matching xfer_shift for @xfer_mode.
316 *
317 * LOCKING:
318 * None.
319 *
320 * RETURNS:
321 * Matching xfer_shift, -1 if no match found.
322 */
323static int ata_xfer_mode2shift(unsigned int xfer_mode)
324{
325 const struct ata_xfer_ent *ent;
684 326
685out: 327 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
686 return xfer_mode_str[i]; 328 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
329 return ent->shift;
330 return -1;
331}
332
333/**
334 * ata_mode_string - convert xfer_mask to string
335 * @xfer_mask: mask of bits supported; only highest bit counts.
336 *
337 * Determine string which represents the highest speed
338 * (highest bit in @xfer_mask).
339 *
340 * LOCKING:
341 * None.
342 *
343 * RETURNS:
344 * Constant C string representing highest speed listed in
345 * @xfer_mask, or the constant C string "<n/a>".
346 */
347static const char *ata_mode_string(unsigned int xfer_mask)
348{
349 static const char * const xfer_mode_str[] = {
350 "PIO0",
351 "PIO1",
352 "PIO2",
353 "PIO3",
354 "PIO4",
355 "MWDMA0",
356 "MWDMA1",
357 "MWDMA2",
358 "UDMA/16",
359 "UDMA/25",
360 "UDMA/33",
361 "UDMA/44",
362 "UDMA/66",
363 "UDMA/100",
364 "UDMA/133",
365 "UDMA7",
366 };
367 int highbit;
368
369 highbit = fls(xfer_mask) - 1;
370 if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
371 return xfer_mode_str[highbit];
372 return "<n/a>";
687} 373}
688 374
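The whole mask scheme can be exercised in a few lines of ordinary C. The shift/width constants below (PIO at bit 0, width 5; MWDMA at bit 5, width 3; UDMA at bit 8, width 8) are assumptions consistent with the table ordering and the fls() lookup above:

#include <stdio.h>

#define SHIFT_PIO	0
#define SHIFT_MWDMA	5
#define SHIFT_UDMA	8

static const char * const mode_str[] = {
	"PIO0", "PIO1", "PIO2", "PIO3", "PIO4",
	"MWDMA0", "MWDMA1", "MWDMA2",
	"UDMA/16", "UDMA/25", "UDMA/33", "UDMA/44",
	"UDMA/66", "UDMA/100", "UDMA/133", "UDMA7",
};

int main(void)
{
	unsigned pio = 0x1f, mwdma = 0x07, udma = 0x3f; /* PIO0-4, MWDMA0-2, UDMA0-5 */
	unsigned mask = (pio << SHIFT_PIO) | (mwdma << SHIFT_MWDMA)
		      | (udma << SHIFT_UDMA);
	int highbit = 31 - __builtin_clz(mask);	/* fls(mask) - 1 */

	printf("xfer_mask=0x%04x -> %s\n", mask, mode_str[highbit]);
	return 0;	/* prints: xfer_mask=0x3fff -> UDMA/100, i.e. UDMA5 */
}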
689/** 375/**
@@ -838,6 +524,7 @@ unsigned int ata_dev_classify(const struct ata_taskfile *tf)
838 * ata_dev_try_classify - Parse returned ATA device signature 524 * ata_dev_try_classify - Parse returned ATA device signature
839 * @ap: ATA channel to examine 525 * @ap: ATA channel to examine
840 * @device: Device to examine (starting at zero) 526 * @device: Device to examine (starting at zero)
527 * @r_err: Value of error register on completion
841 * 528 *
842 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs, 529 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
843 * an ATA/ATAPI-defined set of values is placed in the ATA 530 * an ATA/ATAPI-defined set of values is placed in the ATA
@@ -850,11 +537,14 @@ unsigned int ata_dev_classify(const struct ata_taskfile *tf)
850 * 537 *
851 * LOCKING: 538 * LOCKING:
852 * caller. 539 * caller.
540 *
541 * RETURNS:
542 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
853 */ 543 */
854 544
855static u8 ata_dev_try_classify(struct ata_port *ap, unsigned int device) 545static unsigned int
546ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
856{ 547{
857 struct ata_device *dev = &ap->device[device];
858 struct ata_taskfile tf; 548 struct ata_taskfile tf;
859 unsigned int class; 549 unsigned int class;
860 u8 err; 550 u8 err;
@@ -865,8 +555,8 @@ static u8 ata_dev_try_classify(struct ata_port *ap, unsigned int device)
865 555
866 ap->ops->tf_read(ap, &tf); 556 ap->ops->tf_read(ap, &tf);
867 err = tf.feature; 557 err = tf.feature;
868 558 if (r_err)
869 dev->class = ATA_DEV_NONE; 559 *r_err = err;
870 560
871 /* see if device passed diags */ 561 /* see if device passed diags */
872 if (err == 1) 562 if (err == 1)
@@ -874,22 +564,20 @@ static u8 ata_dev_try_classify(struct ata_port *ap, unsigned int device)
874 else if ((device == 0) && (err == 0x81)) 564 else if ((device == 0) && (err == 0x81))
875 /* do nothing */ ; 565 /* do nothing */ ;
876 else 566 else
877 return err; 567 return ATA_DEV_NONE;
878 568
879 /* determine if device if ATA or ATAPI */ 569 /* determine if device is ATA or ATAPI */
880 class = ata_dev_classify(&tf); 570 class = ata_dev_classify(&tf);
571
881 if (class == ATA_DEV_UNKNOWN) 572 if (class == ATA_DEV_UNKNOWN)
882 return err; 573 return ATA_DEV_NONE;
883 if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0)) 574 if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
884 return err; 575 return ATA_DEV_NONE;
885 576 return class;
886 dev->class = class;
887
888 return err;
889} 577}
890 578
891/** 579/**
892 * ata_dev_id_string - Convert IDENTIFY DEVICE page into string 580 * ata_id_string - Convert IDENTIFY DEVICE page into string
893 * @id: IDENTIFY DEVICE results we will examine 581 * @id: IDENTIFY DEVICE results we will examine
894 * @s: string into which data is output 582 * @s: string into which data is output
895 * @ofs: offset into identify device page 583 * @ofs: offset into identify device page
@@ -903,8 +591,8 @@ static u8 ata_dev_try_classify(struct ata_port *ap, unsigned int device)
903 * caller. 591 * caller.
904 */ 592 */
905 593
906void ata_dev_id_string(const u16 *id, unsigned char *s, 594void ata_id_string(const u16 *id, unsigned char *s,
907 unsigned int ofs, unsigned int len) 595 unsigned int ofs, unsigned int len)
908{ 596{
909 unsigned int c; 597 unsigned int c;
910 598
@@ -922,6 +610,49 @@ void ata_dev_id_string(const u16 *id, unsigned char *s,
922 } 610 }
923} 611}
924 612
613/**
614 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
615 * @id: IDENTIFY DEVICE results we will examine
616 * @s: string into which data is output
617 * @ofs: offset into identify device page
618 * @len: length of string to return. Must be an odd number.
619 *
620 * This function is identical to ata_id_string except that it
621 * trims trailing spaces and null-terminates the resulting
622 * string. @len must be the actual maximum length (an even number) + 1.
623 *
624 * LOCKING:
625 * caller.
626 */
627void ata_id_c_string(const u16 *id, unsigned char *s,
628 unsigned int ofs, unsigned int len)
629{
630 unsigned char *p;
631
632 WARN_ON(!(len & 1));
633
634 ata_id_string(id, s, ofs, len - 1);
635
636 p = s + strnlen(s, len - 1);
637 while (p > s && p[-1] == ' ')
638 p--;
639 *p = '\0';
640}
641
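IDENTIFY data stores ASCII strings big-endian within each 16-bit word, which is why ata_id_string() emits the high byte of each word first; ata_id_c_string() then strips the ATA-mandated trailing-space padding. A standalone sketch of both steps on a fabricated model string:

#include <stdio.h>
#include <string.h>

static void id_string(const unsigned short *id, unsigned char *s,
		      unsigned ofs, unsigned len)
{
	while (len > 0) {
		*s++ = id[ofs] >> 8;	/* high byte first */
		*s++ = id[ofs] & 0xff;
		ofs++;
		len -= 2;
	}
}

int main(void)
{
	/* fabricated IDENTIFY words spelling "ST1234  " */
	unsigned short id[4] = { 'S' << 8 | 'T', '1' << 8 | '2',
				 '3' << 8 | '4', ' ' << 8 | ' ' };
	unsigned char buf[9];
	unsigned char *p;

	id_string(id, buf, 0, 8);		/* ata_id_string() equivalent */
	p = buf + strnlen((char *)buf, 8);
	while (p > buf && p[-1] == ' ')		/* ata_id_c_string() trim */
		p--;
	*p = '\0';
	printf("model: \"%s\"\n", buf);		/* -> "ST1234" */
	return 0;
}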
642static u64 ata_id_n_sectors(const u16 *id)
643{
644 if (ata_id_has_lba(id)) {
645 if (ata_id_has_lba48(id))
646 return ata_id_u64(id, 100);
647 else
648 return ata_id_u32(id, 60);
649 } else {
650 if (ata_id_current_chs_valid(id))
651 return ata_id_u32(id, 57);
652 else
653 return id[1] * id[3] * id[6];
654 }
655}
925 656
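ata_id_n_sectors() prefers LBA48 capacity (words 100-103) over LBA28 (words 60-61), then current CHS (words 57-58), then the default CHS product. The multi-word fields are least-significant-word first, matching the ata_id_u64()/ata_id_u32() helpers; a quick standalone check of the LBA48 branch with fabricated data:

#include <stdio.h>

/* assumed ata_id_u64() convention: id[n] is the least significant word */
static unsigned long long id_u64(const unsigned short *id, int n)
{
	return (unsigned long long)id[n] |
	       (unsigned long long)id[n + 1] << 16 |
	       (unsigned long long)id[n + 2] << 32 |
	       (unsigned long long)id[n + 3] << 48;
}

int main(void)
{
	unsigned short id[256] = { 0 };

	id[100] = 0x1000;	/* fabricated 0x200001000-sector disk */
	id[102] = 0x0002;
	printf("n_sectors = %llu\n", id_u64(id, 100));
	return 0;
}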
926/** 657/**
927 * ata_noop_dev_select - Select device 0/1 on ATA bus 658 * ata_noop_dev_select - Select device 0/1 on ATA bus
@@ -1011,90 +742,172 @@ void ata_dev_select(struct ata_port *ap, unsigned int device,
1011 742
1012/** 743/**
1013 * ata_dump_id - IDENTIFY DEVICE info debugging output 744 * ata_dump_id - IDENTIFY DEVICE info debugging output
1014 * @dev: Device whose IDENTIFY DEVICE page we will dump 745 * @id: IDENTIFY DEVICE page to dump
1015 * 746 *
1016 * Dump selected 16-bit words from a detected device's 747 * Dump selected 16-bit words from the given IDENTIFY DEVICE
1017 * IDENTIFY PAGE page. 748 * page.
1018 * 749 *
1019 * LOCKING: 750 * LOCKING:
1020 * caller. 751 * caller.
1021 */ 752 */
1022 753
1023static inline void ata_dump_id(const struct ata_device *dev) 754static inline void ata_dump_id(const u16 *id)
1024{ 755{
1025 DPRINTK("49==0x%04x " 756 DPRINTK("49==0x%04x "
1026 "53==0x%04x " 757 "53==0x%04x "
1027 "63==0x%04x " 758 "63==0x%04x "
1028 "64==0x%04x " 759 "64==0x%04x "
1029 "75==0x%04x \n", 760 "75==0x%04x \n",
1030 dev->id[49], 761 id[49],
1031 dev->id[53], 762 id[53],
1032 dev->id[63], 763 id[63],
1033 dev->id[64], 764 id[64],
1034 dev->id[75]); 765 id[75]);
1035 DPRINTK("80==0x%04x " 766 DPRINTK("80==0x%04x "
1036 "81==0x%04x " 767 "81==0x%04x "
1037 "82==0x%04x " 768 "82==0x%04x "
1038 "83==0x%04x " 769 "83==0x%04x "
1039 "84==0x%04x \n", 770 "84==0x%04x \n",
1040 dev->id[80], 771 id[80],
1041 dev->id[81], 772 id[81],
1042 dev->id[82], 773 id[82],
1043 dev->id[83], 774 id[83],
1044 dev->id[84]); 775 id[84]);
1045 DPRINTK("88==0x%04x " 776 DPRINTK("88==0x%04x "
1046 "93==0x%04x\n", 777 "93==0x%04x\n",
1047 dev->id[88], 778 id[88],
1048 dev->id[93]); 779 id[93]);
1049} 780}
1050 781
1051/* 782/**
1052 * Compute the PIO modes available for this device. This is not as 783 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1053 * trivial as it seems if we must consider early devices correctly. 784 * @id: IDENTIFY data to compute xfer mask from
785 *
786 * Compute the xfermask for this device. This is not as trivial
787 * as it seems if we must consider early devices correctly.
788 *
789 * FIXME: pre IDE drive timing (do we care ?).
790 *
791 * LOCKING:
792 * None.
1054 * 793 *
1055 * FIXME: pre IDE drive timing (do we care ?). 794 * RETURNS:
795 * Computed xfermask
1056 */ 796 */
1057 797static unsigned int ata_id_xfermask(const u16 *id)
1058static unsigned int ata_pio_modes(const struct ata_device *adev)
1059{ 798{
1060 u16 modes; 799 unsigned int pio_mask, mwdma_mask, udma_mask;
1061 800
1062 /* Usual case. Word 53 indicates word 64 is valid */ 801 /* Usual case. Word 53 indicates word 64 is valid */
1063 if (adev->id[ATA_ID_FIELD_VALID] & (1 << 1)) { 802 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1064 modes = adev->id[ATA_ID_PIO_MODES] & 0x03; 803 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1065 modes <<= 3; 804 pio_mask <<= 3;
1066 modes |= 0x7; 805 pio_mask |= 0x7;
1067 return modes; 806 } else {
807 /* If word 64 isn't valid then Word 51 high byte holds
808 * the PIO timing number for the maximum. Turn it into
809 * a mask.
810 */
811 pio_mask = (2 << (id[ATA_ID_OLD_PIO_MODES] & 0xFF)) - 1 ;
812
813 /* But wait... there's more. Design your standards by
814 * committee and you too can get a free iordy field to
815 * process. However, it's the speeds, not the modes, that
816 * are supported... Note that drivers using the timing API
817 * will get this right anyway
818 */
1068 } 819 }
1069 820
1070 /* If word 64 isn't valid then Word 51 high byte holds the PIO timing 821 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
1071 number for the maximum. Turn it into a mask and return it */ 822
1072 modes = (2 << ((adev->id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF)) - 1 ; 823 udma_mask = 0;
1073 return modes; 824 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1074 /* But wait.. there's more. Design your standards by committee and 825 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
1075 you too can get a free iordy field to process. However its the 826
1076 speeds not the modes that are supported... Note drivers using the 827 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1077 timing API will get this right anyway */
1078} 828}
1079 829
1080struct ata_exec_internal_arg { 830/**
1081 unsigned int err_mask; 831 * ata_port_queue_task - Queue port_task
1082 struct ata_taskfile *tf; 832 * @ap: The ata_port to queue port_task for
1083 struct completion *waiting; 833 *
1084}; 834 * Schedule @fn(@data) for execution after @delay jiffies using
835 * port_task. There is one port_task per port and it's the
836 * user(low level driver)'s responsibility to make sure that only
837 * one task is active at any given time.
838 *
839 * libata core layer takes care of synchronization between
840 * port_task and EH, so callers of ata_port_queue_task() need not
841 * handle EH synchronization themselves.
842 *
843 * LOCKING:
844 * Inherited from caller.
845 */
846void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data,
847 unsigned long delay)
848{
849 int rc;
850
851 if (ap->flags & ATA_FLAG_FLUSH_PORT_TASK)
852 return;
853
854 PREPARE_WORK(&ap->port_task, fn, data);
855
856 if (!delay)
857 rc = queue_work(ata_wq, &ap->port_task);
858 else
859 rc = queue_delayed_work(ata_wq, &ap->port_task, delay);
860
861 /* rc == 0 means that another user is using port task */
862 WARN_ON(rc == 0);
863}
1085 864
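The single-task contract above usually shows up in a low-level driver as a self-rearming poll routine. A hedged sketch; my_pio_task() and my_hsm_step() are hypothetical names, not part of this patch:

static void my_pio_task(void *data)
{
	struct ata_port *ap = data;

	/* my_hsm_step() is hypothetical: advance the PIO state
	 * machine and return nonzero if more polling is needed. */
	if (my_hsm_step(ap))
		ata_port_queue_task(ap, my_pio_task, ap,
				    msecs_to_jiffies(2));	/* re-arm */
}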
1086int ata_qc_complete_internal(struct ata_queued_cmd *qc) 865/**
866 * ata_port_flush_task - Flush port_task
867 * @ap: The ata_port to flush port_task for
868 *
869 * After this function completes, port_task is guaranteed not to
870 * be running or scheduled.
871 *
872 * LOCKING:
873 * Kernel thread context (may sleep)
874 */
875void ata_port_flush_task(struct ata_port *ap)
1087{ 876{
1088 struct ata_exec_internal_arg *arg = qc->private_data; 877 unsigned long flags;
1089 struct completion *waiting = arg->waiting;
1090 878
1091 if (!(qc->err_mask & ~AC_ERR_DEV)) 879 DPRINTK("ENTER\n");
1092 qc->ap->ops->tf_read(qc->ap, arg->tf);
1093 arg->err_mask = qc->err_mask;
1094 arg->waiting = NULL;
1095 complete(waiting);
1096 880
1097 return 0; 881 spin_lock_irqsave(&ap->host_set->lock, flags);
882 ap->flags |= ATA_FLAG_FLUSH_PORT_TASK;
883 spin_unlock_irqrestore(&ap->host_set->lock, flags);
884
885 DPRINTK("flush #1\n");
886 flush_workqueue(ata_wq);
887
888 /*
889 * At this point, if a task is running, it's guaranteed to see
890 * the FLUSH flag; thus, it will never queue pio tasks again.
891 * Cancel and flush.
892 */
893 if (!cancel_delayed_work(&ap->port_task)) {
894 DPRINTK("flush #2\n");
895 flush_workqueue(ata_wq);
896 }
897
898 spin_lock_irqsave(&ap->host_set->lock, flags);
899 ap->flags &= ~ATA_FLAG_FLUSH_PORT_TASK;
900 spin_unlock_irqrestore(&ap->host_set->lock, flags);
901
902 DPRINTK("EXIT\n");
903}
904
905void ata_qc_complete_internal(struct ata_queued_cmd *qc)
906{
907 struct completion *waiting = qc->private_data;
908
909 qc->ap->ops->tf_read(qc->ap, &qc->tf);
910 complete(waiting);
1098} 911}
1099 912
1100/** 913/**
@@ -1125,7 +938,7 @@ ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
1125 struct ata_queued_cmd *qc; 938 struct ata_queued_cmd *qc;
1126 DECLARE_COMPLETION(wait); 939 DECLARE_COMPLETION(wait);
1127 unsigned long flags; 940 unsigned long flags;
1128 struct ata_exec_internal_arg arg; 941 unsigned int err_mask;
1129 942
1130 spin_lock_irqsave(&ap->host_set->lock, flags); 943 spin_lock_irqsave(&ap->host_set->lock, flags);
1131 944
@@ -1139,13 +952,12 @@ ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
1139 qc->nsect = buflen / ATA_SECT_SIZE; 952 qc->nsect = buflen / ATA_SECT_SIZE;
1140 } 953 }
1141 954
1142 arg.waiting = &wait; 955 qc->private_data = &wait;
1143 arg.tf = tf;
1144 qc->private_data = &arg;
1145 qc->complete_fn = ata_qc_complete_internal; 956 qc->complete_fn = ata_qc_complete_internal;
1146 957
1147 if (ata_qc_issue(qc)) 958 qc->err_mask = ata_qc_issue(qc);
1148 goto issue_fail; 959 if (qc->err_mask)
960 ata_qc_complete(qc);
1149 961
1150 spin_unlock_irqrestore(&ap->host_set->lock, flags); 962 spin_unlock_irqrestore(&ap->host_set->lock, flags);
1151 963
@@ -1158,8 +970,8 @@ ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
1158 * before the caller cleans up, it will result in a 970 * before the caller cleans up, it will result in a
1159 * spurious interrupt. We can live with that. 971 * spurious interrupt. We can live with that.
1160 */ 972 */
1161 if (arg.waiting) { 973 if (qc->flags & ATA_QCFLAG_ACTIVE) {
1162 qc->err_mask = AC_ERR_OTHER; 974 qc->err_mask = AC_ERR_TIMEOUT;
1163 ata_qc_complete(qc); 975 ata_qc_complete(qc);
1164 printk(KERN_WARNING "ata%u: qc timeout (cmd 0x%x)\n", 976 printk(KERN_WARNING "ata%u: qc timeout (cmd 0x%x)\n",
1165 ap->id, command); 977 ap->id, command);
@@ -1168,12 +980,12 @@ ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
1168 spin_unlock_irqrestore(&ap->host_set->lock, flags); 980 spin_unlock_irqrestore(&ap->host_set->lock, flags);
1169 } 981 }
1170 982
1171 return arg.err_mask; 983 *tf = qc->tf;
984 err_mask = qc->err_mask;
1172 985
1173 issue_fail:
1174 ata_qc_free(qc); 986 ata_qc_free(qc);
1175 spin_unlock_irqrestore(&ap->host_set->lock, flags); 987
1176 return AC_ERR_OTHER; 988 return err_mask;
1177} 989}
1178 990
1179/** 991/**
@@ -1210,73 +1022,78 @@ unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1210} 1022}
1211 1023
1212/** 1024/**
1213 * ata_dev_identify - obtain IDENTIFY x DEVICE page 1025 * ata_dev_read_id - Read ID data from the specified device
1214 * @ap: port on which device we wish to probe resides 1026 * @ap: port on which target device resides
1215 * @device: device bus address, starting at zero 1027 * @dev: target device
1216 * 1028 * @p_class: pointer to class of the target device (may be changed)
1217 * Following bus reset, we issue the IDENTIFY [PACKET] DEVICE 1029 * @post_reset: is this read ID post-reset?
1218 * command, and read back the 512-byte device information page. 1030 * @p_id: read IDENTIFY page (newly allocated)
1219 * The device information page is fed to us via the standard 1031 *
1220 * PIO-IN protocol, but we hand-code it here. (TODO: investigate 1032 * Read ID data from the specified device. ATA_CMD_ID_ATA is
1221 * using standard PIO-IN paths) 1033 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1222 * 1034 * devices. This function also takes care of EDD signature
1223 * After reading the device information page, we use several 1035 * misreporting (to be removed once EDD support is gone) and
1224 * bits of information from it to initialize data structures 1036 * issues ATA_CMD_INIT_DEV_PARAMS for pre-ATA4 drives.
1225 * that will be used during the lifetime of the ata_device.
1226 * Other data from the info page is used to disqualify certain
1227 * older ATA devices we do not wish to support.
1228 * 1037 *
1229 * LOCKING: 1038 * LOCKING:
1230 * Inherited from caller. Some functions called by this function 1039 * Kernel thread context (may sleep)
1231 * obtain the host_set lock. 1040 *
1041 * RETURNS:
1042 * 0 on success, -errno otherwise.
1232 */ 1043 */
1233 1044static int ata_dev_read_id(struct ata_port *ap, struct ata_device *dev,
1234static void ata_dev_identify(struct ata_port *ap, unsigned int device) 1045 unsigned int *p_class, int post_reset, u16 **p_id)
1235{ 1046{
1236 struct ata_device *dev = &ap->device[device]; 1047 unsigned int class = *p_class;
1237 unsigned int major_version;
1238 u16 tmp;
1239 unsigned long xfer_modes;
1240 unsigned int using_edd; 1048 unsigned int using_edd;
1241 struct ata_taskfile tf; 1049 struct ata_taskfile tf;
1242 unsigned int err_mask; 1050 unsigned int err_mask = 0;
1051 u16 *id;
1052 const char *reason;
1243 int rc; 1053 int rc;
1244 1054
1245 if (!ata_dev_present(dev)) { 1055 DPRINTK("ENTER, host %u, dev %u\n", ap->id, dev->devno);
1246 DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n",
1247 ap->id, device);
1248 return;
1249 }
1250 1056
1251 if (ap->flags & (ATA_FLAG_SRST | ATA_FLAG_SATA_RESET)) 1057 if (ap->ops->probe_reset ||
1058 ap->flags & (ATA_FLAG_SRST | ATA_FLAG_SATA_RESET))
1252 using_edd = 0; 1059 using_edd = 0;
1253 else 1060 else
1254 using_edd = 1; 1061 using_edd = 1;
1255 1062
1256 DPRINTK("ENTER, host %u, dev %u\n", ap->id, device); 1063 ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
1257
1258 assert (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ATAPI ||
1259 dev->class == ATA_DEV_NONE);
1260 1064
1261 ata_dev_select(ap, device, 1, 1); /* select device 0/1 */ 1065 id = kmalloc(sizeof(id[0]) * ATA_ID_WORDS, GFP_KERNEL);
1066 if (id == NULL) {
1067 rc = -ENOMEM;
1068 reason = "out of memory";
1069 goto err_out;
1070 }
1262 1071
1263retry: 1072 retry:
1264 ata_tf_init(ap, &tf, device); 1073 ata_tf_init(ap, &tf, dev->devno);
1265 1074
1266 if (dev->class == ATA_DEV_ATA) { 1075 switch (class) {
1076 case ATA_DEV_ATA:
1267 tf.command = ATA_CMD_ID_ATA; 1077 tf.command = ATA_CMD_ID_ATA;
1268 DPRINTK("do ATA identify\n"); 1078 break;
1269 } else { 1079 case ATA_DEV_ATAPI:
1270 tf.command = ATA_CMD_ID_ATAPI; 1080 tf.command = ATA_CMD_ID_ATAPI;
1271 DPRINTK("do ATAPI identify\n"); 1081 break;
1082 default:
1083 rc = -ENODEV;
1084 reason = "unsupported class";
1085 goto err_out;
1272 } 1086 }
1273 1087
1274 tf.protocol = ATA_PROT_PIO; 1088 tf.protocol = ATA_PROT_PIO;
1275 1089
1276 err_mask = ata_exec_internal(ap, dev, &tf, DMA_FROM_DEVICE, 1090 err_mask = ata_exec_internal(ap, dev, &tf, DMA_FROM_DEVICE,
1277 dev->id, sizeof(dev->id)); 1091 id, sizeof(id[0]) * ATA_ID_WORDS);
1278 1092
1279 if (err_mask) { 1093 if (err_mask) {
1094 rc = -EIO;
1095 reason = "I/O error";
1096
1280 if (err_mask & ~AC_ERR_DEV) 1097 if (err_mask & ~AC_ERR_DEV)
1281 goto err_out; 1098 goto err_out;
1282 1099
@@ -1291,180 +1108,223 @@ retry:
1291 * ATA software reset (SRST, the default) does not appear 1108 * ATA software reset (SRST, the default) does not appear
1292 * to have this problem. 1109 * to have this problem.
1293 */ 1110 */
1294 if ((using_edd) && (dev->class == ATA_DEV_ATA)) { 1111 if ((using_edd) && (class == ATA_DEV_ATA)) {
1295 u8 err = tf.feature; 1112 u8 err = tf.feature;
1296 if (err & ATA_ABORTED) { 1113 if (err & ATA_ABORTED) {
1297 dev->class = ATA_DEV_ATAPI; 1114 class = ATA_DEV_ATAPI;
1298 goto retry; 1115 goto retry;
1299 } 1116 }
1300 } 1117 }
1301 goto err_out; 1118 goto err_out;
1302 } 1119 }
1303 1120
1304 swap_buf_le16(dev->id, ATA_ID_WORDS); 1121 swap_buf_le16(id, ATA_ID_WORDS);
1122
1123 /* sanity check */
1124 if ((class == ATA_DEV_ATA) != ata_id_is_ata(id)) {
1125 rc = -EINVAL;
1126 reason = "device reports illegal type";
1127 goto err_out;
1128 }
1129
1130 if (post_reset && class == ATA_DEV_ATA) {
1131 /*
1132 * The exact sequence expected by certain pre-ATA4 drives is:
1133 * SRST RESET
1134 * IDENTIFY
1135 * INITIALIZE DEVICE PARAMETERS
1136 * anything else..
1137 * Some drives were very specific about that exact sequence.
1138 */
1139 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
1140 err_mask = ata_dev_init_params(ap, dev);
1141 if (err_mask) {
1142 rc = -EIO;
1143 reason = "INIT_DEV_PARAMS failed";
1144 goto err_out;
1145 }
1146
1147 /* current CHS translation info (id[53-58]) might be
1148 * changed. reread the identify device info.
1149 */
1150 post_reset = 0;
1151 goto retry;
1152 }
1153 }
1154
1155 *p_class = class;
1156 *p_id = id;
1157 return 0;
1158
1159 err_out:
1160 printk(KERN_WARNING "ata%u: dev %u failed to IDENTIFY (%s)\n",
1161 ap->id, dev->devno, reason);
1162 kfree(id);
1163 return rc;
1164}
1165
1166static inline u8 ata_dev_knobble(const struct ata_port *ap,
1167 struct ata_device *dev)
1168{
1169 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
1170}
1171
1172/**
1173 * ata_dev_configure - Configure the specified ATA/ATAPI device
1174 * @ap: Port on which target device resides
1175 * @dev: Target device to configure
1176 * @print_info: Enable device info printout
1177 *
1178 * Configure @dev according to @dev->id. Generic and low-level
1179 * driver specific fixups are also applied.
1180 *
1181 * LOCKING:
1182 * Kernel thread context (may sleep)
1183 *
1184 * RETURNS:
1185 * 0 on success, -errno otherwise
1186 */
1187static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
1188 int print_info)
1189{
1190 const u16 *id = dev->id;
1191 unsigned int xfer_mask;
1192 int i, rc;
1193
1194 if (!ata_dev_present(dev)) {
1195 DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n",
1196 ap->id, dev->devno);
1197 return 0;
1198 }
1199
1200 DPRINTK("ENTER, host %u, dev %u\n", ap->id, dev->devno);
1305 1201
1306 /* print device capabilities */ 1202 /* print device capabilities */
1307 printk(KERN_DEBUG "ata%u: dev %u cfg " 1203 if (print_info)
1308 "49:%04x 82:%04x 83:%04x 84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n", 1204 printk(KERN_DEBUG "ata%u: dev %u cfg 49:%04x 82:%04x 83:%04x "
1309 ap->id, device, dev->id[49], 1205 "84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n",
1310 dev->id[82], dev->id[83], dev->id[84], 1206 ap->id, dev->devno, id[49], id[82], id[83],
1311 dev->id[85], dev->id[86], dev->id[87], 1207 id[84], id[85], id[86], id[87], id[88]);
1312 dev->id[88]); 1208
1209 /* initialize to-be-configured parameters */
1210 dev->flags = 0;
1211 dev->max_sectors = 0;
1212 dev->cdb_len = 0;
1213 dev->n_sectors = 0;
1214 dev->cylinders = 0;
1215 dev->heads = 0;
1216 dev->sectors = 0;
1313 1217
1314 /* 1218 /*
1315 * common ATA, ATAPI feature tests 1219 * common ATA, ATAPI feature tests
1316 */ 1220 */
1317 1221
1318 /* we require DMA support (bits 8 of word 49) */ 1222 /* we require DMA support (bits 8 of word 49) */
1319 if (!ata_id_has_dma(dev->id)) { 1223 if (!ata_id_has_dma(id)) {
1320 printk(KERN_DEBUG "ata%u: no dma\n", ap->id); 1224 printk(KERN_DEBUG "ata%u: no dma\n", ap->id);
1225 rc = -EINVAL;
1321 goto err_out_nosup; 1226 goto err_out_nosup;
1322 } 1227 }
1323 1228
1324 /* quick-n-dirty find max transfer mode; for printk only */ 1229 /* find max transfer mode; for printk only */
1325 xfer_modes = dev->id[ATA_ID_UDMA_MODES]; 1230 xfer_mask = ata_id_xfermask(id);
1326 if (!xfer_modes)
1327 xfer_modes = (dev->id[ATA_ID_MWDMA_MODES]) << ATA_SHIFT_MWDMA;
1328 if (!xfer_modes)
1329 xfer_modes = ata_pio_modes(dev);
1330 1231
1331 ata_dump_id(dev); 1232 ata_dump_id(id);
1332 1233
1333 /* ATA-specific feature tests */ 1234 /* ATA-specific feature tests */
1334 if (dev->class == ATA_DEV_ATA) { 1235 if (dev->class == ATA_DEV_ATA) {
1335 if (!ata_id_is_ata(dev->id)) /* sanity check */ 1236 dev->n_sectors = ata_id_n_sectors(id);
1336 goto err_out_nosup;
1337 1237
1338 /* get major version */ 1238 if (ata_id_has_lba(id)) {
1339 tmp = dev->id[ATA_ID_MAJOR_VER]; 1239 const char *lba_desc;
1340 for (major_version = 14; major_version >= 1; major_version--)
1341 if (tmp & (1 << major_version))
1342 break;
1343 1240
1344 /* 1241 lba_desc = "LBA";
1345 * The exact sequence expected by certain pre-ATA4 drives is:
1346 * SRST RESET
1347 * IDENTIFY
1348 * INITIALIZE DEVICE PARAMETERS
1349 * anything else..
1350 * Some drives were very specific about that exact sequence.
1351 */
1352 if (major_version < 4 || (!ata_id_has_lba(dev->id))) {
1353 ata_dev_init_params(ap, dev);
1354
1355 /* current CHS translation info (id[53-58]) might be
1356 * changed. reread the identify device info.
1357 */
1358 ata_dev_reread_id(ap, dev);
1359 }
1360
1361 if (ata_id_has_lba(dev->id)) {
1362 dev->flags |= ATA_DFLAG_LBA; 1242 dev->flags |= ATA_DFLAG_LBA;
1363 1243 if (ata_id_has_lba48(id)) {
1364 if (ata_id_has_lba48(dev->id)) {
1365 dev->flags |= ATA_DFLAG_LBA48; 1244 dev->flags |= ATA_DFLAG_LBA48;
1366 dev->n_sectors = ata_id_u64(dev->id, 100); 1245 lba_desc = "LBA48";
1367 } else {
1368 dev->n_sectors = ata_id_u32(dev->id, 60);
1369 } 1246 }
1370 1247
1371 /* print device info to dmesg */ 1248 /* print device info to dmesg */
1372 printk(KERN_INFO "ata%u: dev %u ATA-%d, max %s, %Lu sectors:%s\n", 1249 if (print_info)
1373 ap->id, device, 1250 printk(KERN_INFO "ata%u: dev %u ATA-%d, "
1374 major_version, 1251 "max %s, %Lu sectors: %s\n",
1375 ata_mode_string(xfer_modes), 1252 ap->id, dev->devno,
1376 (unsigned long long)dev->n_sectors, 1253 ata_id_major_version(id),
1377 dev->flags & ATA_DFLAG_LBA48 ? " LBA48" : " LBA"); 1254 ata_mode_string(xfer_mask),
1378 } else { 1255 (unsigned long long)dev->n_sectors,
1256 lba_desc);
1257 } else {
1379 /* CHS */ 1258 /* CHS */
1380 1259
1381 /* Default translation */ 1260 /* Default translation */
1382 dev->cylinders = dev->id[1]; 1261 dev->cylinders = id[1];
1383 dev->heads = dev->id[3]; 1262 dev->heads = id[3];
1384 dev->sectors = dev->id[6]; 1263 dev->sectors = id[6];
1385 dev->n_sectors = dev->cylinders * dev->heads * dev->sectors;
1386 1264
1387 if (ata_id_current_chs_valid(dev->id)) { 1265 if (ata_id_current_chs_valid(id)) {
1388 /* Current CHS translation is valid. */ 1266 /* Current CHS translation is valid. */
1389 dev->cylinders = dev->id[54]; 1267 dev->cylinders = id[54];
1390 dev->heads = dev->id[55]; 1268 dev->heads = id[55];
1391 dev->sectors = dev->id[56]; 1269 dev->sectors = id[56];
1392
1393 dev->n_sectors = ata_id_u32(dev->id, 57);
1394 } 1270 }
1395 1271
1396 /* print device info to dmesg */ 1272 /* print device info to dmesg */
1397 printk(KERN_INFO "ata%u: dev %u ATA-%d, max %s, %Lu sectors: CHS %d/%d/%d\n", 1273 if (print_info)
1398 ap->id, device, 1274 printk(KERN_INFO "ata%u: dev %u ATA-%d, "
1399 major_version, 1275 "max %s, %Lu sectors: CHS %u/%u/%u\n",
1400 ata_mode_string(xfer_modes), 1276 ap->id, dev->devno,
1401 (unsigned long long)dev->n_sectors, 1277 ata_id_major_version(id),
1402 (int)dev->cylinders, (int)dev->heads, (int)dev->sectors); 1278 ata_mode_string(xfer_mask),
1403 1279 (unsigned long long)dev->n_sectors,
1280 dev->cylinders, dev->heads, dev->sectors);
1404 } 1281 }
1405 1282
1406 ap->host->max_cmd_len = 16; 1283 dev->cdb_len = 16;
1407 } 1284 }
1408 1285
1409 /* ATAPI-specific feature tests */ 1286 /* ATAPI-specific feature tests */
1410 else if (dev->class == ATA_DEV_ATAPI) { 1287 else if (dev->class == ATA_DEV_ATAPI) {
1411 if (ata_id_is_ata(dev->id)) /* sanity check */ 1288 rc = atapi_cdb_len(id);
1412 goto err_out_nosup;
1413
1414 rc = atapi_cdb_len(dev->id);
1415 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) { 1289 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
1416 printk(KERN_WARNING "ata%u: unsupported CDB len\n", ap->id); 1290 printk(KERN_WARNING "ata%u: unsupported CDB len\n", ap->id);
1291 rc = -EINVAL;
1417 goto err_out_nosup; 1292 goto err_out_nosup;
1418 } 1293 }
1419 ap->cdb_len = (unsigned int) rc; 1294 dev->cdb_len = (unsigned int) rc;
1420 ap->host->max_cmd_len = (unsigned char) ap->cdb_len;
1421 1295
1422 /* print device info to dmesg */ 1296 /* print device info to dmesg */
1423 printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n", 1297 if (print_info)
1424 ap->id, device, 1298 printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n",
1425 ata_mode_string(xfer_modes)); 1299 ap->id, dev->devno, ata_mode_string(xfer_mask));
1426 } 1300 }
1427 1301
1428 DPRINTK("EXIT, drv_stat = 0x%x\n", ata_chk_status(ap)); 1302 ap->host->max_cmd_len = 0;
1429 return; 1303 for (i = 0; i < ATA_MAX_DEVICES; i++)
1430 1304 ap->host->max_cmd_len = max_t(unsigned int,
1431err_out_nosup: 1305 ap->host->max_cmd_len,
1432 printk(KERN_WARNING "ata%u: dev %u not supported, ignoring\n", 1306 ap->device[i].cdb_len);
1433 ap->id, device);
1434err_out:
1435 dev->class++; /* converts ATA_DEV_xxx into ATA_DEV_xxx_UNSUP */
1436 DPRINTK("EXIT, err\n");
1437}
1438
1439
1440static inline u8 ata_dev_knobble(const struct ata_port *ap)
1441{
1442 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(ap->device->id)));
1443}
1444
1445/**
1446 * ata_dev_config - Run device specific handlers and check for
1447 * SATA->PATA bridges
1448 * @ap: Bus
1449 * @i: Device
1450 *
1451 * LOCKING:
1452 */
1453 1307
1454void ata_dev_config(struct ata_port *ap, unsigned int i)
1455{
1456 /* limit bridge transfers to udma5, 200 sectors */ 1308 /* limit bridge transfers to udma5, 200 sectors */
1457 if (ata_dev_knobble(ap)) { 1309 if (ata_dev_knobble(ap, dev)) {
1458 printk(KERN_INFO "ata%u(%u): applying bridge limits\n", 1310 if (print_info)
1459 ap->id, ap->device->devno); 1311 printk(KERN_INFO "ata%u(%u): applying bridge limits\n",
1312 ap->id, dev->devno);
1460 ap->udma_mask &= ATA_UDMA5; 1313 ap->udma_mask &= ATA_UDMA5;
1461 ap->host->max_sectors = ATA_MAX_SECTORS; 1314 dev->max_sectors = ATA_MAX_SECTORS;
1462 ap->host->hostt->max_sectors = ATA_MAX_SECTORS;
1463 ap->device[i].flags |= ATA_DFLAG_LOCK_SECTORS;
1464 } 1315 }
1465 1316
1466 if (ap->ops->dev_config) 1317 if (ap->ops->dev_config)
1467 ap->ops->dev_config(ap, &ap->device[i]); 1318 ap->ops->dev_config(ap, dev);
1319
1320 DPRINTK("EXIT, drv_stat = 0x%x\n", ata_chk_status(ap));
1321 return 0;
1322
1323err_out_nosup:
1324 printk(KERN_WARNING "ata%u: dev %u not supported, ignoring\n",
1325 ap->id, dev->devno);
1326 DPRINTK("EXIT, err\n");
1327 return rc;
1468} 1328}
1469 1329
1470/** 1330/**
@@ -1484,21 +1344,59 @@ void ata_dev_config(struct ata_port *ap, unsigned int i)
1484 1344
1485static int ata_bus_probe(struct ata_port *ap) 1345static int ata_bus_probe(struct ata_port *ap)
1486{ 1346{
1487 unsigned int i, found = 0; 1347 unsigned int classes[ATA_MAX_DEVICES];
1348 unsigned int i, rc, found = 0;
1488 1349
1489 ap->ops->phy_reset(ap); 1350 ata_port_probe(ap);
1490 if (ap->flags & ATA_FLAG_PORT_DISABLED) 1351
1491 goto err_out; 1352 /* reset and determine device classes */
1353 for (i = 0; i < ATA_MAX_DEVICES; i++)
1354 classes[i] = ATA_DEV_UNKNOWN;
1355
1356 if (ap->ops->probe_reset) {
1357 rc = ap->ops->probe_reset(ap, classes);
1358 if (rc) {
1359 printk("ata%u: reset failed (errno=%d)\n", ap->id, rc);
1360 return rc;
1361 }
1362 } else {
1363 ap->ops->phy_reset(ap);
1364
1365 if (!(ap->flags & ATA_FLAG_PORT_DISABLED))
1366 for (i = 0; i < ATA_MAX_DEVICES; i++)
1367 classes[i] = ap->device[i].class;
1368
1369 ata_port_probe(ap);
1370 }
1492 1371
1372 for (i = 0; i < ATA_MAX_DEVICES; i++)
1373 if (classes[i] == ATA_DEV_UNKNOWN)
1374 classes[i] = ATA_DEV_NONE;
1375
1376 /* read IDENTIFY page and configure devices */
1493 for (i = 0; i < ATA_MAX_DEVICES; i++) { 1377 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1494 ata_dev_identify(ap, i); 1378 struct ata_device *dev = &ap->device[i];
1495 if (ata_dev_present(&ap->device[i])) { 1379
1496 found = 1; 1380 dev->class = classes[i];
1497 ata_dev_config(ap,i); 1381
1382 if (!ata_dev_present(dev))
1383 continue;
1384
1385 WARN_ON(dev->id != NULL);
1386 if (ata_dev_read_id(ap, dev, &dev->class, 1, &dev->id)) {
1387 dev->class = ATA_DEV_NONE;
1388 continue;
1389 }
1390
1391 if (ata_dev_configure(ap, dev, 1)) {
1392 dev->class++; /* disable device */
1393 continue;
1498 } 1394 }
1395
1396 found = 1;
1499 } 1397 }
1500 1398
1501 if ((!found) || (ap->flags & ATA_FLAG_PORT_DISABLED)) 1399 if (!found)
1502 goto err_out_disable; 1400 goto err_out_disable;
1503 1401
1504 ata_set_mode(ap); 1402 ata_set_mode(ap);
@@ -1509,7 +1407,6 @@ static int ata_bus_probe(struct ata_port *ap)
1509 1407
1510err_out_disable: 1408err_out_disable:
1511 ap->ops->port_disable(ap); 1409 ap->ops->port_disable(ap);
1512err_out:
1513 return -1; 1410 return -1;
1514} 1411}
1515 1412
@@ -1530,6 +1427,41 @@ void ata_port_probe(struct ata_port *ap)
1530} 1427}
1531 1428
1532/** 1429/**
1430 * sata_print_link_status - Print SATA link status
1431 * @ap: SATA port to printk link status about
1432 *
1433 * This function prints link speed and status of a SATA link.
1434 *
1435 * LOCKING:
1436 * None.
1437 */
1438static void sata_print_link_status(struct ata_port *ap)
1439{
1440 u32 sstatus, tmp;
1441 const char *speed;
1442
1443 if (!ap->ops->scr_read)
1444 return;
1445
1446 sstatus = scr_read(ap, SCR_STATUS);
1447
1448 if (sata_dev_present(ap)) {
1449 tmp = (sstatus >> 4) & 0xf;
1450 if (tmp & (1 << 0))
1451 speed = "1.5";
1452 else if (tmp & (1 << 1))
1453 speed = "3.0";
1454 else
1455 speed = "<unknown>";
1456 printk(KERN_INFO "ata%u: SATA link up %s Gbps (SStatus %X)\n",
1457 ap->id, speed, sstatus);
1458 } else {
1459 printk(KERN_INFO "ata%u: SATA link down (SStatus %X)\n",
1460 ap->id, sstatus);
1461 }
1462}
1463
1464/**
1533 * __sata_phy_reset - Wake/reset a low-level SATA PHY 1465 * __sata_phy_reset - Wake/reset a low-level SATA PHY
1534 * @ap: SATA port associated with target SATA PHY. 1466 * @ap: SATA port associated with target SATA PHY.
1535 * 1467 *
@@ -1563,27 +1495,14 @@ void __sata_phy_reset(struct ata_port *ap)
1563 break; 1495 break;
1564 } while (time_before(jiffies, timeout)); 1496 } while (time_before(jiffies, timeout));
1565 1497
1566 /* TODO: phy layer with polling, timeouts, etc. */ 1498 /* print link status */
1567 sstatus = scr_read(ap, SCR_STATUS); 1499 sata_print_link_status(ap);
1568 if (sata_dev_present(ap)) {
1569 const char *speed;
1570 u32 tmp;
1571 1500
1572 tmp = (sstatus >> 4) & 0xf; 1501 /* TODO: phy layer with polling, timeouts, etc. */
1573 if (tmp & (1 << 0)) 1502 if (sata_dev_present(ap))
1574 speed = "1.5";
1575 else if (tmp & (1 << 1))
1576 speed = "3.0";
1577 else
1578 speed = "<unknown>";
1579 printk(KERN_INFO "ata%u: SATA link up %s Gbps (SStatus %X)\n",
1580 ap->id, speed, sstatus);
1581 ata_port_probe(ap); 1503 ata_port_probe(ap);
1582 } else { 1504 else
1583 printk(KERN_INFO "ata%u: SATA link down (SStatus %X)\n",
1584 ap->id, sstatus);
1585 ata_port_disable(ap); 1505 ata_port_disable(ap);
1586 }
1587 1506
1588 if (ap->flags & ATA_FLAG_PORT_DISABLED) 1507 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1589 return; 1508 return;
@@ -1756,9 +1675,9 @@ int ata_timing_compute(struct ata_device *adev, unsigned short speed,
1756 ata_timing_quantize(t, t, T, UT); 1675 ata_timing_quantize(t, t, T, UT);
1757 1676
1758 /* 1677 /*
1759 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY, S.M.A.R.T 1678 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
1760 * and some other commands. We have to ensure that the DMA cycle timing is 1679 * S.M.A.R.T and some other commands. We have to ensure that the
1761 * slower/equal than the fastest PIO timing. 1680 * DMA cycle timing is slower/equal than the fastest PIO timing.
1762 */ 1681 */
1763 1682
1764 if (speed > XFER_PIO_4) { 1683 if (speed > XFER_PIO_4) {
@@ -1767,7 +1686,7 @@ int ata_timing_compute(struct ata_device *adev, unsigned short speed,
1767 } 1686 }
1768 1687
1769 /* 1688 /*
1770 * Lenghten active & recovery time so that cycle time is correct. 1689 * Lengthen active & recovery time so that cycle time is correct.
1771 */ 1690 */
1772 1691
1773 if (t->act8b + t->rec8b < t->cyc8b) { 1692 if (t->act8b + t->rec8b < t->cyc8b) {
@@ -1783,31 +1702,8 @@ int ata_timing_compute(struct ata_device *adev, unsigned short speed,
1783 return 0; 1702 return 0;
1784} 1703}
1785 1704
1786static const struct {
1787 unsigned int shift;
1788 u8 base;
1789} xfer_mode_classes[] = {
1790 { ATA_SHIFT_UDMA, XFER_UDMA_0 },
1791 { ATA_SHIFT_MWDMA, XFER_MW_DMA_0 },
1792 { ATA_SHIFT_PIO, XFER_PIO_0 },
1793};
1794
1795static u8 base_from_shift(unsigned int shift)
1796{
1797 int i;
1798
1799 for (i = 0; i < ARRAY_SIZE(xfer_mode_classes); i++)
1800 if (xfer_mode_classes[i].shift == shift)
1801 return xfer_mode_classes[i].base;
1802
1803 return 0xff;
1804}
1805
1806static void ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev) 1705static void ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev)
1807{ 1706{
1808 int ofs, idx;
1809 u8 base;
1810
1811 if (!ata_dev_present(dev) || (ap->flags & ATA_FLAG_PORT_DISABLED)) 1707 if (!ata_dev_present(dev) || (ap->flags & ATA_FLAG_PORT_DISABLED))
1812 return; 1708 return;
1813 1709
@@ -1816,65 +1712,58 @@ static void ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev)
1816 1712
1817 ata_dev_set_xfermode(ap, dev); 1713 ata_dev_set_xfermode(ap, dev);
1818 1714
1819 base = base_from_shift(dev->xfer_shift); 1715 if (ata_dev_revalidate(ap, dev, 0)) {
1820 ofs = dev->xfer_mode - base; 1716 printk(KERN_ERR "ata%u: failed to revalidate after set "
1821 idx = ofs + dev->xfer_shift; 1717 "xfermode, disabled\n", ap->id);
1822 WARN_ON(idx >= ARRAY_SIZE(xfer_mode_str)); 1718 ata_port_disable(ap);
1719 }
1823 1720
1824 DPRINTK("idx=%d xfer_shift=%u, xfer_mode=0x%x, base=0x%x, offset=%d\n", 1721 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
1825 idx, dev->xfer_shift, (int)dev->xfer_mode, (int)base, ofs); 1722 dev->xfer_shift, (int)dev->xfer_mode);
1826 1723
1827 printk(KERN_INFO "ata%u: dev %u configured for %s\n", 1724 printk(KERN_INFO "ata%u: dev %u configured for %s\n",
1828 ap->id, dev->devno, xfer_mode_str[idx]); 1725 ap->id, dev->devno,
1726 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
1829} 1727}
1830 1728
1831static int ata_host_set_pio(struct ata_port *ap) 1729static int ata_host_set_pio(struct ata_port *ap)
1832{ 1730{
1833 unsigned int mask; 1731 int i;
1834 int x, i;
1835 u8 base, xfer_mode;
1836
1837 mask = ata_get_mode_mask(ap, ATA_SHIFT_PIO);
1838 x = fgb(mask);
1839 if (x < 0) {
1840 printk(KERN_WARNING "ata%u: no PIO support\n", ap->id);
1841 return -1;
1842 }
1843
1844 base = base_from_shift(ATA_SHIFT_PIO);
1845 xfer_mode = base + x;
1846
1847 DPRINTK("base 0x%x xfer_mode 0x%x mask 0x%x x %d\n",
1848 (int)base, (int)xfer_mode, mask, x);
1849 1732
1850 for (i = 0; i < ATA_MAX_DEVICES; i++) { 1733 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1851 struct ata_device *dev = &ap->device[i]; 1734 struct ata_device *dev = &ap->device[i];
1852 if (ata_dev_present(dev)) { 1735
1853 dev->pio_mode = xfer_mode; 1736 if (!ata_dev_present(dev))
1854 dev->xfer_mode = xfer_mode; 1737 continue;
1855 dev->xfer_shift = ATA_SHIFT_PIO; 1738
1856 if (ap->ops->set_piomode) 1739 if (!dev->pio_mode) {
1857 ap->ops->set_piomode(ap, dev); 1740 printk(KERN_WARNING "ata%u: no PIO support\n", ap->id);
1741 return -1;
1858 } 1742 }
1743
1744 dev->xfer_mode = dev->pio_mode;
1745 dev->xfer_shift = ATA_SHIFT_PIO;
1746 if (ap->ops->set_piomode)
1747 ap->ops->set_piomode(ap, dev);
1859 } 1748 }
1860 1749
1861 return 0; 1750 return 0;
1862} 1751}
1863 1752
1864static void ata_host_set_dma(struct ata_port *ap, u8 xfer_mode, 1753static void ata_host_set_dma(struct ata_port *ap)
1865 unsigned int xfer_shift)
1866{ 1754{
1867 int i; 1755 int i;
1868 1756
1869 for (i = 0; i < ATA_MAX_DEVICES; i++) { 1757 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1870 struct ata_device *dev = &ap->device[i]; 1758 struct ata_device *dev = &ap->device[i];
1871 if (ata_dev_present(dev)) { 1759
1872 dev->dma_mode = xfer_mode; 1760 if (!ata_dev_present(dev) || !dev->dma_mode)
1873 dev->xfer_mode = xfer_mode; 1761 continue;
1874 dev->xfer_shift = xfer_shift; 1762
1875 if (ap->ops->set_dmamode) 1763 dev->xfer_mode = dev->dma_mode;
1876 ap->ops->set_dmamode(ap, dev); 1764 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
1877 } 1765 if (ap->ops->set_dmamode)
1766 ap->ops->set_dmamode(ap, dev);
1878 } 1767 }
1879} 1768}
1880 1769
@@ -1886,32 +1775,37 @@ static void ata_host_set_dma(struct ata_port *ap, u8 xfer_mode,
1886 * 1775 *
1887 * LOCKING: 1776 * LOCKING:
1888 * PCI/etc. bus probe sem. 1777 * PCI/etc. bus probe sem.
1889 *
1890 */ 1778 */
1891static void ata_set_mode(struct ata_port *ap) 1779static void ata_set_mode(struct ata_port *ap)
1892{ 1780{
1893 unsigned int xfer_shift; 1781 int i, rc;
1894 u8 xfer_mode;
1895 int rc;
1896 1782
1897 /* step 1: always set host PIO timings */ 1783 /* step 1: calculate xfer_mask */
1898 rc = ata_host_set_pio(ap); 1784 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1899 if (rc) 1785 struct ata_device *dev = &ap->device[i];
1900 goto err_out; 1786 unsigned int xfer_mask;
1901 1787
1902 /* step 2: choose the best data xfer mode */ 1788 if (!ata_dev_present(dev))
1903 xfer_mode = xfer_shift = 0; 1789 continue;
1904 rc = ata_choose_xfer_mode(ap, &xfer_mode, &xfer_shift); 1790
1791 xfer_mask = ata_dev_xfermask(ap, dev);
1792
1793 dev->pio_mode = ata_xfer_mask2mode(xfer_mask & ATA_MASK_PIO);
1794 dev->dma_mode = ata_xfer_mask2mode(xfer_mask & (ATA_MASK_MWDMA |
1795 ATA_MASK_UDMA));
1796 }
1797
1798 /* step 2: always set host PIO timings */
1799 rc = ata_host_set_pio(ap);
1905 if (rc) 1800 if (rc)
1906 goto err_out; 1801 goto err_out;
1907 1802
1908 /* step 3: if that xfer mode isn't PIO, set host DMA timings */ 1803 /* step 3: set host DMA timings */
1909 if (xfer_shift != ATA_SHIFT_PIO) 1804 ata_host_set_dma(ap);
1910 ata_host_set_dma(ap, xfer_mode, xfer_shift);
1911 1805
1912 /* step 4: update devices' xfer mode */ 1806 /* step 4: update devices' xfer mode */
1913 ata_dev_set_mode(ap, &ap->device[0]); 1807 for (i = 0; i < ATA_MAX_DEVICES; i++)
1914 ata_dev_set_mode(ap, &ap->device[1]); 1808 ata_dev_set_mode(ap, &ap->device[i]);
1915 1809
1916 if (ap->flags & ATA_FLAG_PORT_DISABLED) 1810 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1917 return; 1811 return;
@@ -1926,6 +1820,26 @@ err_out:
1926} 1820}
1927 1821
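Step 1 above reduces each device's packed xfer_mask to a single XFER_* code by taking the highest set bit in each range. A minimal sketch of that mapping, assuming the packed PIO/MWDMA/UDMA layout used throughout this patch; the helper name is hypothetical, and the table-driven ata_xfer_mask2mode() is expected to behave equivalently:

#include <linux/bitops.h>	/* fls() */

/* Pick the highest-priority mode available in a packed xfer_mask:
 * UDMA beats MWDMA beats PIO, and within a range the highest set
 * bit wins.  Returns 0xff when the mask is empty.
 */
static u8 xfer_mask2mode_sketch(unsigned int xfer_mask)
{
        unsigned int udma  = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
        unsigned int mwdma = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
        unsigned int pio   = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;

        if (udma)
                return XFER_UDMA_0 + fls(udma) - 1;
        if (mwdma)
                return XFER_MW_DMA_0 + fls(mwdma) - 1;
        if (pio)
                return XFER_PIO_0 + fls(pio) - 1;
        return 0xff;
}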
1928/** 1822/**
1823 * ata_tf_to_host - issue ATA taskfile to host controller
1824 * @ap: port to which command is being issued
1825 * @tf: ATA taskfile register set
1826 *
1827 * Issues ATA taskfile register set to ATA host controller,
1828 * with proper synchronization with interrupt handler and
1829 * other threads.
1830 *
1831 * LOCKING:
1832 * spin_lock_irqsave(host_set lock)
1833 */
1834
1835static inline void ata_tf_to_host(struct ata_port *ap,
1836 const struct ata_taskfile *tf)
1837{
1838 ap->ops->tf_load(ap, tf);
1839 ap->ops->exec_command(ap, tf);
1840}
1841
1842/**
1929 * ata_busy_sleep - sleep until BSY clears, or timeout 1843 * ata_busy_sleep - sleep until BSY clears, or timeout
1930 * @ap: port containing status register to be polled 1844 * @ap: port containing status register to be polled
1931 * @tmout_pat: impatience timeout 1845 * @tmout_pat: impatience timeout
@@ -1935,12 +1849,10 @@ err_out:
1935 * or a timeout occurs. 1849 * or a timeout occurs.
1936 * 1850 *
1937 * LOCKING: None. 1851 * LOCKING: None.
1938 *
1939 */ 1852 */
1940 1853
1941static unsigned int ata_busy_sleep (struct ata_port *ap, 1854unsigned int ata_busy_sleep (struct ata_port *ap,
1942 unsigned long tmout_pat, 1855 unsigned long tmout_pat, unsigned long tmout)
1943 unsigned long tmout)
1944{ 1856{
1945 unsigned long timer_start, timeout; 1857 unsigned long timer_start, timeout;
1946 u8 status; 1858 u8 status;
@@ -2159,9 +2071,9 @@ void ata_bus_reset(struct ata_port *ap)
2159 /* 2071 /*
2160 * determine by signature whether we have ATA or ATAPI devices 2072 * determine by signature whether we have ATA or ATAPI devices
2161 */ 2073 */
2162 err = ata_dev_try_classify(ap, 0); 2074 ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
2163 if ((slave_possible) && (err != 0x81)) 2075 if ((slave_possible) && (err != 0x81))
2164 ata_dev_try_classify(ap, 1); 2076 ap->device[1].class = ata_dev_try_classify(ap, 1, &err);
2165 2077
2166 /* re-enable interrupts */ 2078 /* re-enable interrupts */
2167 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */ 2079 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
@@ -2196,11 +2108,446 @@ err_out:
2196 DPRINTK("EXIT\n"); 2108 DPRINTK("EXIT\n");
2197} 2109}
2198 2110
2199static void ata_pr_blacklisted(const struct ata_port *ap, 2111static int sata_phy_resume(struct ata_port *ap)
2200 const struct ata_device *dev)
2201{ 2112{
2202 printk(KERN_WARNING "ata%u: dev %u is on DMA blacklist, disabling DMA\n", 2113 unsigned long timeout = jiffies + (HZ * 5);
2203 ap->id, dev->devno); 2114 u32 sstatus;
2115
2116 scr_write_flush(ap, SCR_CONTROL, 0x300);
2117
2118 /* Wait for phy to become ready, if necessary. */
2119 do {
2120 msleep(200);
2121 sstatus = scr_read(ap, SCR_STATUS);
2122 if ((sstatus & 0xf) != 1)
2123 return 0;
2124 } while (time_before(jiffies, timeout));
2125
2126 return -1;
2127}
2128
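sata_phy_resume() keys off the DET field (low nibble) of SStatus, and the sata_dev_present() test used throughout does the same. A short sketch of the encoding assumed here, per the SATA specification; the helper name is illustrative only:

/* SStatus.DET values assumed above:
 *   0x0 - no device detected, phy offline
 *   0x1 - device presence detected, no phy communication yet
 *   0x3 - device present and phy communication established
 *   0x4 - phy in offline (disabled) mode
 */
static inline int sata_link_established_sketch(u32 sstatus)
{
        return (sstatus & 0xf) == 0x3;
}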
2129/**
2130 * ata_std_probeinit - initialize probing
2131 * @ap: port to be probed
2132 *
2133 * @ap is about to be probed. Initialize it. This function is
2134 * to be used as standard callback for ata_drive_probe_reset().
2135 *
2136 * NOTE!!! Do not use this function as probeinit if a low level
2137 * driver implements only hardreset. Just pass NULL as probeinit
2138 * in that case. Using this function is probably okay but doing
2139 * so makes reset sequence different from the original
2140 * ->phy_reset implementation and Jeff nervous. :-P
2141 */
2142void ata_std_probeinit(struct ata_port *ap)
2143{
2144 if (ap->flags & ATA_FLAG_SATA && ap->ops->scr_read) {
2145 sata_phy_resume(ap);
2146 if (sata_dev_present(ap))
2147 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2148 }
2149}
2150
2151/**
2152 * ata_std_softreset - reset host port via ATA SRST
2153 * @ap: port to reset
2154 * @verbose: fail verbosely
2155 * @classes: resulting classes of attached devices
2156 *
2157 * Reset host port using ATA SRST. This function is to be used
2158 * as standard callback for ata_drive_*_reset() functions.
2159 *
2160 * LOCKING:
2161 * Kernel thread context (may sleep)
2162 *
2163 * RETURNS:
2164 * 0 on success, -errno otherwise.
2165 */
2166int ata_std_softreset(struct ata_port *ap, int verbose, unsigned int *classes)
2167{
2168 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2169 unsigned int devmask = 0, err_mask;
2170 u8 err;
2171
2172 DPRINTK("ENTER\n");
2173
2174 if (ap->ops->scr_read && !sata_dev_present(ap)) {
2175 classes[0] = ATA_DEV_NONE;
2176 goto out;
2177 }
2178
2179 /* determine if device 0/1 are present */
2180 if (ata_devchk(ap, 0))
2181 devmask |= (1 << 0);
2182 if (slave_possible && ata_devchk(ap, 1))
2183 devmask |= (1 << 1);
2184
2185 /* select device 0 again */
2186 ap->ops->dev_select(ap, 0);
2187
2188 /* issue bus reset */
2189 DPRINTK("about to softreset, devmask=%x\n", devmask);
2190 err_mask = ata_bus_softreset(ap, devmask);
2191 if (err_mask) {
2192 if (verbose)
2193 printk(KERN_ERR "ata%u: SRST failed (err_mask=0x%x)\n",
2194 ap->id, err_mask);
2195 else
2196 DPRINTK("EXIT, softreset failed (err_mask=0x%x)\n",
2197 err_mask);
2198 return -EIO;
2199 }
2200
2201 /* determine by signature whether we have ATA or ATAPI devices */
2202 classes[0] = ata_dev_try_classify(ap, 0, &err);
2203 if (slave_possible && err != 0x81)
2204 classes[1] = ata_dev_try_classify(ap, 1, &err);
2205
2206 out:
2207 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
2208 return 0;
2209}
2210
2211/**
2212 * sata_std_hardreset - reset host port via SATA phy reset
2213 * @ap: port to reset
2214 * @verbose: fail verbosely
2215 * @class: resulting class of attached device
2216 *
2217 * SATA phy-reset host port using DET bits of SControl register.
2218 * This function is to be used as standard callback for
2219 * ata_drive_*_reset().
2220 *
2221 * LOCKING:
2222 * Kernel thread context (may sleep)
2223 *
2224 * RETURNS:
2225 * 0 on success, -errno otherwise.
2226 */
2227int sata_std_hardreset(struct ata_port *ap, int verbose, unsigned int *class)
2228{
2229 DPRINTK("ENTER\n");
2230
2231 /* Issue phy wake/reset */
2232 scr_write_flush(ap, SCR_CONTROL, 0x301);
2233
2234 /*
2235 * Couldn't find anything in SATA I/II specs, but AHCI-1.1
2236 * 10.4.2 says at least 1 ms.
2237 */
2238 msleep(1);
2239
2240 /* Bring phy back */
2241 sata_phy_resume(ap);
2242
2243 /* TODO: phy layer with polling, timeouts, etc. */
2244 if (!sata_dev_present(ap)) {
2245 *class = ATA_DEV_NONE;
2246 DPRINTK("EXIT, link offline\n");
2247 return 0;
2248 }
2249
2250 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
2251 if (verbose)
2252 printk(KERN_ERR "ata%u: COMRESET failed "
2253 "(device not ready)\n", ap->id);
2254 else
2255 DPRINTK("EXIT, device not ready\n");
2256 return -EIO;
2257 }
2258
2259 ap->ops->dev_select(ap, 0); /* probably unnecessary */
2260
2261 *class = ata_dev_try_classify(ap, 0, NULL);
2262
2263 DPRINTK("EXIT, class=%u\n", *class);
2264 return 0;
2265}
2266
2267/**
2268 * ata_std_postreset - standard postreset callback
2269 * @ap: the target ata_port
2270 * @classes: classes of attached devices
2271 *
2272 * This function is invoked after a successful reset. Note that
2273 * the device might have been reset more than once using
2274 * different reset methods before postreset is invoked.
2275 *
2276 * This function is to be used as standard callback for
2277 * ata_drive_*_reset().
2278 *
2279 * LOCKING:
2280 * Kernel thread context (may sleep)
2281 */
2282void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
2283{
2284 DPRINTK("ENTER\n");
2285
2286 /* set cable type if it isn't already set */
2287 if (ap->cbl == ATA_CBL_NONE && ap->flags & ATA_FLAG_SATA)
2288 ap->cbl = ATA_CBL_SATA;
2289
2290 /* print link status */
2291 if (ap->cbl == ATA_CBL_SATA)
2292 sata_print_link_status(ap);
2293
2294 /* re-enable interrupts */
2295 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
2296 ata_irq_on(ap);
2297
2298 /* is double-select really necessary? */
2299 if (classes[0] != ATA_DEV_NONE)
2300 ap->ops->dev_select(ap, 1);
2301 if (classes[1] != ATA_DEV_NONE)
2302 ap->ops->dev_select(ap, 0);
2303
2304 /* bail out if no device is present */
2305 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2306 DPRINTK("EXIT, no device\n");
2307 return;
2308 }
2309
2310 /* set up device control */
2311 if (ap->ioaddr.ctl_addr) {
2312 if (ap->flags & ATA_FLAG_MMIO)
2313 writeb(ap->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
2314 else
2315 outb(ap->ctl, ap->ioaddr.ctl_addr);
2316 }
2317
2318 DPRINTK("EXIT\n");
2319}
2320
2321/**
2322 * ata_std_probe_reset - standard probe reset method
2323 * @ap: port to perform probe-reset
2324 * @classes: resulting classes of attached devices
2325 *
2326 * The stock off-the-shelf ->probe_reset method.
2327 *
2328 * LOCKING:
2329 * Kernel thread context (may sleep)
2330 *
2331 * RETURNS:
2332 * 0 on success, -errno otherwise.
2333 */
2334int ata_std_probe_reset(struct ata_port *ap, unsigned int *classes)
2335{
2336 ata_reset_fn_t hardreset;
2337
2338 hardreset = NULL;
2339 if (ap->flags & ATA_FLAG_SATA && ap->ops->scr_read)
2340 hardreset = sata_std_hardreset;
2341
2342 return ata_drive_probe_reset(ap, ata_std_probeinit,
2343 ata_std_softreset, hardreset,
2344 ata_std_postreset, classes);
2345}
2346
2347static int do_probe_reset(struct ata_port *ap, ata_reset_fn_t reset,
2348 ata_postreset_fn_t postreset,
2349 unsigned int *classes)
2350{
2351 int i, rc;
2352
2353 for (i = 0; i < ATA_MAX_DEVICES; i++)
2354 classes[i] = ATA_DEV_UNKNOWN;
2355
2356 rc = reset(ap, 0, classes);
2357 if (rc)
2358 return rc;
2359
2360 /* If any class isn't ATA_DEV_UNKNOWN, consider classification
2361 * complete and convert all ATA_DEV_UNKNOWN to
2362 * ATA_DEV_NONE.
2363 */
2364 for (i = 0; i < ATA_MAX_DEVICES; i++)
2365 if (classes[i] != ATA_DEV_UNKNOWN)
2366 break;
2367
2368 if (i < ATA_MAX_DEVICES)
2369 for (i = 0; i < ATA_MAX_DEVICES; i++)
2370 if (classes[i] == ATA_DEV_UNKNOWN)
2371 classes[i] = ATA_DEV_NONE;
2372
2373 if (postreset)
2374 postreset(ap, classes);
2375
2376 return classes[0] != ATA_DEV_UNKNOWN ? 0 : -ENODEV;
2377}
2378
2379/**
2380 * ata_drive_probe_reset - Perform probe reset with given methods
2381 * @ap: port to reset
2382 * @probeinit: probeinit method (can be NULL)
2383 * @softreset: softreset method (can be NULL)
2384 * @hardreset: hardreset method (can be NULL)
2385 * @postreset: postreset method (can be NULL)
2386 * @classes: resulting classes of attached devices
2387 *
2388 * Reset the specified port and classify attached devices using
2389 * given methods. This function prefers softreset but tries all
2390 * possible reset sequences to reset and classify devices. This
2391 * function is intended to be used for constructing ->probe_reset
2392 * callback by low level drivers.
2393 *
2394 * Reset methods should follow the following rules.
2395 *
2396 * - Return 0 on success, -errno on failure.
2397 * - If classification is supported, fill classes[] with
2398 * recognized class codes.
2399 * - If classification is not supported, leave classes[] alone.
2400 * - If verbose is non-zero, print error message on failure;
2401 * otherwise, shut up.
2402 *
2403 * LOCKING:
2404 * Kernel thread context (may sleep)
2405 *
2406 * RETURNS:
2407 * 0 on success, -EINVAL if no reset method is available, -ENODEV
2408 * if classification fails, and any error code from reset
2409 * methods.
2410 */
2411int ata_drive_probe_reset(struct ata_port *ap, ata_probeinit_fn_t probeinit,
2412 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
2413 ata_postreset_fn_t postreset, unsigned int *classes)
2414{
2415 int rc = -EINVAL;
2416
2417 if (probeinit)
2418 probeinit(ap);
2419
2420 if (softreset) {
2421 rc = do_probe_reset(ap, softreset, postreset, classes);
2422 if (rc == 0)
2423 return 0;
2424 }
2425
2426 if (!hardreset)
2427 return rc;
2428
2429 rc = do_probe_reset(ap, hardreset, postreset, classes);
2430 if (rc == 0 || rc != -ENODEV)
2431 return rc;
2432
2433 if (softreset)
2434 rc = do_probe_reset(ap, softreset, postreset, classes);
2435
2436 return rc;
2437}
2438
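For a low-level driver, a minimal sketch of composing these callbacks into its own ->probe_reset; the driver name is hypothetical, and ata_std_probe_reset() above does exactly this for standard SATA ports:

/* Hypothetical LLDD glue: prefer SRST, fall back to COMRESET when
 * SCR registers are available.  Everything except the standard
 * helpers is an illustrative name.
 */
static int foo_probe_reset(struct ata_port *ap, unsigned int *classes)
{
        ata_reset_fn_t hardreset = NULL;

        if (ap->flags & ATA_FLAG_SATA && ap->ops->scr_read)
                hardreset = sata_std_hardreset;

        return ata_drive_probe_reset(ap, ata_std_probeinit,
                                     ata_std_softreset, hardreset,
                                     ata_std_postreset, classes);
}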
2439/**
2440 * ata_dev_same_device - Determine whether new ID matches configured device
2441 * @ap: port on which the device to compare against resides
2442 * @dev: device to compare against
2443 * @new_class: class of the new device
2444 * @new_id: IDENTIFY page of the new device
2445 *
2446 * Compare @new_class and @new_id against @dev and determine
2447 * whether @dev is the device indicated by @new_class and
2448 * @new_id.
2449 *
2450 * LOCKING:
2451 * None.
2452 *
2453 * RETURNS:
2454 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
2455 */
2456static int ata_dev_same_device(struct ata_port *ap, struct ata_device *dev,
2457 unsigned int new_class, const u16 *new_id)
2458{
2459 const u16 *old_id = dev->id;
2460 unsigned char model[2][41], serial[2][21];
2461 u64 new_n_sectors;
2462
2463 if (dev->class != new_class) {
2464 printk(KERN_INFO
2465 "ata%u: dev %u class mismatch %d != %d\n",
2466 ap->id, dev->devno, dev->class, new_class);
2467 return 0;
2468 }
2469
2470 ata_id_c_string(old_id, model[0], ATA_ID_PROD_OFS, sizeof(model[0]));
2471 ata_id_c_string(new_id, model[1], ATA_ID_PROD_OFS, sizeof(model[1]));
2472 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO_OFS, sizeof(serial[0]));
2473 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO_OFS, sizeof(serial[1]));
2474 new_n_sectors = ata_id_n_sectors(new_id);
2475
2476 if (strcmp(model[0], model[1])) {
2477 printk(KERN_INFO
2478 "ata%u: dev %u model number mismatch '%s' != '%s'\n",
2479 ap->id, dev->devno, model[0], model[1]);
2480 return 0;
2481 }
2482
2483 if (strcmp(serial[0], serial[1])) {
2484 printk(KERN_INFO
2485 "ata%u: dev %u serial number mismatch '%s' != '%s'\n",
2486 ap->id, dev->devno, serial[0], serial[1]);
2487 return 0;
2488 }
2489
2490 if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) {
2491 printk(KERN_INFO
2492 "ata%u: dev %u n_sectors mismatch %llu != %llu\n",
2493 ap->id, dev->devno, (unsigned long long)dev->n_sectors,
2494 (unsigned long long)new_n_sectors);
2495 return 0;
2496 }
2497
2498 return 1;
2499}
2500
2501/**
2502 * ata_dev_revalidate - Revalidate ATA device
2503 * @ap: port on which the device to revalidate resides
2504 * @dev: device to revalidate
2505 * @post_reset: is this revalidation after reset?
2506 *
2507 * Re-read IDENTIFY page and make sure @dev is still attached to
2508 * the port.
2509 *
2510 * LOCKING:
2511 * Kernel thread context (may sleep)
2512 *
2513 * RETURNS:
2514 * 0 on success, negative errno otherwise
2515 */
2516int ata_dev_revalidate(struct ata_port *ap, struct ata_device *dev,
2517 int post_reset)
2518{
2519 unsigned int class;
2520 u16 *id;
2521 int rc;
2522
2523 if (!ata_dev_present(dev))
2524 return -ENODEV;
2525
2526 class = dev->class;
2527 id = NULL;
2528
2529 /* allocate & read ID data */
2530 rc = ata_dev_read_id(ap, dev, &class, post_reset, &id);
2531 if (rc)
2532 goto fail;
2533
2534 /* is the device still there? */
2535 if (!ata_dev_same_device(ap, dev, class, id)) {
2536 rc = -ENODEV;
2537 goto fail;
2538 }
2539
2540 kfree(dev->id);
2541 dev->id = id;
2542
2543 /* configure device according to the new ID */
2544 return ata_dev_configure(ap, dev, 0);
2545
2546 fail:
2547 printk(KERN_ERR "ata%u: dev %u revalidation failed (errno=%d)\n",
2548 ap->id, dev->devno, rc);
2549 kfree(id);
2550 return rc;
2204} 2551}
2205 2552
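A hedged usage sketch of revalidation: ata_dev_set_mode() above follows exactly this pattern, re-reading IDENTIFY after SET FEATURES - XFER MODE and disabling the port when the device no longer matches; the wrapper name is illustrative:

static void set_xfermode_sketch(struct ata_port *ap, struct ata_device *dev)
{
        ata_dev_set_xfermode(ap, dev);

        /* the device may renegotiate or even be swapped behind a
         * bridge; re-read IDENTIFY and bail out on mismatch
         */
        if (ata_dev_revalidate(ap, dev, 0))
                ata_port_disable(ap);
}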
2206static const char * const ata_dma_blacklist [] = { 2553static const char * const ata_dma_blacklist [] = {
@@ -2237,151 +2584,57 @@ static const char * const ata_dma_blacklist [] = {
2237 2584
2238static int ata_dma_blacklisted(const struct ata_device *dev) 2585static int ata_dma_blacklisted(const struct ata_device *dev)
2239{ 2586{
2240 unsigned char model_num[40]; 2587 unsigned char model_num[41];
2241 char *s;
2242 unsigned int len;
2243 int i; 2588 int i;
2244 2589
2245 ata_dev_id_string(dev->id, model_num, ATA_ID_PROD_OFS, 2590 ata_id_c_string(dev->id, model_num, ATA_ID_PROD_OFS, sizeof(model_num));
2246 sizeof(model_num));
2247 s = &model_num[0];
2248 len = strnlen(s, sizeof(model_num));
2249
2250 /* ATAPI specifies that empty space is blank-filled; remove blanks */
2251 while ((len > 0) && (s[len - 1] == ' ')) {
2252 len--;
2253 s[len] = 0;
2254 }
2255 2591
2256 for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i++) 2592 for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i++)
2257 if (!strncmp(ata_dma_blacklist[i], s, len)) 2593 if (!strcmp(ata_dma_blacklist[i], model_num))
2258 return 1; 2594 return 1;
2259 2595
2260 return 0; 2596 return 0;
2261} 2597}
2262 2598
2263static unsigned int ata_get_mode_mask(const struct ata_port *ap, int shift)
2264{
2265 const struct ata_device *master, *slave;
2266 unsigned int mask;
2267
2268 master = &ap->device[0];
2269 slave = &ap->device[1];
2270
2271 assert (ata_dev_present(master) || ata_dev_present(slave));
2272
2273 if (shift == ATA_SHIFT_UDMA) {
2274 mask = ap->udma_mask;
2275 if (ata_dev_present(master)) {
2276 mask &= (master->id[ATA_ID_UDMA_MODES] & 0xff);
2277 if (ata_dma_blacklisted(master)) {
2278 mask = 0;
2279 ata_pr_blacklisted(ap, master);
2280 }
2281 }
2282 if (ata_dev_present(slave)) {
2283 mask &= (slave->id[ATA_ID_UDMA_MODES] & 0xff);
2284 if (ata_dma_blacklisted(slave)) {
2285 mask = 0;
2286 ata_pr_blacklisted(ap, slave);
2287 }
2288 }
2289 }
2290 else if (shift == ATA_SHIFT_MWDMA) {
2291 mask = ap->mwdma_mask;
2292 if (ata_dev_present(master)) {
2293 mask &= (master->id[ATA_ID_MWDMA_MODES] & 0x07);
2294 if (ata_dma_blacklisted(master)) {
2295 mask = 0;
2296 ata_pr_blacklisted(ap, master);
2297 }
2298 }
2299 if (ata_dev_present(slave)) {
2300 mask &= (slave->id[ATA_ID_MWDMA_MODES] & 0x07);
2301 if (ata_dma_blacklisted(slave)) {
2302 mask = 0;
2303 ata_pr_blacklisted(ap, slave);
2304 }
2305 }
2306 }
2307 else if (shift == ATA_SHIFT_PIO) {
2308 mask = ap->pio_mask;
2309 if (ata_dev_present(master)) {
2310 /* spec doesn't return explicit support for
2311 * PIO0-2, so we fake it
2312 */
2313 u16 tmp_mode = master->id[ATA_ID_PIO_MODES] & 0x03;
2314 tmp_mode <<= 3;
2315 tmp_mode |= 0x7;
2316 mask &= tmp_mode;
2317 }
2318 if (ata_dev_present(slave)) {
2319 /* spec doesn't return explicit support for
2320 * PIO0-2, so we fake it
2321 */
2322 u16 tmp_mode = slave->id[ATA_ID_PIO_MODES] & 0x03;
2323 tmp_mode <<= 3;
2324 tmp_mode |= 0x7;
2325 mask &= tmp_mode;
2326 }
2327 }
2328 else {
2329 mask = 0xffffffff; /* shut up compiler warning */
2330 BUG();
2331 }
2332
2333 return mask;
2334}
2335
2336/* find greatest bit */
2337static int fgb(u32 bitmap)
2338{
2339 unsigned int i;
2340 int x = -1;
2341
2342 for (i = 0; i < 32; i++)
2343 if (bitmap & (1 << i))
2344 x = i;
2345
2346 return x;
2347}
2348
2349/** 2599/**
2350 * ata_choose_xfer_mode - attempt to find best transfer mode 2600 * ata_dev_xfermask - Compute supported xfermask of the given device
2351 * @ap: Port for which an xfer mode will be selected 2601 * @ap: Port on which the device to compute xfermask for resides
2352 * @xfer_mode_out: (output) SET FEATURES - XFER MODE code 2602 * @dev: Device to compute xfermask for
2353 * @xfer_shift_out: (output) bit shift that selects this mode
2354 * 2603 *
2355 * Based on host and device capabilities, determine the 2604 * Compute supported xfermask of @dev. This function is
2356 * maximum transfer mode that is amenable to all. 2605 * responsible for applying all known limits including host
2606 * controller limits, device blacklist, etc...
2357 * 2607 *
2358 * LOCKING: 2608 * LOCKING:
2359 * PCI/etc. bus probe sem. 2609 * None.
2360 * 2610 *
2361 * RETURNS: 2611 * RETURNS:
2362 * Zero on success, negative on error. 2612 * Computed xfermask.
2363 */ 2613 */
2364 2614static unsigned int ata_dev_xfermask(struct ata_port *ap,
2365static int ata_choose_xfer_mode(const struct ata_port *ap, 2615 struct ata_device *dev)
2366 u8 *xfer_mode_out,
2367 unsigned int *xfer_shift_out)
2368{ 2616{
2369 unsigned int mask, shift; 2617 unsigned long xfer_mask;
2370 int x, i; 2618 int i;
2371 2619
2372 for (i = 0; i < ARRAY_SIZE(xfer_mode_classes); i++) { 2620 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
2373 shift = xfer_mode_classes[i].shift; 2621 ap->udma_mask);
2374 mask = ata_get_mode_mask(ap, shift);
2375 2622
2376 x = fgb(mask); 2623 /* use port-wide xfermask for now */
2377 if (x >= 0) { 2624 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2378 *xfer_mode_out = xfer_mode_classes[i].base + x; 2625 struct ata_device *d = &ap->device[i];
2379 *xfer_shift_out = shift; 2626 if (!ata_dev_present(d))
2380 return 0; 2627 continue;
2381 } 2628 xfer_mask &= ata_id_xfermask(d->id);
2629 if (ata_dma_blacklisted(d))
2630 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
2382 } 2631 }
2383 2632
2384 return -1; 2633 if (ata_dma_blacklisted(dev))
2634 printk(KERN_WARNING "ata%u: dev %u is on DMA blacklist, "
2635 "disabling DMA\n", ap->id, dev->devno);
2636
2637 return xfer_mask;
2385} 2638}
2386 2639
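ata_dev_xfermask() relies on a single packed mask word so one AND can clamp PIO, MWDMA and UDMA at once. A sketch of the packing assumed here; the helper name is hypothetical, and ata_pack_xfermask() referenced above is expected to do the equivalent:

static unsigned int pack_xfermask_sketch(unsigned int pio_mask,
                                         unsigned int mwdma_mask,
                                         unsigned int udma_mask)
{
        /* PIO, MWDMA and UDMA bit ranges share one word at their
         * ATA_SHIFT_* offsets
         */
        return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
               ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
               ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
}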
2387/** 2640/**
@@ -2420,63 +2673,28 @@ static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev)
2420} 2673}
2421 2674
2422/** 2675/**
2423 * ata_dev_reread_id - Reread the device identify device info
2424 * @ap: port where the device is
2425 * @dev: device to reread the identify device info
2426 *
2427 * LOCKING:
2428 */
2429
2430static void ata_dev_reread_id(struct ata_port *ap, struct ata_device *dev)
2431{
2432 struct ata_taskfile tf;
2433
2434 ata_tf_init(ap, &tf, dev->devno);
2435
2436 if (dev->class == ATA_DEV_ATA) {
2437 tf.command = ATA_CMD_ID_ATA;
2438 DPRINTK("do ATA identify\n");
2439 } else {
2440 tf.command = ATA_CMD_ID_ATAPI;
2441 DPRINTK("do ATAPI identify\n");
2442 }
2443
2444 tf.flags |= ATA_TFLAG_DEVICE;
2445 tf.protocol = ATA_PROT_PIO;
2446
2447 if (ata_exec_internal(ap, dev, &tf, DMA_FROM_DEVICE,
2448 dev->id, sizeof(dev->id)))
2449 goto err_out;
2450
2451 swap_buf_le16(dev->id, ATA_ID_WORDS);
2452
2453 ata_dump_id(dev);
2454
2455 DPRINTK("EXIT\n");
2456
2457 return;
2458err_out:
2459 printk(KERN_ERR "ata%u: failed to reread ID, disabled\n", ap->id);
2460 ata_port_disable(ap);
2461}
2462
2463/**
2464 * ata_dev_init_params - Issue INIT DEV PARAMS command 2676 * ata_dev_init_params - Issue INIT DEV PARAMS command
2465 * @ap: Port associated with device @dev 2677 * @ap: Port associated with device @dev
2466 * @dev: Device to which command will be sent 2678 * @dev: Device to which command will be sent
2467 * 2679 *
2468 * LOCKING: 2680 * LOCKING:
2681 * Kernel thread context (may sleep)
2682 *
2683 * RETURNS:
2684 * 0 on success, AC_ERR_* mask otherwise.
2469 */ 2685 */
2470 2686
2471static void ata_dev_init_params(struct ata_port *ap, struct ata_device *dev) 2687static unsigned int ata_dev_init_params(struct ata_port *ap,
2688 struct ata_device *dev)
2472{ 2689{
2473 struct ata_taskfile tf; 2690 struct ata_taskfile tf;
2691 unsigned int err_mask;
2474 u16 sectors = dev->id[6]; 2692 u16 sectors = dev->id[6];
2475 u16 heads = dev->id[3]; 2693 u16 heads = dev->id[3];
2476 2694
2477 /* Number of sectors per track 1-255. Number of heads 1-16 */ 2695 /* Number of sectors per track 1-255. Number of heads 1-16 */
2478 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16) 2696 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
2479 return; 2697 return 0;
2480 2698
2481 /* set up init dev params taskfile */ 2699 /* set up init dev params taskfile */
2482 DPRINTK("init dev params \n"); 2700 DPRINTK("init dev params \n");
@@ -2488,13 +2706,10 @@ static void ata_dev_init_params(struct ata_port *ap, struct ata_device *dev)
2488 tf.nsect = sectors; 2706 tf.nsect = sectors;
2489 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */ 2707 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
2490 2708
2491 if (ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0)) { 2709 err_mask = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
2492 printk(KERN_ERR "ata%u: failed to init parameters, disabled\n",
2493 ap->id);
2494 ata_port_disable(ap);
2495 }
2496 2710
2497 DPRINTK("EXIT\n"); 2711 DPRINTK("EXIT, err_mask=%x\n", err_mask);
2712 return err_mask;
2498} 2713}
2499 2714
2500/** 2715/**
@@ -2514,11 +2729,11 @@ static void ata_sg_clean(struct ata_queued_cmd *qc)
2514 int dir = qc->dma_dir; 2729 int dir = qc->dma_dir;
2515 void *pad_buf = NULL; 2730 void *pad_buf = NULL;
2516 2731
2517 assert(qc->flags & ATA_QCFLAG_DMAMAP); 2732 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
2518 assert(sg != NULL); 2733 WARN_ON(sg == NULL);
2519 2734
2520 if (qc->flags & ATA_QCFLAG_SINGLE) 2735 if (qc->flags & ATA_QCFLAG_SINGLE)
2521 assert(qc->n_elem <= 1); 2736 WARN_ON(qc->n_elem > 1);
2522 2737
2523 VPRINTK("unmapping %u sg elements\n", qc->n_elem); 2738 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
2524 2739
@@ -2573,8 +2788,8 @@ static void ata_fill_sg(struct ata_queued_cmd *qc)
2573 struct scatterlist *sg; 2788 struct scatterlist *sg;
2574 unsigned int idx; 2789 unsigned int idx;
2575 2790
2576 assert(qc->__sg != NULL); 2791 WARN_ON(qc->__sg == NULL);
2577 assert(qc->n_elem > 0 || qc->pad_len > 0); 2792 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
2578 2793
2579 idx = 0; 2794 idx = 0;
2580 ata_for_each_sg(sg, qc) { 2795 ata_for_each_sg(sg, qc) {
@@ -2727,7 +2942,7 @@ static int ata_sg_setup_one(struct ata_queued_cmd *qc)
2727 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ); 2942 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
2728 struct scatterlist *psg = &qc->pad_sgent; 2943 struct scatterlist *psg = &qc->pad_sgent;
2729 2944
2730 assert(qc->dev->class == ATA_DEV_ATAPI); 2945 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
2731 2946
2732 memset(pad_buf, 0, ATA_DMA_PAD_SZ); 2947 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
2733 2948
@@ -2791,7 +3006,7 @@ static int ata_sg_setup(struct ata_queued_cmd *qc)
2791 int n_elem, pre_n_elem, dir, trim_sg = 0; 3006 int n_elem, pre_n_elem, dir, trim_sg = 0;
2792 3007
2793 VPRINTK("ENTER, ata%u\n", ap->id); 3008 VPRINTK("ENTER, ata%u\n", ap->id);
2794 assert(qc->flags & ATA_QCFLAG_SG); 3009 WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
2795 3010
2796 /* we must lengthen transfers to end on a 32-bit boundary */ 3011 /* we must lengthen transfers to end on a 32-bit boundary */
2797 qc->pad_len = lsg->length & 3; 3012 qc->pad_len = lsg->length & 3;
@@ -2800,7 +3015,7 @@ static int ata_sg_setup(struct ata_queued_cmd *qc)
2800 struct scatterlist *psg = &qc->pad_sgent; 3015 struct scatterlist *psg = &qc->pad_sgent;
2801 unsigned int offset; 3016 unsigned int offset;
2802 3017
2803 assert(qc->dev->class == ATA_DEV_ATAPI); 3018 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
2804 3019
2805 memset(pad_buf, 0, ATA_DMA_PAD_SZ); 3020 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
2806 3021
@@ -2876,7 +3091,7 @@ void ata_poll_qc_complete(struct ata_queued_cmd *qc)
2876} 3091}
2877 3092
2878/** 3093/**
2879 * ata_pio_poll - 3094 * ata_pio_poll - poll using PIO, depending on current state
2880 * @ap: the target ata_port 3095 * @ap: the target ata_port
2881 * 3096 *
2882 * LOCKING: 3097 * LOCKING:
@@ -2894,7 +3109,7 @@ static unsigned long ata_pio_poll(struct ata_port *ap)
2894 unsigned int reg_state = HSM_ST_UNKNOWN; 3109 unsigned int reg_state = HSM_ST_UNKNOWN;
2895 3110
2896 qc = ata_qc_from_tag(ap, ap->active_tag); 3111 qc = ata_qc_from_tag(ap, ap->active_tag);
2897 assert(qc != NULL); 3112 WARN_ON(qc == NULL);
2898 3113
2899 switch (ap->hsm_task_state) { 3114 switch (ap->hsm_task_state) {
2900 case HSM_ST: 3115 case HSM_ST:
@@ -2915,7 +3130,7 @@ static unsigned long ata_pio_poll(struct ata_port *ap)
2915 status = ata_chk_status(ap); 3130 status = ata_chk_status(ap);
2916 if (status & ATA_BUSY) { 3131 if (status & ATA_BUSY) {
2917 if (time_after(jiffies, ap->pio_task_timeout)) { 3132 if (time_after(jiffies, ap->pio_task_timeout)) {
2918 qc->err_mask |= AC_ERR_ATA_BUS; 3133 qc->err_mask |= AC_ERR_TIMEOUT;
2919 ap->hsm_task_state = HSM_ST_TMOUT; 3134 ap->hsm_task_state = HSM_ST_TMOUT;
2920 return 0; 3135 return 0;
2921 } 3136 }
@@ -2962,7 +3177,7 @@ static int ata_pio_complete (struct ata_port *ap)
2962 } 3177 }
2963 3178
2964 qc = ata_qc_from_tag(ap, ap->active_tag); 3179 qc = ata_qc_from_tag(ap, ap->active_tag);
2965 assert(qc != NULL); 3180 WARN_ON(qc == NULL);
2966 3181
2967 drv_stat = ata_wait_idle(ap); 3182 drv_stat = ata_wait_idle(ap);
2968 if (!ata_ok(drv_stat)) { 3183 if (!ata_ok(drv_stat)) {
@@ -2973,7 +3188,7 @@ static int ata_pio_complete (struct ata_port *ap)
2973 3188
2974 ap->hsm_task_state = HSM_ST_IDLE; 3189 ap->hsm_task_state = HSM_ST_IDLE;
2975 3190
2976 assert(qc->err_mask == 0); 3191 WARN_ON(qc->err_mask);
2977 ata_poll_qc_complete(qc); 3192 ata_poll_qc_complete(qc);
2978 3193
2979 /* another command may start at this point */ 3194 /* another command may start at this point */
@@ -2983,7 +3198,7 @@ static int ata_pio_complete (struct ata_port *ap)
2983 3198
2984 3199
2985/** 3200/**
2986 * swap_buf_le16 - swap halves of 16-words in place 3201 * swap_buf_le16 - swap halves of 16-bit words in place
2987 * @buf: Buffer to swap 3202 * @buf: Buffer to swap
2988 * @buf_words: Number of 16-bit words in buffer. 3203 * @buf_words: Number of 16-bit words in buffer.
2989 * 3204 *
@@ -3293,7 +3508,7 @@ static void atapi_pio_bytes(struct ata_queued_cmd *qc)
3293err_out: 3508err_out:
3294 printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n", 3509 printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n",
3295 ap->id, dev->devno); 3510 ap->id, dev->devno);
3296 qc->err_mask |= AC_ERR_ATA_BUS; 3511 qc->err_mask |= AC_ERR_HSM;
3297 ap->hsm_task_state = HSM_ST_ERR; 3512 ap->hsm_task_state = HSM_ST_ERR;
3298} 3513}
3299 3514
@@ -3330,7 +3545,7 @@ static void ata_pio_block(struct ata_port *ap)
3330 } 3545 }
3331 3546
3332 qc = ata_qc_from_tag(ap, ap->active_tag); 3547 qc = ata_qc_from_tag(ap, ap->active_tag);
3333 assert(qc != NULL); 3548 WARN_ON(qc == NULL);
3334 3549
3335 /* check error */ 3550 /* check error */
3336 if (status & (ATA_ERR | ATA_DF)) { 3551 if (status & (ATA_ERR | ATA_DF)) {
@@ -3351,7 +3566,7 @@ static void ata_pio_block(struct ata_port *ap)
3351 } else { 3566 } else {
3352 /* handle BSY=0, DRQ=0 as error */ 3567 /* handle BSY=0, DRQ=0 as error */
3353 if ((status & ATA_DRQ) == 0) { 3568 if ((status & ATA_DRQ) == 0) {
3354 qc->err_mask |= AC_ERR_ATA_BUS; 3569 qc->err_mask |= AC_ERR_HSM;
3355 ap->hsm_task_state = HSM_ST_ERR; 3570 ap->hsm_task_state = HSM_ST_ERR;
3356 return; 3571 return;
3357 } 3572 }
@@ -3365,7 +3580,7 @@ static void ata_pio_error(struct ata_port *ap)
3365 struct ata_queued_cmd *qc; 3580 struct ata_queued_cmd *qc;
3366 3581
3367 qc = ata_qc_from_tag(ap, ap->active_tag); 3582 qc = ata_qc_from_tag(ap, ap->active_tag);
3368 assert(qc != NULL); 3583 WARN_ON(qc == NULL);
3369 3584
3370 if (qc->tf.command != ATA_CMD_PACKET) 3585 if (qc->tf.command != ATA_CMD_PACKET)
3371 printk(KERN_WARNING "ata%u: PIO error\n", ap->id); 3586 printk(KERN_WARNING "ata%u: PIO error\n", ap->id);
@@ -3373,7 +3588,7 @@ static void ata_pio_error(struct ata_port *ap)
3373 /* make sure qc->err_mask is available to 3588 /* make sure qc->err_mask is available to
3374 * know what's wrong and recover 3589 * know what's wrong and recover
3375 */ 3590 */
3376 assert(qc->err_mask); 3591 WARN_ON(qc->err_mask == 0);
3377 3592
3378 ap->hsm_task_state = HSM_ST_IDLE; 3593 ap->hsm_task_state = HSM_ST_IDLE;
3379 3594
@@ -3414,12 +3629,84 @@ fsm_start:
3414 } 3629 }
3415 3630
3416 if (timeout) 3631 if (timeout)
3417 queue_delayed_work(ata_wq, &ap->pio_task, timeout); 3632 ata_port_queue_task(ap, ata_pio_task, ap, timeout);
3418 else if (!qc_completed) 3633 else if (!qc_completed)
3419 goto fsm_start; 3634 goto fsm_start;
3420} 3635}
3421 3636
3422/** 3637/**
3638 * atapi_packet_task - Write CDB bytes to hardware
3639 * @_data: Port to which ATAPI device is attached.
3640 *
3641 * When the device has indicated its readiness to accept
3642 * a CDB, this function is called to send the CDB.
3643 * If DMA is to be performed, exit immediately.
3644 * Otherwise, we are in polling mode, so poll
3645 * status until the operation succeeds or fails.
3646 *
3647 * LOCKING:
3648 * Kernel thread context (may sleep)
3649 */
3650
3651static void atapi_packet_task(void *_data)
3652{
3653 struct ata_port *ap = _data;
3654 struct ata_queued_cmd *qc;
3655 u8 status;
3656
3657 qc = ata_qc_from_tag(ap, ap->active_tag);
3658 WARN_ON(qc == NULL);
3659 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
3660
3661 /* sleep-wait for BSY to clear */
3662 DPRINTK("busy wait\n");
3663 if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB)) {
3664 qc->err_mask |= AC_ERR_TIMEOUT;
3665 goto err_out;
3666 }
3667
3668 /* make sure DRQ is set */
3669 status = ata_chk_status(ap);
3670 if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ) {
3671 qc->err_mask |= AC_ERR_HSM;
3672 goto err_out;
3673 }
3674
3675 /* send SCSI cdb */
3676 DPRINTK("send cdb\n");
3677 WARN_ON(qc->dev->cdb_len < 12);
3678
3679 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA ||
3680 qc->tf.protocol == ATA_PROT_ATAPI_NODATA) {
3681 unsigned long flags;
3682
3683 /* Once we're done issuing command and kicking bmdma,
3684 * irq handler takes over. To not lose irq, we need
3685 * to clear NOINTR flag before sending cdb, but
3686 * interrupt handler shouldn't be invoked before we're
3687 * finished. Hence, the following locking.
3688 */
3689 spin_lock_irqsave(&ap->host_set->lock, flags);
3690 ap->flags &= ~ATA_FLAG_NOINTR;
3691 ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);
3692 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA)
3693 ap->ops->bmdma_start(qc); /* initiate bmdma */
3694 spin_unlock_irqrestore(&ap->host_set->lock, flags);
3695 } else {
3696 ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);
3697
3698 /* PIO commands are handled by polling */
3699 ap->hsm_task_state = HSM_ST;
3700 ata_port_queue_task(ap, ata_pio_task, ap, 0);
3701 }
3702
3703 return;
3704
3705err_out:
3706 ata_poll_qc_complete(qc);
3707}
3708
3709/**
3423 * ata_qc_timeout - Handle timeout of queued command 3710 * ata_qc_timeout - Handle timeout of queued command
3424 * @qc: Command that timed out 3711 * @qc: Command that timed out
3425 * 3712 *
@@ -3447,15 +3734,9 @@ static void ata_qc_timeout(struct ata_queued_cmd *qc)
3447 3734
3448 DPRINTK("ENTER\n"); 3735 DPRINTK("ENTER\n");
3449 3736
3450 spin_lock_irqsave(&host_set->lock, flags); 3737 ap->hsm_task_state = HSM_ST_IDLE;
3451 3738
3452 /* hack alert! We cannot use the supplied completion 3739 spin_lock_irqsave(&host_set->lock, flags);
3453 * function from inside the ->eh_strategy_handler() thread.
3454 * libata is the only user of ->eh_strategy_handler() in
3455 * any kernel, so the default scsi_done() assumes it is
3456 * not being called from the SCSI EH.
3457 */
3458 qc->scsidone = scsi_finish_command;
3459 3740
3460 switch (qc->tf.protocol) { 3741 switch (qc->tf.protocol) {
3461 3742
@@ -3480,12 +3761,13 @@ static void ata_qc_timeout(struct ata_queued_cmd *qc)
3480 3761
3481 /* complete taskfile transaction */ 3762 /* complete taskfile transaction */
3482 qc->err_mask |= ac_err_mask(drv_stat); 3763 qc->err_mask |= ac_err_mask(drv_stat);
3483 ata_qc_complete(qc);
3484 break; 3764 break;
3485 } 3765 }
3486 3766
3487 spin_unlock_irqrestore(&host_set->lock, flags); 3767 spin_unlock_irqrestore(&host_set->lock, flags);
3488 3768
3769 ata_eh_qc_complete(qc);
3770
3489 DPRINTK("EXIT\n"); 3771 DPRINTK("EXIT\n");
3490} 3772}
3491 3773
@@ -3510,20 +3792,10 @@ static void ata_qc_timeout(struct ata_queued_cmd *qc)
3510 3792
3511void ata_eng_timeout(struct ata_port *ap) 3793void ata_eng_timeout(struct ata_port *ap)
3512{ 3794{
3513 struct ata_queued_cmd *qc;
3514
3515 DPRINTK("ENTER\n"); 3795 DPRINTK("ENTER\n");
3516 3796
3517 qc = ata_qc_from_tag(ap, ap->active_tag); 3797 ata_qc_timeout(ata_qc_from_tag(ap, ap->active_tag));
3518 if (qc)
3519 ata_qc_timeout(qc);
3520 else {
3521 printk(KERN_ERR "ata%u: BUG: timeout without command\n",
3522 ap->id);
3523 goto out;
3524 }
3525 3798
3526out:
3527 DPRINTK("EXIT\n"); 3799 DPRINTK("EXIT\n");
3528} 3800}
3529 3801
@@ -3579,21 +3851,6 @@ struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
3579 return qc; 3851 return qc;
3580} 3852}
3581 3853
3582static void __ata_qc_complete(struct ata_queued_cmd *qc)
3583{
3584 struct ata_port *ap = qc->ap;
3585 unsigned int tag;
3586
3587 qc->flags = 0;
3588 tag = qc->tag;
3589 if (likely(ata_tag_valid(tag))) {
3590 if (tag == ap->active_tag)
3591 ap->active_tag = ATA_TAG_POISON;
3592 qc->tag = ATA_TAG_POISON;
3593 clear_bit(tag, &ap->qactive);
3594 }
3595}
3596
3597/** 3854/**
3598 * ata_qc_free - free unused ata_queued_cmd 3855 * ata_qc_free - free unused ata_queued_cmd
3599 * @qc: Command to complete 3856 * @qc: Command to complete
@@ -3606,29 +3863,25 @@ static void __ata_qc_complete(struct ata_queued_cmd *qc)
3606 */ 3863 */
3607void ata_qc_free(struct ata_queued_cmd *qc) 3864void ata_qc_free(struct ata_queued_cmd *qc)
3608{ 3865{
3609 assert(qc != NULL); /* ata_qc_from_tag _might_ return NULL */ 3866 struct ata_port *ap = qc->ap;
3867 unsigned int tag;
3610 3868
3611 __ata_qc_complete(qc); 3869 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
3612}
3613 3870
3614/** 3871 qc->flags = 0;
3615 * ata_qc_complete - Complete an active ATA command 3872 tag = qc->tag;
3616 * @qc: Command to complete 3873 if (likely(ata_tag_valid(tag))) {
3617 * @err_mask: ATA Status register contents 3874 if (tag == ap->active_tag)
3618 * 3875 ap->active_tag = ATA_TAG_POISON;
3619 * Indicate to the mid and upper layers that an ATA 3876 qc->tag = ATA_TAG_POISON;
3620 * command has completed, with either an ok or not-ok status. 3877 clear_bit(tag, &ap->qactive);
3621 * 3878 }
3622 * LOCKING: 3879}
3623 * spin_lock_irqsave(host_set lock)
3624 */
3625 3880
3626void ata_qc_complete(struct ata_queued_cmd *qc) 3881void __ata_qc_complete(struct ata_queued_cmd *qc)
3627{ 3882{
3628 int rc; 3883 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
3629 3884 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
3630 assert(qc != NULL); /* ata_qc_from_tag _might_ return NULL */
3631 assert(qc->flags & ATA_QCFLAG_ACTIVE);
3632 3885
3633 if (likely(qc->flags & ATA_QCFLAG_DMAMAP)) 3886 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
3634 ata_sg_clean(qc); 3887 ata_sg_clean(qc);
@@ -3640,17 +3893,7 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
3640 qc->flags &= ~ATA_QCFLAG_ACTIVE; 3893 qc->flags &= ~ATA_QCFLAG_ACTIVE;
3641 3894
3642 /* call completion callback */ 3895 /* call completion callback */
3643 rc = qc->complete_fn(qc); 3896 qc->complete_fn(qc);
3644
3645 /* if callback indicates not to complete command (non-zero),
3646 * return immediately
3647 */
3648 if (rc != 0)
3649 return;
3650
3651 __ata_qc_complete(qc);
3652
3653 VPRINTK("EXIT\n");
3654} 3897}
3655 3898
3656static inline int ata_should_dma_map(struct ata_queued_cmd *qc) 3899static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
@@ -3690,20 +3933,20 @@ static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
3690 * spin_lock_irqsave(host_set lock) 3933 * spin_lock_irqsave(host_set lock)
3691 * 3934 *
3692 * RETURNS: 3935 * RETURNS:
3693 * Zero on success, negative on error. 3936 * Zero on success, AC_ERR_* mask on failure
3694 */ 3937 */
3695 3938
3696int ata_qc_issue(struct ata_queued_cmd *qc) 3939unsigned int ata_qc_issue(struct ata_queued_cmd *qc)
3697{ 3940{
3698 struct ata_port *ap = qc->ap; 3941 struct ata_port *ap = qc->ap;
3699 3942
3700 if (ata_should_dma_map(qc)) { 3943 if (ata_should_dma_map(qc)) {
3701 if (qc->flags & ATA_QCFLAG_SG) { 3944 if (qc->flags & ATA_QCFLAG_SG) {
3702 if (ata_sg_setup(qc)) 3945 if (ata_sg_setup(qc))
3703 goto err_out; 3946 goto sg_err;
3704 } else if (qc->flags & ATA_QCFLAG_SINGLE) { 3947 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
3705 if (ata_sg_setup_one(qc)) 3948 if (ata_sg_setup_one(qc))
3706 goto err_out; 3949 goto sg_err;
3707 } 3950 }
3708 } else { 3951 } else {
3709 qc->flags &= ~ATA_QCFLAG_DMAMAP; 3952 qc->flags &= ~ATA_QCFLAG_DMAMAP;
@@ -3716,8 +3959,9 @@ int ata_qc_issue(struct ata_queued_cmd *qc)
3716 3959
3717 return ap->ops->qc_issue(qc); 3960 return ap->ops->qc_issue(qc);
3718 3961
3719err_out: 3962sg_err:
3720 return -1; 3963 qc->flags &= ~ATA_QCFLAG_DMAMAP;
3964 return AC_ERR_SYSTEM;
3721} 3965}
3722 3966
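With ata_qc_issue() now returning an AC_ERR_* mask instead of a negative errno, a hedged caller-side sketch of the new convention, mirroring how the SCSI translation layer is expected to consume the result; the wrapper name is illustrative:

static void issue_qc_sketch(struct ata_queued_cmd *qc)
{
        /* fold the returned AC_ERR_* mask into the qc; on failure
         * the qc is completed with that error rather than leaked
         */
        qc->err_mask = ata_qc_issue(qc);
        if (qc->err_mask)
                ata_qc_complete(qc);
}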
3723 3967
@@ -3736,10 +3980,10 @@ err_out:
3736 * spin_lock_irqsave(host_set lock) 3980 * spin_lock_irqsave(host_set lock)
3737 * 3981 *
3738 * RETURNS: 3982 * RETURNS:
3739 * Zero on success, negative on error. 3983 * Zero on success, AC_ERR_* mask on failure
3740 */ 3984 */
3741 3985
3742int ata_qc_issue_prot(struct ata_queued_cmd *qc) 3986unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
3743{ 3987{
3744 struct ata_port *ap = qc->ap; 3988 struct ata_port *ap = qc->ap;
3745 3989
@@ -3760,31 +4004,31 @@ int ata_qc_issue_prot(struct ata_queued_cmd *qc)
3760 ata_qc_set_polling(qc); 4004 ata_qc_set_polling(qc);
3761 ata_tf_to_host(ap, &qc->tf); 4005 ata_tf_to_host(ap, &qc->tf);
3762 ap->hsm_task_state = HSM_ST; 4006 ap->hsm_task_state = HSM_ST;
3763 queue_work(ata_wq, &ap->pio_task); 4007 ata_port_queue_task(ap, ata_pio_task, ap, 0);
3764 break; 4008 break;
3765 4009
3766 case ATA_PROT_ATAPI: 4010 case ATA_PROT_ATAPI:
3767 ata_qc_set_polling(qc); 4011 ata_qc_set_polling(qc);
3768 ata_tf_to_host(ap, &qc->tf); 4012 ata_tf_to_host(ap, &qc->tf);
3769 queue_work(ata_wq, &ap->packet_task); 4013 ata_port_queue_task(ap, atapi_packet_task, ap, 0);
3770 break; 4014 break;
3771 4015
3772 case ATA_PROT_ATAPI_NODATA: 4016 case ATA_PROT_ATAPI_NODATA:
3773 ap->flags |= ATA_FLAG_NOINTR; 4017 ap->flags |= ATA_FLAG_NOINTR;
3774 ata_tf_to_host(ap, &qc->tf); 4018 ata_tf_to_host(ap, &qc->tf);
3775 queue_work(ata_wq, &ap->packet_task); 4019 ata_port_queue_task(ap, atapi_packet_task, ap, 0);
3776 break; 4020 break;
3777 4021
3778 case ATA_PROT_ATAPI_DMA: 4022 case ATA_PROT_ATAPI_DMA:
3779 ap->flags |= ATA_FLAG_NOINTR; 4023 ap->flags |= ATA_FLAG_NOINTR;
3780 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */ 4024 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
3781 ap->ops->bmdma_setup(qc); /* set up bmdma */ 4025 ap->ops->bmdma_setup(qc); /* set up bmdma */
3782 queue_work(ata_wq, &ap->packet_task); 4026 ata_port_queue_task(ap, atapi_packet_task, ap, 0);
3783 break; 4027 break;
3784 4028
3785 default: 4029 default:
3786 WARN_ON(1); 4030 WARN_ON(1);
3787 return -1; 4031 return AC_ERR_SYSTEM;
3788 } 4032 }
3789 4033
3790 return 0; 4034 return 0;
@@ -4147,91 +4391,6 @@ irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
4147 return IRQ_RETVAL(handled); 4391 return IRQ_RETVAL(handled);
4148} 4392}
4149 4393
4150/**
4151 * atapi_packet_task - Write CDB bytes to hardware
4152 * @_data: Port to which ATAPI device is attached.
4153 *
4154 * When device has indicated its readiness to accept
4155 * a CDB, this function is called. Send the CDB.
4156 * If DMA is to be performed, exit immediately.
4157 * Otherwise, we are in polling mode, so poll
4158 * status under operation succeeds or fails.
4159 *
4160 * LOCKING:
4161 * Kernel thread context (may sleep)
4162 */
4163
4164static void atapi_packet_task(void *_data)
4165{
4166 struct ata_port *ap = _data;
4167 struct ata_queued_cmd *qc;
4168 u8 status;
4169
4170 qc = ata_qc_from_tag(ap, ap->active_tag);
4171 assert(qc != NULL);
4172 assert(qc->flags & ATA_QCFLAG_ACTIVE);
4173
4174 /* sleep-wait for BSY to clear */
4175 DPRINTK("busy wait\n");
4176 if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB)) {
4177 qc->err_mask |= AC_ERR_ATA_BUS;
4178 goto err_out;
4179 }
4180
4181 /* make sure DRQ is set */
4182 status = ata_chk_status(ap);
4183 if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ) {
4184 qc->err_mask |= AC_ERR_ATA_BUS;
4185 goto err_out;
4186 }
4187
4188 /* send SCSI cdb */
4189 DPRINTK("send cdb\n");
4190 assert(ap->cdb_len >= 12);
4191
4192 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA ||
4193 qc->tf.protocol == ATA_PROT_ATAPI_NODATA) {
4194 unsigned long flags;
4195
4196 /* Once we're done issuing command and kicking bmdma,
4197 * irq handler takes over. To not lose irq, we need
4198 * to clear NOINTR flag before sending cdb, but
4199 * interrupt handler shouldn't be invoked before we're
4200 * finished. Hence, the following locking.
4201 */
4202 spin_lock_irqsave(&ap->host_set->lock, flags);
4203 ap->flags &= ~ATA_FLAG_NOINTR;
4204 ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1);
4205 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA)
4206 ap->ops->bmdma_start(qc); /* initiate bmdma */
4207 spin_unlock_irqrestore(&ap->host_set->lock, flags);
4208 } else {
4209 ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1);
4210
4211 /* PIO commands are handled by polling */
4212 ap->hsm_task_state = HSM_ST;
4213 queue_work(ata_wq, &ap->pio_task);
4214 }
4215
4216 return;
4217
4218err_out:
4219 ata_poll_qc_complete(qc);
4220}
4221
4222
4223/**
4224 * ata_port_start - Set port up for dma.
4225 * @ap: Port to initialize
4226 *
4227 * Called just after data structures for each port are
4228 * initialized. Allocates space for PRD table.
4229 *
4230 * May be used as the port_start() entry in ata_port_operations.
4231 *
4232 * LOCKING:
4233 * Inherited from caller.
4234 */
4235 4394
4236/* 4395/*
4237 * Execute a 'simple' command, that only consists of the opcode 'cmd' itself, 4396 * Execute a 'simple' command, that only consists of the opcode 'cmd' itself,
@@ -4284,6 +4443,8 @@ static int ata_start_drive(struct ata_port *ap, struct ata_device *dev)
4284 4443
4285/** 4444/**
4286 * ata_device_resume - wake up a previously suspended device 4445 * ata_device_resume - wake up a previously suspended device
4446 * @ap: port the device is connected to
4447 * @dev: the device to resume
4287 * 4448 *
4288 * Kick the drive back into action, by sending it an idle immediate 4449 * Kick the drive back into action, by sending it an idle immediate
4289 * command and making sure its transfer mode matches between drive 4450 * command and making sure its transfer mode matches between drive
@@ -4306,10 +4467,11 @@ int ata_device_resume(struct ata_port *ap, struct ata_device *dev)
4306 4467
4307/** 4468/**
4308 * ata_device_suspend - prepare a device for suspend 4469 * ata_device_suspend - prepare a device for suspend
4470 * @ap: port the device is connected to
4471 * @dev: the device to suspend
4309 * 4472 *
4310 * Flush the cache on the drive, if appropriate, then issue a 4473 * Flush the cache on the drive, if appropriate, then issue a
4311 * standbynow command. 4474 * standbynow command.
4312 *
4313 */ 4475 */
4314int ata_device_suspend(struct ata_port *ap, struct ata_device *dev) 4476int ata_device_suspend(struct ata_port *ap, struct ata_device *dev)
4315{ 4477{
@@ -4323,6 +4485,19 @@ int ata_device_suspend(struct ata_port *ap, struct ata_device *dev)
4323 return 0; 4485 return 0;
4324} 4486}
4325 4487
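How a low-level driver's power-management hooks might drive this pair, as a hedged sketch: the ata_device_suspend()/ata_device_resume() signatures come from the hunks above, while the example_* names and the ata_dev_present() test are assumptions about this era's <linux/libata.h>.

static int example_host_suspend(struct ata_port *ap)
{
	int i, rc = 0;

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];

		if (ata_dev_present(dev))	/* assumed helper */
			rc |= ata_device_suspend(ap, dev);
	}
	return rc;
}

static int example_host_resume(struct ata_port *ap)
{
	int i;

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];

		if (ata_dev_present(dev))
			ata_device_resume(ap, dev);
	}
	return 0;
}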
4488/**
4489 * ata_port_start - Set port up for DMA.
4490 * @ap: Port to initialize
4491 *
4492 * Called just after data structures for each port are
4493 * initialized. Allocates space for the PRD table.
4494 *
4495 * May be used as the port_start() entry in ata_port_operations.
4496 *
4497 * LOCKING:
4498 * Inherited from caller.
4499 */
4500
4326int ata_port_start (struct ata_port *ap) 4501int ata_port_start (struct ata_port *ap)
4327{ 4502{
4328 struct device *dev = ap->host_set->dev; 4503 struct device *dev = ap->host_set->dev;
@@ -4436,8 +4611,8 @@ static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
4436 ap->active_tag = ATA_TAG_POISON; 4611 ap->active_tag = ATA_TAG_POISON;
4437 ap->last_ctl = 0xFF; 4612 ap->last_ctl = 0xFF;
4438 4613
4439 INIT_WORK(&ap->packet_task, atapi_packet_task, ap); 4614 INIT_WORK(&ap->port_task, NULL, NULL);
4440 INIT_WORK(&ap->pio_task, ata_pio_task, ap); 4615 INIT_LIST_HEAD(&ap->eh_done_q);
4441 4616
4442 for (i = 0; i < ATA_MAX_DEVICES; i++) 4617 for (i = 0; i < ATA_MAX_DEVICES; i++)
4443 ap->device[i].devno = i; 4618 ap->device[i].devno = i;
@@ -4579,9 +4754,9 @@ int ata_device_add(const struct ata_probe_ent *ent)
4579 4754
4580 ap = host_set->ports[i]; 4755 ap = host_set->ports[i];
4581 4756
4582 DPRINTK("ata%u: probe begin\n", ap->id); 4757 DPRINTK("ata%u: bus probe begin\n", ap->id);
4583 rc = ata_bus_probe(ap); 4758 rc = ata_bus_probe(ap);
4584 DPRINTK("ata%u: probe end\n", ap->id); 4759 DPRINTK("ata%u: bus probe end\n", ap->id);
4585 4760
4586 if (rc) { 4761 if (rc) {
4587 /* FIXME: do something useful here? 4762 /* FIXME: do something useful here?
@@ -4605,7 +4780,7 @@ int ata_device_add(const struct ata_probe_ent *ent)
4605 } 4780 }
4606 4781
4607 /* probes are done, now scan each port's disk(s) */ 4782 /* probes are done, now scan each port's disk(s) */
4608 DPRINTK("probe begin\n"); 4783 DPRINTK("host probe begin\n");
4609 for (i = 0; i < count; i++) { 4784 for (i = 0; i < count; i++) {
4610 struct ata_port *ap = host_set->ports[i]; 4785 struct ata_port *ap = host_set->ports[i];
4611 4786
@@ -4691,11 +4866,14 @@ void ata_host_set_remove(struct ata_host_set *host_set)
4691int ata_scsi_release(struct Scsi_Host *host) 4866int ata_scsi_release(struct Scsi_Host *host)
4692{ 4867{
4693 struct ata_port *ap = (struct ata_port *) &host->hostdata[0]; 4868 struct ata_port *ap = (struct ata_port *) &host->hostdata[0];
4869 int i;
4694 4870
4695 DPRINTK("ENTER\n"); 4871 DPRINTK("ENTER\n");
4696 4872
4697 ap->ops->port_disable(ap); 4873 ap->ops->port_disable(ap);
4698 ata_host_remove(ap, 0); 4874 ata_host_remove(ap, 0);
4875 for (i = 0; i < ATA_MAX_DEVICES; i++)
4876 kfree(ap->device[i].id);
4699 4877
4700 DPRINTK("EXIT\n"); 4878 DPRINTK("EXIT\n");
4701 return 1; 4879 return 1;
@@ -4727,32 +4905,6 @@ void ata_std_ports(struct ata_ioports *ioaddr)
4727 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD; 4905 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
4728} 4906}
4729 4907
4730static struct ata_probe_ent *
4731ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
4732{
4733 struct ata_probe_ent *probe_ent;
4734
4735 probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
4736 if (!probe_ent) {
4737 printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
4738 kobject_name(&(dev->kobj)));
4739 return NULL;
4740 }
4741
4742 INIT_LIST_HEAD(&probe_ent->node);
4743 probe_ent->dev = dev;
4744
4745 probe_ent->sht = port->sht;
4746 probe_ent->host_flags = port->host_flags;
4747 probe_ent->pio_mask = port->pio_mask;
4748 probe_ent->mwdma_mask = port->mwdma_mask;
4749 probe_ent->udma_mask = port->udma_mask;
4750 probe_ent->port_ops = port->port_ops;
4751
4752 return probe_ent;
4753}
4754
4755
4756 4908
4757#ifdef CONFIG_PCI 4909#ifdef CONFIG_PCI
4758 4910
@@ -4764,256 +4916,6 @@ void ata_pci_host_stop (struct ata_host_set *host_set)
4764} 4916}
4765 4917
4766/** 4918/**
4767 * ata_pci_init_native_mode - Initialize native-mode driver
4768 * @pdev: pci device to be initialized
4769 * @port: array[2] of pointers to port info structures.
4770 * @ports: bitmap of ports present
4771 *
4772 * Utility function which allocates and initializes an
4773 * ata_probe_ent structure for a standard dual-port
4774 * PIO-based IDE controller. The returned ata_probe_ent
4775 * structure can be passed to ata_device_add(), and should
4776 * afterwards be freed with kfree().
4777 *
4778 * The caller need only pass the address of the primary port; the
4779 * secondary will be deduced automatically. If the device has
4780 * non-standard secondary port mappings, this function can be called twice,
4781 * once for each interface.
4782 */
4783
4784struct ata_probe_ent *
4785ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int ports)
4786{
4787 struct ata_probe_ent *probe_ent =
4788 ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
4789 int p = 0;
4790
4791 if (!probe_ent)
4792 return NULL;
4793
4794 probe_ent->irq = pdev->irq;
4795 probe_ent->irq_flags = SA_SHIRQ;
4796 probe_ent->private_data = port[0]->private_data;
4797
4798 if (ports & ATA_PORT_PRIMARY) {
4799 probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 0);
4800 probe_ent->port[p].altstatus_addr =
4801 probe_ent->port[p].ctl_addr =
4802 pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS;
4803 probe_ent->port[p].bmdma_addr = pci_resource_start(pdev, 4);
4804 ata_std_ports(&probe_ent->port[p]);
4805 p++;
4806 }
4807
4808 if (ports & ATA_PORT_SECONDARY) {
4809 probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 2);
4810 probe_ent->port[p].altstatus_addr =
4811 probe_ent->port[p].ctl_addr =
4812 pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS;
4813 probe_ent->port[p].bmdma_addr = pci_resource_start(pdev, 4) + 8;
4814 ata_std_ports(&probe_ent->port[p]);
4815 p++;
4816 }
4817
4818 probe_ent->n_ports = p;
4819 return probe_ent;
4820}
4821
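The usage pattern the comment above describes, as a minimal sketch (the helper is being moved out of this file, presumably into the new libata-bmdma.c; PCI setup and error handling are elided, and example_init_one() is illustrative):

static int example_init_one(struct pci_dev *pdev,
			    struct ata_port_info **pinfo)
{
	struct ata_probe_ent *probe_ent;

	probe_ent = ata_pci_init_native_mode(pdev, pinfo,
			ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
	if (!probe_ent)
		return -ENOMEM;

	/* register the ports, then free the now-consumed template */
	ata_device_add(probe_ent);
	kfree(probe_ent);
	return 0;
}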
4822static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev, struct ata_port_info *port, int port_num)
4823{
4824 struct ata_probe_ent *probe_ent;
4825
4826 probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port);
4827 if (!probe_ent)
4828 return NULL;
4829
4830 probe_ent->legacy_mode = 1;
4831 probe_ent->n_ports = 1;
4832 probe_ent->hard_port_no = port_num;
4833 probe_ent->private_data = port->private_data;
4834
4835 switch(port_num)
4836 {
4837 case 0:
4838 probe_ent->irq = 14;
4839 probe_ent->port[0].cmd_addr = 0x1f0;
4840 probe_ent->port[0].altstatus_addr =
4841 probe_ent->port[0].ctl_addr = 0x3f6;
4842 break;
4843 case 1:
4844 probe_ent->irq = 15;
4845 probe_ent->port[0].cmd_addr = 0x170;
4846 probe_ent->port[0].altstatus_addr =
4847 probe_ent->port[0].ctl_addr = 0x376;
4848 break;
4849 }
4850 probe_ent->port[0].bmdma_addr = pci_resource_start(pdev, 4) + 8 * port_num;
4851 ata_std_ports(&probe_ent->port[0]);
4852 return probe_ent;
4853}
4854
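For reference, the fixed ISA-compatibility decode hard-coded above (standard PC/AT layout; the bmdma registers come from PCI BAR 4):

/*
 *   port 0: cmd 0x1f0-0x1f7, ctl/altstatus 0x3f6, IRQ 14, bmdma BAR4 + 0
 *   port 1: cmd 0x170-0x177, ctl/altstatus 0x376, IRQ 15, bmdma BAR4 + 8
 */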
4855/**
4856 * ata_pci_init_one - Initialize/register PCI IDE host controller
4857 * @pdev: Controller to be initialized
4858 * @port_info: Information from low-level host driver
4859 * @n_ports: Number of ports attached to host controller
4860 *
4861 * This is a helper function which can be called from a driver's
4862 * xxx_init_one() probe function if the hardware uses traditional
4863 * IDE taskfile registers.
4864 *
4865 * This function calls pci_enable_device(), reserves its register
4866 * regions, sets the DMA mask, enables bus-master mode, and calls
4867 * ata_device_add().
4868 *
4869 * LOCKING:
4870 * Inherited from PCI layer (may sleep).
4871 *
4872 * RETURNS:
4873 * Zero on success, negative errno-based value on error.
4874 */
4875
4876int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
4877 unsigned int n_ports)
4878{
4879 struct ata_probe_ent *probe_ent = NULL, *probe_ent2 = NULL;
4880 struct ata_port_info *port[2];
4881 u8 tmp8, mask;
4882 unsigned int legacy_mode = 0;
4883 int disable_dev_on_err = 1;
4884 int rc;
4885
4886 DPRINTK("ENTER\n");
4887
4888 port[0] = port_info[0];
4889 if (n_ports > 1)
4890 port[1] = port_info[1];
4891 else
4892 port[1] = port[0];
4893
4894 if ((port[0]->host_flags & ATA_FLAG_NO_LEGACY) == 0
4895 && (pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
4896 /* TODO: What if one channel is in native mode ... */
4897 pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
4898 mask = (1 << 2) | (1 << 0);
4899 if ((tmp8 & mask) != mask)
4900 legacy_mode = (1 << 3);
4901 }
4902
4903 /* FIXME... */
4904 if ((!legacy_mode) && (n_ports > 2)) {
4905 printk(KERN_ERR "ata: BUG: native mode, n_ports > 2\n");
4906 n_ports = 2;
4907 /* For now */
4908 }
4909
4910 /* FIXME: Really for ATA it isn't safe because the device may be
4911 multi-purpose and we want to leave it alone if it was already
4912 enabled. Secondly, for shared use, as Arjan says, we want refcounting.
4913
4914 Checking dev->is_enabled is insufficient as this is not set at
4915 boot for the primary video, which is BIOS-enabled
4916 */
4917
4918 rc = pci_enable_device(pdev);
4919 if (rc)
4920 return rc;
4921
4922 rc = pci_request_regions(pdev, DRV_NAME);
4923 if (rc) {
4924 disable_dev_on_err = 0;
4925 goto err_out;
4926 }
4927
4928 /* FIXME: Should use platform specific mappers for legacy port ranges */
4929 if (legacy_mode) {
4930 if (!request_region(0x1f0, 8, "libata")) {
4931 struct resource *conflict, res;
4932 res.start = 0x1f0;
4933 res.end = 0x1f0 + 8 - 1;
4934 conflict = ____request_resource(&ioport_resource, &res);
4935 if (!strcmp(conflict->name, "libata"))
4936 legacy_mode |= (1 << 0);
4937 else {
4938 disable_dev_on_err = 0;
4939 printk(KERN_WARNING "ata: 0x1f0 IDE port busy\n");
4940 }
4941 } else
4942 legacy_mode |= (1 << 0);
4943
4944 if (!request_region(0x170, 8, "libata")) {
4945 struct resource *conflict, res;
4946 res.start = 0x170;
4947 res.end = 0x170 + 8 - 1;
4948 conflict = ____request_resource(&ioport_resource, &res);
4949 if (!strcmp(conflict->name, "libata"))
4950 legacy_mode |= (1 << 1);
4951 else {
4952 disable_dev_on_err = 0;
4953 printk(KERN_WARNING "ata: 0x170 IDE port busy\n");
4954 }
4955 } else
4956 legacy_mode |= (1 << 1);
4957 }
4958
4959 /* we have legacy mode, but all ports are unavailable */
4960 if (legacy_mode == (1 << 3)) {
4961 rc = -EBUSY;
4962 goto err_out_regions;
4963 }
4964
4965 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
4966 if (rc)
4967 goto err_out_regions;
4968 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
4969 if (rc)
4970 goto err_out_regions;
4971
4972 if (legacy_mode) {
4973 if (legacy_mode & (1 << 0))
4974 probe_ent = ata_pci_init_legacy_port(pdev, port[0], 0);
4975 if (legacy_mode & (1 << 1))
4976 probe_ent2 = ata_pci_init_legacy_port(pdev, port[1], 1);
4977 } else {
4978 if (n_ports == 2)
4979 probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
4980 else
4981 probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY);
4982 }
4983 if (!probe_ent && !probe_ent2) {
4984 rc = -ENOMEM;
4985 goto err_out_regions;
4986 }
4987
4988 pci_set_master(pdev);
4989
4990 /* FIXME: check ata_device_add return */
4991 if (legacy_mode) {
4992 if (legacy_mode & (1 << 0))
4993 ata_device_add(probe_ent);
4994 if (legacy_mode & (1 << 1))
4995 ata_device_add(probe_ent2);
4996 } else
4997 ata_device_add(probe_ent);
4998
4999 kfree(probe_ent);
5000 kfree(probe_ent2);
5001
5002 return 0;
5003
5004err_out_regions:
5005 if (legacy_mode & (1 << 0))
5006 release_region(0x1f0, 8);
5007 if (legacy_mode & (1 << 1))
5008 release_region(0x170, 8);
5009 pci_release_regions(pdev);
5010err_out:
5011 if (disable_dev_on_err)
5012 pci_disable_device(pdev);
5013 return rc;
5014}
5015
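The legacy_mode bitmask used above is terse; an editorial reading of its bit assignments, reconstructed from the code rather than from any header:

/*
 * legacy_mode bits in ata_pci_init_one(), as used above:
 *   bit 3 - controller decodes in legacy (compatibility) mode
 *   bit 0 - primary legacy region 0x1f0 successfully reserved
 *   bit 1 - secondary legacy region 0x170 successfully reserved
 * legacy_mode == (1 << 3) alone therefore means "legacy controller,
 * but both port ranges are busy", hence the -EBUSY path.
 */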
5016/**
5017 * ata_pci_remove_one - PCI layer callback for device removal 4919 * ata_pci_remove_one - PCI layer callback for device removal
5018 * @pdev: PCI device that was removed 4920 * @pdev: PCI device that was removed
5019 * 4921 *
@@ -5143,7 +5045,7 @@ EXPORT_SYMBOL_GPL(ata_device_add);
5143EXPORT_SYMBOL_GPL(ata_host_set_remove); 5045EXPORT_SYMBOL_GPL(ata_host_set_remove);
5144EXPORT_SYMBOL_GPL(ata_sg_init); 5046EXPORT_SYMBOL_GPL(ata_sg_init);
5145EXPORT_SYMBOL_GPL(ata_sg_init_one); 5047EXPORT_SYMBOL_GPL(ata_sg_init_one);
5146EXPORT_SYMBOL_GPL(ata_qc_complete); 5048EXPORT_SYMBOL_GPL(__ata_qc_complete);
5147EXPORT_SYMBOL_GPL(ata_qc_issue_prot); 5049EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
5148EXPORT_SYMBOL_GPL(ata_eng_timeout); 5050EXPORT_SYMBOL_GPL(ata_eng_timeout);
5149EXPORT_SYMBOL_GPL(ata_tf_load); 5051EXPORT_SYMBOL_GPL(ata_tf_load);
@@ -5169,18 +5071,30 @@ EXPORT_SYMBOL_GPL(ata_port_probe);
5169EXPORT_SYMBOL_GPL(sata_phy_reset); 5071EXPORT_SYMBOL_GPL(sata_phy_reset);
5170EXPORT_SYMBOL_GPL(__sata_phy_reset); 5072EXPORT_SYMBOL_GPL(__sata_phy_reset);
5171EXPORT_SYMBOL_GPL(ata_bus_reset); 5073EXPORT_SYMBOL_GPL(ata_bus_reset);
5074EXPORT_SYMBOL_GPL(ata_std_probeinit);
5075EXPORT_SYMBOL_GPL(ata_std_softreset);
5076EXPORT_SYMBOL_GPL(sata_std_hardreset);
5077EXPORT_SYMBOL_GPL(ata_std_postreset);
5078EXPORT_SYMBOL_GPL(ata_std_probe_reset);
5079EXPORT_SYMBOL_GPL(ata_drive_probe_reset);
5080EXPORT_SYMBOL_GPL(ata_dev_revalidate);
5172EXPORT_SYMBOL_GPL(ata_port_disable); 5081EXPORT_SYMBOL_GPL(ata_port_disable);
5173EXPORT_SYMBOL_GPL(ata_ratelimit); 5082EXPORT_SYMBOL_GPL(ata_ratelimit);
5083EXPORT_SYMBOL_GPL(ata_busy_sleep);
5084EXPORT_SYMBOL_GPL(ata_port_queue_task);
5174EXPORT_SYMBOL_GPL(ata_scsi_ioctl); 5085EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
5175EXPORT_SYMBOL_GPL(ata_scsi_queuecmd); 5086EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
5087EXPORT_SYMBOL_GPL(ata_scsi_timed_out);
5176EXPORT_SYMBOL_GPL(ata_scsi_error); 5088EXPORT_SYMBOL_GPL(ata_scsi_error);
5177EXPORT_SYMBOL_GPL(ata_scsi_slave_config); 5089EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
5178EXPORT_SYMBOL_GPL(ata_scsi_release); 5090EXPORT_SYMBOL_GPL(ata_scsi_release);
5179EXPORT_SYMBOL_GPL(ata_host_intr); 5091EXPORT_SYMBOL_GPL(ata_host_intr);
5180EXPORT_SYMBOL_GPL(ata_dev_classify); 5092EXPORT_SYMBOL_GPL(ata_dev_classify);
5181EXPORT_SYMBOL_GPL(ata_dev_id_string); 5093EXPORT_SYMBOL_GPL(ata_id_string);
5182EXPORT_SYMBOL_GPL(ata_dev_config); 5094EXPORT_SYMBOL_GPL(ata_id_c_string);
5183EXPORT_SYMBOL_GPL(ata_scsi_simulate); 5095EXPORT_SYMBOL_GPL(ata_scsi_simulate);
5096EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
5097EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
5184 5098
5185EXPORT_SYMBOL_GPL(ata_pio_need_iordy); 5099EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
5186EXPORT_SYMBOL_GPL(ata_timing_compute); 5100EXPORT_SYMBOL_GPL(ata_timing_compute);
diff --git a/drivers/scsi/libata-scsi.c b/drivers/scsi/libata-scsi.c
index 59503c9ccac9..ccedb4536977 100644
--- a/drivers/scsi/libata-scsi.c
+++ b/drivers/scsi/libata-scsi.c
@@ -151,7 +151,7 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
151 struct scsi_sense_hdr sshdr; 151 struct scsi_sense_hdr sshdr;
152 enum dma_data_direction data_dir; 152 enum dma_data_direction data_dir;
153 153
154 if (NULL == (void *)arg) 154 if (arg == NULL)
155 return -EINVAL; 155 return -EINVAL;
156 156
157 if (copy_from_user(args, arg, sizeof(args))) 157 if (copy_from_user(args, arg, sizeof(args)))
@@ -201,7 +201,7 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
201 /* Need code to retrieve data from check condition? */ 201 /* Need code to retrieve data from check condition? */
202 202
203 if ((argbuf) 203 if ((argbuf)
204 && copy_to_user((void *)(arg + sizeof(args)), argbuf, argsize)) 204 && copy_to_user(arg + sizeof(args), argbuf, argsize))
205 rc = -EFAULT; 205 rc = -EFAULT;
206error: 206error:
207 if (argbuf) 207 if (argbuf)
@@ -228,7 +228,7 @@ int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg)
228 u8 args[7]; 228 u8 args[7];
229 struct scsi_sense_hdr sshdr; 229 struct scsi_sense_hdr sshdr;
230 230
231 if (NULL == (void *)arg) 231 if (arg == NULL)
232 return -EINVAL; 232 return -EINVAL;
233 233
234 if (copy_from_user(args, arg, sizeof(args))) 234 if (copy_from_user(args, arg, sizeof(args)))
@@ -553,7 +553,7 @@ void ata_gen_ata_desc_sense(struct ata_queued_cmd *qc)
553 /* 553 /*
554 * Read the controller registers. 554 * Read the controller registers.
555 */ 555 */
556 assert(NULL != qc->ap->ops->tf_read); 556 WARN_ON(qc->ap->ops->tf_read == NULL);
557 qc->ap->ops->tf_read(qc->ap, tf); 557 qc->ap->ops->tf_read(qc->ap, tf);
558 558
559 /* 559 /*
@@ -628,7 +628,7 @@ void ata_gen_fixed_sense(struct ata_queued_cmd *qc)
628 /* 628 /*
629 * Read the controller registers. 629 * Read the controller registers.
630 */ 630 */
631 assert(NULL != qc->ap->ops->tf_read); 631 WARN_ON(qc->ap->ops->tf_read == NULL);
632 qc->ap->ops->tf_read(qc->ap, tf); 632 qc->ap->ops->tf_read(qc->ap, tf);
633 633
634 /* 634 /*
@@ -684,23 +684,23 @@ int ata_scsi_slave_config(struct scsi_device *sdev)
684 if (sdev->id < ATA_MAX_DEVICES) { 684 if (sdev->id < ATA_MAX_DEVICES) {
685 struct ata_port *ap; 685 struct ata_port *ap;
686 struct ata_device *dev; 686 struct ata_device *dev;
687 unsigned int max_sectors;
687 688
688 ap = (struct ata_port *) &sdev->host->hostdata[0]; 689 ap = (struct ata_port *) &sdev->host->hostdata[0];
689 dev = &ap->device[sdev->id]; 690 dev = &ap->device[sdev->id];
690 691
691 /* TODO: 1024 is an arbitrary number, not the 692 /* TODO: 2048 is an arbitrary number, not the
692 * hardware maximum. This should be increased to 693 * hardware maximum. This should be increased to
693 * 65534 when Jens Axboe's patch for dynamically 694 * 65534 when Jens Axboe's patch for dynamically
694 * determining max_sectors is merged. 695 * determining max_sectors is merged.
695 */ 696 */
696 if ((dev->flags & ATA_DFLAG_LBA48) && 697 max_sectors = ATA_MAX_SECTORS;
697 ((dev->flags & ATA_DFLAG_LOCK_SECTORS) == 0)) { 698 if (dev->flags & ATA_DFLAG_LBA48)
698 /* 699 max_sectors = 2048;
699 * do not overwrite sdev->host->max_sectors, since 700 if (dev->max_sectors)
700 * other drives on this host may not support LBA48 701 max_sectors = dev->max_sectors;
701 */ 702
702 blk_queue_max_sectors(sdev->request_queue, 2048); 703 blk_queue_max_sectors(sdev->request_queue, max_sectors);
703 }
704 704
705 /* 705 /*
706 * SATA DMA transfers must be multiples of 4 byte, so 706 * SATA DMA transfers must be multiples of 4 byte, so
@@ -717,6 +717,47 @@ int ata_scsi_slave_config(struct scsi_device *sdev)
717} 717}
718 718
719/** 719/**
720 * ata_scsi_timed_out - SCSI layer time out callback
721 * @cmd: timed out SCSI command
722 *
723 * Handles SCSI layer timeout. We race with normal completion of
724 * the qc for @cmd. If the qc is already gone, we lose and let
725 * the scsi command finish (EH_HANDLED). Otherwise, the qc has
726 * timed out and EH should be invoked. Prevent ata_qc_complete()
727 * from finishing it by setting EH_SCHEDULED and return
728 * EH_NOT_HANDLED.
729 *
730 * LOCKING:
731 * Called from timer context
732 *
733 * RETURNS:
734 * EH_HANDLED or EH_NOT_HANDLED
735 */
736enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
737{
738 struct Scsi_Host *host = cmd->device->host;
739 struct ata_port *ap = (struct ata_port *) &host->hostdata[0];
740 unsigned long flags;
741 struct ata_queued_cmd *qc;
742 enum scsi_eh_timer_return ret = EH_HANDLED;
743
744 DPRINTK("ENTER\n");
745
746 spin_lock_irqsave(&ap->host_set->lock, flags);
747 qc = ata_qc_from_tag(ap, ap->active_tag);
748 if (qc) {
749 WARN_ON(qc->scsicmd != cmd);
750 qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
751 qc->err_mask |= AC_ERR_TIMEOUT;
752 ret = EH_NOT_HANDLED;
753 }
754 spin_unlock_irqrestore(&ap->host_set->lock, flags);
755
756 DPRINTK("EXIT, ret=%d\n", ret);
757 return ret;
758}
759
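The new hook gets wired into each low-level driver's scsi_host_template; a minimal sketch, with fields mirroring the pdc_adma hunk later in this diff plus the usual libata boilerplate (names are illustrative):

static struct scsi_host_template example_sht = {
	.module			= THIS_MODULE,
	.name			= "example",
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.eh_timed_out		= ata_scsi_timed_out,	/* new in this patch */
	.eh_strategy_handler	= ata_scsi_error,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ENABLE_CLUSTERING,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
};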
760/**
720 * ata_scsi_error - SCSI layer error handler callback 761 * ata_scsi_error - SCSI layer error handler callback
721 * @host: SCSI host on which error occurred 762 * @host: SCSI host on which error occurred
722 * 763 *
@@ -732,23 +773,84 @@ int ata_scsi_slave_config(struct scsi_device *sdev)
732int ata_scsi_error(struct Scsi_Host *host) 773int ata_scsi_error(struct Scsi_Host *host)
733{ 774{
734 struct ata_port *ap; 775 struct ata_port *ap;
776 unsigned long flags;
735 777
736 DPRINTK("ENTER\n"); 778 DPRINTK("ENTER\n");
737 779
738 ap = (struct ata_port *) &host->hostdata[0]; 780 ap = (struct ata_port *) &host->hostdata[0];
781
782 spin_lock_irqsave(&ap->host_set->lock, flags);
783 WARN_ON(ap->flags & ATA_FLAG_IN_EH);
784 ap->flags |= ATA_FLAG_IN_EH;
785 WARN_ON(ata_qc_from_tag(ap, ap->active_tag) == NULL);
786 spin_unlock_irqrestore(&ap->host_set->lock, flags);
787
788 ata_port_flush_task(ap);
789
739 ap->ops->eng_timeout(ap); 790 ap->ops->eng_timeout(ap);
740 791
741 /* TODO: this is per-command; when queueing is supported 792 WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q));
742 * this code will either change or move to a more 793
743 * appropriate place 794 scsi_eh_flush_done_q(&ap->eh_done_q);
744 */ 795
745 host->host_failed--; 796 spin_lock_irqsave(&ap->host_set->lock, flags);
746 INIT_LIST_HEAD(&host->eh_cmd_q); 797 ap->flags &= ~ATA_FLAG_IN_EH;
798 spin_unlock_irqrestore(&ap->host_set->lock, flags);
747 799
748 DPRINTK("EXIT\n"); 800 DPRINTK("EXIT\n");
749 return 0; 801 return 0;
750} 802}
751 803
804static void ata_eh_scsidone(struct scsi_cmnd *scmd)
805{
806 /* nada */
807}
808
809static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
810{
811 struct ata_port *ap = qc->ap;
812 struct scsi_cmnd *scmd = qc->scsicmd;
813 unsigned long flags;
814
815 spin_lock_irqsave(&ap->host_set->lock, flags);
816 qc->scsidone = ata_eh_scsidone;
817 __ata_qc_complete(qc);
818 WARN_ON(ata_tag_valid(qc->tag));
819 spin_unlock_irqrestore(&ap->host_set->lock, flags);
820
821 scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
822}
823
824/**
825 * ata_eh_qc_complete - Complete an active ATA command from EH
826 * @qc: Command to complete
827 *
828 * Indicate to the mid and upper layers that an ATA command has
829 * completed. To be used from EH.
830 */
831void ata_eh_qc_complete(struct ata_queued_cmd *qc)
832{
833 struct scsi_cmnd *scmd = qc->scsicmd;
834 scmd->retries = scmd->allowed;
835 __ata_eh_qc_complete(qc);
836}
837
838/**
839 * ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
840 * @qc: Command to retry
841 *
842 * Indicate to the mid and upper layers that an ATA command
843 * should be retried. To be used from EH.
844 *
845 * The SCSI midlayer limits the number of retries to scmd->allowed.
846 * This function might need to adjust scmd->retries for commands
847 * which get retried due to unrelated NCQ failures.
848 */
849void ata_eh_qc_retry(struct ata_queued_cmd *qc)
850{
851 __ata_eh_qc_complete(qc);
852}
853
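Typical use from a driver's ->eng_timeout() handler, sketched in isolation; the sata_mv hunk later in this diff does exactly this after its reset sequence:

static void example_eng_timeout(struct ata_port *ap)
{
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);

	/* ... reset the controller/port here ... */

	qc->err_mask |= AC_ERR_TIMEOUT;
	ata_eh_qc_complete(qc);		/* finish cmd via eh_done_q */
}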
752/** 854/**
753 * ata_scsi_start_stop_xlat - Translate SCSI START STOP UNIT command 855 * ata_scsi_start_stop_xlat - Translate SCSI START STOP UNIT command
754 * @qc: Storage for translated ATA taskfile 856 * @qc: Storage for translated ATA taskfile
@@ -985,9 +1087,13 @@ static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc, const u8 *sc
985 if (dev->flags & ATA_DFLAG_LBA) { 1087 if (dev->flags & ATA_DFLAG_LBA) {
986 tf->flags |= ATA_TFLAG_LBA; 1088 tf->flags |= ATA_TFLAG_LBA;
987 1089
988 if (dev->flags & ATA_DFLAG_LBA48) { 1090 if (lba_28_ok(block, n_block)) {
989 if (n_block > (64 * 1024)) 1091 /* use LBA28 */
990 goto invalid_fld; 1092 tf->command = ATA_CMD_VERIFY;
1093 tf->device |= (block >> 24) & 0xf;
1094 } else if (lba_48_ok(block, n_block)) {
1095 if (!(dev->flags & ATA_DFLAG_LBA48))
1096 goto out_of_range;
991 1097
992 /* use LBA48 */ 1098 /* use LBA48 */
993 tf->flags |= ATA_TFLAG_LBA48; 1099 tf->flags |= ATA_TFLAG_LBA48;
@@ -998,15 +1104,9 @@ static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc, const u8 *sc
998 tf->hob_lbah = (block >> 40) & 0xff; 1104 tf->hob_lbah = (block >> 40) & 0xff;
999 tf->hob_lbam = (block >> 32) & 0xff; 1105 tf->hob_lbam = (block >> 32) & 0xff;
1000 tf->hob_lbal = (block >> 24) & 0xff; 1106 tf->hob_lbal = (block >> 24) & 0xff;
1001 } else { 1107 } else
1002 if (n_block > 256) 1108 /* request too large even for LBA48 */
1003 goto invalid_fld; 1109 goto out_of_range;
1004
1005 /* use LBA28 */
1006 tf->command = ATA_CMD_VERIFY;
1007
1008 tf->device |= (block >> 24) & 0xf;
1009 }
1010 1110
1011 tf->nsect = n_block & 0xff; 1111 tf->nsect = n_block & 0xff;
1012 1112
@@ -1019,8 +1119,8 @@ static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc, const u8 *sc
1019 /* CHS */ 1119 /* CHS */
1020 u32 sect, head, cyl, track; 1120 u32 sect, head, cyl, track;
1021 1121
1022 if (n_block > 256) 1122 if (!lba_28_ok(block, n_block))
1023 goto invalid_fld; 1123 goto out_of_range;
1024 1124
1025 /* Convert LBA to CHS */ 1125 /* Convert LBA to CHS */
1026 track = (u32)block / dev->sectors; 1126 track = (u32)block / dev->sectors;
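The lba_28_ok()/lba_48_ok() predicates replace the open-coded range checks deleted above. A sketch of their assumed semantics, reconstructed from those checks; the real definitions live in <linux/ata.h> and may differ in detail (e.g. guarding the end of the range rather than just its start):

/* needs <linux/types.h>; illustrative reconstruction only */
static inline int example_lba_28_ok(u64 block, u32 n_block)
{
	/* 28-bit LBA, sector count 1..256 */
	return !(block >> 28) && n_block >= 1 && n_block <= 256;
}

static inline int example_lba_48_ok(u64 block, u32 n_block)
{
	/* 48-bit LBA, sector count 1..65536 */
	return !(block >> 48) && n_block >= 1 && n_block <= 65536;
}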
@@ -1139,9 +1239,11 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, const u8 *scsicm
1139 if (dev->flags & ATA_DFLAG_LBA) { 1239 if (dev->flags & ATA_DFLAG_LBA) {
1140 tf->flags |= ATA_TFLAG_LBA; 1240 tf->flags |= ATA_TFLAG_LBA;
1141 1241
1142 if (dev->flags & ATA_DFLAG_LBA48) { 1242 if (lba_28_ok(block, n_block)) {
1143 /* The request -may- be too large for LBA48. */ 1243 /* use LBA28 */
1144 if ((block >> 48) || (n_block > 65536)) 1244 tf->device |= (block >> 24) & 0xf;
1245 } else if (lba_48_ok(block, n_block)) {
1246 if (!(dev->flags & ATA_DFLAG_LBA48))
1145 goto out_of_range; 1247 goto out_of_range;
1146 1248
1147 /* use LBA48 */ 1249 /* use LBA48 */
@@ -1152,15 +1254,9 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, const u8 *scsicm
1152 tf->hob_lbah = (block >> 40) & 0xff; 1254 tf->hob_lbah = (block >> 40) & 0xff;
1153 tf->hob_lbam = (block >> 32) & 0xff; 1255 tf->hob_lbam = (block >> 32) & 0xff;
1154 tf->hob_lbal = (block >> 24) & 0xff; 1256 tf->hob_lbal = (block >> 24) & 0xff;
1155 } else { 1257 } else
1156 /* use LBA28 */ 1258 /* request too large even for LBA48 */
1157 1259 goto out_of_range;
1158 /* The request -may- be too large for LBA28. */
1159 if ((block >> 28) || (n_block > 256))
1160 goto out_of_range;
1161
1162 tf->device |= (block >> 24) & 0xf;
1163 }
1164 1260
1165 if (unlikely(ata_rwcmd_protocol(qc) < 0)) 1261 if (unlikely(ata_rwcmd_protocol(qc) < 0))
1166 goto invalid_fld; 1262 goto invalid_fld;
@@ -1178,7 +1274,7 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, const u8 *scsicm
1178 u32 sect, head, cyl, track; 1274 u32 sect, head, cyl, track;
1179 1275
1180 /* The request -may- be too large for CHS addressing. */ 1276 /* The request -may- be too large for CHS addressing. */
1181 if ((block >> 28) || (n_block > 256)) 1277 if (!lba_28_ok(block, n_block))
1182 goto out_of_range; 1278 goto out_of_range;
1183 1279
1184 if (unlikely(ata_rwcmd_protocol(qc) < 0)) 1280 if (unlikely(ata_rwcmd_protocol(qc) < 0))
@@ -1225,7 +1321,7 @@ nothing_to_do:
1225 return 1; 1321 return 1;
1226} 1322}
1227 1323
1228static int ata_scsi_qc_complete(struct ata_queued_cmd *qc) 1324static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
1229{ 1325{
1230 struct scsi_cmnd *cmd = qc->scsicmd; 1326 struct scsi_cmnd *cmd = qc->scsicmd;
1231 u8 *cdb = cmd->cmnd; 1327 u8 *cdb = cmd->cmnd;
@@ -1262,7 +1358,7 @@ static int ata_scsi_qc_complete(struct ata_queued_cmd *qc)
1262 1358
1263 qc->scsidone(cmd); 1359 qc->scsidone(cmd);
1264 1360
1265 return 0; 1361 ata_qc_free(qc);
1266} 1362}
1267 1363
1268/** 1364/**
@@ -1328,8 +1424,9 @@ static void ata_scsi_translate(struct ata_port *ap, struct ata_device *dev,
1328 goto early_finish; 1424 goto early_finish;
1329 1425
1330 /* select device, send command to hardware */ 1426 /* select device, send command to hardware */
1331 if (ata_qc_issue(qc)) 1427 qc->err_mask = ata_qc_issue(qc);
1332 goto err_did; 1428 if (qc->err_mask)
1429 ata_qc_complete(qc);
1333 1430
1334 VPRINTK("EXIT\n"); 1431 VPRINTK("EXIT\n");
1335 return; 1432 return;
@@ -1472,8 +1569,8 @@ unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf,
1472 1569
1473 if (buflen > 35) { 1570 if (buflen > 35) {
1474 memcpy(&rbuf[8], "ATA ", 8); 1571 memcpy(&rbuf[8], "ATA ", 8);
1475 ata_dev_id_string(args->id, &rbuf[16], ATA_ID_PROD_OFS, 16); 1572 ata_id_string(args->id, &rbuf[16], ATA_ID_PROD_OFS, 16);
1476 ata_dev_id_string(args->id, &rbuf[32], ATA_ID_FW_REV_OFS, 4); 1573 ata_id_string(args->id, &rbuf[32], ATA_ID_FW_REV_OFS, 4);
1477 if (rbuf[32] == 0 || rbuf[32] == ' ') 1574 if (rbuf[32] == 0 || rbuf[32] == ' ')
1478 memcpy(&rbuf[32], "n/a ", 4); 1575 memcpy(&rbuf[32], "n/a ", 4);
1479 } 1576 }
@@ -1547,8 +1644,8 @@ unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf,
1547 memcpy(rbuf, hdr, sizeof(hdr)); 1644 memcpy(rbuf, hdr, sizeof(hdr));
1548 1645
1549 if (buflen > (ATA_SERNO_LEN + 4 - 1)) 1646 if (buflen > (ATA_SERNO_LEN + 4 - 1))
1550 ata_dev_id_string(args->id, (unsigned char *) &rbuf[4], 1647 ata_id_string(args->id, (unsigned char *) &rbuf[4],
1551 ATA_ID_SERNO_OFS, ATA_SERNO_LEN); 1648 ATA_ID_SERNO_OFS, ATA_SERNO_LEN);
1552 1649
1553 return 0; 1650 return 0;
1554} 1651}
@@ -1713,15 +1810,12 @@ static int ata_dev_supports_fua(u16 *id)
1713 if (!ata_id_has_fua(id)) 1810 if (!ata_id_has_fua(id))
1714 return 0; 1811 return 0;
1715 1812
1716 model[40] = '\0'; 1813 ata_id_c_string(id, model, ATA_ID_PROD_OFS, sizeof(model));
1717 fw[8] = '\0'; 1814 ata_id_c_string(id, fw, ATA_ID_FW_REV_OFS, sizeof(fw));
1718
1719 ata_dev_id_string(id, model, ATA_ID_PROD_OFS, sizeof(model) - 1);
1720 ata_dev_id_string(id, fw, ATA_ID_FW_REV_OFS, sizeof(fw) - 1);
1721 1815
1722 if (strncmp(model, "Maxtor", 6)) 1816 if (strcmp(model, "Maxtor"))
1723 return 1; 1817 return 1;
1724 if (strncmp(fw, "BANC1G10", 8)) 1818 if (strcmp(fw, "BANC1G10"))
1725 return 1; 1819 return 1;
1726 1820
1727 return 0; /* blacklisted */ 1821 return 0; /* blacklisted */
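The strncmp()-to-strcmp() switch works because ata_id_c_string() is assumed to trim trailing spaces and NUL-terminate, unlike raw ata_id_string(). The blacklist test in isolation (the example_* name is illustrative):

static int example_is_fua_blacklisted(const u16 *id)
{
	unsigned char model[41], fw[9];

	ata_id_c_string(id, model, ATA_ID_PROD_OFS, sizeof(model));
	ata_id_c_string(id, fw, ATA_ID_FW_REV_OFS, sizeof(fw));

	/* only this particular Maxtor firmware is blacklisted */
	return !strcmp(model, "Maxtor") && !strcmp(fw, "BANC1G10");
}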
@@ -2015,7 +2109,7 @@ void ata_scsi_badcmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *), u8
2015 done(cmd); 2109 done(cmd);
2016} 2110}
2017 2111
2018static int atapi_sense_complete(struct ata_queued_cmd *qc) 2112static void atapi_sense_complete(struct ata_queued_cmd *qc)
2019{ 2113{
2020 if (qc->err_mask && ((qc->err_mask & AC_ERR_DEV) == 0)) 2114 if (qc->err_mask && ((qc->err_mask & AC_ERR_DEV) == 0))
2021 /* FIXME: not quite right; we don't want the 2115 /* FIXME: not quite right; we don't want the
@@ -2026,7 +2120,7 @@ static int atapi_sense_complete(struct ata_queued_cmd *qc)
2026 ata_gen_ata_desc_sense(qc); 2120 ata_gen_ata_desc_sense(qc);
2027 2121
2028 qc->scsidone(qc->scsicmd); 2122 qc->scsidone(qc->scsicmd);
2029 return 0; 2123 ata_qc_free(qc);
2030} 2124}
2031 2125
2032/* is it pointless to prefer PIO for "safety reasons"? */ 2126/* is it pointless to prefer PIO for "safety reasons"? */
@@ -2056,7 +2150,7 @@ static void atapi_request_sense(struct ata_queued_cmd *qc)
2056 ata_sg_init_one(qc, cmd->sense_buffer, sizeof(cmd->sense_buffer)); 2150 ata_sg_init_one(qc, cmd->sense_buffer, sizeof(cmd->sense_buffer));
2057 qc->dma_dir = DMA_FROM_DEVICE; 2151 qc->dma_dir = DMA_FROM_DEVICE;
2058 2152
2059 memset(&qc->cdb, 0, ap->cdb_len); 2153 memset(&qc->cdb, 0, qc->dev->cdb_len);
2060 qc->cdb[0] = REQUEST_SENSE; 2154 qc->cdb[0] = REQUEST_SENSE;
2061 qc->cdb[4] = SCSI_SENSE_BUFFERSIZE; 2155 qc->cdb[4] = SCSI_SENSE_BUFFERSIZE;
2062 2156
@@ -2075,15 +2169,14 @@ static void atapi_request_sense(struct ata_queued_cmd *qc)
2075 2169
2076 qc->complete_fn = atapi_sense_complete; 2170 qc->complete_fn = atapi_sense_complete;
2077 2171
2078 if (ata_qc_issue(qc)) { 2172 qc->err_mask = ata_qc_issue(qc);
2079 qc->err_mask |= AC_ERR_OTHER; 2173 if (qc->err_mask)
2080 ata_qc_complete(qc); 2174 ata_qc_complete(qc);
2081 }
2082 2175
2083 DPRINTK("EXIT\n"); 2176 DPRINTK("EXIT\n");
2084} 2177}
2085 2178
2086static int atapi_qc_complete(struct ata_queued_cmd *qc) 2179static void atapi_qc_complete(struct ata_queued_cmd *qc)
2087{ 2180{
2088 struct scsi_cmnd *cmd = qc->scsicmd; 2181 struct scsi_cmnd *cmd = qc->scsicmd;
2089 unsigned int err_mask = qc->err_mask; 2182 unsigned int err_mask = qc->err_mask;
@@ -2093,7 +2186,7 @@ static int atapi_qc_complete(struct ata_queued_cmd *qc)
2093 if (unlikely(err_mask & AC_ERR_DEV)) { 2186 if (unlikely(err_mask & AC_ERR_DEV)) {
2094 cmd->result = SAM_STAT_CHECK_CONDITION; 2187 cmd->result = SAM_STAT_CHECK_CONDITION;
2095 atapi_request_sense(qc); 2188 atapi_request_sense(qc);
2096 return 1; 2189 return;
2097 } 2190 }
2098 2191
2099 else if (unlikely(err_mask)) 2192 else if (unlikely(err_mask))
@@ -2133,7 +2226,7 @@ static int atapi_qc_complete(struct ata_queued_cmd *qc)
2133 } 2226 }
2134 2227
2135 qc->scsidone(cmd); 2228 qc->scsidone(cmd);
2136 return 0; 2229 ata_qc_free(qc);
2137} 2230}
2138/** 2231/**
2139 * atapi_xlat - Initialize PACKET taskfile 2232 * atapi_xlat - Initialize PACKET taskfile
@@ -2159,7 +2252,7 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc, const u8 *scsicmd)
2159 if (ata_check_atapi_dma(qc)) 2252 if (ata_check_atapi_dma(qc))
2160 using_pio = 1; 2253 using_pio = 1;
2161 2254
2162 memcpy(&qc->cdb, scsicmd, qc->ap->cdb_len); 2255 memcpy(&qc->cdb, scsicmd, dev->cdb_len);
2163 2256
2164 qc->complete_fn = atapi_qc_complete; 2257 qc->complete_fn = atapi_qc_complete;
2165 2258
@@ -2519,7 +2612,8 @@ out_unlock:
2519 2612
2520/** 2613/**
2521 * ata_scsi_simulate - simulate SCSI command on ATA device 2614 * ata_scsi_simulate - simulate SCSI command on ATA device
2522 * @id: current IDENTIFY data for target device. 2615 * @ap: port the device is connected to
2616 * @dev: the target device
2523 * @cmd: SCSI command being sent to device. 2617 * @cmd: SCSI command being sent to device.
2524 * @done: SCSI command completion function. 2618 * @done: SCSI command completion function.
2525 * 2619 *
diff --git a/drivers/scsi/libata.h b/drivers/scsi/libata.h
index fddaf479a544..f4c48c91b63d 100644
--- a/drivers/scsi/libata.h
+++ b/drivers/scsi/libata.h
@@ -45,8 +45,9 @@ extern int libata_fua;
45extern struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap, 45extern struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
46 struct ata_device *dev); 46 struct ata_device *dev);
47extern int ata_rwcmd_protocol(struct ata_queued_cmd *qc); 47extern int ata_rwcmd_protocol(struct ata_queued_cmd *qc);
48extern void ata_port_flush_task(struct ata_port *ap);
48extern void ata_qc_free(struct ata_queued_cmd *qc); 49extern void ata_qc_free(struct ata_queued_cmd *qc);
49extern int ata_qc_issue(struct ata_queued_cmd *qc); 50extern unsigned int ata_qc_issue(struct ata_queued_cmd *qc);
50extern int ata_check_atapi_dma(struct ata_queued_cmd *qc); 51extern int ata_check_atapi_dma(struct ata_queued_cmd *qc);
51extern void ata_dev_select(struct ata_port *ap, unsigned int device, 52extern void ata_dev_select(struct ata_port *ap, unsigned int device,
52 unsigned int wait, unsigned int can_sleep); 53 unsigned int wait, unsigned int can_sleep);
diff --git a/drivers/scsi/pcmcia/Kconfig b/drivers/scsi/pcmcia/Kconfig
index df52190f4d94..eac8e179cfff 100644
--- a/drivers/scsi/pcmcia/Kconfig
+++ b/drivers/scsi/pcmcia/Kconfig
@@ -8,6 +8,7 @@ menu "PCMCIA SCSI adapter support"
8config PCMCIA_AHA152X 8config PCMCIA_AHA152X
9 tristate "Adaptec AHA152X PCMCIA support" 9 tristate "Adaptec AHA152X PCMCIA support"
10 depends on m && !64BIT 10 depends on m && !64BIT
11 select SCSI_SPI_ATTRS
11 help 12 help
12 Say Y here if you intend to attach this type of PCMCIA SCSI host 13 Say Y here if you intend to attach this type of PCMCIA SCSI host
13 adapter to your computer. 14 adapter to your computer.
diff --git a/drivers/scsi/pdc_adma.c b/drivers/scsi/pdc_adma.c
index e8df0c9ec1e6..5f33cc932e70 100644
--- a/drivers/scsi/pdc_adma.c
+++ b/drivers/scsi/pdc_adma.c
@@ -131,7 +131,7 @@ static void adma_host_stop(struct ata_host_set *host_set);
131static void adma_port_stop(struct ata_port *ap); 131static void adma_port_stop(struct ata_port *ap);
132static void adma_phy_reset(struct ata_port *ap); 132static void adma_phy_reset(struct ata_port *ap);
133static void adma_qc_prep(struct ata_queued_cmd *qc); 133static void adma_qc_prep(struct ata_queued_cmd *qc);
134static int adma_qc_issue(struct ata_queued_cmd *qc); 134static unsigned int adma_qc_issue(struct ata_queued_cmd *qc);
135static int adma_check_atapi_dma(struct ata_queued_cmd *qc); 135static int adma_check_atapi_dma(struct ata_queued_cmd *qc);
136static void adma_bmdma_stop(struct ata_queued_cmd *qc); 136static void adma_bmdma_stop(struct ata_queued_cmd *qc);
137static u8 adma_bmdma_status(struct ata_port *ap); 137static u8 adma_bmdma_status(struct ata_port *ap);
@@ -143,11 +143,11 @@ static struct scsi_host_template adma_ata_sht = {
143 .name = DRV_NAME, 143 .name = DRV_NAME,
144 .ioctl = ata_scsi_ioctl, 144 .ioctl = ata_scsi_ioctl,
145 .queuecommand = ata_scsi_queuecmd, 145 .queuecommand = ata_scsi_queuecmd,
146 .eh_timed_out = ata_scsi_timed_out,
146 .eh_strategy_handler = ata_scsi_error, 147 .eh_strategy_handler = ata_scsi_error,
147 .can_queue = ATA_DEF_QUEUE, 148 .can_queue = ATA_DEF_QUEUE,
148 .this_id = ATA_SHT_THIS_ID, 149 .this_id = ATA_SHT_THIS_ID,
149 .sg_tablesize = LIBATA_MAX_PRD, 150 .sg_tablesize = LIBATA_MAX_PRD,
150 .max_sectors = ATA_MAX_SECTORS,
151 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 151 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
152 .emulated = ATA_SHT_EMULATED, 152 .emulated = ATA_SHT_EMULATED,
153 .use_clustering = ENABLE_CLUSTERING, 153 .use_clustering = ENABLE_CLUSTERING,
@@ -419,7 +419,7 @@ static inline void adma_packet_start(struct ata_queued_cmd *qc)
419 writew(aPIOMD4 | aGO, chan + ADMA_CONTROL); 419 writew(aPIOMD4 | aGO, chan + ADMA_CONTROL);
420} 420}
421 421
422static int adma_qc_issue(struct ata_queued_cmd *qc) 422static unsigned int adma_qc_issue(struct ata_queued_cmd *qc)
423{ 423{
424 struct adma_port_priv *pp = qc->ap->private_data; 424 struct adma_port_priv *pp = qc->ap->private_data;
425 425
diff --git a/drivers/scsi/sata_mv.c b/drivers/scsi/sata_mv.c
index 2770005324b4..e561281967dd 100644
--- a/drivers/scsi/sata_mv.c
+++ b/drivers/scsi/sata_mv.c
@@ -37,7 +37,7 @@
37#include <asm/io.h> 37#include <asm/io.h>
38 38
39#define DRV_NAME "sata_mv" 39#define DRV_NAME "sata_mv"
40#define DRV_VERSION "0.5" 40#define DRV_VERSION "0.6"
41 41
42enum { 42enum {
43 /* BAR's are enumerated in terms of pci_resource_start() terms */ 43 /* BAR's are enumerated in terms of pci_resource_start() terms */
@@ -228,7 +228,9 @@ enum {
228 MV_HP_ERRATA_50XXB2 = (1 << 2), 228 MV_HP_ERRATA_50XXB2 = (1 << 2),
229 MV_HP_ERRATA_60X1B2 = (1 << 3), 229 MV_HP_ERRATA_60X1B2 = (1 << 3),
230 MV_HP_ERRATA_60X1C0 = (1 << 4), 230 MV_HP_ERRATA_60X1C0 = (1 << 4),
231 MV_HP_50XX = (1 << 5), 231 MV_HP_ERRATA_XX42A0 = (1 << 5),
232 MV_HP_50XX = (1 << 6),
233 MV_HP_GEN_IIE = (1 << 7),
232 234
233 /* Port private flags (pp_flags) */ 235 /* Port private flags (pp_flags) */
234 MV_PP_FLAG_EDMA_EN = (1 << 0), 236 MV_PP_FLAG_EDMA_EN = (1 << 0),
@@ -237,6 +239,9 @@ enum {
237 239
238#define IS_50XX(hpriv) ((hpriv)->hp_flags & MV_HP_50XX) 240#define IS_50XX(hpriv) ((hpriv)->hp_flags & MV_HP_50XX)
239#define IS_60XX(hpriv) (((hpriv)->hp_flags & MV_HP_50XX) == 0) 241#define IS_60XX(hpriv) (((hpriv)->hp_flags & MV_HP_50XX) == 0)
242#define IS_GEN_I(hpriv) IS_50XX(hpriv)
243#define IS_GEN_II(hpriv) IS_60XX(hpriv)
244#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
240 245
241enum { 246enum {
242 /* Our DMA boundary is determined by an ePRD being unable to handle 247 /* Our DMA boundary is determined by an ePRD being unable to handle
@@ -255,6 +260,8 @@ enum chip_type {
255 chip_5080, 260 chip_5080,
256 chip_604x, 261 chip_604x,
257 chip_608x, 262 chip_608x,
263 chip_6042,
264 chip_7042,
258}; 265};
259 266
260/* Command ReQuest Block: 32B */ 267/* Command ReQuest Block: 32B */
@@ -265,6 +272,14 @@ struct mv_crqb {
265 u16 ata_cmd[11]; 272 u16 ata_cmd[11];
266}; 273};
267 274
275struct mv_crqb_iie {
276 u32 addr;
277 u32 addr_hi;
278 u32 flags;
279 u32 len;
280 u32 ata_cmd[4];
281};
282
268/* Command ResPonse Block: 8B */ 283/* Command ResPonse Block: 8B */
269struct mv_crpb { 284struct mv_crpb {
270 u16 id; 285 u16 id;
@@ -328,7 +343,8 @@ static void mv_host_stop(struct ata_host_set *host_set);
328static int mv_port_start(struct ata_port *ap); 343static int mv_port_start(struct ata_port *ap);
329static void mv_port_stop(struct ata_port *ap); 344static void mv_port_stop(struct ata_port *ap);
330static void mv_qc_prep(struct ata_queued_cmd *qc); 345static void mv_qc_prep(struct ata_queued_cmd *qc);
331static int mv_qc_issue(struct ata_queued_cmd *qc); 346static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
347static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
332static irqreturn_t mv_interrupt(int irq, void *dev_instance, 348static irqreturn_t mv_interrupt(int irq, void *dev_instance,
333 struct pt_regs *regs); 349 struct pt_regs *regs);
334static void mv_eng_timeout(struct ata_port *ap); 350static void mv_eng_timeout(struct ata_port *ap);
@@ -362,11 +378,11 @@ static struct scsi_host_template mv_sht = {
362 .name = DRV_NAME, 378 .name = DRV_NAME,
363 .ioctl = ata_scsi_ioctl, 379 .ioctl = ata_scsi_ioctl,
364 .queuecommand = ata_scsi_queuecmd, 380 .queuecommand = ata_scsi_queuecmd,
381 .eh_timed_out = ata_scsi_timed_out,
365 .eh_strategy_handler = ata_scsi_error, 382 .eh_strategy_handler = ata_scsi_error,
366 .can_queue = MV_USE_Q_DEPTH, 383 .can_queue = MV_USE_Q_DEPTH,
367 .this_id = ATA_SHT_THIS_ID, 384 .this_id = ATA_SHT_THIS_ID,
368 .sg_tablesize = MV_MAX_SG_CT / 2, 385 .sg_tablesize = MV_MAX_SG_CT / 2,
369 .max_sectors = ATA_MAX_SECTORS,
370 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 386 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
371 .emulated = ATA_SHT_EMULATED, 387 .emulated = ATA_SHT_EMULATED,
372 .use_clustering = ATA_SHT_USE_CLUSTERING, 388 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -430,6 +446,33 @@ static const struct ata_port_operations mv6_ops = {
430 .host_stop = mv_host_stop, 446 .host_stop = mv_host_stop,
431}; 447};
432 448
449static const struct ata_port_operations mv_iie_ops = {
450 .port_disable = ata_port_disable,
451
452 .tf_load = ata_tf_load,
453 .tf_read = ata_tf_read,
454 .check_status = ata_check_status,
455 .exec_command = ata_exec_command,
456 .dev_select = ata_std_dev_select,
457
458 .phy_reset = mv_phy_reset,
459
460 .qc_prep = mv_qc_prep_iie,
461 .qc_issue = mv_qc_issue,
462
463 .eng_timeout = mv_eng_timeout,
464
465 .irq_handler = mv_interrupt,
466 .irq_clear = mv_irq_clear,
467
468 .scr_read = mv_scr_read,
469 .scr_write = mv_scr_write,
470
471 .port_start = mv_port_start,
472 .port_stop = mv_port_stop,
473 .host_stop = mv_host_stop,
474};
475
433static const struct ata_port_info mv_port_info[] = { 476static const struct ata_port_info mv_port_info[] = {
434 { /* chip_504x */ 477 { /* chip_504x */
435 .sht = &mv_sht, 478 .sht = &mv_sht,
@@ -467,6 +510,21 @@ static const struct ata_port_info mv_port_info[] = {
467 .udma_mask = 0x7f, /* udma0-6 */ 510 .udma_mask = 0x7f, /* udma0-6 */
468 .port_ops = &mv6_ops, 511 .port_ops = &mv6_ops,
469 }, 512 },
513 { /* chip_6042 */
514 .sht = &mv_sht,
515 .host_flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
516 .pio_mask = 0x1f, /* pio0-4 */
517 .udma_mask = 0x7f, /* udma0-6 */
518 .port_ops = &mv_iie_ops,
519 },
520 { /* chip_7042 */
521 .sht = &mv_sht,
522 .host_flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
523 MV_FLAG_DUAL_HC),
524 .pio_mask = 0x1f, /* pio0-4 */
525 .udma_mask = 0x7f, /* udma0-6 */
526 .port_ops = &mv_iie_ops,
527 },
470}; 528};
471 529
472static const struct pci_device_id mv_pci_tbl[] = { 530static const struct pci_device_id mv_pci_tbl[] = {
@@ -477,6 +535,7 @@ static const struct pci_device_id mv_pci_tbl[] = {
477 535
478 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6040), 0, 0, chip_604x}, 536 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6040), 0, 0, chip_604x},
479 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6041), 0, 0, chip_604x}, 537 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6041), 0, 0, chip_604x},
538 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6042), 0, 0, chip_6042},
480 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6080), 0, 0, chip_608x}, 539 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6080), 0, 0, chip_608x},
481 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6081), 0, 0, chip_608x}, 540 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6081), 0, 0, chip_608x},
482 541
@@ -572,8 +631,8 @@ static void mv_irq_clear(struct ata_port *ap)
572 * @base: port base address 631 * @base: port base address
573 * @pp: port private data 632 * @pp: port private data
574 * 633 *
575 * Verify the local cache of the eDMA state is accurate with an 634 * Verify the local cache of the eDMA state is accurate with a
576 * assert. 635 * WARN_ON.
577 * 636 *
578 * LOCKING: 637 * LOCKING:
579 * Inherited from caller. 638 * Inherited from caller.
@@ -584,15 +643,15 @@ static void mv_start_dma(void __iomem *base, struct mv_port_priv *pp)
584 writelfl(EDMA_EN, base + EDMA_CMD_OFS); 643 writelfl(EDMA_EN, base + EDMA_CMD_OFS);
585 pp->pp_flags |= MV_PP_FLAG_EDMA_EN; 644 pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
586 } 645 }
587 assert(EDMA_EN & readl(base + EDMA_CMD_OFS)); 646 WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));
588} 647}
589 648
590/** 649/**
591 * mv_stop_dma - Disable eDMA engine 650 * mv_stop_dma - Disable eDMA engine
592 * @ap: ATA channel to manipulate 651 * @ap: ATA channel to manipulate
593 * 652 *
594 * Verify the local cache of the eDMA state is accurate with an 653 * Verify the local cache of the eDMA state is accurate with a
595 * assert. 654 * WARN_ON.
596 * 655 *
597 * LOCKING: 656 * LOCKING:
598 * Inherited from caller. 657 * Inherited from caller.
@@ -610,7 +669,7 @@ static void mv_stop_dma(struct ata_port *ap)
610 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS); 669 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
611 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; 670 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
612 } else { 671 } else {
613 assert(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS))); 672 WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
614 } 673 }
615 674
616 /* now properly wait for the eDMA to stop */ 675 /* now properly wait for the eDMA to stop */
@@ -773,6 +832,33 @@ static inline void mv_priv_free(struct mv_port_priv *pp, struct device *dev)
773 dma_free_coherent(dev, MV_PORT_PRIV_DMA_SZ, pp->crpb, pp->crpb_dma); 832 dma_free_coherent(dev, MV_PORT_PRIV_DMA_SZ, pp->crpb, pp->crpb_dma);
774} 833}
775 834
835static void mv_edma_cfg(struct mv_host_priv *hpriv, void __iomem *port_mmio)
836{
837 u32 cfg = readl(port_mmio + EDMA_CFG_OFS);
838
839 /* set up non-NCQ EDMA configuration */
840 cfg &= ~0x1f; /* clear queue depth */
841 cfg &= ~EDMA_CFG_NCQ; /* clear NCQ mode */
842 cfg &= ~(1 << 9); /* disable equeue */
843
844 if (IS_GEN_I(hpriv))
845 cfg |= (1 << 8); /* enab config burst size mask */
846
847 else if (IS_GEN_II(hpriv))
848 cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
849
850 else if (IS_GEN_IIE(hpriv)) {
851 cfg |= (1 << 23); /* dis RX PM port mask */
852 cfg &= ~(1 << 16); /* dis FIS-based switching (for now) */
853 cfg &= ~(1 << 19); /* dis 128-entry queue (for now?) */
854 cfg |= (1 << 18); /* enab early completion */
855 cfg |= (1 << 17); /* enab host q cache */
856 cfg |= (1 << 22); /* enab cutthrough */
857 }
858
859 writelfl(cfg, port_mmio + EDMA_CFG_OFS);
860}
861
776/** 862/**
777 * mv_port_start - Port specific init/start routine. 863 * mv_port_start - Port specific init/start routine.
778 * @ap: ATA channel to manipulate 864 * @ap: ATA channel to manipulate
@@ -786,6 +872,7 @@ static inline void mv_priv_free(struct mv_port_priv *pp, struct device *dev)
786static int mv_port_start(struct ata_port *ap) 872static int mv_port_start(struct ata_port *ap)
787{ 873{
788 struct device *dev = ap->host_set->dev; 874 struct device *dev = ap->host_set->dev;
875 struct mv_host_priv *hpriv = ap->host_set->private_data;
789 struct mv_port_priv *pp; 876 struct mv_port_priv *pp;
790 void __iomem *port_mmio = mv_ap_base(ap); 877 void __iomem *port_mmio = mv_ap_base(ap);
791 void *mem; 878 void *mem;
@@ -829,17 +916,26 @@ static int mv_port_start(struct ata_port *ap)
829 pp->sg_tbl = mem; 916 pp->sg_tbl = mem;
830 pp->sg_tbl_dma = mem_dma; 917 pp->sg_tbl_dma = mem_dma;
831 918
832 writelfl(EDMA_CFG_Q_DEPTH | EDMA_CFG_RD_BRST_EXT | 919 mv_edma_cfg(hpriv, port_mmio);
833 EDMA_CFG_WR_BUFF_LEN, port_mmio + EDMA_CFG_OFS);
834 920
835 writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS); 921 writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
836 writelfl(pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK, 922 writelfl(pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK,
837 port_mmio + EDMA_REQ_Q_IN_PTR_OFS); 923 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
838 924
839 writelfl(0, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS); 925 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
840 writelfl(0, port_mmio + EDMA_RSP_Q_IN_PTR_OFS); 926 writelfl(pp->crqb_dma & 0xffffffff,
927 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
928 else
929 writelfl(0, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
841 930
842 writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS); 931 writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
932
933 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
934 writelfl(pp->crpb_dma & 0xffffffff,
935 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
936 else
937 writelfl(0, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
938
843 writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK, 939 writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK,
844 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); 940 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
845 941
@@ -960,21 +1056,19 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
960 struct ata_taskfile *tf; 1056 struct ata_taskfile *tf;
961 u16 flags = 0; 1057 u16 flags = 0;
962 1058
963 if (ATA_PROT_DMA != qc->tf.protocol) { 1059 if (ATA_PROT_DMA != qc->tf.protocol)
964 return; 1060 return;
965 }
966 1061
967 /* the req producer index should be the same as we remember it */ 1062 /* the req producer index should be the same as we remember it */
968 assert(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >> 1063 WARN_ON(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >>
969 EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) == 1064 EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
970 pp->req_producer); 1065 pp->req_producer);
971 1066
972 /* Fill in command request block 1067 /* Fill in command request block
973 */ 1068 */
974 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) { 1069 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
975 flags |= CRQB_FLAG_READ; 1070 flags |= CRQB_FLAG_READ;
976 } 1071 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
977 assert(MV_MAX_Q_DEPTH > qc->tag);
978 flags |= qc->tag << CRQB_TAG_SHIFT; 1072 flags |= qc->tag << CRQB_TAG_SHIFT;
979 1073
980 pp->crqb[pp->req_producer].sg_addr = 1074 pp->crqb[pp->req_producer].sg_addr =
@@ -1029,9 +1123,76 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
1029 mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0); 1123 mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
1030 mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */ 1124 mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */
1031 1125
1032 if (!(qc->flags & ATA_QCFLAG_DMAMAP)) { 1126 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1127 return;
1128 mv_fill_sg(qc);
1129}
1130
1131/**
1132 * mv_qc_prep_iie - Host specific command preparation.
1133 * @qc: queued command to prepare
1134 *
1135 * This routine simply redirects to the general-purpose routine
1136 * if the command is not DMA. Otherwise, it handles prep of the CRQB
1137 * (command request block), does some sanity checking, and calls
1138 * the SG load routine.
1139 *
1140 * LOCKING:
1141 * Inherited from caller.
1142 */
1143static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
1144{
1145 struct ata_port *ap = qc->ap;
1146 struct mv_port_priv *pp = ap->private_data;
1147 struct mv_crqb_iie *crqb;
1148 struct ata_taskfile *tf;
1149 u32 flags = 0;
1150
1151 if (ATA_PROT_DMA != qc->tf.protocol)
1152 return;
1153
1154 /* the req producer index should be the same as we remember it */
1155 WARN_ON(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >>
1156 EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
1157 pp->req_producer);
1158
1159 /* Fill in Gen IIE command request block
1160 */
1161 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1162 flags |= CRQB_FLAG_READ;
1163
1164 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1165 flags |= qc->tag << CRQB_TAG_SHIFT;
1166
1167 crqb = (struct mv_crqb_iie *) &pp->crqb[pp->req_producer];
1168 crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
1169 crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
1170 crqb->flags = cpu_to_le32(flags);
1171
1172 tf = &qc->tf;
1173 crqb->ata_cmd[0] = cpu_to_le32(
1174 (tf->command << 16) |
1175 (tf->feature << 24)
1176 );
1177 crqb->ata_cmd[1] = cpu_to_le32(
1178 (tf->lbal << 0) |
1179 (tf->lbam << 8) |
1180 (tf->lbah << 16) |
1181 (tf->device << 24)
1182 );
1183 crqb->ata_cmd[2] = cpu_to_le32(
1184 (tf->hob_lbal << 0) |
1185 (tf->hob_lbam << 8) |
1186 (tf->hob_lbah << 16) |
1187 (tf->hob_feature << 24)
1188 );
1189 crqb->ata_cmd[3] = cpu_to_le32(
1190 (tf->nsect << 0) |
1191 (tf->hob_nsect << 8)
1192 );
1193
1194 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1033 return; 1195 return;
1034 }
1035 mv_fill_sg(qc); 1196 mv_fill_sg(qc);
1036} 1197}
1037 1198
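For reference, the Gen-IIE CRQB command-word packing performed above (bits not listed are zero):

/*
 * ata_cmd[0]: command << 16 | feature << 24
 * ata_cmd[1]: lbal | lbam << 8 | lbah << 16 | device << 24
 * ata_cmd[2]: hob_lbal | hob_lbam << 8 | hob_lbah << 16 | hob_feature << 24
 * ata_cmd[3]: nsect | hob_nsect << 8
 */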
@@ -1047,7 +1208,7 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
1047 * LOCKING: 1208 * LOCKING:
1048 * Inherited from caller. 1209 * Inherited from caller.
1049 */ 1210 */
1050static int mv_qc_issue(struct ata_queued_cmd *qc) 1211static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
1051{ 1212{
1052 void __iomem *port_mmio = mv_ap_base(qc->ap); 1213 void __iomem *port_mmio = mv_ap_base(qc->ap);
1053 struct mv_port_priv *pp = qc->ap->private_data; 1214 struct mv_port_priv *pp = qc->ap->private_data;
@@ -1065,12 +1226,12 @@ static int mv_qc_issue(struct ata_queued_cmd *qc)
1065 in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS); 1226 in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
1066 1227
1067 /* the req producer index should be the same as we remember it */ 1228 /* the req producer index should be the same as we remember it */
1068 assert(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) == 1229 WARN_ON(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
1069 pp->req_producer); 1230 pp->req_producer);
1070 /* until we do queuing, the queue should be empty at this point */ 1231 /* until we do queuing, the queue should be empty at this point */
1071 assert(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) == 1232 WARN_ON(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
1072 ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS) >> 1233 ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS) >>
1073 EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK)); 1234 EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
1074 1235
1075 mv_inc_q_index(&pp->req_producer); /* now incr producer index */ 1236 mv_inc_q_index(&pp->req_producer); /* now incr producer index */
1076 1237
@@ -1090,7 +1251,7 @@ static int mv_qc_issue(struct ata_queued_cmd *qc)
1090 * 1251 *
1091 * This routine is for use when the port is in DMA mode, when it 1252 * This routine is for use when the port is in DMA mode, when it
1092 * will be using the CRPB (command response block) method of 1253 * will be using the CRPB (command response block) method of
1093 * returning command completion information. We assert indices 1254 * returning command completion information. We check indices
1094 * are good, grab status, and bump the response consumer index to 1255 * are good, grab status, and bump the response consumer index to
1095 * prove that we're up to date. 1256 * prove that we're up to date.
1096 * 1257 *
@@ -1106,16 +1267,16 @@ static u8 mv_get_crpb_status(struct ata_port *ap)
1106 out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); 1267 out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
1107 1268
1108 /* the response consumer index should be the same as we remember it */ 1269 /* the response consumer index should be the same as we remember it */
1109 assert(((out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) == 1270 WARN_ON(((out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
1110 pp->rsp_consumer); 1271 pp->rsp_consumer);
1111 1272
1112 /* increment our consumer index... */ 1273 /* increment our consumer index... */
1113 pp->rsp_consumer = mv_inc_q_index(&pp->rsp_consumer); 1274 pp->rsp_consumer = mv_inc_q_index(&pp->rsp_consumer);
1114 1275
1115 /* and, until we do NCQ, there should only be 1 CRPB waiting */ 1276 /* and, until we do NCQ, there should only be 1 CRPB waiting */
1116 assert(((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS) >> 1277 WARN_ON(((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS) >>
1117 EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) == 1278 EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
1118 pp->rsp_consumer); 1279 pp->rsp_consumer);
1119 1280
1120 /* write out our inc'd consumer index so EDMA knows we're caught up */ 1281 /* write out our inc'd consumer index so EDMA knows we're caught up */
1121 out_ptr &= EDMA_RSP_Q_BASE_LO_MASK; 1282 out_ptr &= EDMA_RSP_Q_BASE_LO_MASK;
@@ -1192,7 +1353,6 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
1192 u32 hc_irq_cause; 1353 u32 hc_irq_cause;
1193 int shift, port, port0, hard_port, handled; 1354 int shift, port, port0, hard_port, handled;
1194 unsigned int err_mask; 1355 unsigned int err_mask;
1195 u8 ata_status = 0;
1196 1356
1197 if (hc == 0) { 1357 if (hc == 0) {
1198 port0 = 0; 1358 port0 = 0;
@@ -1210,6 +1370,7 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
1210 hc,relevant,hc_irq_cause); 1370 hc,relevant,hc_irq_cause);
1211 1371
1212 for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) { 1372 for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
1373 u8 ata_status = 0;
1213 ap = host_set->ports[port]; 1374 ap = host_set->ports[port];
1214 hard_port = port & MV_PORT_MASK; /* range 0-3 */ 1375 hard_port = port & MV_PORT_MASK; /* range 0-3 */
1215 handled = 0; /* ensure ata_status is set if handled++ */ 1376 handled = 0; /* ensure ata_status is set if handled++ */
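
Moving the u8 ata_status declaration from function scope into the per-port loop gives each port a freshly zeroed status, so a value latched while servicing one port can no longer leak into the handling of the next. Roughly:

	for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
		u8 ata_status = 0;	/* fresh for every port */

		/* ... set ata_status only when this port completed work ... */
	}
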
@@ -1681,6 +1842,12 @@ static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1681 m2 |= hpriv->signal[port].pre; 1842 m2 |= hpriv->signal[port].pre;
1682 m2 &= ~(1 << 16); 1843 m2 &= ~(1 << 16);
1683 1844
1845 /* according to mvSata 3.6.1, some IIE values are fixed */
1846 if (IS_GEN_IIE(hpriv)) {
1847 m2 &= ~0xC30FF01F;
1848 m2 |= 0x0000900F;
1849 }
1850
1684 writel(m2, port_mmio + PHY_MODE2); 1851 writel(m2, port_mmio + PHY_MODE2);
1685} 1852}
1686 1853
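
The Gen-IIE addition uses the usual clear-then-set idiom on PHY_MODE2: first mask out every field that, per the mvSata 3.6.1 reference cited in the comment, has a mandated value, then OR those values in, so the result is correct regardless of what the register held before:

	m2 &= ~0xC30FF01F;	/* clear every field with a mandated value */
	m2 |= 0x0000900F;	/* ...then force the documented constants */
	writel(m2, port_mmio + PHY_MODE2);
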
@@ -1846,7 +2013,6 @@ static void mv_phy_reset(struct ata_port *ap)
1846static void mv_eng_timeout(struct ata_port *ap) 2013static void mv_eng_timeout(struct ata_port *ap)
1847{ 2014{
1848 struct ata_queued_cmd *qc; 2015 struct ata_queued_cmd *qc;
1849 unsigned long flags;
1850 2016
1851 printk(KERN_ERR "ata%u: Entering mv_eng_timeout\n",ap->id); 2017 printk(KERN_ERR "ata%u: Entering mv_eng_timeout\n",ap->id);
1852 DPRINTK("All regs @ start of eng_timeout\n"); 2018 DPRINTK("All regs @ start of eng_timeout\n");
@@ -1861,22 +2027,8 @@ static void mv_eng_timeout(struct ata_port *ap)
1861 mv_err_intr(ap); 2027 mv_err_intr(ap);
1862 mv_stop_and_reset(ap); 2028 mv_stop_and_reset(ap);
1863 2029
1864 if (!qc) { 2030 qc->err_mask |= AC_ERR_TIMEOUT;
1865 printk(KERN_ERR "ata%u: BUG: timeout without command\n", 2031 ata_eh_qc_complete(qc);
1866 ap->id);
1867 } else {
1868 /* hack alert! We cannot use the supplied completion
1869 * function from inside the ->eh_strategy_handler() thread.
1870 * libata is the only user of ->eh_strategy_handler() in
1871 * any kernel, so the default scsi_done() assumes it is
1872 * not being called from the SCSI EH.
1873 */
1874 spin_lock_irqsave(&ap->host_set->lock, flags);
1875 qc->scsidone = scsi_finish_command;
1876 qc->err_mask |= AC_ERR_OTHER;
1877 ata_qc_complete(qc);
1878 spin_unlock_irqrestore(&ap->host_set->lock, flags);
1879 }
1880} 2032}
1881 2033
1882/** 2034/**
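
With .eh_timed_out wired to ata_scsi_timed_out in the host templates throughout this series, a timed-out command re-enters the driver through the normal SCSI EH path, so the old workaround of swapping qc->scsidone to scsi_finish_command disappears. The timeout handler shrinks to marking the error and handing the qc to the EH completion helper; the same rewrite repeats in sata_promise, sata_sx4 and sata_sil24 below. In outline (foo_ is a placeholder):

	static void foo_eng_timeout(struct ata_port *ap)
	{
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);

		/* ... controller-specific cleanup and reset ... */

		qc->err_mask |= AC_ERR_TIMEOUT;
		ata_eh_qc_complete(qc);		/* completes via the SCSI EH */
	}
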
@@ -1995,6 +2147,27 @@ static int mv_chip_id(struct pci_dev *pdev, struct mv_host_priv *hpriv,
1995 } 2147 }
1996 break; 2148 break;
1997 2149
2150 case chip_7042:
2151 case chip_6042:
2152 hpriv->ops = &mv6xxx_ops;
2153
2154 hp_flags |= MV_HP_GEN_IIE;
2155
2156 switch (rev_id) {
2157 case 0x0:
2158 hp_flags |= MV_HP_ERRATA_XX42A0;
2159 break;
2160 case 0x1:
2161 hp_flags |= MV_HP_ERRATA_60X1C0;
2162 break;
2163 default:
2164 dev_printk(KERN_WARNING, &pdev->dev,
2165 "Applying 60X1C0 workarounds to unknown rev\n");
2166 hp_flags |= MV_HP_ERRATA_60X1C0;
2167 break;
2168 }
2169 break;
2170
1998 default: 2171 default:
1999 printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n", board_idx); 2172 printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n", board_idx);
2000 return 1; 2173 return 1;
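
The new chip_7042/chip_6042 case keys its errata flags off the PCI revision ID, defaulting unknown revisions to the newest known workaround set (with a warning) rather than refusing to drive the chip. rev_id is the byte conventionally read from config space earlier in mv_chip_id(), along these lines:

	u8 rev_id;

	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
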
diff --git a/drivers/scsi/sata_nv.c b/drivers/scsi/sata_nv.c
index bbbb55eeb73a..caffadc2e0ae 100644
--- a/drivers/scsi/sata_nv.c
+++ b/drivers/scsi/sata_nv.c
@@ -229,11 +229,11 @@ static struct scsi_host_template nv_sht = {
229 .name = DRV_NAME, 229 .name = DRV_NAME,
230 .ioctl = ata_scsi_ioctl, 230 .ioctl = ata_scsi_ioctl,
231 .queuecommand = ata_scsi_queuecmd, 231 .queuecommand = ata_scsi_queuecmd,
232 .eh_timed_out = ata_scsi_timed_out,
232 .eh_strategy_handler = ata_scsi_error, 233 .eh_strategy_handler = ata_scsi_error,
233 .can_queue = ATA_DEF_QUEUE, 234 .can_queue = ATA_DEF_QUEUE,
234 .this_id = ATA_SHT_THIS_ID, 235 .this_id = ATA_SHT_THIS_ID,
235 .sg_tablesize = LIBATA_MAX_PRD, 236 .sg_tablesize = LIBATA_MAX_PRD,
236 .max_sectors = ATA_MAX_SECTORS,
237 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 237 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
238 .emulated = ATA_SHT_EMULATED, 238 .emulated = ATA_SHT_EMULATED,
239 .use_clustering = ATA_SHT_USE_CLUSTERING, 239 .use_clustering = ATA_SHT_USE_CLUSTERING,
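
The sata_nv hunk above shows the two-line template change applied to every low-level driver in this series: .eh_timed_out = ata_scsi_timed_out is added so timeouts are routed through libata's handler, and the fixed .max_sectors entry is dropped because the transfer limit is now tracked per device (see dev->max_sectors in the sata_sil hunk below). A sketch of the resulting shape, with a hypothetical foo_ name:

	static struct scsi_host_template foo_sht = {
		.name			= DRV_NAME,
		.ioctl			= ata_scsi_ioctl,
		.queuecommand		= ata_scsi_queuecmd,
		.eh_timed_out		= ata_scsi_timed_out,	/* new */
		.eh_strategy_handler	= ata_scsi_error,
		.can_queue		= ATA_DEF_QUEUE,
		/* .max_sectors dropped: the limit is per-device now */
		/* remaining fields as before */
	};
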
diff --git a/drivers/scsi/sata_promise.c b/drivers/scsi/sata_promise.c
index b0b0a69b3563..84cb3940ad88 100644
--- a/drivers/scsi/sata_promise.c
+++ b/drivers/scsi/sata_promise.c
@@ -46,7 +46,7 @@
46#include "sata_promise.h" 46#include "sata_promise.h"
47 47
48#define DRV_NAME "sata_promise" 48#define DRV_NAME "sata_promise"
49#define DRV_VERSION "1.03" 49#define DRV_VERSION "1.04"
50 50
51 51
52enum { 52enum {
@@ -58,6 +58,7 @@ enum {
58 PDC_GLOBAL_CTL = 0x48, /* Global control/status (per port) */ 58 PDC_GLOBAL_CTL = 0x48, /* Global control/status (per port) */
59 PDC_CTLSTAT = 0x60, /* IDE control and status (per port) */ 59 PDC_CTLSTAT = 0x60, /* IDE control and status (per port) */
60 PDC_SATA_PLUG_CSR = 0x6C, /* SATA Plug control/status reg */ 60 PDC_SATA_PLUG_CSR = 0x6C, /* SATA Plug control/status reg */
61 PDC2_SATA_PLUG_CSR = 0x60, /* SATAII Plug control/status reg */
61 PDC_SLEW_CTL = 0x470, /* slew rate control reg */ 62 PDC_SLEW_CTL = 0x470, /* slew rate control reg */
62 63
63 PDC_ERR_MASK = (1<<19) | (1<<20) | (1<<21) | (1<<22) | 64 PDC_ERR_MASK = (1<<19) | (1<<20) | (1<<21) | (1<<22) |
@@ -67,8 +68,10 @@ enum {
67 board_20319 = 1, /* FastTrak S150 TX4 */ 68 board_20319 = 1, /* FastTrak S150 TX4 */
68 board_20619 = 2, /* FastTrak TX4000 */ 69 board_20619 = 2, /* FastTrak TX4000 */
69 board_20771 = 3, /* FastTrak TX2300 */ 70 board_20771 = 3, /* FastTrak TX2300 */
71 board_2057x = 4, /* SATAII150 Tx2plus */
72 board_40518 = 5, /* SATAII150 Tx4 */
70 73
71 PDC_HAS_PATA = (1 << 1), /* PDC20375 has PATA */ 74 PDC_HAS_PATA = (1 << 1), /* PDC20375/20575 has PATA */
72 75
73 PDC_RESET = (1 << 11), /* HDMA reset */ 76 PDC_RESET = (1 << 11), /* HDMA reset */
74 77
@@ -82,6 +85,10 @@ struct pdc_port_priv {
82 dma_addr_t pkt_dma; 85 dma_addr_t pkt_dma;
83}; 86};
84 87
88struct pdc_host_priv {
89 int hotplug_offset;
90};
91
85static u32 pdc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg); 92static u32 pdc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg);
86static void pdc_sata_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); 93static void pdc_sata_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
87static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); 94static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
@@ -95,7 +102,8 @@ static void pdc_qc_prep(struct ata_queued_cmd *qc);
95static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf); 102static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
96static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf); 103static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
97static void pdc_irq_clear(struct ata_port *ap); 104static void pdc_irq_clear(struct ata_port *ap);
98static int pdc_qc_issue_prot(struct ata_queued_cmd *qc); 105static unsigned int pdc_qc_issue_prot(struct ata_queued_cmd *qc);
106static void pdc_host_stop(struct ata_host_set *host_set);
99 107
100 108
101static struct scsi_host_template pdc_ata_sht = { 109static struct scsi_host_template pdc_ata_sht = {
@@ -103,11 +111,11 @@ static struct scsi_host_template pdc_ata_sht = {
103 .name = DRV_NAME, 111 .name = DRV_NAME,
104 .ioctl = ata_scsi_ioctl, 112 .ioctl = ata_scsi_ioctl,
105 .queuecommand = ata_scsi_queuecmd, 113 .queuecommand = ata_scsi_queuecmd,
114 .eh_timed_out = ata_scsi_timed_out,
106 .eh_strategy_handler = ata_scsi_error, 115 .eh_strategy_handler = ata_scsi_error,
107 .can_queue = ATA_DEF_QUEUE, 116 .can_queue = ATA_DEF_QUEUE,
108 .this_id = ATA_SHT_THIS_ID, 117 .this_id = ATA_SHT_THIS_ID,
109 .sg_tablesize = LIBATA_MAX_PRD, 118 .sg_tablesize = LIBATA_MAX_PRD,
110 .max_sectors = ATA_MAX_SECTORS,
111 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 119 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
112 .emulated = ATA_SHT_EMULATED, 120 .emulated = ATA_SHT_EMULATED,
113 .use_clustering = ATA_SHT_USE_CLUSTERING, 121 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -137,7 +145,7 @@ static const struct ata_port_operations pdc_sata_ops = {
137 .scr_write = pdc_sata_scr_write, 145 .scr_write = pdc_sata_scr_write,
138 .port_start = pdc_port_start, 146 .port_start = pdc_port_start,
139 .port_stop = pdc_port_stop, 147 .port_stop = pdc_port_stop,
140 .host_stop = ata_pci_host_stop, 148 .host_stop = pdc_host_stop,
141}; 149};
142 150
143static const struct ata_port_operations pdc_pata_ops = { 151static const struct ata_port_operations pdc_pata_ops = {
@@ -158,7 +166,7 @@ static const struct ata_port_operations pdc_pata_ops = {
158 166
159 .port_start = pdc_port_start, 167 .port_start = pdc_port_start,
160 .port_stop = pdc_port_stop, 168 .port_stop = pdc_port_stop,
161 .host_stop = ata_pci_host_stop, 169 .host_stop = pdc_host_stop,
162}; 170};
163 171
164static const struct ata_port_info pdc_port_info[] = { 172static const struct ata_port_info pdc_port_info[] = {
@@ -201,6 +209,26 @@ static const struct ata_port_info pdc_port_info[] = {
201 .udma_mask = 0x7f, /* udma0-6 ; FIXME */ 209 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
202 .port_ops = &pdc_sata_ops, 210 .port_ops = &pdc_sata_ops,
203 }, 211 },
212
213 /* board_2057x */
214 {
215 .sht = &pdc_ata_sht,
216 .host_flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA,
217 .pio_mask = 0x1f, /* pio0-4 */
218 .mwdma_mask = 0x07, /* mwdma0-2 */
219 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
220 .port_ops = &pdc_sata_ops,
221 },
222
223 /* board_40518 */
224 {
225 .sht = &pdc_ata_sht,
226 .host_flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA,
227 .pio_mask = 0x1f, /* pio0-4 */
228 .mwdma_mask = 0x07, /* mwdma0-2 */
229 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
230 .port_ops = &pdc_sata_ops,
231 },
204}; 232};
205 233
206static const struct pci_device_id pdc_ata_pci_tbl[] = { 234static const struct pci_device_id pdc_ata_pci_tbl[] = {
@@ -217,9 +245,9 @@ static const struct pci_device_id pdc_ata_pci_tbl[] = {
217 { PCI_VENDOR_ID_PROMISE, 0x3376, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 245 { PCI_VENDOR_ID_PROMISE, 0x3376, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
218 board_2037x }, 246 board_2037x },
219 { PCI_VENDOR_ID_PROMISE, 0x3574, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 247 { PCI_VENDOR_ID_PROMISE, 0x3574, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
220 board_2037x }, 248 board_2057x },
221 { PCI_VENDOR_ID_PROMISE, 0x3d75, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 249 { PCI_VENDOR_ID_PROMISE, 0x3d75, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
222 board_2037x }, 250 board_2057x },
223 { PCI_VENDOR_ID_PROMISE, 0x3d73, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 251 { PCI_VENDOR_ID_PROMISE, 0x3d73, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
224 board_2037x }, 252 board_2037x },
225 253
@@ -227,12 +255,14 @@ static const struct pci_device_id pdc_ata_pci_tbl[] = {
227 board_20319 }, 255 board_20319 },
228 { PCI_VENDOR_ID_PROMISE, 0x3319, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 256 { PCI_VENDOR_ID_PROMISE, 0x3319, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
229 board_20319 }, 257 board_20319 },
258 { PCI_VENDOR_ID_PROMISE, 0x3515, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
259 board_20319 },
230 { PCI_VENDOR_ID_PROMISE, 0x3519, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 260 { PCI_VENDOR_ID_PROMISE, 0x3519, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
231 board_20319 }, 261 board_20319 },
232 { PCI_VENDOR_ID_PROMISE, 0x3d17, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 262 { PCI_VENDOR_ID_PROMISE, 0x3d17, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
233 board_20319 }, 263 board_20319 },
234 { PCI_VENDOR_ID_PROMISE, 0x3d18, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 264 { PCI_VENDOR_ID_PROMISE, 0x3d18, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
235 board_20319 }, 265 board_40518 },
236 266
237 { PCI_VENDOR_ID_PROMISE, 0x6629, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 267 { PCI_VENDOR_ID_PROMISE, 0x6629, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
238 board_20619 }, 268 board_20619 },
@@ -261,12 +291,11 @@ static int pdc_port_start(struct ata_port *ap)
261 if (rc) 291 if (rc)
262 return rc; 292 return rc;
263 293
264 pp = kmalloc(sizeof(*pp), GFP_KERNEL); 294 pp = kzalloc(sizeof(*pp), GFP_KERNEL);
265 if (!pp) { 295 if (!pp) {
266 rc = -ENOMEM; 296 rc = -ENOMEM;
267 goto err_out; 297 goto err_out;
268 } 298 }
269 memset(pp, 0, sizeof(*pp));
270 299
271 pp->pkt = dma_alloc_coherent(dev, 128, &pp->pkt_dma, GFP_KERNEL); 300 pp->pkt = dma_alloc_coherent(dev, 128, &pp->pkt_dma, GFP_KERNEL);
272 if (!pp->pkt) { 301 if (!pp->pkt) {
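
The kmalloc()+memset() pairs collapse into kzalloc(), which allocates and zeroes in one call; the same substitution appears for probe_ent later in this file and in sil_init_one() below. The idiom:

	pp = kzalloc(sizeof(*pp), GFP_KERNEL);	/* allocate + zero in one step */
	if (!pp) {
		rc = -ENOMEM;
		goto err_out;
	}
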
@@ -298,6 +327,16 @@ static void pdc_port_stop(struct ata_port *ap)
298} 327}
299 328
300 329
330static void pdc_host_stop(struct ata_host_set *host_set)
331{
332 struct pdc_host_priv *hp = host_set->private_data;
333
334 ata_pci_host_stop(host_set);
335
336 kfree(hp);
337}
338
339
301static void pdc_reset_port(struct ata_port *ap) 340static void pdc_reset_port(struct ata_port *ap)
302{ 341{
303 void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr + PDC_CTLSTAT; 342 void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr + PDC_CTLSTAT;
@@ -394,19 +433,6 @@ static void pdc_eng_timeout(struct ata_port *ap)
394 spin_lock_irqsave(&host_set->lock, flags); 433 spin_lock_irqsave(&host_set->lock, flags);
395 434
396 qc = ata_qc_from_tag(ap, ap->active_tag); 435 qc = ata_qc_from_tag(ap, ap->active_tag);
397 if (!qc) {
398 printk(KERN_ERR "ata%u: BUG: timeout without command\n",
399 ap->id);
400 goto out;
401 }
402
403 /* hack alert! We cannot use the supplied completion
404 * function from inside the ->eh_strategy_handler() thread.
405 * libata is the only user of ->eh_strategy_handler() in
406 * any kernel, so the default scsi_done() assumes it is
407 * not being called from the SCSI EH.
408 */
409 qc->scsidone = scsi_finish_command;
410 436
411 switch (qc->tf.protocol) { 437 switch (qc->tf.protocol) {
412 case ATA_PROT_DMA: 438 case ATA_PROT_DMA:
@@ -414,7 +440,6 @@ static void pdc_eng_timeout(struct ata_port *ap)
414 printk(KERN_ERR "ata%u: command timeout\n", ap->id); 440 printk(KERN_ERR "ata%u: command timeout\n", ap->id);
415 drv_stat = ata_wait_idle(ap); 441 drv_stat = ata_wait_idle(ap);
416 qc->err_mask |= __ac_err_mask(drv_stat); 442 qc->err_mask |= __ac_err_mask(drv_stat);
417 ata_qc_complete(qc);
418 break; 443 break;
419 444
420 default: 445 default:
@@ -424,12 +449,11 @@ static void pdc_eng_timeout(struct ata_port *ap)
424 ap->id, qc->tf.command, drv_stat); 449 ap->id, qc->tf.command, drv_stat);
425 450
426 qc->err_mask |= ac_err_mask(drv_stat); 451 qc->err_mask |= ac_err_mask(drv_stat);
427 ata_qc_complete(qc);
428 break; 452 break;
429 } 453 }
430 454
431out:
432 spin_unlock_irqrestore(&host_set->lock, flags); 455 spin_unlock_irqrestore(&host_set->lock, flags);
456 ata_eh_qc_complete(qc);
433 DPRINTK("EXIT\n"); 457 DPRINTK("EXIT\n");
434} 458}
435 459
@@ -495,14 +519,15 @@ static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *r
495 VPRINTK("QUICK EXIT 2\n"); 519 VPRINTK("QUICK EXIT 2\n");
496 return IRQ_NONE; 520 return IRQ_NONE;
497 } 521 }
522
523 spin_lock(&host_set->lock);
524
498 mask &= 0xffff; /* only 16 tags possible */ 525 mask &= 0xffff; /* only 16 tags possible */
499 if (!mask) { 526 if (!mask) {
500 VPRINTK("QUICK EXIT 3\n"); 527 VPRINTK("QUICK EXIT 3\n");
501 return IRQ_NONE; 528 goto done_irq;
502 } 529 }
503 530
504 spin_lock(&host_set->lock);
505
506 writel(mask, mmio_base + PDC_INT_SEQMASK); 531 writel(mask, mmio_base + PDC_INT_SEQMASK);
507 532
508 for (i = 0; i < host_set->n_ports; i++) { 533 for (i = 0; i < host_set->n_ports; i++) {
@@ -519,10 +544,10 @@ static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *r
519 } 544 }
520 } 545 }
521 546
522 spin_unlock(&host_set->lock);
523
524 VPRINTK("EXIT\n"); 547 VPRINTK("EXIT\n");
525 548
549done_irq:
550 spin_unlock(&host_set->lock);
526 return IRQ_RETVAL(handled); 551 return IRQ_RETVAL(handled);
527} 552}
528 553
@@ -544,7 +569,7 @@ static inline void pdc_packet_start(struct ata_queued_cmd *qc)
544 readl((void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); /* flush */ 569 readl((void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); /* flush */
545} 570}
546 571
547static int pdc_qc_issue_prot(struct ata_queued_cmd *qc) 572static unsigned int pdc_qc_issue_prot(struct ata_queued_cmd *qc)
548{ 573{
549 switch (qc->tf.protocol) { 574 switch (qc->tf.protocol) {
550 case ATA_PROT_DMA: 575 case ATA_PROT_DMA:
@@ -600,6 +625,8 @@ static void pdc_ata_setup_port(struct ata_ioports *port, unsigned long base)
600static void pdc_host_init(unsigned int chip_id, struct ata_probe_ent *pe) 625static void pdc_host_init(unsigned int chip_id, struct ata_probe_ent *pe)
601{ 626{
602 void __iomem *mmio = pe->mmio_base; 627 void __iomem *mmio = pe->mmio_base;
628 struct pdc_host_priv *hp = pe->private_data;
629 int hotplug_offset = hp->hotplug_offset;
603 u32 tmp; 630 u32 tmp;
604 631
605 /* 632 /*
@@ -614,12 +641,12 @@ static void pdc_host_init(unsigned int chip_id, struct ata_probe_ent *pe)
614 writel(tmp, mmio + PDC_FLASH_CTL); 641 writel(tmp, mmio + PDC_FLASH_CTL);
615 642
616 /* clear plug/unplug flags for all ports */ 643 /* clear plug/unplug flags for all ports */
617 tmp = readl(mmio + PDC_SATA_PLUG_CSR); 644 tmp = readl(mmio + hotplug_offset);
618 writel(tmp | 0xff, mmio + PDC_SATA_PLUG_CSR); 645 writel(tmp | 0xff, mmio + hotplug_offset);
619 646
620 /* mask plug/unplug ints */ 647 /* mask plug/unplug ints */
621 tmp = readl(mmio + PDC_SATA_PLUG_CSR); 648 tmp = readl(mmio + hotplug_offset);
622 writel(tmp | 0xff0000, mmio + PDC_SATA_PLUG_CSR); 649 writel(tmp | 0xff0000, mmio + hotplug_offset);
623 650
624 /* reduce TBG clock to 133 Mhz. */ 651 /* reduce TBG clock to 133 Mhz. */
625 tmp = readl(mmio + PDC_TBG_MODE); 652 tmp = readl(mmio + PDC_TBG_MODE);
@@ -641,6 +668,7 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
641{ 668{
642 static int printed_version; 669 static int printed_version;
643 struct ata_probe_ent *probe_ent = NULL; 670 struct ata_probe_ent *probe_ent = NULL;
671 struct pdc_host_priv *hp;
644 unsigned long base; 672 unsigned long base;
645 void __iomem *mmio_base; 673 void __iomem *mmio_base;
646 unsigned int board_idx = (unsigned int) ent->driver_data; 674 unsigned int board_idx = (unsigned int) ent->driver_data;
@@ -671,13 +699,12 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
671 if (rc) 699 if (rc)
672 goto err_out_regions; 700 goto err_out_regions;
673 701
674 probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL); 702 probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
675 if (probe_ent == NULL) { 703 if (probe_ent == NULL) {
676 rc = -ENOMEM; 704 rc = -ENOMEM;
677 goto err_out_regions; 705 goto err_out_regions;
678 } 706 }
679 707
680 memset(probe_ent, 0, sizeof(*probe_ent));
681 probe_ent->dev = pci_dev_to_dev(pdev); 708 probe_ent->dev = pci_dev_to_dev(pdev);
682 INIT_LIST_HEAD(&probe_ent->node); 709 INIT_LIST_HEAD(&probe_ent->node);
683 710
@@ -688,6 +715,16 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
688 } 715 }
689 base = (unsigned long) mmio_base; 716 base = (unsigned long) mmio_base;
690 717
718 hp = kzalloc(sizeof(*hp), GFP_KERNEL);
719 if (hp == NULL) {
720 rc = -ENOMEM;
721 goto err_out_free_ent;
722 }
723
724 /* Set default hotplug offset */
725 hp->hotplug_offset = PDC_SATA_PLUG_CSR;
726 probe_ent->private_data = hp;
727
691 probe_ent->sht = pdc_port_info[board_idx].sht; 728 probe_ent->sht = pdc_port_info[board_idx].sht;
692 probe_ent->host_flags = pdc_port_info[board_idx].host_flags; 729 probe_ent->host_flags = pdc_port_info[board_idx].host_flags;
693 probe_ent->pio_mask = pdc_port_info[board_idx].pio_mask; 730 probe_ent->pio_mask = pdc_port_info[board_idx].pio_mask;
@@ -707,6 +744,10 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
707 744
708 /* notice 4-port boards */ 745 /* notice 4-port boards */
709 switch (board_idx) { 746 switch (board_idx) {
747 case board_40518:
748 /* Override hotplug offset for SATAII150 */
749 hp->hotplug_offset = PDC2_SATA_PLUG_CSR;
750 /* Fall through */
710 case board_20319: 751 case board_20319:
711 probe_ent->n_ports = 4; 752 probe_ent->n_ports = 4;
712 753
@@ -716,6 +757,10 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
716 probe_ent->port[2].scr_addr = base + 0x600; 757 probe_ent->port[2].scr_addr = base + 0x600;
717 probe_ent->port[3].scr_addr = base + 0x700; 758 probe_ent->port[3].scr_addr = base + 0x700;
718 break; 759 break;
760 case board_2057x:
761 /* Override hotplug offset for SATAII150 */
762 hp->hotplug_offset = PDC2_SATA_PLUG_CSR;
763 /* Fall through */
719 case board_2037x: 764 case board_2037x:
720 probe_ent->n_ports = 2; 765 probe_ent->n_ports = 2;
721 break; 766 break;
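
The SATAII parts moved the plug/unplug CSR from 0x6C to 0x60, so the offset is chosen at probe time, stored in the new pdc_host_priv, and every access in pdc_host_init() goes through it. The case labels exploit fall-through: the SATAII board overrides the offset, then shares the remaining setup with its first-generation sibling. Condensed from the hunks above:

	hp->hotplug_offset = PDC_SATA_PLUG_CSR;	/* default: 1st-generation CSR */

	switch (board_idx) {
	case board_2057x:
		hp->hotplug_offset = PDC2_SATA_PLUG_CSR;	/* SATAII override */
		/* fall through */
	case board_2037x:
		probe_ent->n_ports = 2;
		break;
	}

	/* later, in pdc_host_init(), every access uses the stored offset: */
	tmp = readl(mmio + hp->hotplug_offset);
	writel(tmp | 0xff, mmio + hp->hotplug_offset);	/* clear plug/unplug */
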
@@ -741,8 +786,10 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
741 /* initialize adapter */ 786 /* initialize adapter */
742 pdc_host_init(board_idx, probe_ent); 787 pdc_host_init(board_idx, probe_ent);
743 788
744 /* FIXME: check ata_device_add return value */ 789 /* FIXME: Need any other frees than hp? */
745 ata_device_add(probe_ent); 790 if (!ata_device_add(probe_ent))
791 kfree(hp);
792
746 kfree(probe_ent); 793 kfree(probe_ent);
747 794
748 return 0; 795 return 0;
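
The new private data gets an explicit lifetime: allocated in pdc_ata_init_one(), freed by pdc_host_stop() on normal teardown, and freed immediately when ata_device_add() attaches no ports (the FIXME notes this error path is still being audited for other leaks). Summarized:

	hp = kzalloc(sizeof(*hp), GFP_KERNEL);	/* pdc_ata_init_one(): allocate */
	probe_ent->private_data = hp;

	if (!ata_device_add(probe_ent))		/* no ports attached: failure */
		kfree(hp);			/* pdc_host_stop() will never run */
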
diff --git a/drivers/scsi/sata_qstor.c b/drivers/scsi/sata_qstor.c
index 80480f0fb2b8..9602f43a298e 100644
--- a/drivers/scsi/sata_qstor.c
+++ b/drivers/scsi/sata_qstor.c
@@ -120,7 +120,7 @@ static void qs_host_stop(struct ata_host_set *host_set);
120static void qs_port_stop(struct ata_port *ap); 120static void qs_port_stop(struct ata_port *ap);
121static void qs_phy_reset(struct ata_port *ap); 121static void qs_phy_reset(struct ata_port *ap);
122static void qs_qc_prep(struct ata_queued_cmd *qc); 122static void qs_qc_prep(struct ata_queued_cmd *qc);
123static int qs_qc_issue(struct ata_queued_cmd *qc); 123static unsigned int qs_qc_issue(struct ata_queued_cmd *qc);
124static int qs_check_atapi_dma(struct ata_queued_cmd *qc); 124static int qs_check_atapi_dma(struct ata_queued_cmd *qc);
125static void qs_bmdma_stop(struct ata_queued_cmd *qc); 125static void qs_bmdma_stop(struct ata_queued_cmd *qc);
126static u8 qs_bmdma_status(struct ata_port *ap); 126static u8 qs_bmdma_status(struct ata_port *ap);
@@ -132,11 +132,11 @@ static struct scsi_host_template qs_ata_sht = {
132 .name = DRV_NAME, 132 .name = DRV_NAME,
133 .ioctl = ata_scsi_ioctl, 133 .ioctl = ata_scsi_ioctl,
134 .queuecommand = ata_scsi_queuecmd, 134 .queuecommand = ata_scsi_queuecmd,
135 .eh_timed_out = ata_scsi_timed_out,
135 .eh_strategy_handler = ata_scsi_error, 136 .eh_strategy_handler = ata_scsi_error,
136 .can_queue = ATA_DEF_QUEUE, 137 .can_queue = ATA_DEF_QUEUE,
137 .this_id = ATA_SHT_THIS_ID, 138 .this_id = ATA_SHT_THIS_ID,
138 .sg_tablesize = QS_MAX_PRD, 139 .sg_tablesize = QS_MAX_PRD,
139 .max_sectors = ATA_MAX_SECTORS,
140 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 140 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
141 .emulated = ATA_SHT_EMULATED, 141 .emulated = ATA_SHT_EMULATED,
142 //FIXME .use_clustering = ATA_SHT_USE_CLUSTERING, 142 //FIXME .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -276,8 +276,8 @@ static unsigned int qs_fill_sg(struct ata_queued_cmd *qc)
276 unsigned int nelem; 276 unsigned int nelem;
277 u8 *prd = pp->pkt + QS_CPB_BYTES; 277 u8 *prd = pp->pkt + QS_CPB_BYTES;
278 278
279 assert(qc->__sg != NULL); 279 WARN_ON(qc->__sg == NULL);
280 assert(qc->n_elem > 0 || qc->pad_len > 0); 280 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
281 281
282 nelem = 0; 282 nelem = 0;
283 ata_for_each_sg(sg, qc) { 283 ata_for_each_sg(sg, qc) {
@@ -352,7 +352,7 @@ static inline void qs_packet_start(struct ata_queued_cmd *qc)
352 readl(chan + QS_CCT_CFF); /* flush */ 352 readl(chan + QS_CCT_CFF); /* flush */
353} 353}
354 354
355static int qs_qc_issue(struct ata_queued_cmd *qc) 355static unsigned int qs_qc_issue(struct ata_queued_cmd *qc)
356{ 356{
357 struct qs_port_priv *pp = qc->ap->private_data; 357 struct qs_port_priv *pp = qc->ap->private_data;
358 358
diff --git a/drivers/scsi/sata_sil.c b/drivers/scsi/sata_sil.c
index 9face3c6aa21..4f2a67ed39d8 100644
--- a/drivers/scsi/sata_sil.c
+++ b/drivers/scsi/sata_sil.c
@@ -49,24 +49,30 @@
49#define DRV_VERSION "0.9" 49#define DRV_VERSION "0.9"
50 50
51enum { 51enum {
52 /*
53 * host flags
54 */
52 SIL_FLAG_RERR_ON_DMA_ACT = (1 << 29), 55 SIL_FLAG_RERR_ON_DMA_ACT = (1 << 29),
53 SIL_FLAG_MOD15WRITE = (1 << 30), 56 SIL_FLAG_MOD15WRITE = (1 << 30),
57 SIL_DFL_HOST_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
58 ATA_FLAG_MMIO,
54 59
60 /*
61 * Controller IDs
62 */
55 sil_3112 = 0, 63 sil_3112 = 0,
56 sil_3112_m15w = 1, 64 sil_3512 = 1,
57 sil_3512 = 2, 65 sil_3114 = 2,
58 sil_3114 = 3,
59
60 SIL_FIFO_R0 = 0x40,
61 SIL_FIFO_W0 = 0x41,
62 SIL_FIFO_R1 = 0x44,
63 SIL_FIFO_W1 = 0x45,
64 SIL_FIFO_R2 = 0x240,
65 SIL_FIFO_W2 = 0x241,
66 SIL_FIFO_R3 = 0x244,
67 SIL_FIFO_W3 = 0x245,
68 66
67 /*
68 * Register offsets
69 */
69 SIL_SYSCFG = 0x48, 70 SIL_SYSCFG = 0x48,
71
72 /*
73 * Register bits
74 */
75 /* SYSCFG */
70 SIL_MASK_IDE0_INT = (1 << 22), 76 SIL_MASK_IDE0_INT = (1 << 22),
71 SIL_MASK_IDE1_INT = (1 << 23), 77 SIL_MASK_IDE1_INT = (1 << 23),
72 SIL_MASK_IDE2_INT = (1 << 24), 78 SIL_MASK_IDE2_INT = (1 << 24),
@@ -75,9 +81,12 @@ enum {
75 SIL_MASK_4PORT = SIL_MASK_2PORT | 81 SIL_MASK_4PORT = SIL_MASK_2PORT |
76 SIL_MASK_IDE2_INT | SIL_MASK_IDE3_INT, 82 SIL_MASK_IDE2_INT | SIL_MASK_IDE3_INT,
77 83
78 SIL_IDE2_BMDMA = 0x200, 84 /* BMDMA/BMDMA2 */
79
80 SIL_INTR_STEERING = (1 << 1), 85 SIL_INTR_STEERING = (1 << 1),
86
87 /*
88 * Others
89 */
81 SIL_QUIRK_MOD15WRITE = (1 << 0), 90 SIL_QUIRK_MOD15WRITE = (1 << 0),
82 SIL_QUIRK_UDMA5MAX = (1 << 1), 91 SIL_QUIRK_UDMA5MAX = (1 << 1),
83}; 92};
@@ -90,13 +99,13 @@ static void sil_post_set_mode (struct ata_port *ap);
90 99
91 100
92static const struct pci_device_id sil_pci_tbl[] = { 101static const struct pci_device_id sil_pci_tbl[] = {
93 { 0x1095, 0x3112, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_m15w }, 102 { 0x1095, 0x3112, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 },
94 { 0x1095, 0x0240, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_m15w }, 103 { 0x1095, 0x0240, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 },
95 { 0x1095, 0x3512, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3512 }, 104 { 0x1095, 0x3512, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3512 },
96 { 0x1095, 0x3114, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3114 }, 105 { 0x1095, 0x3114, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3114 },
97 { 0x1002, 0x436e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_m15w }, 106 { 0x1002, 0x436e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 },
98 { 0x1002, 0x4379, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_m15w }, 107 { 0x1002, 0x4379, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 },
99 { 0x1002, 0x437a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_m15w }, 108 { 0x1002, 0x437a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 },
100 { } /* terminate list */ 109 { } /* terminate list */
101}; 110};
102 111
@@ -137,11 +146,11 @@ static struct scsi_host_template sil_sht = {
137 .name = DRV_NAME, 146 .name = DRV_NAME,
138 .ioctl = ata_scsi_ioctl, 147 .ioctl = ata_scsi_ioctl,
139 .queuecommand = ata_scsi_queuecmd, 148 .queuecommand = ata_scsi_queuecmd,
149 .eh_timed_out = ata_scsi_timed_out,
140 .eh_strategy_handler = ata_scsi_error, 150 .eh_strategy_handler = ata_scsi_error,
141 .can_queue = ATA_DEF_QUEUE, 151 .can_queue = ATA_DEF_QUEUE,
142 .this_id = ATA_SHT_THIS_ID, 152 .this_id = ATA_SHT_THIS_ID,
143 .sg_tablesize = LIBATA_MAX_PRD, 153 .sg_tablesize = LIBATA_MAX_PRD,
144 .max_sectors = ATA_MAX_SECTORS,
145 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 154 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
146 .emulated = ATA_SHT_EMULATED, 155 .emulated = ATA_SHT_EMULATED,
147 .use_clustering = ATA_SHT_USE_CLUSTERING, 156 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -159,7 +168,7 @@ static const struct ata_port_operations sil_ops = {
159 .check_status = ata_check_status, 168 .check_status = ata_check_status,
160 .exec_command = ata_exec_command, 169 .exec_command = ata_exec_command,
161 .dev_select = ata_std_dev_select, 170 .dev_select = ata_std_dev_select,
162 .phy_reset = sata_phy_reset, 171 .probe_reset = ata_std_probe_reset,
163 .post_set_mode = sil_post_set_mode, 172 .post_set_mode = sil_post_set_mode,
164 .bmdma_setup = ata_bmdma_setup, 173 .bmdma_setup = ata_bmdma_setup,
165 .bmdma_start = ata_bmdma_start, 174 .bmdma_start = ata_bmdma_start,
@@ -181,19 +190,7 @@ static const struct ata_port_info sil_port_info[] = {
181 /* sil_3112 */ 190 /* sil_3112 */
182 { 191 {
183 .sht = &sil_sht, 192 .sht = &sil_sht,
184 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 193 .host_flags = SIL_DFL_HOST_FLAGS | SIL_FLAG_MOD15WRITE,
185 ATA_FLAG_SRST | ATA_FLAG_MMIO,
186 .pio_mask = 0x1f, /* pio0-4 */
187 .mwdma_mask = 0x07, /* mwdma0-2 */
188 .udma_mask = 0x3f, /* udma0-5 */
189 .port_ops = &sil_ops,
190 },
191 /* sil_3112_15w - keep it sync'd w/ sil_3112 */
192 {
193 .sht = &sil_sht,
194 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
195 ATA_FLAG_SRST | ATA_FLAG_MMIO |
196 SIL_FLAG_MOD15WRITE,
197 .pio_mask = 0x1f, /* pio0-4 */ 194 .pio_mask = 0x1f, /* pio0-4 */
198 .mwdma_mask = 0x07, /* mwdma0-2 */ 195 .mwdma_mask = 0x07, /* mwdma0-2 */
199 .udma_mask = 0x3f, /* udma0-5 */ 196 .udma_mask = 0x3f, /* udma0-5 */
@@ -202,9 +199,7 @@ static const struct ata_port_info sil_port_info[] = {
202 /* sil_3512 */ 199 /* sil_3512 */
203 { 200 {
204 .sht = &sil_sht, 201 .sht = &sil_sht,
205 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 202 .host_flags = SIL_DFL_HOST_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT,
206 ATA_FLAG_SRST | ATA_FLAG_MMIO |
207 SIL_FLAG_RERR_ON_DMA_ACT,
208 .pio_mask = 0x1f, /* pio0-4 */ 203 .pio_mask = 0x1f, /* pio0-4 */
209 .mwdma_mask = 0x07, /* mwdma0-2 */ 204 .mwdma_mask = 0x07, /* mwdma0-2 */
210 .udma_mask = 0x3f, /* udma0-5 */ 205 .udma_mask = 0x3f, /* udma0-5 */
@@ -213,9 +208,7 @@ static const struct ata_port_info sil_port_info[] = {
213 /* sil_3114 */ 208 /* sil_3114 */
214 { 209 {
215 .sht = &sil_sht, 210 .sht = &sil_sht,
216 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 211 .host_flags = SIL_DFL_HOST_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT,
217 ATA_FLAG_SRST | ATA_FLAG_MMIO |
218 SIL_FLAG_RERR_ON_DMA_ACT,
219 .pio_mask = 0x1f, /* pio0-4 */ 212 .pio_mask = 0x1f, /* pio0-4 */
220 .mwdma_mask = 0x07, /* mwdma0-2 */ 213 .mwdma_mask = 0x07, /* mwdma0-2 */
221 .udma_mask = 0x3f, /* udma0-5 */ 214 .udma_mask = 0x3f, /* udma0-5 */
@@ -229,16 +222,17 @@ static const struct {
229 unsigned long tf; /* ATA taskfile register block */ 222 unsigned long tf; /* ATA taskfile register block */
230 unsigned long ctl; /* ATA control/altstatus register block */ 223 unsigned long ctl; /* ATA control/altstatus register block */
231 unsigned long bmdma; /* DMA register block */ 224 unsigned long bmdma; /* DMA register block */
225 unsigned long fifo_cfg; /* FIFO Valid Byte Count and Control */
232 unsigned long scr; /* SATA control register block */ 226 unsigned long scr; /* SATA control register block */
233 unsigned long sien; /* SATA Interrupt Enable register */ 227 unsigned long sien; /* SATA Interrupt Enable register */
234 unsigned long xfer_mode;/* data transfer mode register */ 228 unsigned long xfer_mode;/* data transfer mode register */
235 unsigned long sfis_cfg; /* SATA FIS reception config register */ 229 unsigned long sfis_cfg; /* SATA FIS reception config register */
236} sil_port[] = { 230} sil_port[] = {
237 /* port 0 ... */ 231 /* port 0 ... */
238 { 0x80, 0x8A, 0x00, 0x100, 0x148, 0xb4, 0x14c }, 232 { 0x80, 0x8A, 0x00, 0x40, 0x100, 0x148, 0xb4, 0x14c },
239 { 0xC0, 0xCA, 0x08, 0x180, 0x1c8, 0xf4, 0x1cc }, 233 { 0xC0, 0xCA, 0x08, 0x44, 0x180, 0x1c8, 0xf4, 0x1cc },
240 { 0x280, 0x28A, 0x200, 0x300, 0x348, 0x2b4, 0x34c }, 234 { 0x280, 0x28A, 0x200, 0x240, 0x300, 0x348, 0x2b4, 0x34c },
241 { 0x2C0, 0x2CA, 0x208, 0x380, 0x3c8, 0x2f4, 0x3cc }, 235 { 0x2C0, 0x2CA, 0x208, 0x244, 0x380, 0x3c8, 0x2f4, 0x3cc },
242 /* ... port 3 */ 236 /* ... port 3 */
243}; 237};
244 238
@@ -354,22 +348,12 @@ static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
354static void sil_dev_config(struct ata_port *ap, struct ata_device *dev) 348static void sil_dev_config(struct ata_port *ap, struct ata_device *dev)
355{ 349{
356 unsigned int n, quirks = 0; 350 unsigned int n, quirks = 0;
357 unsigned char model_num[40]; 351 unsigned char model_num[41];
358 const char *s;
359 unsigned int len;
360 352
361 ata_dev_id_string(dev->id, model_num, ATA_ID_PROD_OFS, 353 ata_id_c_string(dev->id, model_num, ATA_ID_PROD_OFS, sizeof(model_num));
362 sizeof(model_num));
363 s = &model_num[0];
364 len = strnlen(s, sizeof(model_num));
365
366 /* ATAPI specifies that empty space is blank-filled; remove blanks */
367 while ((len > 0) && (s[len - 1] == ' '))
368 len--;
369 354
370 for (n = 0; sil_blacklist[n].product; n++) 355 for (n = 0; sil_blacklist[n].product; n++)
371 if (!memcmp(sil_blacklist[n].product, s, 356 if (!strcmp(sil_blacklist[n].product, model_num)) {
372 strlen(sil_blacklist[n].product))) {
373 quirks = sil_blacklist[n].quirk; 357 quirks = sil_blacklist[n].quirk;
374 break; 358 break;
375 } 359 }
@@ -380,16 +364,14 @@ static void sil_dev_config(struct ata_port *ap, struct ata_device *dev)
380 (quirks & SIL_QUIRK_MOD15WRITE))) { 364 (quirks & SIL_QUIRK_MOD15WRITE))) {
381 printk(KERN_INFO "ata%u(%u): applying Seagate errata fix (mod15write workaround)\n", 365 printk(KERN_INFO "ata%u(%u): applying Seagate errata fix (mod15write workaround)\n",
382 ap->id, dev->devno); 366 ap->id, dev->devno);
383 ap->host->max_sectors = 15; 367 dev->max_sectors = 15;
384 ap->host->hostt->max_sectors = 15;
385 dev->flags |= ATA_DFLAG_LOCK_SECTORS;
386 return; 368 return;
387 } 369 }
388 370
389 /* limit to udma5 */ 371 /* limit to udma5 */
390 if (quirks & SIL_QUIRK_UDMA5MAX) { 372 if (quirks & SIL_QUIRK_UDMA5MAX) {
391 printk(KERN_INFO "ata%u(%u): applying Maxtor errata fix %s\n", 373 printk(KERN_INFO "ata%u(%u): applying Maxtor errata fix %s\n",
392 ap->id, dev->devno, s); 374 ap->id, dev->devno, model_num);
393 ap->udma_mask &= ATA_UDMA5; 375 ap->udma_mask &= ATA_UDMA5;
394 return; 376 return;
395 } 377 }
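
sil_dev_config() drops its hand-rolled handling of the blank-padded IDENTIFY string in favor of ata_id_c_string(), which extracts the string, NUL-terminates it and strips the trailing blanks; that is why the buffer grows from 40 to 41 bytes and why the blacklist match can become a plain strcmp(). The Seagate workaround in the same function also becomes per-device: dev->max_sectors = 15 replaces clamping the whole host. The replaced caller-side trimming looked like:

	/* before: trim the blank padding of the IDENTIFY string by hand */
	len = strnlen(s, sizeof(model_num));
	while (len > 0 && s[len - 1] == ' ')
		len--;

	/* after: one helper call yields a trimmed, NUL-terminated C string */
	ata_id_c_string(dev->id, model_num, ATA_ID_PROD_OFS, sizeof(model_num));
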
@@ -431,13 +413,12 @@ static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
431 if (rc) 413 if (rc)
432 goto err_out_regions; 414 goto err_out_regions;
433 415
434 probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL); 416 probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
435 if (probe_ent == NULL) { 417 if (probe_ent == NULL) {
436 rc = -ENOMEM; 418 rc = -ENOMEM;
437 goto err_out_regions; 419 goto err_out_regions;
438 } 420 }
439 421
440 memset(probe_ent, 0, sizeof(*probe_ent));
441 INIT_LIST_HEAD(&probe_ent->node); 422 INIT_LIST_HEAD(&probe_ent->node);
442 probe_ent->dev = pci_dev_to_dev(pdev); 423 probe_ent->dev = pci_dev_to_dev(pdev);
443 probe_ent->port_ops = sil_port_info[ent->driver_data].port_ops; 424 probe_ent->port_ops = sil_port_info[ent->driver_data].port_ops;
@@ -474,19 +455,12 @@ static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
474 if (cls) { 455 if (cls) {
475 cls >>= 3; 456 cls >>= 3;
476 cls++; /* cls = (line_size/8)+1 */ 457 cls++; /* cls = (line_size/8)+1 */
477 writeb(cls, mmio_base + SIL_FIFO_R0); 458 for (i = 0; i < probe_ent->n_ports; i++)
478 writeb(cls, mmio_base + SIL_FIFO_W0); 459 writew(cls << 8 | cls,
479 writeb(cls, mmio_base + SIL_FIFO_R1); 460 mmio_base + sil_port[i].fifo_cfg);
480 writeb(cls, mmio_base + SIL_FIFO_W1);
481 if (ent->driver_data == sil_3114) {
482 writeb(cls, mmio_base + SIL_FIFO_R2);
483 writeb(cls, mmio_base + SIL_FIFO_W2);
484 writeb(cls, mmio_base + SIL_FIFO_R3);
485 writeb(cls, mmio_base + SIL_FIFO_W3);
486 }
487 } else 461 } else
488 dev_printk(KERN_WARNING, &pdev->dev, 462 dev_printk(KERN_WARNING, &pdev->dev,
489 "cache line size not set. Driver may not function\n"); 463 "cache line size not set. Driver may not function\n");
490 464
491 /* Apply R_ERR on DMA activate FIS errata workaround */ 465 /* Apply R_ERR on DMA activate FIS errata workaround */
492 if (probe_ent->host_flags & SIL_FLAG_RERR_ON_DMA_ACT) { 466 if (probe_ent->host_flags & SIL_FLAG_RERR_ON_DMA_ACT) {
@@ -509,10 +483,10 @@ static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
509 irq_mask = SIL_MASK_4PORT; 483 irq_mask = SIL_MASK_4PORT;
510 484
511 /* flip the magic "make 4 ports work" bit */ 485 /* flip the magic "make 4 ports work" bit */
512 tmp = readl(mmio_base + SIL_IDE2_BMDMA); 486 tmp = readl(mmio_base + sil_port[2].bmdma);
513 if ((tmp & SIL_INTR_STEERING) == 0) 487 if ((tmp & SIL_INTR_STEERING) == 0)
514 writel(tmp | SIL_INTR_STEERING, 488 writel(tmp | SIL_INTR_STEERING,
515 mmio_base + SIL_IDE2_BMDMA); 489 mmio_base + sil_port[2].bmdma);
516 490
517 } else { 491 } else {
518 irq_mask = SIL_MASK_2PORT; 492 irq_mask = SIL_MASK_2PORT;
diff --git a/drivers/scsi/sata_sil24.c b/drivers/scsi/sata_sil24.c
index 923130185a9e..9a53a5ed38c5 100644
--- a/drivers/scsi/sata_sil24.c
+++ b/drivers/scsi/sata_sil24.c
@@ -249,9 +249,9 @@ static u8 sil24_check_status(struct ata_port *ap);
249static u32 sil24_scr_read(struct ata_port *ap, unsigned sc_reg); 249static u32 sil24_scr_read(struct ata_port *ap, unsigned sc_reg);
250static void sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val); 250static void sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val);
251static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf); 251static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
252static void sil24_phy_reset(struct ata_port *ap); 252static int sil24_probe_reset(struct ata_port *ap, unsigned int *classes);
253static void sil24_qc_prep(struct ata_queued_cmd *qc); 253static void sil24_qc_prep(struct ata_queued_cmd *qc);
254static int sil24_qc_issue(struct ata_queued_cmd *qc); 254static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc);
255static void sil24_irq_clear(struct ata_port *ap); 255static void sil24_irq_clear(struct ata_port *ap);
256static void sil24_eng_timeout(struct ata_port *ap); 256static void sil24_eng_timeout(struct ata_port *ap);
257static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *regs); 257static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
@@ -262,6 +262,7 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
262 262
263static const struct pci_device_id sil24_pci_tbl[] = { 263static const struct pci_device_id sil24_pci_tbl[] = {
264 { 0x1095, 0x3124, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3124 }, 264 { 0x1095, 0x3124, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3124 },
265 { 0x8086, 0x3124, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3124 },
265 { 0x1095, 0x3132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3132 }, 266 { 0x1095, 0x3132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3132 },
266 { 0x1095, 0x3131, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3131 }, 267 { 0x1095, 0x3131, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3131 },
267 { 0x1095, 0x3531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3131 }, 268 { 0x1095, 0x3531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3131 },
@@ -280,11 +281,11 @@ static struct scsi_host_template sil24_sht = {
280 .name = DRV_NAME, 281 .name = DRV_NAME,
281 .ioctl = ata_scsi_ioctl, 282 .ioctl = ata_scsi_ioctl,
282 .queuecommand = ata_scsi_queuecmd, 283 .queuecommand = ata_scsi_queuecmd,
284 .eh_timed_out = ata_scsi_timed_out,
283 .eh_strategy_handler = ata_scsi_error, 285 .eh_strategy_handler = ata_scsi_error,
284 .can_queue = ATA_DEF_QUEUE, 286 .can_queue = ATA_DEF_QUEUE,
285 .this_id = ATA_SHT_THIS_ID, 287 .this_id = ATA_SHT_THIS_ID,
286 .sg_tablesize = LIBATA_MAX_PRD, 288 .sg_tablesize = LIBATA_MAX_PRD,
287 .max_sectors = ATA_MAX_SECTORS,
288 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 289 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
289 .emulated = ATA_SHT_EMULATED, 290 .emulated = ATA_SHT_EMULATED,
290 .use_clustering = ATA_SHT_USE_CLUSTERING, 291 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -305,7 +306,7 @@ static const struct ata_port_operations sil24_ops = {
305 306
306 .tf_read = sil24_tf_read, 307 .tf_read = sil24_tf_read,
307 308
308 .phy_reset = sil24_phy_reset, 309 .probe_reset = sil24_probe_reset,
309 310
310 .qc_prep = sil24_qc_prep, 311 .qc_prep = sil24_qc_prep,
311 .qc_issue = sil24_qc_issue, 312 .qc_issue = sil24_qc_issue,
@@ -335,8 +336,8 @@ static struct ata_port_info sil24_port_info[] = {
335 { 336 {
336 .sht = &sil24_sht, 337 .sht = &sil24_sht,
337 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 338 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
338 ATA_FLAG_SRST | ATA_FLAG_MMIO | 339 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
339 ATA_FLAG_PIO_DMA | SIL24_NPORTS2FLAG(4), 340 SIL24_NPORTS2FLAG(4),
340 .pio_mask = 0x1f, /* pio0-4 */ 341 .pio_mask = 0x1f, /* pio0-4 */
341 .mwdma_mask = 0x07, /* mwdma0-2 */ 342 .mwdma_mask = 0x07, /* mwdma0-2 */
342 .udma_mask = 0x3f, /* udma0-5 */ 343 .udma_mask = 0x3f, /* udma0-5 */
@@ -346,8 +347,8 @@ static struct ata_port_info sil24_port_info[] = {
346 { 347 {
347 .sht = &sil24_sht, 348 .sht = &sil24_sht,
348 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 349 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
349 ATA_FLAG_SRST | ATA_FLAG_MMIO | 350 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
350 ATA_FLAG_PIO_DMA | SIL24_NPORTS2FLAG(2), 351 SIL24_NPORTS2FLAG(2),
351 .pio_mask = 0x1f, /* pio0-4 */ 352 .pio_mask = 0x1f, /* pio0-4 */
352 .mwdma_mask = 0x07, /* mwdma0-2 */ 353 .mwdma_mask = 0x07, /* mwdma0-2 */
353 .udma_mask = 0x3f, /* udma0-5 */ 354 .udma_mask = 0x3f, /* udma0-5 */
@@ -357,8 +358,8 @@ static struct ata_port_info sil24_port_info[] = {
357 { 358 {
358 .sht = &sil24_sht, 359 .sht = &sil24_sht,
359 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 360 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
360 ATA_FLAG_SRST | ATA_FLAG_MMIO | 361 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
361 ATA_FLAG_PIO_DMA | SIL24_NPORTS2FLAG(1), 362 SIL24_NPORTS2FLAG(1),
362 .pio_mask = 0x1f, /* pio0-4 */ 363 .pio_mask = 0x1f, /* pio0-4 */
363 .mwdma_mask = 0x07, /* mwdma0-2 */ 364 .mwdma_mask = 0x07, /* mwdma0-2 */
364 .udma_mask = 0x3f, /* udma0-5 */ 365 .udma_mask = 0x3f, /* udma0-5 */
@@ -370,7 +371,7 @@ static void sil24_dev_config(struct ata_port *ap, struct ata_device *dev)
370{ 371{
371 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr; 372 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
372 373
373 if (ap->cdb_len == 16) 374 if (dev->cdb_len == 16)
374 writel(PORT_CS_CDB16, port + PORT_CTRL_STAT); 375 writel(PORT_CS_CDB16, port + PORT_CTRL_STAT);
375 else 376 else
376 writel(PORT_CS_CDB16, port + PORT_CTRL_CLR); 377 writel(PORT_CS_CDB16, port + PORT_CTRL_CLR);
@@ -427,14 +428,23 @@ static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
427 *tf = pp->tf; 428 *tf = pp->tf;
428} 429}
429 430
430static int sil24_issue_SRST(struct ata_port *ap) 431static int sil24_softreset(struct ata_port *ap, int verbose,
432 unsigned int *class)
431{ 433{
432 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr; 434 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
433 struct sil24_port_priv *pp = ap->private_data; 435 struct sil24_port_priv *pp = ap->private_data;
434 struct sil24_prb *prb = &pp->cmd_block[0].ata.prb; 436 struct sil24_prb *prb = &pp->cmd_block[0].ata.prb;
435 dma_addr_t paddr = pp->cmd_block_dma; 437 dma_addr_t paddr = pp->cmd_block_dma;
438 unsigned long timeout = jiffies + ATA_TMOUT_BOOT * HZ;
436 u32 irq_enable, irq_stat; 439 u32 irq_enable, irq_stat;
437 int cnt; 440
441 DPRINTK("ENTER\n");
442
443 if (!sata_dev_present(ap)) {
444 DPRINTK("PHY reports no device\n");
445 *class = ATA_DEV_NONE;
446 goto out;
447 }
438 448
439 /* temporarily turn off IRQs during SRST */ 449 /* temporarily turn off IRQs during SRST */
440 irq_enable = readl(port + PORT_IRQ_ENABLE_SET); 450 irq_enable = readl(port + PORT_IRQ_ENABLE_SET);
@@ -451,7 +461,7 @@ static int sil24_issue_SRST(struct ata_port *ap)
451 461
452 writel((u32)paddr, port + PORT_CMD_ACTIVATE); 462 writel((u32)paddr, port + PORT_CMD_ACTIVATE);
453 463
454 for (cnt = 0; cnt < 100; cnt++) { 464 do {
455 irq_stat = readl(port + PORT_IRQ_STAT); 465 irq_stat = readl(port + PORT_IRQ_STAT);
456 writel(irq_stat, port + PORT_IRQ_STAT); /* clear irq */ 466 writel(irq_stat, port + PORT_IRQ_STAT); /* clear irq */
457 467
@@ -459,36 +469,42 @@ static int sil24_issue_SRST(struct ata_port *ap)
459 if (irq_stat & (PORT_IRQ_COMPLETE | PORT_IRQ_ERROR)) 469 if (irq_stat & (PORT_IRQ_COMPLETE | PORT_IRQ_ERROR))
460 break; 470 break;
461 471
462 msleep(1); 472 msleep(100);
463 } 473 } while (time_before(jiffies, timeout));
464 474
465 /* restore IRQs */ 475 /* restore IRQs */
466 writel(irq_enable, port + PORT_IRQ_ENABLE_SET); 476 writel(irq_enable, port + PORT_IRQ_ENABLE_SET);
467 477
468 if (!(irq_stat & PORT_IRQ_COMPLETE)) 478 if (!(irq_stat & PORT_IRQ_COMPLETE)) {
469 return -1; 479 DPRINTK("EXIT, srst failed\n");
480 return -EIO;
481 }
470 482
471 /* update TF */
472 sil24_update_tf(ap); 483 sil24_update_tf(ap);
484 *class = ata_dev_classify(&pp->tf);
485
486 if (*class == ATA_DEV_UNKNOWN)
487 *class = ATA_DEV_NONE;
488
489 out:
490 DPRINTK("EXIT, class=%u\n", *class);
473 return 0; 491 return 0;
474} 492}
475 493
476static void sil24_phy_reset(struct ata_port *ap) 494static int sil24_hardreset(struct ata_port *ap, int verbose,
495 unsigned int *class)
477{ 496{
478 struct sil24_port_priv *pp = ap->private_data; 497 unsigned int dummy_class;
479 498
480 __sata_phy_reset(ap); 499 /* sil24 doesn't report device signature after hard reset */
481 if (ap->flags & ATA_FLAG_PORT_DISABLED) 500 return sata_std_hardreset(ap, verbose, &dummy_class);
482 return; 501}
483
484 if (sil24_issue_SRST(ap) < 0) {
485 printk(KERN_ERR DRV_NAME
486 " ata%u: SRST failed, disabling port\n", ap->id);
487 ap->ops->port_disable(ap);
488 return;
489 }
490 502
491 ap->device->class = ata_dev_classify(&pp->tf); 503static int sil24_probe_reset(struct ata_port *ap, unsigned int *classes)
504{
505 return ata_drive_probe_reset(ap, ata_std_probeinit,
506 sil24_softreset, sil24_hardreset,
507 ata_std_postreset, classes);
492} 508}
493 509
494static inline void sil24_fill_sg(struct ata_queued_cmd *qc, 510static inline void sil24_fill_sg(struct ata_queued_cmd *qc,
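
sil24 converts from the old ->phy_reset hook to the new ->probe_reset interface, where ata_drive_probe_reset() sequences a probe-init step, a softreset that classifies the attached device, a hardreset fallback, and a postreset cleanup; since the chip does not report a device signature after hard reset, sil24_hardreset() passes a throwaway class and leaves classification to the soft reset. Inside the softreset, the fixed 100-iteration msleep(1) poll becomes a wall-clock timeout, a pattern worth noting on its own:

	unsigned long timeout = jiffies + ATA_TMOUT_BOOT * HZ;

	do {
		irq_stat = readl(port + PORT_IRQ_STAT);
		writel(irq_stat, port + PORT_IRQ_STAT);		/* ack */

		if (irq_stat & (PORT_IRQ_COMPLETE | PORT_IRQ_ERROR))
			break;

		msleep(100);
	} while (time_before(jiffies, timeout));
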
@@ -533,7 +549,7 @@ static void sil24_qc_prep(struct ata_queued_cmd *qc)
533 prb = &cb->atapi.prb; 549 prb = &cb->atapi.prb;
534 sge = cb->atapi.sge; 550 sge = cb->atapi.sge;
535 memset(cb->atapi.cdb, 0, 32); 551 memset(cb->atapi.cdb, 0, 32);
536 memcpy(cb->atapi.cdb, qc->cdb, ap->cdb_len); 552 memcpy(cb->atapi.cdb, qc->cdb, qc->dev->cdb_len);
537 553
538 if (qc->tf.protocol != ATA_PROT_ATAPI_NODATA) { 554 if (qc->tf.protocol != ATA_PROT_ATAPI_NODATA) {
539 if (qc->tf.flags & ATA_TFLAG_WRITE) 555 if (qc->tf.flags & ATA_TFLAG_WRITE)
@@ -557,7 +573,7 @@ static void sil24_qc_prep(struct ata_queued_cmd *qc)
557 sil24_fill_sg(qc, sge); 573 sil24_fill_sg(qc, sge);
558} 574}
559 575
560static int sil24_qc_issue(struct ata_queued_cmd *qc) 576static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc)
561{ 577{
562 struct ata_port *ap = qc->ap; 578 struct ata_port *ap = qc->ap;
563 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr; 579 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
@@ -638,23 +654,10 @@ static void sil24_eng_timeout(struct ata_port *ap)
638 struct ata_queued_cmd *qc; 654 struct ata_queued_cmd *qc;
639 655
640 qc = ata_qc_from_tag(ap, ap->active_tag); 656 qc = ata_qc_from_tag(ap, ap->active_tag);
641 if (!qc) {
642 printk(KERN_ERR "ata%u: BUG: timeout without command\n",
643 ap->id);
644 return;
645 }
646 657
647 /*
648 * hack alert! We cannot use the supplied completion
649 * function from inside the ->eh_strategy_handler() thread.
650 * libata is the only user of ->eh_strategy_handler() in
651 * any kernel, so the default scsi_done() assumes it is
652 * not being called from the SCSI EH.
653 */
654 printk(KERN_ERR "ata%u: command timeout\n", ap->id); 658 printk(KERN_ERR "ata%u: command timeout\n", ap->id);
655 qc->scsidone = scsi_finish_command; 659 qc->err_mask |= AC_ERR_TIMEOUT;
656 qc->err_mask |= AC_ERR_OTHER; 660 ata_eh_qc_complete(qc);
657 ata_qc_complete(qc);
658 661
659 sil24_reset_controller(ap); 662 sil24_reset_controller(ap);
660} 663}
@@ -895,6 +898,7 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
895 probe_ent->sht = pinfo->sht; 898 probe_ent->sht = pinfo->sht;
896 probe_ent->host_flags = pinfo->host_flags; 899 probe_ent->host_flags = pinfo->host_flags;
897 probe_ent->pio_mask = pinfo->pio_mask; 900 probe_ent->pio_mask = pinfo->pio_mask;
901 probe_ent->mwdma_mask = pinfo->mwdma_mask;
898 probe_ent->udma_mask = pinfo->udma_mask; 902 probe_ent->udma_mask = pinfo->udma_mask;
899 probe_ent->port_ops = pinfo->port_ops; 903 probe_ent->port_ops = pinfo->port_ops;
900 probe_ent->n_ports = SIL24_FLAG2NPORTS(pinfo->host_flags); 904 probe_ent->n_ports = SIL24_FLAG2NPORTS(pinfo->host_flags);
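
A one-line bug fix rides along in sil24_init_one(): mwdma_mask was never copied from the port info into the probe entry, so the MWDMA modes declared in sil24_port_info[] were silently never offered. The copy block now reads:

	probe_ent->pio_mask   = pinfo->pio_mask;
	probe_ent->mwdma_mask = pinfo->mwdma_mask;	/* was never copied */
	probe_ent->udma_mask  = pinfo->udma_mask;
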
diff --git a/drivers/scsi/sata_sis.c b/drivers/scsi/sata_sis.c
index 2df8c5632ac3..7fd45f86de99 100644
--- a/drivers/scsi/sata_sis.c
+++ b/drivers/scsi/sata_sis.c
@@ -87,11 +87,11 @@ static struct scsi_host_template sis_sht = {
87 .name = DRV_NAME, 87 .name = DRV_NAME,
88 .ioctl = ata_scsi_ioctl, 88 .ioctl = ata_scsi_ioctl,
89 .queuecommand = ata_scsi_queuecmd, 89 .queuecommand = ata_scsi_queuecmd,
90 .eh_timed_out = ata_scsi_timed_out,
90 .eh_strategy_handler = ata_scsi_error, 91 .eh_strategy_handler = ata_scsi_error,
91 .can_queue = ATA_DEF_QUEUE, 92 .can_queue = ATA_DEF_QUEUE,
92 .this_id = ATA_SHT_THIS_ID, 93 .this_id = ATA_SHT_THIS_ID,
93 .sg_tablesize = ATA_MAX_PRD, 94 .sg_tablesize = ATA_MAX_PRD,
94 .max_sectors = ATA_MAX_SECTORS,
95 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 95 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
96 .emulated = ATA_SHT_EMULATED, 96 .emulated = ATA_SHT_EMULATED,
97 .use_clustering = ATA_SHT_USE_CLUSTERING, 97 .use_clustering = ATA_SHT_USE_CLUSTERING,
diff --git a/drivers/scsi/sata_svw.c b/drivers/scsi/sata_svw.c
index d8472563fde8..4aaccd53e736 100644
--- a/drivers/scsi/sata_svw.c
+++ b/drivers/scsi/sata_svw.c
@@ -288,11 +288,11 @@ static struct scsi_host_template k2_sata_sht = {
288 .name = DRV_NAME, 288 .name = DRV_NAME,
289 .ioctl = ata_scsi_ioctl, 289 .ioctl = ata_scsi_ioctl,
290 .queuecommand = ata_scsi_queuecmd, 290 .queuecommand = ata_scsi_queuecmd,
291 .eh_timed_out = ata_scsi_timed_out,
291 .eh_strategy_handler = ata_scsi_error, 292 .eh_strategy_handler = ata_scsi_error,
292 .can_queue = ATA_DEF_QUEUE, 293 .can_queue = ATA_DEF_QUEUE,
293 .this_id = ATA_SHT_THIS_ID, 294 .this_id = ATA_SHT_THIS_ID,
294 .sg_tablesize = LIBATA_MAX_PRD, 295 .sg_tablesize = LIBATA_MAX_PRD,
295 .max_sectors = ATA_MAX_SECTORS,
296 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 296 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
297 .emulated = ATA_SHT_EMULATED, 297 .emulated = ATA_SHT_EMULATED,
298 .use_clustering = ATA_SHT_USE_CLUSTERING, 298 .use_clustering = ATA_SHT_USE_CLUSTERING,
diff --git a/drivers/scsi/sata_sx4.c b/drivers/scsi/sata_sx4.c
index bc87c16c80d2..9f8a76815402 100644
--- a/drivers/scsi/sata_sx4.c
+++ b/drivers/scsi/sata_sx4.c
@@ -174,7 +174,7 @@ static void pdc20621_get_from_dimm(struct ata_probe_ent *pe,
174static void pdc20621_put_to_dimm(struct ata_probe_ent *pe, 174static void pdc20621_put_to_dimm(struct ata_probe_ent *pe,
175 void *psource, u32 offset, u32 size); 175 void *psource, u32 offset, u32 size);
176static void pdc20621_irq_clear(struct ata_port *ap); 176static void pdc20621_irq_clear(struct ata_port *ap);
177static int pdc20621_qc_issue_prot(struct ata_queued_cmd *qc); 177static unsigned int pdc20621_qc_issue_prot(struct ata_queued_cmd *qc);
178 178
179 179
180static struct scsi_host_template pdc_sata_sht = { 180static struct scsi_host_template pdc_sata_sht = {
@@ -182,11 +182,11 @@ static struct scsi_host_template pdc_sata_sht = {
182 .name = DRV_NAME, 182 .name = DRV_NAME,
183 .ioctl = ata_scsi_ioctl, 183 .ioctl = ata_scsi_ioctl,
184 .queuecommand = ata_scsi_queuecmd, 184 .queuecommand = ata_scsi_queuecmd,
185 .eh_timed_out = ata_scsi_timed_out,
185 .eh_strategy_handler = ata_scsi_error, 186 .eh_strategy_handler = ata_scsi_error,
186 .can_queue = ATA_DEF_QUEUE, 187 .can_queue = ATA_DEF_QUEUE,
187 .this_id = ATA_SHT_THIS_ID, 188 .this_id = ATA_SHT_THIS_ID,
188 .sg_tablesize = LIBATA_MAX_PRD, 189 .sg_tablesize = LIBATA_MAX_PRD,
189 .max_sectors = ATA_MAX_SECTORS,
190 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 190 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
191 .emulated = ATA_SHT_EMULATED, 191 .emulated = ATA_SHT_EMULATED,
192 .use_clustering = ATA_SHT_USE_CLUSTERING, 192 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -460,7 +460,7 @@ static void pdc20621_dma_prep(struct ata_queued_cmd *qc)
460 unsigned int i, idx, total_len = 0, sgt_len; 460 unsigned int i, idx, total_len = 0, sgt_len;
461 u32 *buf = (u32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ]; 461 u32 *buf = (u32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ];
462 462
463 assert(qc->flags & ATA_QCFLAG_DMAMAP); 463 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
464 464
465 VPRINTK("ata%u: ENTER\n", ap->id); 465 VPRINTK("ata%u: ENTER\n", ap->id);
466 466
@@ -678,7 +678,7 @@ static void pdc20621_packet_start(struct ata_queued_cmd *qc)
678 } 678 }
679} 679}
680 680
681static int pdc20621_qc_issue_prot(struct ata_queued_cmd *qc) 681static unsigned int pdc20621_qc_issue_prot(struct ata_queued_cmd *qc)
682{ 682{
683 switch (qc->tf.protocol) { 683 switch (qc->tf.protocol) {
684 case ATA_PROT_DMA: 684 case ATA_PROT_DMA:
@@ -866,26 +866,12 @@ static void pdc_eng_timeout(struct ata_port *ap)
866 spin_lock_irqsave(&host_set->lock, flags); 866 spin_lock_irqsave(&host_set->lock, flags);
867 867
868 qc = ata_qc_from_tag(ap, ap->active_tag); 868 qc = ata_qc_from_tag(ap, ap->active_tag);
869 if (!qc) {
870 printk(KERN_ERR "ata%u: BUG: timeout without command\n",
871 ap->id);
872 goto out;
873 }
874
875 /* hack alert! We cannot use the supplied completion
876 * function from inside the ->eh_strategy_handler() thread.
877 * libata is the only user of ->eh_strategy_handler() in
878 * any kernel, so the default scsi_done() assumes it is
879 * not being called from the SCSI EH.
880 */
881 qc->scsidone = scsi_finish_command;
882 869
883 switch (qc->tf.protocol) { 870 switch (qc->tf.protocol) {
884 case ATA_PROT_DMA: 871 case ATA_PROT_DMA:
885 case ATA_PROT_NODATA: 872 case ATA_PROT_NODATA:
886 printk(KERN_ERR "ata%u: command timeout\n", ap->id); 873 printk(KERN_ERR "ata%u: command timeout\n", ap->id);
887 qc->err_mask |= __ac_err_mask(ata_wait_idle(ap)); 874 qc->err_mask |= __ac_err_mask(ata_wait_idle(ap));
888 ata_qc_complete(qc);
889 break; 875 break;
890 876
891 default: 877 default:
@@ -895,12 +881,11 @@ static void pdc_eng_timeout(struct ata_port *ap)
895 ap->id, qc->tf.command, drv_stat); 881 ap->id, qc->tf.command, drv_stat);
896 882
897 qc->err_mask |= ac_err_mask(drv_stat); 883 qc->err_mask |= ac_err_mask(drv_stat);
898 ata_qc_complete(qc);
899 break; 884 break;
900 } 885 }
901 886
902out:
903 spin_unlock_irqrestore(&host_set->lock, flags); 887 spin_unlock_irqrestore(&host_set->lock, flags);
888 ata_eh_qc_complete(qc);
904 DPRINTK("EXIT\n"); 889 DPRINTK("EXIT\n");
905} 890}
906 891
diff --git a/drivers/scsi/sata_uli.c b/drivers/scsi/sata_uli.c
index 9635ca700977..37a487b7d655 100644
--- a/drivers/scsi/sata_uli.c
+++ b/drivers/scsi/sata_uli.c
@@ -75,11 +75,11 @@ static struct scsi_host_template uli_sht = {
75 .name = DRV_NAME, 75 .name = DRV_NAME,
76 .ioctl = ata_scsi_ioctl, 76 .ioctl = ata_scsi_ioctl,
77 .queuecommand = ata_scsi_queuecmd, 77 .queuecommand = ata_scsi_queuecmd,
78 .eh_timed_out = ata_scsi_timed_out,
78 .eh_strategy_handler = ata_scsi_error, 79 .eh_strategy_handler = ata_scsi_error,
79 .can_queue = ATA_DEF_QUEUE, 80 .can_queue = ATA_DEF_QUEUE,
80 .this_id = ATA_SHT_THIS_ID, 81 .this_id = ATA_SHT_THIS_ID,
81 .sg_tablesize = LIBATA_MAX_PRD, 82 .sg_tablesize = LIBATA_MAX_PRD,
82 .max_sectors = ATA_MAX_SECTORS,
83 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 83 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
84 .emulated = ATA_SHT_EMULATED, 84 .emulated = ATA_SHT_EMULATED,
85 .use_clustering = ATA_SHT_USE_CLUSTERING, 85 .use_clustering = ATA_SHT_USE_CLUSTERING,
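The same two-line template edit is repeated for sata_via and sata_vsc in the hunks that follow: every converted host template gains an ->eh_timed_out hook (the new per-command timeout entry point) and drops its static .max_sectors cap, which libata now manages per device. The common result looks like this (fields before .name elided; "xxx" is a placeholder driver prefix):

    static struct scsi_host_template xxx_sht = {
        .name                 = DRV_NAME,
        .ioctl                = ata_scsi_ioctl,
        .queuecommand         = ata_scsi_queuecmd,
        .eh_timed_out         = ata_scsi_timed_out,   /* new timeout hook */
        .eh_strategy_handler  = ata_scsi_error,
        .can_queue            = ATA_DEF_QUEUE,
        .this_id              = ATA_SHT_THIS_ID,
        .sg_tablesize         = LIBATA_MAX_PRD,
        /* .max_sectors dropped: the limit is applied per device now */
        .cmd_per_lun          = ATA_SHT_CMD_PER_LUN,
        .emulated             = ATA_SHT_EMULATED,
        .use_clustering       = ATA_SHT_USE_CLUSTERING,
    };
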
diff --git a/drivers/scsi/sata_via.c b/drivers/scsi/sata_via.c
index 6d5b0a794cfd..ff65a0b0457f 100644
--- a/drivers/scsi/sata_via.c
+++ b/drivers/scsi/sata_via.c
@@ -94,11 +94,11 @@ static struct scsi_host_template svia_sht = {
94 .name = DRV_NAME, 94 .name = DRV_NAME,
95 .ioctl = ata_scsi_ioctl, 95 .ioctl = ata_scsi_ioctl,
96 .queuecommand = ata_scsi_queuecmd, 96 .queuecommand = ata_scsi_queuecmd,
97 .eh_timed_out = ata_scsi_timed_out,
97 .eh_strategy_handler = ata_scsi_error, 98 .eh_strategy_handler = ata_scsi_error,
98 .can_queue = ATA_DEF_QUEUE, 99 .can_queue = ATA_DEF_QUEUE,
99 .this_id = ATA_SHT_THIS_ID, 100 .this_id = ATA_SHT_THIS_ID,
100 .sg_tablesize = LIBATA_MAX_PRD, 101 .sg_tablesize = LIBATA_MAX_PRD,
101 .max_sectors = ATA_MAX_SECTORS,
102 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 102 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
103 .emulated = ATA_SHT_EMULATED, 103 .emulated = ATA_SHT_EMULATED,
104 .use_clustering = ATA_SHT_USE_CLUSTERING, 104 .use_clustering = ATA_SHT_USE_CLUSTERING,
diff --git a/drivers/scsi/sata_vsc.c b/drivers/scsi/sata_vsc.c
index e484e8db6810..b574379a7a82 100644
--- a/drivers/scsi/sata_vsc.c
+++ b/drivers/scsi/sata_vsc.c
@@ -251,11 +251,11 @@ static struct scsi_host_template vsc_sata_sht = {
251 .name = DRV_NAME, 251 .name = DRV_NAME,
252 .ioctl = ata_scsi_ioctl, 252 .ioctl = ata_scsi_ioctl,
253 .queuecommand = ata_scsi_queuecmd, 253 .queuecommand = ata_scsi_queuecmd,
254 .eh_timed_out = ata_scsi_timed_out,
254 .eh_strategy_handler = ata_scsi_error, 255 .eh_strategy_handler = ata_scsi_error,
255 .can_queue = ATA_DEF_QUEUE, 256 .can_queue = ATA_DEF_QUEUE,
256 .this_id = ATA_SHT_THIS_ID, 257 .this_id = ATA_SHT_THIS_ID,
257 .sg_tablesize = LIBATA_MAX_PRD, 258 .sg_tablesize = LIBATA_MAX_PRD,
258 .max_sectors = ATA_MAX_SECTORS,
259 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 259 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
260 .emulated = ATA_SHT_EMULATED, 260 .emulated = ATA_SHT_EMULATED,
261 .use_clustering = ATA_SHT_USE_CLUSTERING, 261 .use_clustering = ATA_SHT_USE_CLUSTERING,
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index f01ec0a7c506..84c3937ae8fb 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -126,6 +126,7 @@ static struct {
126 {"ADAPTEC", "Adaptec 5400S", NULL, BLIST_FORCELUN}, 126 {"ADAPTEC", "Adaptec 5400S", NULL, BLIST_FORCELUN},
127 {"AFT PRO", "-IX CF", "0.0>", BLIST_FORCELUN}, 127 {"AFT PRO", "-IX CF", "0.0>", BLIST_FORCELUN},
128 {"BELKIN", "USB 2 HS-CF", "1.95", BLIST_FORCELUN | BLIST_INQUIRY_36}, 128 {"BELKIN", "USB 2 HS-CF", "1.95", BLIST_FORCELUN | BLIST_INQUIRY_36},
129 {"BROWNIE", "1600U3P", NULL, BLIST_NOREPORTLUN},
129 {"CANON", "IPUBJD", NULL, BLIST_SPARSELUN}, 130 {"CANON", "IPUBJD", NULL, BLIST_SPARSELUN},
130 {"CBOX3", "USB Storage-SMC", "300A", BLIST_FORCELUN | BLIST_INQUIRY_36}, 131 {"CBOX3", "USB Storage-SMC", "300A", BLIST_FORCELUN | BLIST_INQUIRY_36},
131 {"CMD", "CRA-7280", NULL, BLIST_SPARSELUN}, /* CMD RAID Controller */ 132 {"CMD", "CRA-7280", NULL, BLIST_SPARSELUN}, /* CMD RAID Controller */
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index f988418d805d..5f0fdfb2618c 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -581,8 +581,7 @@ static int scsi_request_sense(struct scsi_cmnd *scmd)
581 * keep a list of pending commands for final completion, and once we 581 * keep a list of pending commands for final completion, and once we
582 * are ready to leave error handling we handle completion for real. 582 * are ready to leave error handling we handle completion for real.
583 **/ 583 **/
584static void scsi_eh_finish_cmd(struct scsi_cmnd *scmd, 584void scsi_eh_finish_cmd(struct scsi_cmnd *scmd, struct list_head *done_q)
585 struct list_head *done_q)
586{ 585{
587 scmd->device->host->host_failed--; 586 scmd->device->host->host_failed--;
588 scmd->eh_eflags = 0; 587 scmd->eh_eflags = 0;
@@ -594,6 +593,7 @@ static void scsi_eh_finish_cmd(struct scsi_cmnd *scmd,
594 scsi_setup_cmd_retry(scmd); 593 scsi_setup_cmd_retry(scmd);
595 list_move_tail(&scmd->eh_entry, done_q); 594 list_move_tail(&scmd->eh_entry, done_q);
596} 595}
596EXPORT_SYMBOL(scsi_eh_finish_cmd);
597 597
598/** 598/**
599 * scsi_eh_get_sense - Get device sense data. 599 * scsi_eh_get_sense - Get device sense data.
@@ -1422,7 +1422,7 @@ static void scsi_eh_ready_devs(struct Scsi_Host *shost,
1422 * @done_q: list_head of processed commands. 1422 * @done_q: list_head of processed commands.
1423 * 1423 *
1424 **/ 1424 **/
1425static void scsi_eh_flush_done_q(struct list_head *done_q) 1425void scsi_eh_flush_done_q(struct list_head *done_q)
1426{ 1426{
1427 struct scsi_cmnd *scmd, *next; 1427 struct scsi_cmnd *scmd, *next;
1428 1428
@@ -1451,6 +1451,7 @@ static void scsi_eh_flush_done_q(struct list_head *done_q)
1451 } 1451 }
1452 } 1452 }
1453} 1453}
1454EXPORT_SYMBOL(scsi_eh_flush_done_q);
1454 1455
1455/** 1456/**
1456 * scsi_unjam_host - Attempt to fix a host which has a cmd that failed. 1457 * scsi_unjam_host - Attempt to fix a host which has a cmd that failed.
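Un-static'ing and exporting scsi_eh_finish_cmd() and scsi_eh_flush_done_q() lets code outside scsi_error.c (libata's reworked error handler is the consumer in this merge) finish and flush commands the same way the midlayer EH does. A hedged sketch of how an external strategy handler might use the pair; the function name and traversal are illustrative, not taken from this patch:

    #include <linux/list.h>
    #include <scsi/scsi_cmnd.h>
    #include <scsi/scsi_host.h>

    static void example_eh_strategy(struct Scsi_Host *shost)
    {
        struct scsi_cmnd *scmd, *next;
        LIST_HEAD(done_q);

        /* Recover each failed command, then park it on a private list;
         * scsi_eh_finish_cmd() also decrements shost->host_failed. */
        list_for_each_entry_safe(scmd, next, &shost->eh_cmd_q, eh_entry)
            scsi_eh_finish_cmd(scmd, &done_q);

        /* Retry or complete everything on the list in one pass. */
        scsi_eh_flush_done_q(&done_q);
    }
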
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 1883fee96bd9..8db656214b5c 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -224,7 +224,7 @@ static void fc_rport_terminate(struct fc_rport *rport);
224 */ 224 */
225#define FC_STARGET_NUM_ATTRS 3 225#define FC_STARGET_NUM_ATTRS 3
226#define FC_RPORT_NUM_ATTRS 9 226#define FC_RPORT_NUM_ATTRS 9
227#define FC_HOST_NUM_ATTRS 16 227#define FC_HOST_NUM_ATTRS 17
228 228
229struct fc_internal { 229struct fc_internal {
230 struct scsi_transport_template t; 230 struct scsi_transport_template t;
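FC_HOST_NUM_ATTRS goes from 16 to 17 because the constant sizes fixed arrays inside struct fc_internal; it must grow in lockstep with each host attribute the transport sets up, or those arrays overflow silently. From memory of the transport's layout (treat as illustrative), the dependent fields look like:

    /* Inside struct fc_internal: storage for the host attributes and a
     * NULL-terminated pointer array handed to the sysfs class code. */
    struct class_device_attribute private_host_attrs[FC_HOST_NUM_ATTRS];
    struct class_device_attribute *host_attrs[FC_HOST_NUM_ATTRS + 1];
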