author		Mikael Pettersson <mikpe@it.uu.se>	2007-10-30 09:20:49 -0400
committer	Jeff Garzik <jeff@garzik.org>	2007-10-30 09:27:57 -0400
commit		b9ccd4a90bbb964506f01b4bdcff4f50f8d5d334 (patch)
tree		c8517b78c7de68fa3e4a66f3de069130b2c9b8fa /drivers/ata/sata_promise.c
parent		3529a233421fc43fa7bfdf7a4317daf28348a23d (diff)
sata_promise: ASIC PRD table bug workaround, take 2
Second-generation Promise SATA controllers have an ASIC bug which can
trigger if the last PRD entry is larger than 164 bytes, resulting in
intermittent errors and possible data corruption.

Work around this by replacing calls to ata_qc_prep() with a private
version that fills the PRD, checks the size of the last entry, and if
necessary splits it to avoid the bug.

Also reduce sg_tablesize by 1 to accommodate the new entry.

Tested on the second-generation SATA300 TX4 and SATA300 TX2plus, and
the first-generation PDC20378.

Thanks to Alexander Sabourenkov for verifying the bug by studying the
vendor driver, and for writing the initial patch upon which this one
is based.

Signed-off-by: Mikael Pettersson <mikpe@it.uu.se>

--
Changes since previous version:
* use new PDC_MAX_PRD constant to initialise sg_tablesize

 drivers/ata/sata_promise.c |   87 ++++++++++++++++++++++++++++++++++++++++++---
 1 files changed, 83 insertions(+), 4 deletions(-)

Signed-off-by: Jeff Garzik <jeff@garzik.org>
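To make the arithmetic of the workaround concrete, the following is a minimal
standalone sketch, not the driver code: struct prd_entry, split_last_prd() and
ASIC_BUG_LIMIT are made-up names for illustration only (the in-tree logic lives
in pdc_fill_sg() in the diff below). Any final entry longer than 164 bytes
(41*4) is shortened, and the trailing 164 bytes are moved into one extra entry,
which is why sg_tablesize is reduced by one to keep that slot free.

#include <stdio.h>
#include <stdint.h>

/* Hypothetical stand-in for a PRD entry: bus address and byte count. */
struct prd_entry {
	uint32_t addr;
	uint32_t len;
};

#define ASIC_BUG_LIMIT	(41 * 4)	/* 164 bytes: largest safe final entry */

/* Split the last of 'idx' entries if it exceeds the safe limit;
 * returns the new entry count (idx or idx + 1). */
static unsigned int split_last_prd(struct prd_entry *prd, unsigned int idx)
{
	if (idx) {
		uint32_t len = prd[idx - 1].len;

		if (len > ASIC_BUG_LIMIT) {
			/* shorten the old last entry ... */
			prd[idx - 1].len = len - ASIC_BUG_LIMIT;
			/* ... and append a 164-byte entry covering the tail */
			prd[idx].addr = prd[idx - 1].addr + len - ASIC_BUG_LIMIT;
			prd[idx].len = ASIC_BUG_LIMIT;
			idx++;
		}
	}
	return idx;
}

int main(void)
{
	/* room for one extra entry, mirroring the sg_tablesize change */
	struct prd_entry prd[2] = { { 0x1000, 1000 }, { 0, 0 } };
	unsigned int i, n = split_last_prd(prd, 1);

	for (i = 0; i < n; i++)	/* prints (0x1000, 836) and (0x1344, 164) */
		printf("PRD[%u] = (0x%X, %u)\n", i, prd[i].addr, prd[i].len);
	return 0;
}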
Diffstat (limited to 'drivers/ata/sata_promise.c')
-rw-r--r--	drivers/ata/sata_promise.c	87
1 files changed, 83 insertions, 4 deletions
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
index deb26f04f2d7..3fdc7cbd9436 100644
--- a/drivers/ata/sata_promise.c
+++ b/drivers/ata/sata_promise.c
@@ -50,6 +50,7 @@
 enum {
 	PDC_MAX_PORTS		= 4,
 	PDC_MMIO_BAR		= 3,
+	PDC_MAX_PRD		= LIBATA_MAX_PRD - 1, /* -1 for ASIC PRD bug workaround */
 
 	/* register offsets */
 	PDC_FEATURE		= 0x04, /* Feature/Error reg (per port) */
@@ -157,7 +158,7 @@ static struct scsi_host_template pdc_ata_sht = {
 	.queuecommand		= ata_scsi_queuecmd,
 	.can_queue		= ATA_DEF_QUEUE,
 	.this_id		= ATA_SHT_THIS_ID,
-	.sg_tablesize		= LIBATA_MAX_PRD,
+	.sg_tablesize		= PDC_MAX_PRD,
 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
 	.emulated		= ATA_SHT_EMULATED,
 	.use_clustering		= ATA_SHT_USE_CLUSTERING,
@@ -523,6 +524,84 @@ static void pdc_atapi_pkt(struct ata_queued_cmd *qc)
 	memcpy(buf+31, cdb, cdb_len);
 }
 
+/**
+ *	pdc_fill_sg - Fill PCI IDE PRD table
+ *	@qc: Metadata associated with taskfile to be transferred
+ *
+ *	Fill PCI IDE PRD (scatter-gather) table with segments
+ *	associated with the current disk command.
+ *	Make sure hardware does not choke on it.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ *
+ */
+static void pdc_fill_sg(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct scatterlist *sg;
+	unsigned int idx;
+	const u32 SG_COUNT_ASIC_BUG = 41*4;
+
+	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
+		return;
+
+	WARN_ON(qc->__sg == NULL);
+	WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
+
+	idx = 0;
+	ata_for_each_sg(sg, qc) {
+		u32 addr, offset;
+		u32 sg_len, len;
+
+		/* determine if physical DMA addr spans 64K boundary.
+		 * Note h/w doesn't support 64-bit, so we unconditionally
+		 * truncate dma_addr_t to u32.
+		 */
+		addr = (u32) sg_dma_address(sg);
+		sg_len = sg_dma_len(sg);
+
+		while (sg_len) {
+			offset = addr & 0xffff;
+			len = sg_len;
+			if ((offset + sg_len) > 0x10000)
+				len = 0x10000 - offset;
+
+			ap->prd[idx].addr = cpu_to_le32(addr);
+			ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
+			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
+
+			idx++;
+			sg_len -= len;
+			addr += len;
+		}
+	}
+
+	if (idx) {
+		u32 len = le32_to_cpu(ap->prd[idx - 1].flags_len);
+
+		if (len > SG_COUNT_ASIC_BUG) {
+			u32 addr;
+
+			VPRINTK("Splitting last PRD.\n");
+
+			addr = le32_to_cpu(ap->prd[idx - 1].addr);
+			ap->prd[idx - 1].flags_len -= cpu_to_le32(SG_COUNT_ASIC_BUG);
+			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx - 1, addr, SG_COUNT_ASIC_BUG);
+
+			addr = addr + len - SG_COUNT_ASIC_BUG;
+			len = SG_COUNT_ASIC_BUG;
+			ap->prd[idx].addr = cpu_to_le32(addr);
+			ap->prd[idx].flags_len = cpu_to_le32(len);
+			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
+
+			idx++;
+		}
+
+		ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
+	}
+}
+
 static void pdc_qc_prep(struct ata_queued_cmd *qc)
 {
 	struct pdc_port_priv *pp = qc->ap->private_data;
@@ -532,7 +611,7 @@ static void pdc_qc_prep(struct ata_queued_cmd *qc)
 
 	switch (qc->tf.protocol) {
 	case ATA_PROT_DMA:
-		ata_qc_prep(qc);
+		pdc_fill_sg(qc);
 		/* fall through */
 
 	case ATA_PROT_NODATA:
@@ -548,11 +627,11 @@ static void pdc_qc_prep(struct ata_queued_cmd *qc)
 		break;
 
 	case ATA_PROT_ATAPI:
-		ata_qc_prep(qc);
+		pdc_fill_sg(qc);
 		break;
 
 	case ATA_PROT_ATAPI_DMA:
-		ata_qc_prep(qc);
+		pdc_fill_sg(qc);
 		/*FALLTHROUGH*/
 	case ATA_PROT_ATAPI_NODATA:
 		pdc_atapi_pkt(qc);