author		Tejun Heo <htejun@gmail.com>	2008-04-30 03:35:11 -0400
committer	Jeff Garzik <jgarzik@redhat.com>	2008-05-06 11:40:54 -0400
commit		3ad400a92e9c7d2f7caa6c6f811dad9b7d3f333c (patch)
tree		625fd562ea910adecba3bb0cbded541462830886 /drivers/ata
parent		364fac0e56b9bd379330ef9e39d3761f0b491e2c (diff)
sata_inic162x: use IDMA for ATA_PROT_DMA
The modified driver on the initio site has enough clues on how to use
IDMA.  Use IDMA for ATA_PROT_DMA.

* LBA48 now works as long as it uses DMA (LBA48 devices still aren't
  allowed, as data can be destroyed if PIO is used for any reason).

* There is no need to mask IRQs for DMA reads anymore, as IDMA_DONE is
  properly raised after the transfer to memory has actually completed.
  There will be some spurious interrupts, but host_intr handles them
  correctly, and manipulating the port IRQ mask interacts badly with
  the other port for some reason, so command-type-dependent port IRQ
  masking is not used anymore.

Signed-off-by: Tejun Heo <htejun@gmail.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
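For orientation, the sketch below mirrors the 32-byte Command Parameter Block
(CPB) that this patch introduces and shows how an LBA48 DMA taskfile maps onto
it.  It is a stand-alone user-space illustration, not driver code: the
demo_cpb name, the flag values, and the taskfile numbers are made up for the
example; only the field layout and the split into low/high (hob_*) bytes
follow the patch below.

/* Illustrative only: mirrors struct inic_cpb from the patch, using plain
 * uint8_t/uint32_t instead of the kernel's u8/__le32 types.  The taskfile
 * values and flag bits are placeholders, not the driver's real constants.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <stddef.h>

struct demo_cpb {
	uint8_t  resp_flags, error, status, ctl_flags;
	uint32_t len;			/* total transfer length */
	uint32_t prd;			/* first PRD pointer */
	uint8_t  rsvd[4];
	/* 16 bytes */
	uint8_t  feature, hob_feature;
	uint8_t  device, mirctl;
	uint8_t  nsect, hob_nsect;
	uint8_t  lbal, hob_lbal;
	uint8_t  lbam, hob_lbam;
	uint8_t  lbah, hob_lbah;
	uint8_t  command, ctl;
	uint8_t  slave_error, slave_status;
	/* 32 bytes */
} __attribute__((packed));

int main(void)
{
	struct demo_cpb cpb;
	uint64_t lba = 0x0123456789ULL;	/* demo LBA48 sector address */
	uint16_t nsect = 256;		/* demo sector count */

	memset(&cpb, 0, sizeof(cpb));

	/* stands in for CPB_CTL_VALID | CPB_CTL_IEN | CPB_CTL_DATA;
	 * the bit values here are placeholders */
	cpb.ctl_flags = 0x01 | 0x02 | 0x04;
	cpb.len = (uint32_t)nsect * 512;	/* bytes to transfer */

	/* low/high halves of the 48-bit LBA and 16-bit sector count,
	 * split the way libata loads an LBA48 taskfile; inic_qc_prep()
	 * simply copies these taskfile fields into the CPB */
	cpb.lbal      = lba & 0xff;
	cpb.lbam      = (lba >> 8) & 0xff;
	cpb.lbah      = (lba >> 16) & 0xff;
	cpb.hob_lbal  = (lba >> 24) & 0xff;
	cpb.hob_lbam  = (lba >> 32) & 0xff;
	cpb.hob_lbah  = (lba >> 40) & 0xff;
	cpb.nsect     = nsect & 0xff;
	cpb.hob_nsect = (nsect >> 8) & 0xff;

	cpb.command = 0x25;		/* READ DMA EXT */

	printf("CPB size: %zu bytes (expect 32)\n", sizeof(cpb));
	printf("prd field offset: %zu\n", offsetof(struct demo_cpb, prd));
	return 0;
}

Compiled and run with any C compiler, this should report a 32-byte structure,
matching the /* 32 bytes */ marker in the struct added by the patch.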
Diffstat (limited to 'drivers/ata')
-rw-r--r--	drivers/ata/sata_inic162x.c	270
1 file changed, 235 insertions, 35 deletions
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
index 97267ab001ed..db57f34d2211 100644
--- a/drivers/ata/sata_inic162x.c
+++ b/drivers/ata/sata_inic162x.c
@@ -35,6 +35,10 @@ enum {
 
 	NR_PORTS		= 2,
 
+	IDMA_CPB_TBL_SIZE	= 4 * 32,
+
+	INIC_DMA_BOUNDARY	= 0xffffff,
+
 	HOST_ACTRL		= 0x08,
 	HOST_CTL		= 0x7c,
 	HOST_STAT		= 0x7e,
@@ -151,11 +155,57 @@ enum {
 	PRD_END			= (1 << 7), /* APRD chain end */
 };
 
+/* Command Parameter Block */
+struct inic_cpb {
+	u8		resp_flags;	/* Response Flags */
+	u8		error;		/* ATA Error */
+	u8		status;		/* ATA Status */
+	u8		ctl_flags;	/* Control Flags */
+	__le32		len;		/* Total Transfer Length */
+	__le32		prd;		/* First PRD pointer */
+	u8		rsvd[4];
+	/* 16 bytes */
+	u8		feature;	/* ATA Feature */
+	u8		hob_feature;	/* ATA Ex. Feature */
+	u8		device;		/* ATA Device/Head */
+	u8		mirctl;		/* Mirror Control */
+	u8		nsect;		/* ATA Sector Count */
+	u8		hob_nsect;	/* ATA Ex. Sector Count */
+	u8		lbal;		/* ATA Sector Number */
+	u8		hob_lbal;	/* ATA Ex. Sector Number */
+	u8		lbam;		/* ATA Cylinder Low */
+	u8		hob_lbam;	/* ATA Ex. Cylinder Low */
+	u8		lbah;		/* ATA Cylinder High */
+	u8		hob_lbah;	/* ATA Ex. Cylinder High */
+	u8		command;	/* ATA Command */
+	u8		ctl;		/* ATA Control */
+	u8		slave_error;	/* Slave ATA Error */
+	u8		slave_status;	/* Slave ATA Status */
+	/* 32 bytes */
+} __packed;
+
+/* Physical Region Descriptor */
+struct inic_prd {
+	__le32		mad;		/* Physical Memory Address */
+	__le16		len;		/* Transfer Length */
+	u8		rsvd;
+	u8		flags;		/* Control Flags */
+} __packed;
+
+struct inic_pkt {
+	struct inic_cpb	cpb;
+	struct inic_prd	prd[LIBATA_MAX_PRD];
+} __packed;
+
 struct inic_host_priv {
 	u16	cached_hctl;
 };
 
 struct inic_port_priv {
+	struct inic_pkt	*pkt;
+	dma_addr_t	pkt_dma;
+	u32		*cpb_tbl;
+	dma_addr_t	cpb_tbl_dma;
 	u8	dfl_prdctl;
 	u8	cached_prdctl;
 	u8	cached_pirq_mask;
@@ -163,6 +213,7 @@ struct inic_port_priv {
 
 static struct scsi_host_template inic_sht = {
 	ATA_BMDMA_SHT(DRV_NAME),
+	.dma_boundary		= INIC_DMA_BOUNDARY,
 };
 
 static const int scr_map[] = {
@@ -303,42 +354,112 @@ static u8 inic_bmdma_status(struct ata_port *ap)
 	return ATA_DMA_INTR;
 }
 
-static void inic_host_intr(struct ata_port *ap)
+static void inic_stop_idma(struct ata_port *ap)
 {
 	void __iomem *port_base = inic_port_base(ap);
+
+	readb(port_base + PORT_RPQ_FIFO);
+	readb(port_base + PORT_RPQ_CNT);
+	writew(0, port_base + PORT_IDMA_CTL);
+}
+
+static void inic_host_err_intr(struct ata_port *ap, u8 irq_stat, u16 idma_stat)
+{
 	struct ata_eh_info *ehi = &ap->link.eh_info;
+	struct inic_port_priv *pp = ap->private_data;
+	struct inic_cpb *cpb = &pp->pkt->cpb;
+	bool freeze = false;
+
+	ata_ehi_clear_desc(ehi);
+	ata_ehi_push_desc(ehi, "irq_stat=0x%x idma_stat=0x%x",
+			  irq_stat, idma_stat);
+
+	inic_stop_idma(ap);
+
+	if (irq_stat & (PIRQ_OFFLINE | PIRQ_ONLINE)) {
+		ata_ehi_push_desc(ehi, "hotplug");
+		ata_ehi_hotplugged(ehi);
+		freeze = true;
+	}
+
+	if (idma_stat & IDMA_STAT_PERR) {
+		ata_ehi_push_desc(ehi, "PCI error");
+		freeze = true;
+	}
+
+	if (idma_stat & IDMA_STAT_CPBERR) {
+		ata_ehi_push_desc(ehi, "CPB error");
+
+		if (cpb->resp_flags & CPB_RESP_IGNORED) {
+			__ata_ehi_push_desc(ehi, " ignored");
+			ehi->err_mask |= AC_ERR_INVALID;
+			freeze = true;
+		}
+
+		if (cpb->resp_flags & CPB_RESP_ATA_ERR)
+			ehi->err_mask |= AC_ERR_DEV;
+
+		if (cpb->resp_flags & CPB_RESP_SPURIOUS) {
+			__ata_ehi_push_desc(ehi, " spurious-intr");
+			ehi->err_mask |= AC_ERR_HSM;
+			freeze = true;
+		}
+
+		if (cpb->resp_flags &
+		    (CPB_RESP_UNDERFLOW | CPB_RESP_OVERFLOW)) {
+			__ata_ehi_push_desc(ehi, " data-over/underflow");
+			ehi->err_mask |= AC_ERR_HSM;
+			freeze = true;
+		}
+	}
+
+	if (freeze)
+		ata_port_freeze(ap);
+	else
+		ata_port_abort(ap);
+}
+
+static void inic_host_intr(struct ata_port *ap)
+{
+	void __iomem *port_base = inic_port_base(ap);
+	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
 	u8 irq_stat;
+	u16 idma_stat;
 
-	/* fetch and clear irq */
+	/* read and clear IRQ status */
 	irq_stat = readb(port_base + PORT_IRQ_STAT);
 	writeb(irq_stat, port_base + PORT_IRQ_STAT);
+	idma_stat = readw(port_base + PORT_IDMA_STAT);
+
+	if (unlikely((irq_stat & PIRQ_ERR) || (idma_stat & IDMA_STAT_ERR)))
+		inic_host_err_intr(ap, irq_stat, idma_stat);
+
+	if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
+		ap->ops->sff_check_status(ap); /* clear ATA interrupt */
+		goto spurious;
+	}
+
+	if (qc->tf.protocol == ATA_PROT_DMA) {
+		if (likely(idma_stat & IDMA_STAT_DONE)) {
+			inic_stop_idma(ap);
 
-	if (likely(!(irq_stat & PIRQ_ERR))) {
-		struct ata_queued_cmd *qc =
-			ata_qc_from_tag(ap, ap->link.active_tag);
+			/* Depending on circumstances, device error
+			 * isn't reported by IDMA, check it explicitly.
+			 */
+			if (unlikely(readb(port_base + PORT_TF_COMMAND) &
+				     (ATA_DF | ATA_ERR)))
+				qc->err_mask |= AC_ERR_DEV;
 
-		if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
-			ap->ops->sff_check_status(ap); /* clear ATA interrupt */
+			ata_qc_complete(qc);
 			return;
 		}
-
+	} else {
 		if (likely(ata_sff_host_intr(ap, qc)))
 			return;
-
-		ap->ops->sff_check_status(ap); /* clear ATA interrupt */
-		ata_port_printk(ap, KERN_WARNING, "unhandled "
-				"interrupt, irq_stat=%x\n", irq_stat);
-		return;
 	}
 
-	/* error */
-	ata_ehi_push_desc(ehi, "irq_stat=0x%x", irq_stat);
-
-	if (irq_stat & (PIRQ_OFFLINE | PIRQ_ONLINE)) {
-		ata_ehi_hotplugged(ehi);
-		ata_port_freeze(ap);
-	} else
-		ata_port_abort(ap);
+ spurious:
+	ap->ops->sff_check_status(ap); /* clear ATA interrupt */
 }
 
 static irqreturn_t inic_interrupt(int irq, void *dev_instance)
@@ -378,22 +499,83 @@ static irqreturn_t inic_interrupt(int irq, void *dev_instance)
 	return IRQ_RETVAL(handled);
 }
 
+static void inic_fill_sg(struct inic_prd *prd, struct ata_queued_cmd *qc)
+{
+	struct scatterlist *sg;
+	unsigned int si;
+	u8 flags = PRD_DMA;
+
+	if (qc->tf.flags & ATA_TFLAG_WRITE)
+		flags |= PRD_WRITE;
+
+	for_each_sg(qc->sg, sg, qc->n_elem, si) {
+		prd->mad = cpu_to_le32(sg_dma_address(sg));
+		prd->len = cpu_to_le16(sg_dma_len(sg));
+		prd->flags = flags;
+		prd++;
+	}
+
+	WARN_ON(!si);
+	prd[-1].flags |= PRD_END;
+}
+
+static void inic_qc_prep(struct ata_queued_cmd *qc)
+{
+	struct inic_port_priv *pp = qc->ap->private_data;
+	struct inic_pkt *pkt = pp->pkt;
+	struct inic_cpb *cpb = &pkt->cpb;
+	struct inic_prd *prd = pkt->prd;
+
+	VPRINTK("ENTER\n");
+
+	if (qc->tf.protocol != ATA_PROT_DMA)
+		return;
+
+	/* prepare packet, based on initio driver */
+	memset(pkt, 0, sizeof(struct inic_pkt));
+
+	cpb->ctl_flags = CPB_CTL_VALID | CPB_CTL_IEN | CPB_CTL_DATA;
+
+	cpb->len = cpu_to_le32(qc->nbytes);
+	cpb->prd = cpu_to_le32(pp->pkt_dma + offsetof(struct inic_pkt, prd));
+
+	cpb->device = qc->tf.device;
+	cpb->feature = qc->tf.feature;
+	cpb->nsect = qc->tf.nsect;
+	cpb->lbal = qc->tf.lbal;
+	cpb->lbam = qc->tf.lbam;
+	cpb->lbah = qc->tf.lbah;
+
+	if (qc->tf.flags & ATA_TFLAG_LBA48) {
+		cpb->hob_feature = qc->tf.hob_feature;
+		cpb->hob_nsect = qc->tf.hob_nsect;
+		cpb->hob_lbal = qc->tf.hob_lbal;
+		cpb->hob_lbam = qc->tf.hob_lbam;
+		cpb->hob_lbah = qc->tf.hob_lbah;
+	}
+
+	cpb->command = qc->tf.command;
+	/* don't load ctl - dunno why. it's like that in the initio driver */
+
+	/* setup sg table */
+	inic_fill_sg(prd, qc);
+
+	pp->cpb_tbl[0] = pp->pkt_dma;
+}
+
 static unsigned int inic_qc_issue(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
+	void __iomem *port_base = inic_port_base(ap);
 
-	/* ATA IRQ doesn't wait for DMA transfer completion and vice
-	 * versa. Mask IRQ selectively to detect command completion.
-	 * Without it, ATA DMA read command can cause data corruption.
-	 *
-	 * Something similar might be needed for ATAPI writes. I
-	 * tried a lot of combinations but couldn't find the solution.
-	 */
-	if (qc->tf.protocol == ATA_PROT_DMA &&
-	    !(qc->tf.flags & ATA_TFLAG_WRITE))
-		inic_set_pirq_mask(ap, PIRQ_MASK_DMA_READ);
-	else
-		inic_set_pirq_mask(ap, PIRQ_MASK_OTHER);
+	if (qc->tf.protocol == ATA_PROT_DMA) {
+		/* fire up the ADMA engine */
+		writew(HCTL_FTHD0, port_base + HOST_CTL);
+		writew(IDMA_CTL_GO, port_base + PORT_IDMA_CTL);
+		writeb(0, port_base + PORT_CPB_PTQFIFO);
+
+		return 0;
+	}
 
 	/* Issuing a command to yet uninitialized port locks up the
 	 * controller. Most of the time, this happens for the first
@@ -564,9 +746,15 @@ static void inic_dev_config(struct ata_device *dev)
 static void init_port(struct ata_port *ap)
 {
 	void __iomem *port_base = inic_port_base(ap);
+	struct inic_port_priv *pp = ap->private_data;
 
-	/* Setup PRD address */
+	/* clear packet and CPB table */
+	memset(pp->pkt, 0, sizeof(struct inic_pkt));
+	memset(pp->cpb_tbl, 0, IDMA_CPB_TBL_SIZE);
+
+	/* setup PRD and CPB lookup table addresses */
 	writel(ap->prd_dma, port_base + PORT_PRD_ADDR);
+	writel(pp->cpb_tbl_dma, port_base + PORT_CPB_CPBLAR);
 }
 
 static int inic_port_resume(struct ata_port *ap)
@@ -578,12 +766,13 @@ static int inic_port_resume(struct ata_port *ap)
 static int inic_port_start(struct ata_port *ap)
 {
 	void __iomem *port_base = inic_port_base(ap);
+	struct device *dev = ap->host->dev;
 	struct inic_port_priv *pp;
 	u8 tmp;
 	int rc;
 
 	/* alloc and initialize private data */
-	pp = devm_kzalloc(ap->host->dev, sizeof(*pp), GFP_KERNEL);
+	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
 	if (!pp)
 		return -ENOMEM;
 	ap->private_data = pp;
@@ -598,6 +787,16 @@ static int inic_port_start(struct ata_port *ap)
 	if (rc)
 		return rc;
 
+	pp->pkt = dmam_alloc_coherent(dev, sizeof(struct inic_pkt),
+				      &pp->pkt_dma, GFP_KERNEL);
+	if (!pp->pkt)
+		return -ENOMEM;
+
+	pp->cpb_tbl = dmam_alloc_coherent(dev, IDMA_CPB_TBL_SIZE,
+					  &pp->cpb_tbl_dma, GFP_KERNEL);
+	if (!pp->cpb_tbl)
+		return -ENOMEM;
+
 	init_port(ap);
 
 	return 0;
@@ -610,6 +809,7 @@ static struct ata_port_operations inic_port_ops = {
 	.bmdma_start		= inic_bmdma_start,
 	.bmdma_stop		= inic_bmdma_stop,
 	.bmdma_status		= inic_bmdma_status,
+	.qc_prep		= inic_qc_prep,
 	.qc_issue		= inic_qc_issue,
 	.qc_fill_rtf		= inic_qc_fill_rtf,
 