author     Tejun Heo <htejun@gmail.com>      2008-04-30 03:35:13 -0400
committer  Jeff Garzik <jgarzik@redhat.com>  2008-05-06 11:40:55 -0400
commit     049e8e04986bde66df9648d88d0960ab4cbd6992
tree       bf3bc3e8feb87290b7458beea77ce4a6551ac1cd
parent     ab5b0235c4e819c9bc45fa62c99f9fe49e73e701
sata_inic162x: use IDMA for non DMA ATA commands
Use IDMA for PIO and non-data commands. This allows sata_inic162x to
safely drive LBA48 devices.  Kill inic_dev_config(), which contains the
code to reject LBA48 devices.
With this change, the status check in inic_qc_issue() that avoided a
hard lockup after hotplug can go away too.
Signed-off-by: Tejun Heo <htejun@gmail.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
-rw-r--r--  drivers/ata/sata_inic162x.c  49
1 file changed, 15 insertions(+), 34 deletions(-)
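
[Editor's note] Taken together, the hunks below route every non-ATAPI command
(DMA, PIO, and non-data alike) through the IDMA engine, and derive the CPB and
PRD flags from the protocol class instead of hard-coding the DMA case.  The
stand-alone sketch below models that decision table only; the enum and the
three predicates are simplified, hypothetical stand-ins for libata's
ata_is_atapi()/ata_is_data()/ata_is_dma() helpers, not the driver code itself.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for libata's protocol classes and predicates;
 * the real helpers live in <linux/libata.h>. */
enum prot { PROT_NODATA, PROT_PIO, PROT_DMA, PROT_ATAPI_PIO, PROT_ATAPI_DMA };

static bool is_atapi(enum prot p) { return p >= PROT_ATAPI_PIO; }
static bool is_data(enum prot p)  { return p != PROT_NODATA; }
static bool is_dma(enum prot p)   { return p == PROT_DMA || p == PROT_ATAPI_DMA; }

int main(void)
{
	static const char *const name[] = {
		"NODATA", "PIO", "DMA", "ATAPI_PIO", "ATAPI_DMA",
	};

	for (int p = PROT_NODATA; p <= PROT_ATAPI_DMA; p++) {
		/* Post-patch routing: only ATAPI keeps the SFF taskfile
		 * path (inic_qc_issue falls through to ata_sff_qc_issue). */
		bool idma = !is_atapi(p);
		/* inic_qc_prep sets CPB_CTL_DATA and builds the PRD table
		 * only for data commands; inic_fill_sg sets PRD_DMA only
		 * for true DMA protocols. */
		bool cpb_data = idma && is_data(p);
		bool prd_dma  = idma && is_dma(p);

		printf("%-9s -> engine=%-4s CPB_CTL_DATA=%d PRD_DMA=%d\n",
		       name[p], idma ? "IDMA" : "SFF", cpb_data, prd_dma);
	}
	return 0;
}

The NODATA and PIO rows now landing on the IDMA engine are what makes LBA48
devices safe to drive, which is why the reject logic in inic_dev_config() can
be removed.
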
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
index 3ca0ee93bc1f..579154c27902 100644
--- a/drivers/ata/sata_inic162x.c
+++ b/drivers/ata/sata_inic162x.c
@@ -356,12 +356,12 @@ static void inic_host_intr(struct ata_port *ap)
 	if (unlikely((irq_stat & PIRQ_ERR) || (idma_stat & IDMA_STAT_ERR)))
 		inic_host_err_intr(ap, irq_stat, idma_stat);
 
-	if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
+	if (unlikely(!qc)) {
 		ap->ops->sff_check_status(ap); /* clear ATA interrupt */
 		goto spurious;
 	}
 
-	if (qc->tf.protocol == ATA_PROT_DMA) {
+	if (!ata_is_atapi(qc->tf.protocol)) {
 		if (likely(idma_stat & IDMA_STAT_DONE)) {
 			inic_stop_idma(ap);
 
@@ -425,11 +425,14 @@ static void inic_fill_sg(struct inic_prd *prd, struct ata_queued_cmd *qc)
 {
 	struct scatterlist *sg;
 	unsigned int si;
-	u8 flags = PRD_DMA;
+	u8 flags = 0;
 
 	if (qc->tf.flags & ATA_TFLAG_WRITE)
 		flags |= PRD_WRITE;
 
+	if (ata_is_dma(qc->tf.protocol))
+		flags |= PRD_DMA;
+
 	for_each_sg(qc->sg, sg, qc->n_elem, si) {
 		prd->mad = cpu_to_le32(sg_dma_address(sg));
 		prd->len = cpu_to_le16(sg_dma_len(sg));
@@ -447,16 +450,20 @@ static void inic_qc_prep(struct ata_queued_cmd *qc)
 	struct inic_pkt *pkt = pp->pkt;
 	struct inic_cpb *cpb = &pkt->cpb;
 	struct inic_prd *prd = pkt->prd;
+	bool is_atapi = ata_is_atapi(qc->tf.protocol);
+	bool is_data = ata_is_data(qc->tf.protocol);
 
 	VPRINTK("ENTER\n");
 
-	if (qc->tf.protocol != ATA_PROT_DMA)
+	if (is_atapi)
 		return;
 
 	/* prepare packet, based on initio driver */
 	memset(pkt, 0, sizeof(struct inic_pkt));
 
-	cpb->ctl_flags = CPB_CTL_VALID | CPB_CTL_IEN | CPB_CTL_DATA;
+	cpb->ctl_flags = CPB_CTL_VALID | CPB_CTL_IEN;
+	if (is_data)
+		cpb->ctl_flags |= CPB_CTL_DATA;
 
 	cpb->len = cpu_to_le32(qc->nbytes);
 	cpb->prd = cpu_to_le32(pp->pkt_dma + offsetof(struct inic_pkt, prd));
@@ -480,7 +487,8 @@ static void inic_qc_prep(struct ata_queued_cmd *qc)
 	/* don't load ctl - dunno why.  it's like that in the initio driver */
 
 	/* setup sg table */
-	inic_fill_sg(prd, qc);
+	if (is_data)
+		inic_fill_sg(prd, qc);
 
 	pp->cpb_tbl[0] = pp->pkt_dma;
 }
@@ -490,7 +498,7 @@ static unsigned int inic_qc_issue(struct ata_queued_cmd *qc)
 	struct ata_port *ap = qc->ap;
 	void __iomem *port_base = inic_port_base(ap);
 
-	if (qc->tf.protocol == ATA_PROT_DMA) {
+	if (!ata_is_atapi(qc->tf.protocol)) {
 		/* fire up the ADMA engine */
 		writew(HCTL_FTHD0, port_base + HOST_CTL);
 		writew(IDMA_CTL_GO, port_base + PORT_IDMA_CTL);
@@ -499,18 +507,6 @@ static unsigned int inic_qc_issue(struct ata_queued_cmd *qc)
 		return 0;
 	}
 
-	/* Issuing a command to yet uninitialized port locks up the
-	 * controller.  Most of the time, this happens for the first
-	 * command after reset which are ATA and ATAPI IDENTIFYs.
-	 * Fast fail if stat is 0x7f or 0xff for those commands.
-	 */
-	if (unlikely(qc->tf.command == ATA_CMD_ID_ATA ||
-		     qc->tf.command == ATA_CMD_ID_ATAPI)) {
-		u8 stat = ap->ops->sff_check_status(ap);
-		if (stat == 0x7f || stat == 0xff)
-			return AC_ERR_HSM;
-	}
-
 	return ata_sff_qc_issue(qc);
 }
 
@@ -647,20 +643,6 @@ static void inic_post_internal_cmd(struct ata_queued_cmd *qc)
 	inic_reset_port(inic_port_base(qc->ap));
 }
 
-static void inic_dev_config(struct ata_device *dev)
-{
-	/* inic can only handle upto LBA28 max sectors */
-	if (dev->max_sectors > ATA_MAX_SECTORS)
-		dev->max_sectors = ATA_MAX_SECTORS;
-
-	if (dev->n_sectors >= 1 << 28) {
-		ata_dev_printk(dev, KERN_ERR,
-	"ERROR: This driver doesn't support LBA48 yet and may cause\n"
-	"       data corruption on such devices.  Disabling.\n");
-		ata_dev_disable(dev);
-	}
-}
-
 static void init_port(struct ata_port *ap)
 {
 	void __iomem *port_base = inic_port_base(ap);
@@ -726,7 +708,6 @@ static struct ata_port_operations inic_port_ops = {
 	.hardreset		= inic_hardreset,
 	.error_handler		= inic_error_handler,
 	.post_internal_cmd	= inic_post_internal_cmd,
-	.dev_config		= inic_dev_config,
 
 	.scr_read		= inic_scr_read,
 	.scr_write		= inic_scr_write,