author     Jeff Garzik <jeff@garzik.org>    2007-02-26 01:26:06 -0500
committer  Jeff Garzik <jeff@garzik.org>    2007-04-28 14:15:55 -0400
commit     d88184fb2348a50f7c34f5d49a901c875b2e0114
tree       83993038a7525f6b61aea8f76ea38ddb96325a2f /drivers/ata
parent     43727fbc753c63f9d2764c56467303698cc52c14
[libata] sata_mv: clean up DMA boundary issues, turn on 64-bit DMA
The chips covered by sata_mv have a 32-bit DMA boundary they must not
cross, not a 64K boundary. We are merely limited to a 64K maximum
segment size. Therefore, the DMA scatter/gather table fill code can be
greatly simplified, and we need not cut in half the S/G table size as
reported to the SCSI layer.
Also, the driver forgot to turn on 64-bit DMA at the PCI layer. All
other data structures (both hardware and software) have been prepped for
64-bit PCI DMA. It was simply never turned on. <fingers crossed> let's
see if it still works...
Signed-off-by: Jeff Garzik <jeff@garzik.org>
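
A note on the distinction the message draws: a DMA boundary restricts which addresses a single S/G segment may span, while a maximum segment size only caps its length. A minimal sketch of the two checks, with hypothetical helper names (not from the driver):

```c
#include <stdbool.h>
#include <stdint.h>

/* True if [addr, addr + len) straddles a (boundary_mask + 1)-aligned border. */
static bool crosses_boundary(uint64_t addr, uint32_t len, uint64_t boundary_mask)
{
	return (addr & ~boundary_mask) != ((addr + len - 1) & ~boundary_mask);
}

/* sata_mv after this patch: a segment may not cross a 4 GiB (32-bit)
 * boundary and may be at most 64 KiB long. */
static bool mv_segment_ok(uint64_t addr, uint32_t len)
{
	return len <= 0x10000 && !crosses_boundary(addr, len, 0xffffffffULL);
}
```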
Diffstat (limited to 'drivers/ata')
-rw-r--r--  drivers/ata/sata_mv.c | 84
1 file changed, 55 insertions, 29 deletions
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 5f3d524c3387..7b73c73b3f34 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -253,10 +253,7 @@ enum {
 #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
 
 enum {
-        /* Our DMA boundary is determined by an ePRD being unable to handle
-         * anything larger than 64KB
-         */
-        MV_DMA_BOUNDARY = 0xffffU,
+        MV_DMA_BOUNDARY = 0xffffffffU,
 
         EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,
 
@@ -384,10 +381,10 @@ static struct scsi_host_template mv_sht = {
         .queuecommand = ata_scsi_queuecmd,
         .can_queue = MV_USE_Q_DEPTH,
         .this_id = ATA_SHT_THIS_ID,
-        .sg_tablesize = MV_MAX_SG_CT / 2,
+        .sg_tablesize = MV_MAX_SG_CT,
         .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
         .emulated = ATA_SHT_EMULATED,
-        .use_clustering = ATA_SHT_USE_CLUSTERING,
+        .use_clustering = 1,
         .proc_name = DRV_NAME,
         .dma_boundary = MV_DMA_BOUNDARY,
         .slave_configure = ata_scsi_slave_config,
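
Why .sg_tablesize can go back up to MV_MAX_SG_CT: the old fill loop (removed further down) could split one scatterlist entry into two ePRDs when it straddled a 64 KiB border, which is why only half the table was advertised to the SCSI layer. A hypothetical helper, written as if inside the driver (kernel u32 type assumed) and mirroring the removed loop, makes that worst case visible:

```c
/* How many ePRDs the pre-patch loop would emit for one S/G entry.
 * An entry of at most 64 KiB straddles at most one 64 KiB border,
 * i.e. up to two ePRDs -- hence the old MV_MAX_SG_CT / 2. */
static unsigned int old_eprd_count(u32 addr, u32 sg_len)
{
	unsigned int n = 0;

	while (sg_len) {
		u32 offset = addr & 0xffff;
		u32 len = sg_len;

		if (offset + sg_len > 0x10000)
			len = 0x10000 - offset;

		sg_len -= len;
		addr += len;
		n++;
	}
	return n;
}
```

With the new 1:1 mapping of scatterlist entries to ePRDs, the full table size can be reported.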
@@ -585,6 +582,39 @@ static const struct mv_hw_ops mv6xxx_ops = {
 static int msi; /* Use PCI msi; either zero (off, default) or non-zero */
 
 
+/* move to PCI layer or libata core? */
+static int pci_go_64(struct pci_dev *pdev)
+{
+        int rc;
+
+        if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
+                rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
+                if (rc) {
+                        rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+                        if (rc) {
+                                dev_printk(KERN_ERR, &pdev->dev,
+                                           "64-bit DMA enable failed\n");
+                                return rc;
+                        }
+                }
+        } else {
+                rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+                if (rc) {
+                        dev_printk(KERN_ERR, &pdev->dev,
+                                   "32-bit DMA enable failed\n");
+                        return rc;
+                }
+                rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+                if (rc) {
+                        dev_printk(KERN_ERR, &pdev->dev,
+                                   "32-bit consistent DMA enable failed\n");
+                        return rc;
+                }
+        }
+
+        return rc;
+}
+
 /*
  * Functions
  */
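
For readers following this code in later kernels: pci_set_dma_mask()/pci_set_consistent_dma_mask() and the DMA_64BIT_MASK/DMA_32BIT_MASK constants were eventually superseded by dma_set_mask_and_coherent() and DMA_BIT_MASK(). Purely for comparison, a sketch of the same fallback against the newer API (not part of this patch, and it folds the original's mixed 64-bit-streaming/32-bit-coherent case into the plain 32-bit fallback):

```c
/* Modern-API equivalent of pci_go_64(), for comparison only. */
static int mv_set_dma_masks(struct pci_dev *pdev)
{
	int rc;

	/* Try 64-bit streaming + coherent masks, fall back to 32-bit. */
	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc)
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (rc)
		dev_err(&pdev->dev, "DMA enable failed\n");

	return rc;
}
```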
@@ -957,38 +987,30 @@ static void mv_port_stop(struct ata_port *ap)
  * LOCKING:
  *      Inherited from caller.
  */
-static void mv_fill_sg(struct ata_queued_cmd *qc)
+static unsigned int mv_fill_sg(struct ata_queued_cmd *qc)
 {
         struct mv_port_priv *pp = qc->ap->private_data;
-        unsigned int i = 0;
+        unsigned int n_sg = 0;
         struct scatterlist *sg;
+        struct mv_sg *mv_sg;
 
+        mv_sg = pp->sg_tbl;
         ata_for_each_sg(sg, qc) {
-                dma_addr_t addr;
-                u32 sg_len, len, offset;
-
-                addr = sg_dma_address(sg);
-                sg_len = sg_dma_len(sg);
-
-                while (sg_len) {
-                        offset = addr & MV_DMA_BOUNDARY;
-                        len = sg_len;
-                        if ((offset + sg_len) > 0x10000)
-                                len = 0x10000 - offset;
-
-                        pp->sg_tbl[i].addr = cpu_to_le32(addr & 0xffffffff);
-                        pp->sg_tbl[i].addr_hi = cpu_to_le32((addr >> 16) >> 16);
-                        pp->sg_tbl[i].flags_size = cpu_to_le32(len & 0xffff);
+                dma_addr_t addr = sg_dma_address(sg);
+                u32 sg_len = sg_dma_len(sg);
 
-                        sg_len -= len;
-                        addr += len;
+                mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
+                mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
+                mv_sg->flags_size = cpu_to_le32(sg_len & 0xffff);
 
-                        if (!sg_len && ata_sg_is_last(sg, qc))
-                                pp->sg_tbl[i].flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
+                if (ata_sg_is_last(sg, qc))
+                        mv_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
 
-                        i++;
-                }
+                mv_sg++;
+                n_sg++;
         }
+
+        return n_sg;
 }
 
 static inline unsigned mv_inc_q_index(unsigned index)
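
One detail worth calling out in the rewritten fill routine is the addr_hi computation. Shifting a 32-bit dma_addr_t right by 32 would be undefined behavior in C, so the upper word is extracted in two 16-bit steps. A small illustration with assumed helper names (not in the driver):

```c
/* Upper and lower 32 bits of a dma_addr_t that may itself be only 32 bits
 * wide: "(addr >> 16) >> 16" avoids an undefined full-width shift and is
 * simply 0 when dma_addr_t is 32-bit. */
static inline u32 dma_addr_hi32(dma_addr_t addr)
{
	return (addr >> 16) >> 16;
}

static inline u32 dma_addr_lo32(dma_addr_t addr)
{
	return addr & 0xffffffff;
}
```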
@@ -2327,6 +2349,10 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
         if (rc)
                 return rc;
 
+        rc = pci_go_64(pdev);
+        if (rc)
+                return rc;
+
         probe_ent = devm_kzalloc(dev, sizeof(*probe_ent), GFP_KERNEL);
         if (probe_ent == NULL)
                 return -ENOMEM;
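
The placement of the new call matters: the DMA masks have to be configured before the probe path allocates any DMA-coherent request/response queue memory. A simplified, hypothetical probe skeleton (not the real mv_init_one) showing that ordering:

```c
static int example_init_one(struct pci_dev *pdev)
{
	int rc;

	rc = pcim_enable_device(pdev);	/* enable the device first */
	if (rc)
		return rc;

	rc = pci_go_64(pdev);		/* then configure the DMA masks */
	if (rc)
		return rc;

	/* Only now is it safe to allocate DMA-coherent queue memory,
	 * set up the host structures, and register the host. */
	return 0;
}
```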