author	Robert Hancock <hancockr@shaw.ca>	2008-02-04 20:39:02 -0500
committer	Jeff Garzik <jeff@garzik.org>	2008-02-06 07:00:27 -0500
commit	8959d300a79c1b70526cdf9e00485262cf8d979f (patch)
tree	49a69098a15da63317ea230e11f86a9d4d7341f6 /drivers
parent	8d8b60046d6a2328ca4b9031b4948084f775f607 (diff)
sata_nv: fix ATAPI issues with memory over 4GB (v7)
This fixes some problems with ATAPI devices on nForce4 controllers in
ADMA mode on systems with memory located above 4GB. We need to delay
setting the 64-bit DMA mask until the PRD table and padding buffer are
allocated, so that they don't get allocated above 4GB and break legacy
mode (which is needed for ATAPI devices). Also, if either port is in
ATAPI mode, we need to set the DMA mask for the PCI device to 32-bit
to ensure that the IOMMU code properly bounces requests above 4GB, as
it appears that setting the bounce limit does not guarantee that we
will not try to map requests above this point.

Reported to fix https://bugzilla.redhat.com/show_bug.cgi?id=351451

Signed-off-by: Robert Hancock <hancockr@shaw.ca>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
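The core of the fix is an ordering rule: anything legacy mode must be
able to reach has to be allocated while the device's DMA mask is still
32-bit, and only afterwards may the mask be widened for the ADMA-only
structures. The sketch below shows that two-phase pattern in isolation,
assuming the 2.6.24-era PCI DMA API; alloc_example() and the buffer
names are hypothetical and only illustrate the ordering, they are not
the driver code.

/* Minimal sketch of the two-phase mask pattern; illustrative only. */
#include <linux/pci.h>
#include <linux/dma-mapping.h>

static int alloc_example(struct pci_dev *pdev, size_t sz, u64 *saved_mask)
{
	struct device *dev = &pdev->dev;
	dma_addr_t low_dma, high_dma;
	void *low_buf, *high_buf;
	int rc;

	/* Phase 1: 32-bit mask, so this buffer lands below 4GB. */
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc)
		return rc;
	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc)
		return rc;
	low_buf = dmam_alloc_coherent(dev, sz, &low_dma, GFP_KERNEL);
	if (!low_buf)
		return -ENOMEM;

	/* Phase 2: try to widen to 64-bit. Failure simply leaves the
	 * 32-bit mask in place, so the effective mask is recorded
	 * either way for later use as a bounce limit. */
	pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	*saved_mask = *dev->dma_mask;

	high_buf = dmam_alloc_coherent(dev, sz, &high_dma, GFP_KERNEL);
	if (!high_buf)
		return -ENOMEM;
	return 0;
}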
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/ata/sata_nv.c	78
1 file changed, 65 insertions(+), 13 deletions(-)
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index bfe92a43cf89..ed5473bf7a0a 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -247,6 +247,7 @@ struct nv_adma_port_priv {
 	void __iomem		*ctl_block;
 	void __iomem		*gen_block;
 	void __iomem		*notifier_clear_block;
+	u64			adma_dma_mask;
 	u8			flags;
 	int			last_issue_ncq;
 };
@@ -715,9 +716,10 @@ static int nv_adma_slave_config(struct scsi_device *sdev)
 {
 	struct ata_port *ap = ata_shost_to_port(sdev->host);
 	struct nv_adma_port_priv *pp = ap->private_data;
+	struct nv_adma_port_priv *port0, *port1;
+	struct scsi_device *sdev0, *sdev1;
 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
-	u64 bounce_limit;
-	unsigned long segment_boundary;
+	unsigned long segment_boundary, flags;
 	unsigned short sg_tablesize;
 	int rc;
 	int adma_enable;
@@ -729,6 +731,8 @@ static int nv_adma_slave_config(struct scsi_device *sdev)
 		/* Not a proper libata device, ignore */
 		return rc;
 
+	spin_lock_irqsave(ap->lock, flags);
+
 	if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) {
 		/*
 		 * NVIDIA reports that ADMA mode does not support ATAPI commands.
@@ -737,7 +741,6 @@ static int nv_adma_slave_config(struct scsi_device *sdev)
 		 * Restrict DMA parameters as required by the legacy interface
 		 * when an ATAPI device is connected.
 		 */
-		bounce_limit = ATA_DMA_MASK;
 		segment_boundary = ATA_DMA_BOUNDARY;
 		/* Subtract 1 since an extra entry may be needed for padding, see
 		   libata-scsi.c */
@@ -748,7 +751,6 @@ static int nv_adma_slave_config(struct scsi_device *sdev)
 		adma_enable = 0;
 		nv_adma_register_mode(ap);
 	} else {
-		bounce_limit = *ap->dev->dma_mask;
 		segment_boundary = NV_ADMA_DMA_BOUNDARY;
 		sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
 		adma_enable = 1;
@@ -774,12 +776,49 @@ static int nv_adma_slave_config(struct scsi_device *sdev)
 	if (current_reg != new_reg)
 		pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);
 
-	blk_queue_bounce_limit(sdev->request_queue, bounce_limit);
+	port0 = ap->host->ports[0]->private_data;
+	port1 = ap->host->ports[1]->private_data;
+	sdev0 = ap->host->ports[0]->link.device[0].sdev;
+	sdev1 = ap->host->ports[1]->link.device[0].sdev;
+	if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
+	    (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
+		/* We have to set the DMA mask to 32-bit if either port is in
+		   ATAPI mode, since they are on the same PCI device which is
+		   used for DMA mapping. If we set the mask we also need to
+		   set the bounce limit on both ports to ensure that the
+		   block layer doesn't feed addresses that cause DMA mapping
+		   to choke. If either SCSI device is not allocated yet, it's
+		   OK since that port will discover its correct setting when
+		   it does get allocated.
+		   Note: Setting the 32-bit mask should not fail. */
+		if (sdev0)
+			blk_queue_bounce_limit(sdev0->request_queue,
+					       ATA_DMA_MASK);
+		if (sdev1)
+			blk_queue_bounce_limit(sdev1->request_queue,
+					       ATA_DMA_MASK);
+
+		pci_set_dma_mask(pdev, ATA_DMA_MASK);
+	} else {
+		/* This shouldn't fail as it was set to this value before */
+		pci_set_dma_mask(pdev, pp->adma_dma_mask);
+		if (sdev0)
+			blk_queue_bounce_limit(sdev0->request_queue,
+					       pp->adma_dma_mask);
+		if (sdev1)
+			blk_queue_bounce_limit(sdev1->request_queue,
+					       pp->adma_dma_mask);
+	}
+
 	blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
 	blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
 	ata_port_printk(ap, KERN_INFO,
-		"bounce limit 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
-		(unsigned long long)bounce_limit, segment_boundary, sg_tablesize);
+		"DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
+		(unsigned long long)*ap->host->dev->dma_mask,
+		segment_boundary, sg_tablesize);
+
+	spin_unlock_irqrestore(ap->lock, flags);
+
 	return rc;
 }
 
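Condensing the hunk above: both ports sit behind a single PCI function,
so one ATAPI device drags the whole function down to a 32-bit mask, and
both request queues get a matching bounce limit. A sketch of just the
selection rule follows (the mask local is introduced here purely for
illustration; the locking and the printk are omitted):

	u64 mask = ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
		    (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE))
			? ATA_DMA_MASK		/* legacy 32-bit limit */
			: pp->adma_dma_mask;	/* saved in nv_adma_port_start() */

	pci_set_dma_mask(pdev, mask);
	if (sdev0)
		blk_queue_bounce_limit(sdev0->request_queue, mask);
	if (sdev1)
		blk_queue_bounce_limit(sdev1->request_queue, mask);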
@@ -1140,10 +1179,20 @@ static int nv_adma_port_start(struct ata_port *ap)
 	void *mem;
 	dma_addr_t mem_dma;
 	void __iomem *mmio;
+	struct pci_dev *pdev = to_pci_dev(dev);
 	u16 tmp;
 
 	VPRINTK("ENTER\n");
 
+	/* Ensure DMA mask is set to 32-bit before allocating legacy PRD and
+	   pad buffers */
+	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+	if (rc)
+		return rc;
+	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+	if (rc)
+		return rc;
+
 	rc = ata_port_start(ap);
 	if (rc)
 		return rc;
@@ -1159,6 +1208,15 @@ static int nv_adma_port_start(struct ata_port *ap)
 	pp->notifier_clear_block = pp->gen_block +
 	       NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);
 
+	/* Now that the legacy PRD and padding buffer are allocated we can
+	   safely raise the DMA mask to allocate the CPB/APRD table.
+	   These are allowed to fail since we store the value that ends up
+	   being used as the bounce limit in slave_config later, if
+	   needed. */
+	pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+	pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+	pp->adma_dma_mask = *dev->dma_mask;
+
 	mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
 				  &mem_dma, GFP_KERNEL);
 	if (!mem)
@@ -2417,12 +2475,6 @@ static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	hpriv->type = type;
 	host->private_data = hpriv;
 
-	/* set 64bit dma masks, may fail */
-	if (type == ADMA) {
-		if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0)
-			pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
-	}
-
 	/* request and iomap NV_MMIO_BAR */
 	rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME);
 	if (rc)