about summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r--  arch/tile/include/asm/device.h        5
-rw-r--r--  arch/tile/include/asm/dma-mapping.h  21
-rw-r--r--  arch/tile/kernel/pci-dma.c           21
-rw-r--r--  arch/tile/kernel/pci_gx.c            15
4 files changed, 42 insertions(+), 20 deletions(-)
diff --git a/arch/tile/include/asm/device.h b/arch/tile/include/asm/device.h
index 5182705bd056..6ab8bf146d4c 100644
--- a/arch/tile/include/asm/device.h
+++ b/arch/tile/include/asm/device.h
@@ -23,7 +23,10 @@ struct dev_archdata {
 	/* Offset of the DMA address from the PA. */
 	dma_addr_t dma_offset;
 
-	/* Highest DMA address that can be generated by this device. */
+	/*
+	 * Highest DMA address that can be generated by devices that
+	 * have limited DMA capability, i.e. non 64-bit capable.
+	 */
 	dma_addr_t max_direct_dma_addr;
 };
 
diff --git a/arch/tile/include/asm/dma-mapping.h b/arch/tile/include/asm/dma-mapping.h
index 6f522d569132..1eae359d8315 100644
--- a/arch/tile/include/asm/dma-mapping.h
+++ b/arch/tile/include/asm/dma-mapping.h
@@ -92,14 +92,19 @@ dma_set_mask(struct device *dev, u64 mask)
 {
 	struct dma_map_ops *dma_ops = get_dma_ops(dev);
 
-	/* Handle legacy PCI devices with limited memory addressability. */
-	if ((dma_ops == gx_pci_dma_map_ops ||
-	     dma_ops == gx_hybrid_pci_dma_map_ops ||
-	     dma_ops == gx_legacy_pci_dma_map_ops) &&
-	    (mask <= DMA_BIT_MASK(32))) {
-		set_dma_ops(dev, gx_legacy_pci_dma_map_ops);
-		set_dma_offset(dev, 0);
-		if (mask > dev->archdata.max_direct_dma_addr)
+	/*
+	 * For PCI devices with 64-bit DMA addressing capability, promote
+	 * the dma_ops to hybrid, with the consistent memory DMA space limited
+	 * to 32-bit. For 32-bit capable devices, limit the streaming DMA
+	 * address range to max_direct_dma_addr.
+	 */
+	if (dma_ops == gx_pci_dma_map_ops ||
+	    dma_ops == gx_hybrid_pci_dma_map_ops ||
+	    dma_ops == gx_legacy_pci_dma_map_ops) {
+		if (mask == DMA_BIT_MASK(64) &&
+		    dma_ops == gx_legacy_pci_dma_map_ops)
+			set_dma_ops(dev, gx_hybrid_pci_dma_map_ops);
+		else if (mask > dev->archdata.max_direct_dma_addr)
 			mask = dev->archdata.max_direct_dma_addr;
 	}
 
diff --git a/arch/tile/kernel/pci-dma.c b/arch/tile/kernel/pci-dma.c
index d94f4872e94f..09b58703ac26 100644
--- a/arch/tile/kernel/pci-dma.c
+++ b/arch/tile/kernel/pci-dma.c
@@ -588,15 +588,18 @@ int dma_set_coherent_mask(struct device *dev, u64 mask)
 {
 	struct dma_map_ops *dma_ops = get_dma_ops(dev);
 
-	/* Handle hybrid PCI devices with limited memory addressability. */
-	if ((dma_ops == gx_pci_dma_map_ops ||
-	     dma_ops == gx_hybrid_pci_dma_map_ops ||
-	     dma_ops == gx_legacy_pci_dma_map_ops) &&
-	    (mask <= DMA_BIT_MASK(32))) {
-		if (dma_ops == gx_pci_dma_map_ops)
-			set_dma_ops(dev, gx_hybrid_pci_dma_map_ops);
-
-		if (mask > dev->archdata.max_direct_dma_addr)
+	/*
+	 * For PCI devices with 64-bit DMA addressing capability, promote
+	 * the dma_ops to full capability for both streams and consistent
+	 * memory access. For 32-bit capable devices, limit the consistent
+	 * memory DMA range to max_direct_dma_addr.
+	 */
+	if (dma_ops == gx_pci_dma_map_ops ||
+	    dma_ops == gx_hybrid_pci_dma_map_ops ||
+	    dma_ops == gx_legacy_pci_dma_map_ops) {
+		if (mask == DMA_BIT_MASK(64))
+			set_dma_ops(dev, gx_pci_dma_map_ops);
+		else if (mask > dev->archdata.max_direct_dma_addr)
 			mask = dev->archdata.max_direct_dma_addr;
 	}
 
diff --git a/arch/tile/kernel/pci_gx.c b/arch/tile/kernel/pci_gx.c
index 66ef9db5c2fb..29acac6af4ba 100644
--- a/arch/tile/kernel/pci_gx.c
+++ b/arch/tile/kernel/pci_gx.c
@@ -1081,13 +1081,24 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
 	return pci_enable_resources(dev, mask);
 }
 
-/* Called for each device after PCI setup is done. */
+/*
+ * Called for each device after PCI setup is done.
+ * We initialize the PCI device capabilities conservatively, assuming that
+ * all devices can only address the 32-bit DMA space. The exception here is
+ * that the device dma_offset is set to the value that matches the 64-bit
+ * capable devices. This is OK because dma_offset is not used by legacy
+ * dma_ops, nor by the hybrid dma_ops's streaming DMAs, which are 64-bit ops.
+ * This implementation matches the kernel design of setting PCI devices'
+ * coherent_dma_mask to 0xffffffffull by default, allowing the device drivers
+ * to skip calling pci_set_consistent_dma_mask(DMA_BIT_MASK(32)).
+ */
 static void pcibios_fixup_final(struct pci_dev *pdev)
 {
-	set_dma_ops(&pdev->dev, gx_pci_dma_map_ops);
+	set_dma_ops(&pdev->dev, gx_legacy_pci_dma_map_ops);
 	set_dma_offset(&pdev->dev, TILE_PCI_MEM_MAP_BASE_OFFSET);
 	pdev->dev.archdata.max_direct_dma_addr =
 		TILE_PCI_MAX_DIRECT_DMA_ADDRESS;
+	pdev->dev.coherent_dma_mask = TILE_PCI_MAX_DIRECT_DMA_ADDRESS;
 }
 DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_final);
 