Diffstat (limited to 'arch/ia64/kernel/pci-dma.c')
-rw-r--r--	arch/ia64/kernel/pci-dma.c	52
1 file changed, 28 insertions(+), 24 deletions(-)
diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c
index d0ada067a4af..eb987386f691 100644
--- a/arch/ia64/kernel/pci-dma.c
+++ b/arch/ia64/kernel/pci-dma.c
@@ -32,30 +32,16 @@ int force_iommu __read_mostly = 1;
 int force_iommu __read_mostly;
 #endif
 
-/* Set this to 1 if there is a HW IOMMU in the system */
-int iommu_detected __read_mostly;
-
 /* Dummy device used for NULL arguments (normally ISA). Better would
    be probably a smaller DMA mask, but this is bug-to-bug compatible
    to i386. */
 struct device fallback_dev = {
 	.init_name = "fallback device",
-	.coherent_dma_mask = DMA_32BIT_MASK,
+	.coherent_dma_mask = DMA_BIT_MASK(32),
 	.dma_mask = &fallback_dev.coherent_dma_mask,
 };
 
-void __init pci_iommu_alloc(void)
-{
-	/*
-	 * The order of these functions is important for
-	 * fall-back/fail-over reasons
-	 */
-	detect_intel_iommu();
-
-#ifdef CONFIG_SWIOTLB
-	pci_swiotlb_init();
-#endif
-}
+extern struct dma_map_ops intel_dma_ops;
 
 static int __init pci_iommu_init(void)
 {
@@ -79,20 +65,17 @@ iommu_dma_init(void)
 	return;
 }
 
-struct dma_mapping_ops *dma_ops;
-EXPORT_SYMBOL(dma_ops);
-
 int iommu_dma_supported(struct device *dev, u64 mask)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(dev);
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
 
-	if (ops->dma_supported_op)
-		return ops->dma_supported_op(dev, mask);
+	if (ops->dma_supported)
+		return ops->dma_supported(dev, mask);
 
 	/* Copied from i386. Doesn't make much sense, because it will
 	   only work for pci_alloc_coherent.
 	   The caller just has to use GFP_DMA in this case. */
-	if (mask < DMA_24BIT_MASK)
+	if (mask < DMA_BIT_MASK(24))
 		return 0;
 
 	/* Tell the device to use SAC when IOMMU force is on.  This
@@ -107,7 +90,7 @@ int iommu_dma_supported(struct device *dev, u64 mask)
 	   SAC for these.  Assume all masks <= 40 bits are of this
 	   type.  Normally this doesn't make any difference, but gives
 	   more gentle handling of IOMMU overflow. */
-	if (iommu_sac_force && (mask >= DMA_40BIT_MASK)) {
+	if (iommu_sac_force && (mask >= DMA_BIT_MASK(40))) {
 		dev_info(dev, "Force SAC with mask %lx\n", mask);
 		return 0;
 	}
@@ -116,4 +99,25 @@ int iommu_dma_supported(struct device *dev, u64 mask)
 }
 EXPORT_SYMBOL(iommu_dma_supported);
 
+void __init pci_iommu_alloc(void)
+{
+	dma_ops = &intel_dma_ops;
+
+	dma_ops->sync_single_for_cpu = machvec_dma_sync_single;
+	dma_ops->sync_sg_for_cpu = machvec_dma_sync_sg;
+	dma_ops->sync_single_for_device = machvec_dma_sync_single;
+	dma_ops->sync_sg_for_device = machvec_dma_sync_sg;
+	dma_ops->dma_supported = iommu_dma_supported;
+
+	/*
+	 * The order of these functions is important for
+	 * fall-back/fail-over reasons
+	 */
+	detect_intel_iommu();
+
+#ifdef CONFIG_SWIOTLB
+	pci_swiotlb_init();
+#endif
+}
+
 #endif
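The relocated pci_iommu_alloc() shows the dma_map_ops pattern this change converts ia64 to: install a provider's ops table (intel_dma_ops), then patch selected hooks with machvec wrappers before any mapping takes place. A compilable userspace sketch of that pattern, with every name an illustrative stand-in for the kernel structures:

#include <stdio.h>

/* Cut-down stand-in for struct dma_map_ops: a table of hooks that
 * generic code calls through. */
struct dma_map_ops {
	int  (*dma_supported)(unsigned long long mask);
	void (*sync_single_for_cpu)(void);
};

/* The IOMMU provider's default hook (stand-in for intel_dma_ops). */
static int intel_dma_supported(unsigned long long mask)
{
	return mask >= 0xffffffffULL; /* pretend the IOMMU needs >= 32 bits */
}

static struct dma_map_ops intel_dma_ops = {
	.dma_supported = intel_dma_supported,
};

/* Platform override (stand-in for machvec_dma_sync_single). */
static void machvec_dma_sync_single(void)
{
	printf("machvec sync\n");
}

static struct dma_map_ops *dma_ops;

/* Same shape as the new pci_iommu_alloc(): install the provider's
 * table, then overwrite the hooks the platform must intercept. */
static void pci_iommu_alloc(void)
{
	dma_ops = &intel_dma_ops;
	dma_ops->sync_single_for_cpu = machvec_dma_sync_single;
}

int main(void)
{
	pci_iommu_alloc();
	printf("supported: %d\n", dma_ops->dma_supported(0xffffffffULL));
	dma_ops->sync_single_for_cpu();
	return 0;
}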