Diffstat (limited to 'arch/ia64/kernel/pci-dma.c')
-rw-r--r--	arch/ia64/kernel/pci-dma.c	46
1 file changed, 25 insertions(+), 21 deletions(-)
diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c
index d0ada067a4af..e4cb443bb988 100644
--- a/arch/ia64/kernel/pci-dma.c
+++ b/arch/ia64/kernel/pci-dma.c
@@ -32,9 +32,6 @@ int force_iommu __read_mostly = 1;
 int force_iommu __read_mostly;
 #endif
 
-/* Set this to 1 if there is a HW IOMMU in the system */
-int iommu_detected __read_mostly;
-
 /* Dummy device used for NULL arguments (normally ISA). Better would
    be probably a smaller DMA mask, but this is bug-to-bug compatible
    to i386. */
@@ -44,18 +41,7 @@ struct device fallback_dev = {
 	.dma_mask = &fallback_dev.coherent_dma_mask,
 };
 
-void __init pci_iommu_alloc(void)
-{
-	/*
-	 * The order of these functions is important for
-	 * fall-back/fail-over reasons
-	 */
-	detect_intel_iommu();
-
-#ifdef CONFIG_SWIOTLB
-	pci_swiotlb_init();
-#endif
-}
+extern struct dma_map_ops intel_dma_ops;
 
 static int __init pci_iommu_init(void)
 {
@@ -79,15 +65,12 @@ iommu_dma_init(void)
 	return;
 }
 
-struct dma_mapping_ops *dma_ops;
-EXPORT_SYMBOL(dma_ops);
-
 int iommu_dma_supported(struct device *dev, u64 mask)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(dev);
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
 
-	if (ops->dma_supported_op)
-		return ops->dma_supported_op(dev, mask);
+	if (ops->dma_supported)
+		return ops->dma_supported(dev, mask);
 
 	/* Copied from i386. Doesn't make much sense, because it will
 	   only work for pci_alloc_coherent.
@@ -116,4 +99,25 @@ int iommu_dma_supported(struct device *dev, u64 mask)
 }
 EXPORT_SYMBOL(iommu_dma_supported);
 
+void __init pci_iommu_alloc(void)
+{
+	dma_ops = &intel_dma_ops;
+
+	dma_ops->sync_single_for_cpu = machvec_dma_sync_single;
+	dma_ops->sync_sg_for_cpu = machvec_dma_sync_sg;
+	dma_ops->sync_single_for_device = machvec_dma_sync_single;
+	dma_ops->sync_sg_for_device = machvec_dma_sync_sg;
+	dma_ops->dma_supported = iommu_dma_supported;
+
+	/*
+	 * The order of these functions is important for
+	 * fall-back/fail-over reasons
+	 */
+	detect_intel_iommu();
+
+#ifdef CONFIG_SWIOTLB
+	pci_swiotlb_init();
+#endif
+}
+
 #endif
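
For context, the iommu_dma_supported() change in the third hunk is the usual ops-table dispatch pattern: look up the dma_map_ops for a device, delegate to its dma_supported hook when one is set, and fall back to a plain mask check otherwise. Below is a minimal standalone C sketch of that pattern; the struct layout, the get_ops() helper, and the mask values are simplified stand-ins for illustration, not the kernel's definitions.

/*
 * Minimal userspace model of the dma_map_ops dispatch in
 * iommu_dma_supported(). Hypothetical simplified types; not
 * the kernel's definitions.
 */
#include <stdio.h>
#include <stdint.h>

struct device;

struct dummy_map_ops {
	int (*dma_supported)(struct device *dev, uint64_t mask);
};

struct device {
	const struct dummy_map_ops *ops;	/* per-device ops table */
};

/* stand-in for platform_dma_get_ops(dev) */
static const struct dummy_map_ops *get_ops(struct device *dev)
{
	return dev->ops;
}

static int intel_supported(struct device *dev, uint64_t mask)
{
	(void)dev;
	return mask >= 0xffffffffULL;	/* pretend 32 bits are required */
}

static const struct dummy_map_ops intel_ops = {
	.dma_supported = intel_supported,
};

/* mirrors iommu_dma_supported(): delegate when the hook is set */
static int dma_supported(struct device *dev, uint64_t mask)
{
	const struct dummy_map_ops *ops = get_ops(dev);

	if (ops && ops->dma_supported)
		return ops->dma_supported(dev, mask);
	return mask >= 0x00ffffffULL;	/* plain mask-check fallback */
}

int main(void)
{
	struct device dev = { .ops = &intel_ops };

	printf("64-bit mask supported: %d\n", dma_supported(&dev, ~0ULL));
	return 0;
}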
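The relocated pci_iommu_alloc() in the last hunk does three things in a deliberate order: it points the global dma_ops at intel_dma_ops, patches that table's sync and dma_supported hooks with the machine-vector wrappers, and only then runs detection followed by the swiotlb fallback ("the order of these functions is important for fall-back/fail-over reasons"). The following standalone sketch models that flow; the stub bodies, in particular treating detection as always succeeding and having the swiotlb path install itself only when iommu_detected is clear, are assumptions made for illustration.

/*
 * Standalone sketch of the pci_iommu_alloc() flow above. All names
 * mimic the kernel's, but the types and bodies are illustrative stubs.
 */
#include <stdio.h>
#include <stdbool.h>

struct dummy_map_ops {
	void (*sync_single_for_cpu)(void *addr);
	void (*sync_single_for_device)(void *addr);
};

static void intel_sync(void *addr)   { (void)addr; puts("intel sync"); }
static void machvec_sync(void *addr) { (void)addr; puts("machvec sync"); }

static struct dummy_map_ops intel_dma_ops = {
	.sync_single_for_cpu	= intel_sync,
	.sync_single_for_device	= intel_sync,
};

static struct dummy_map_ops *dma_ops;
static bool iommu_detected;

static void detect_intel_iommu(void)
{
	/* the real code scans the ACPI DMAR tables; assume one was found */
	iommu_detected = true;
}

static void pci_swiotlb_init(void)
{
	/* fallback path: assumed to take over only without a HW IOMMU */
	if (!iommu_detected)
		puts("falling back to swiotlb bounce buffers");
}

static void pci_iommu_alloc(void)
{
	dma_ops = &intel_dma_ops;

	/* override the driver defaults with machine-vector wrappers */
	dma_ops->sync_single_for_cpu = machvec_sync;
	dma_ops->sync_single_for_device = machvec_sync;

	/* order matters: detection must run before the fallback check */
	detect_intel_iommu();
	pci_swiotlb_init();
}

int main(void)
{
	int buf;

	pci_iommu_alloc();
	dma_ops->sync_single_for_cpu(&buf);	/* prints "machvec sync" */
	return 0;
}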
