author		Olof Johansson <olof@lixom.net>		2006-04-12 22:05:59 -0400
committer	Paul Mackerras <paulus@samba.org>	2006-04-21 08:28:55 -0400
commit		7daa411b810d7eadfaabe3765ec5f827893dbb30 (patch)
tree		c7cef2c78faa74928fb32942d9e9aaf262fe98ab /arch/powerpc/kernel/pci_iommu.c
parent		f4ffaa452e71495a06376f12f772342bc57051fc (diff)
[PATCH] powerpc: IOMMU support for honoring dma_mask
Some devices don't support the full 32-bit DMA address space that we
currently assume. Add the required mask-passing to the IOMMU allocators.
Signed-off-by: Olof Johansson <olof@lixom.net>
Signed-off-by: Paul Mackerras <paulus@samba.org>
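
Note: the allocator side of this change (iommu_alloc_coherent(), iommu_map_single(),
iommu_map_sg() in arch/powerpc/kernel/iommu.c) is outside this diffstat. A minimal
sketch of how a DMA mask can bound an IOMMU table search, assuming illustrative
names and page-based table accounting -- not the kernel's actual iommu.c code:

/* Sketch only, NOT from iommu.c: convert a device's DMA mask into the
 * number of table entries the allocator may hand out, so that every
 * mapped bus address stays at or below the mask.
 */
static unsigned long mask_to_limit(unsigned long mask,
		unsigned long it_offset,	/* window start, in IOMMU pages */
		unsigned long it_size,		/* table size, in IOMMU pages */
		unsigned int page_shift)	/* log2 of the IOMMU page size */
{
	unsigned long max_page = mask >> page_shift;

	if (it_offset > max_page)
		return 0;	/* window begins above the mask: unusable */
	if (max_page - it_offset + 1 < it_size)
		return max_page - it_offset + 1;
	return it_size;
}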
Diffstat (limited to 'arch/powerpc/kernel/pci_iommu.c')
-rw-r--r--	arch/powerpc/kernel/pci_iommu.c | 40 ++++++++++++++++++++++++++++++++++----
1 file changed, 36 insertions(+), 4 deletions(-)
diff --git a/arch/powerpc/kernel/pci_iommu.c b/arch/powerpc/kernel/pci_iommu.c
index c336f3e31cff..c1d95e14bbed 100644
--- a/arch/powerpc/kernel/pci_iommu.c
+++ b/arch/powerpc/kernel/pci_iommu.c
@@ -59,6 +59,25 @@ static inline struct iommu_table *devnode_table(struct device *dev)
 }
 
 
+static inline unsigned long device_to_mask(struct device *hwdev)
+{
+	struct pci_dev *pdev;
+
+	if (!hwdev) {
+		pdev = ppc64_isabridge_dev;
+		if (!pdev) /* This is the best guess we can do */
+			return 0xfffffffful;
+	} else
+		pdev = to_pci_dev(hwdev);
+
+	if (pdev->dma_mask)
+		return pdev->dma_mask;
+
+	/* Assume devices without mask can take 32 bit addresses */
+	return 0xfffffffful;
+}
+
+
 /* Allocates a contiguous real buffer and creates mappings over it.
  * Returns the virtual address of the buffer and sets dma_handle
  * to the dma address (mapping) of the first page.
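
The !hwdev fallback above covers ISA DMA issued without a struct device, where the
ISA bridge's mask (or 32 bits) is the best available guess. For PCI devices, the mask
consulted is whatever the driver negotiated at probe time; a hypothetical driver
fragment using the standard API of this era:

/* Hypothetical probe-time fragment: after pci_set_dma_mask() succeeds,
 * pdev->dma_mask holds 0xffffffff and device_to_mask() will return it
 * for every subsequent mapping on this device.
 */
if (pci_set_dma_mask(pdev, DMA_32BIT_MASK))
	return -EIO;	/* platform cannot satisfy a 32-bit mask */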
@@ -67,7 +86,7 @@ static void *pci_iommu_alloc_coherent(struct device *hwdev, size_t size,
 			dma_addr_t *dma_handle, gfp_t flag)
 {
 	return iommu_alloc_coherent(devnode_table(hwdev), size, dma_handle,
-			flag);
+			device_to_mask(hwdev), flag);
 }
 
 static void pci_iommu_free_coherent(struct device *hwdev, size_t size,
@@ -85,7 +104,8 @@ static void pci_iommu_free_coherent(struct device *hwdev, size_t size,
 static dma_addr_t pci_iommu_map_single(struct device *hwdev, void *vaddr,
 		size_t size, enum dma_data_direction direction)
 {
-	return iommu_map_single(devnode_table(hwdev), vaddr, size, direction);
+	return iommu_map_single(devnode_table(hwdev), vaddr, size,
+			device_to_mask(hwdev), direction);
 }
 
 
@@ -100,7 +120,7 @@ static int pci_iommu_map_sg(struct device *pdev, struct scatterlist *sglist,
 		int nelems, enum dma_data_direction direction)
 {
 	return iommu_map_sg(pdev, devnode_table(pdev), sglist,
-			nelems, direction);
+			nelems, device_to_mask(pdev), direction);
 }
 
 static void pci_iommu_unmap_sg(struct device *pdev, struct scatterlist *sglist,
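
The three call sites above imply the allocators' new prototypes. Reconstructed from
the arguments passed in this file (the authoritative declarations live in the iommu
header, which this diffstat excludes):

/* Reconstructed from the call sites -- each allocator gains an
 * unsigned long mask argument ahead of its final parameter.
 */
extern void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
		dma_addr_t *dma_handle, unsigned long mask, gfp_t flag);
extern dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
		size_t size, unsigned long mask,
		enum dma_data_direction direction);
extern int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
		struct scatterlist *sglist, int nelems, unsigned long mask,
		enum dma_data_direction direction);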
@@ -112,7 +132,19 @@ static void pci_iommu_unmap_sg(struct device *pdev, struct scatterlist *sglist,
 /* We support DMA to/from any memory page via the iommu */
 static int pci_iommu_dma_supported(struct device *dev, u64 mask)
 {
-	return 1;
+	struct iommu_table *tbl = devnode_table(dev);
+
+	if (!tbl || tbl->it_offset > mask) {
+		printk(KERN_INFO "Warning: IOMMU table offset too big for device mask\n");
+		if (tbl)
+			printk(KERN_INFO "mask: 0x%08lx, table offset: 0x%08lx\n",
+				mask, tbl->it_offset);
+		else
+			printk(KERN_INFO "mask: 0x%08lx, table unavailable\n",
+				mask);
+		return 0;
+	} else
+		return 1;
 }
 
 void pci_iommu_init(void)
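
With dma_supported() no longer returning 1 unconditionally, a mask that cannot reach
the IOMMU window is refused up front instead of failing at mapping time. An
illustrative rejection path, assuming a hypothetical device limited to 24-bit
addressing and the usual mask-setting route:

/* Hypothetical: pci_set_dma_mask() consults the platform's
 * dma_supported hook, so a 24-bit mask can now fail with -EIO when
 * the IOMMU table's offset lies beyond 0x00ffffff.
 */
if (pci_set_dma_mask(pdev, 0x00fffffful))
	printk(KERN_ERR "example: 24-bit DMA mask rejected by IOMMU\n");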