author    FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>  2008-07-25 22:44:49 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>   2008-07-26 15:00:03 -0400
commit    8d8bb39b9eba32dd70e87fd5ad5c5dd4ba118e06
tree      64090a84f4c4466f9f30ff46c993e0cede379052 /include/asm-x86
parent    c485b465a031b6f9b9a51300e0ee1f86efc6db87
dma-mapping: add the device argument to dma_mapping_error()
Add per-device dma_mapping_ops support for CONFIG_X86_64, as the POWER
architecture does.

This enables us to cleanly fix the Calgary IOMMU issue that some devices
are not behind the IOMMU (http://lkml.org/lkml/2008/5/8/423).

I think that per-device dma_mapping_ops support would also be helpful for
KVM people to support PCI passthrough, but Andi thinks that this makes it
difficult to support PCI passthrough (see the above thread). So I CC'ed
this to the KVM camp. Comments are appreciated.

A pointer to dma_mapping_ops is added to struct dev_archdata. If the
pointer is non-NULL, the DMA operations in asm/dma-mapping.h use it. If it
is NULL, the system-wide dma_ops pointer is used as before.

If it's useful for KVM people, I plan to implement a mechanism to register
a hook that is called when a new PCI (or DMA-capable) device is created
(it works with hot plugging). It enables IOMMUs to set up an appropriate
dma_mapping_ops per device.

The major obstacle is that dma_mapping_error doesn't take a pointer to the
device, unlike the other DMA operations, so x86 can't have dma_mapping_ops
per device. Note that all the POWER IOMMUs use the same dma_mapping_error
function, so this is not a problem for POWER, but x86 IOMMUs use different
dma_mapping_error functions.

The first patch adds the device argument to dma_mapping_error. The patch
is trivial but large, since it touches lots of drivers and dma-mapping.h
in all the architectures.

This patch:

dma_mapping_error() doesn't take a pointer to the device, unlike the other
DMA operations, so we can't have dma_mapping_ops per device.

Note that POWER already has dma_mapping_ops per device, but all the POWER
IOMMUs use the same dma_mapping_error function. x86 IOMMUs use the device
argument.

[akpm@linux-foundation.org: fix sge]
[akpm@linux-foundation.org: fix svc_rdma]
[akpm@linux-foundation.org: build fix]
[akpm@linux-foundation.org: fix bnx2x]
[akpm@linux-foundation.org: fix s2io]
[akpm@linux-foundation.org: fix pasemi_mac]
[akpm@linux-foundation.org: fix sdhci]
[akpm@linux-foundation.org: build fix]
[akpm@linux-foundation.org: fix sparc]
[akpm@linux-foundation.org: fix ibmvscsi]
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: Muli Ben-Yehuda <muli@il.ibm.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Avi Kivity <avi@qumranet.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
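To make the driver-visible change concrete, here is a hedged sketch, not
taken from the patch, of how a driver checks a mapping under the new
calling convention (the function and buffer names are invented for the
example):

#include <linux/dma-mapping.h>

/* Hypothetical driver fragment: dma_mapping_error() now takes the
 * device, so any per-device mapping_error hook can be consulted. */
static int example_map_buffer(struct device *dev, void *buf,
			      size_t len, dma_addr_t *handle)
{
	*handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	/* Before this patch the check was dma_mapping_error(*handle);
	 * passing dev is what enables per-device dma_mapping_ops. */
	if (dma_mapping_error(dev, *handle))
		return -ENOMEM;

	return 0;
}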
Diffstat (limited to 'include/asm-x86')
 include/asm-x86/device.h      |  3 ++
 include/asm-x86/dma-mapping.h | 99 +++++++++++++++++++++++++-------------
 include/asm-x86/swiotlb.h     |  2 +-
 3 files changed, 72 insertions(+), 32 deletions(-)
diff --git a/include/asm-x86/device.h b/include/asm-x86/device.h
index 87a715367a1b..3c034f48fdb0 100644
--- a/include/asm-x86/device.h
+++ b/include/asm-x86/device.h
@@ -5,6 +5,9 @@ struct dev_archdata {
 #ifdef CONFIG_ACPI
 	void	*acpi_handle;
 #endif
+#ifdef CONFIG_X86_64
+struct dma_mapping_ops *dma_ops;
+#endif
 #ifdef CONFIG_DMAR
 	void *iommu; /* hook for IOMMU specific extension */
 #endif
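The new archdata field is what lets an IOMMU give individual devices
their own operations. A minimal sketch of how an IOMMU might populate
it; the predicate and the fallback ops table are hypothetical, not part
of this patch:

/* Sketch only: give devices that are not behind the IOMMU a fallback
 * ops table; leaving archdata.dma_ops NULL means get_dma_ops() (added
 * below in dma-mapping.h) keeps using the global dma_ops. */
extern struct dma_mapping_ops *fallback_dma_ops;	/* hypothetical */

static void example_set_dev_dma_ops(struct device *dev)
{
	if (!example_dev_behind_iommu(dev))	/* hypothetical test */
		dev->archdata.dma_ops = fallback_dma_ops;
}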
diff --git a/include/asm-x86/dma-mapping.h b/include/asm-x86/dma-mapping.h
index c2ddd3d1b883..0eaa9bf6011f 100644
--- a/include/asm-x86/dma-mapping.h
+++ b/include/asm-x86/dma-mapping.h
@@ -17,7 +17,8 @@ extern int panic_on_overflow;
 extern int force_iommu;
 
 struct dma_mapping_ops {
-	int             (*mapping_error)(dma_addr_t dma_addr);
+	int             (*mapping_error)(struct device *dev,
+					 dma_addr_t dma_addr);
 	void*           (*alloc_coherent)(struct device *dev, size_t size,
 				dma_addr_t *dma_handle, gfp_t gfp);
 	void            (*free_coherent)(struct device *dev, size_t size,
@@ -56,14 +57,32 @@ struct dma_mapping_ops {
 	int		is_phys;
 };
 
-extern const struct dma_mapping_ops *dma_ops;
+extern struct dma_mapping_ops *dma_ops;
 
-static inline int dma_mapping_error(dma_addr_t dma_addr)
+static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
 {
-	if (dma_ops->mapping_error)
-		return dma_ops->mapping_error(dma_addr);
+#ifdef CONFIG_X86_32
+	return dma_ops;
+#else
+	if (unlikely(!dev) || !dev->archdata.dma_ops)
+		return dma_ops;
+	else
+		return dev->archdata.dma_ops;
+#endif
+}
+
+/* Make sure we keep the same behaviour */
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+#ifdef CONFIG_X86_32
+	return 0;
+#else
+	struct dma_mapping_ops *ops = get_dma_ops(dev);
+	if (ops->mapping_error)
+		return ops->mapping_error(dev, dma_addr);
 
 	return (dma_addr == bad_dma_address);
+#endif
 }
 
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
@@ -83,44 +102,53 @@ static inline dma_addr_t
 dma_map_single(struct device *hwdev, void *ptr, size_t size,
 	       int direction)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
 	BUG_ON(!valid_dma_direction(direction));
-	return dma_ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
+	return ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
 }
 
 static inline void
 dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
 		 int direction)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(dev);
+
 	BUG_ON(!valid_dma_direction(direction));
-	if (dma_ops->unmap_single)
-		dma_ops->unmap_single(dev, addr, size, direction);
+	if (ops->unmap_single)
+		ops->unmap_single(dev, addr, size, direction);
 }
 
 static inline int
 dma_map_sg(struct device *hwdev, struct scatterlist *sg,
 	   int nents, int direction)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
 	BUG_ON(!valid_dma_direction(direction));
-	return dma_ops->map_sg(hwdev, sg, nents, direction);
+	return ops->map_sg(hwdev, sg, nents, direction);
 }
 
 static inline void
 dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
 	     int direction)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
 	BUG_ON(!valid_dma_direction(direction));
-	if (dma_ops->unmap_sg)
-		dma_ops->unmap_sg(hwdev, sg, nents, direction);
+	if (ops->unmap_sg)
+		ops->unmap_sg(hwdev, sg, nents, direction);
 }
 
 static inline void
 dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
 			size_t size, int direction)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
 	BUG_ON(!valid_dma_direction(direction));
-	if (dma_ops->sync_single_for_cpu)
-		dma_ops->sync_single_for_cpu(hwdev, dma_handle, size,
-					     direction);
+	if (ops->sync_single_for_cpu)
+		ops->sync_single_for_cpu(hwdev, dma_handle, size, direction);
 	flush_write_buffers();
 }
 
@@ -128,10 +156,11 @@ static inline void
 dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
 			   size_t size, int direction)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
 	BUG_ON(!valid_dma_direction(direction));
-	if (dma_ops->sync_single_for_device)
-		dma_ops->sync_single_for_device(hwdev, dma_handle, size,
-						direction);
+	if (ops->sync_single_for_device)
+		ops->sync_single_for_device(hwdev, dma_handle, size, direction);
 	flush_write_buffers();
 }
 
@@ -139,11 +168,12 @@ static inline void
 dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
 			      unsigned long offset, size_t size, int direction)
 {
-	BUG_ON(!valid_dma_direction(direction));
-	if (dma_ops->sync_single_range_for_cpu)
-		dma_ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
-						   size, direction);
+	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
 
+	BUG_ON(!valid_dma_direction(direction));
+	if (ops->sync_single_range_for_cpu)
+		ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
+					       size, direction);
 	flush_write_buffers();
 }
 
@@ -152,11 +182,12 @@ dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
 				 unsigned long offset, size_t size,
 				 int direction)
 {
-	BUG_ON(!valid_dma_direction(direction));
-	if (dma_ops->sync_single_range_for_device)
-		dma_ops->sync_single_range_for_device(hwdev, dma_handle,
-						      offset, size, direction);
+	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
 
+	BUG_ON(!valid_dma_direction(direction));
+	if (ops->sync_single_range_for_device)
+		ops->sync_single_range_for_device(hwdev, dma_handle,
+						  offset, size, direction);
 	flush_write_buffers();
 }
 
@@ -164,9 +195,11 @@ static inline void
 dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
 		    int nelems, int direction)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
 	BUG_ON(!valid_dma_direction(direction));
-	if (dma_ops->sync_sg_for_cpu)
-		dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
+	if (ops->sync_sg_for_cpu)
+		ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
 	flush_write_buffers();
 }
 
@@ -174,9 +207,11 @@ static inline void
 dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
 		       int nelems, int direction)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
 	BUG_ON(!valid_dma_direction(direction));
-	if (dma_ops->sync_sg_for_device)
-		dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction);
+	if (ops->sync_sg_for_device)
+		ops->sync_sg_for_device(hwdev, sg, nelems, direction);
 
 	flush_write_buffers();
 }
@@ -185,9 +220,11 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
 				      size_t offset, size_t size,
 				      int direction)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(dev);
+
 	BUG_ON(!valid_dma_direction(direction));
-	return dma_ops->map_single(dev, page_to_phys(page)+offset,
-				   size, direction);
+	return ops->map_single(dev, page_to_phys(page) + offset,
+			       size, direction);
 }
 
 static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
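With get_dma_ops() in place, a mapping_error hook can now be
device-aware. A hedged sketch of such a hook under the new signature;
the per-device state type and names are invented for illustration:

/* Illustrative only: per-device state consulted through the new
 * device argument.  A real IOMMU would define its own state. */
struct example_iommu_state {
	dma_addr_t bad_addr;	/* hypothetical per-device sentinel */
};

static int example_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	struct example_iommu_state *st = dev ? dev->archdata.iommu : NULL;

	if (st)
		return dma_addr == st->bad_addr;

	/* Fall back to the global x86 sentinel, as the generic path does. */
	return dma_addr == bad_dma_address;
}

An IOMMU would point the mapping_error member of its dma_mapping_ops at
such a function; dma_mapping_error() then reaches it through
get_dma_ops(dev).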
diff --git a/include/asm-x86/swiotlb.h b/include/asm-x86/swiotlb.h
index c706a7442633..2730b351afcf 100644
--- a/include/asm-x86/swiotlb.h
+++ b/include/asm-x86/swiotlb.h
@@ -35,7 +35,7 @@ extern int swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg,
 			  int nents, int direction);
 extern void swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg,
 			     int nents, int direction);
-extern int swiotlb_dma_mapping_error(dma_addr_t dma_addr);
+extern int swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr);
 extern void swiotlb_free_coherent(struct device *hwdev, size_t size,
 				  void *vaddr, dma_addr_t dma_handle);
 extern int swiotlb_dma_supported(struct device *hwdev, u64 mask);
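swiotlb keeps a single global notion of a failed mapping, so its updated
prototype simply accepts and ignores the device. A hedged sketch of a
conforming definition; the overflow-buffer comparison mirrors the swiotlb
implementation of this era but is illustrative, not a quote of it:

/* Sketch: hwdev is accepted for interface uniformity only; swiotlb's
 * failure sentinel is not per-device. */
int swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
	return dma_addr == virt_to_bus(io_tlb_overflow_buffer);
}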