author		Joerg Roedel <joerg.roedel@amd.com>	2009-01-09 09:13:15 -0500
committer	Joerg Roedel <joerg.roedel@amd.com>	2009-03-17 07:56:46 -0400
commit		2118d0c548e8a2205e1a29eb5b89e5f2e9ae2c8b (patch)
tree		d21b131fd8f32d3515ab3c0c98c52ede95825473 /arch
parent		ac26c18bd35d982d1ba06020a992b1085fefc3e2 (diff)
dma-debug: x86 architecture bindings
Impact: make use of DMA-API debugging code in x86
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
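
With these bindings in place, every driver call that enters the x86 DMA-mapping API is mirrored to the dma-debug core, which records active mappings and can warn about misuse: unmapping with the wrong size or direction, mismatched map/unmap interfaces, or mappings that are never released. A minimal, hypothetical driver fragment showing the code paths that are now checked (the function and variable names are illustrative, not part of this patch):

/*
 * Hypothetical driver fragment: with CONFIG_DMA_API_DEBUG enabled,
 * both calls below are recorded by the dma-debug core. Unmapping with
 * a mismatched size or direction, or never unmapping at all, would
 * trigger a kernel warning.
 */
#include <linux/dma-mapping.h>

static int example_dma_io(struct device *dev, void *buf, size_t len)
{
        dma_addr_t handle;

        /* enters dma_map_single() -> debug_dma_map_page(..., true) */
        handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

        /* ... device performs the transfer ... */

        /* must mirror the mapping exactly; checked by debug_dma_unmap_page() */
        dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);

        return 0;
}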
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/Kconfig			|  1
-rw-r--r--	arch/x86/include/asm/dma-mapping.h	| 45
-rw-r--r--	arch/x86/kernel/pci-dma.c		|  6
3 files changed, 46 insertions(+), 6 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index bc2fbadff9f9..f2cb677b263f 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -40,6 +40,7 @@ config X86
        select HAVE_GENERIC_DMA_COHERENT if X86_32
        select HAVE_EFFICIENT_UNALIGNED_ACCESS
        select USER_STACKTRACE_SUPPORT
+       select HAVE_DMA_API_DEBUG
 
 config ARCH_DEFCONFIG
        string
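
Selecting HAVE_DMA_API_DEBUG only advertises that this architecture wires the debug hooks into its DMA-mapping implementation; the checks themselves are enabled through the generic, user-visible option that depends on it. A .config fragment for a debug build might look like this (assuming the generic CONFIG_DMA_API_DEBUG option from lib/Kconfig.debug):

CONFIG_HAVE_DMA_API_DEBUG=y
CONFIG_DMA_API_DEBUG=y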
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index 9c78bd40ebec..cea7b74963e9 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -7,6 +7,7 @@
  */
 
 #include <linux/scatterlist.h>
+#include <linux/dma-debug.h>
 #include <linux/dma-attrs.h>
 #include <asm/io.h>
 #include <asm/swiotlb.h>
@@ -56,11 +57,16 @@ dma_map_single(struct device *hwdev, void *ptr, size_t size,
               enum dma_data_direction dir)
 {
        struct dma_map_ops *ops = get_dma_ops(hwdev);
+       dma_addr_t addr;
 
        BUG_ON(!valid_dma_direction(dir));
-       return ops->map_page(hwdev, virt_to_page(ptr),
+       addr = ops->map_page(hwdev, virt_to_page(ptr),
                             (unsigned long)ptr & ~PAGE_MASK, size,
                             dir, NULL);
+       debug_dma_map_page(hwdev, virt_to_page(ptr),
+                          (unsigned long)ptr & ~PAGE_MASK, size,
+                          dir, addr, true);
+       return addr;
 }
 
 static inline void
@@ -72,6 +78,7 @@ dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
        BUG_ON(!valid_dma_direction(dir));
        if (ops->unmap_page)
                ops->unmap_page(dev, addr, size, dir, NULL);
+       debug_dma_unmap_page(dev, addr, size, dir, true);
 }
 
 static inline int
@@ -79,9 +86,13 @@ dma_map_sg(struct device *hwdev, struct scatterlist *sg,
           int nents, enum dma_data_direction dir)
 {
        struct dma_map_ops *ops = get_dma_ops(hwdev);
+       int ents;
 
        BUG_ON(!valid_dma_direction(dir));
-       return ops->map_sg(hwdev, sg, nents, dir, NULL);
+       ents = ops->map_sg(hwdev, sg, nents, dir, NULL);
+       debug_dma_map_sg(hwdev, sg, nents, ents, dir);
+
+       return ents;
 }
 
 static inline void
@@ -91,6 +102,7 @@ dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
        struct dma_map_ops *ops = get_dma_ops(hwdev);
 
        BUG_ON(!valid_dma_direction(dir));
+       debug_dma_unmap_sg(hwdev, sg, nents, dir);
        if (ops->unmap_sg)
                ops->unmap_sg(hwdev, sg, nents, dir, NULL);
 }
@@ -104,6 +116,7 @@ dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_single_for_cpu)
                ops->sync_single_for_cpu(hwdev, dma_handle, size, dir);
+       debug_dma_sync_single_for_cpu(hwdev, dma_handle, size, dir);
        flush_write_buffers();
 }
 
@@ -116,6 +129,7 @@ dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_single_for_device)
                ops->sync_single_for_device(hwdev, dma_handle, size, dir);
+       debug_dma_sync_single_for_device(hwdev, dma_handle, size, dir);
        flush_write_buffers();
 }
 
@@ -130,6 +144,8 @@ dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
        if (ops->sync_single_range_for_cpu)
                ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
                                               size, dir);
+       debug_dma_sync_single_range_for_cpu(hwdev, dma_handle,
+                                           offset, size, dir);
        flush_write_buffers();
 }
 
@@ -144,6 +160,8 @@ dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
        if (ops->sync_single_range_for_device)
                ops->sync_single_range_for_device(hwdev, dma_handle,
                                                  offset, size, dir);
+       debug_dma_sync_single_range_for_device(hwdev, dma_handle,
+                                              offset, size, dir);
        flush_write_buffers();
 }
 
@@ -156,6 +174,7 @@ dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_sg_for_cpu)
                ops->sync_sg_for_cpu(hwdev, sg, nelems, dir);
+       debug_dma_sync_sg_for_cpu(hwdev, sg, nelems, dir);
        flush_write_buffers();
 }
 
@@ -168,6 +187,7 @@ dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_sg_for_device)
                ops->sync_sg_for_device(hwdev, sg, nelems, dir);
+       debug_dma_sync_sg_for_device(hwdev, sg, nelems, dir);
 
        flush_write_buffers();
 }
@@ -177,15 +197,24 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
                                      enum dma_data_direction dir)
 {
        struct dma_map_ops *ops = get_dma_ops(dev);
+       dma_addr_t addr;
 
        BUG_ON(!valid_dma_direction(dir));
-       return ops->map_page(dev, page, offset, size, dir, NULL);
+       addr = ops->map_page(dev, page, offset, size, dir, NULL);
+       debug_dma_map_page(dev, page, offset, size, dir, addr, false);
+
+       return addr;
 }
 
 static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
                                  size_t size, enum dma_data_direction dir)
 {
-       dma_unmap_single(dev, addr, size, dir);
+       struct dma_map_ops *ops = get_dma_ops(dev);
+
+       BUG_ON(!valid_dma_direction(dir));
+       if (ops->unmap_page)
+               ops->unmap_page(dev, addr, size, dir, NULL);
+       debug_dma_unmap_page(dev, addr, size, dir, false);
 }
 
 static inline void
@@ -250,8 +279,11 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
        if (!ops->alloc_coherent)
                return NULL;
 
-       return ops->alloc_coherent(dev, size, dma_handle,
+       memory = ops->alloc_coherent(dev, size, dma_handle,
                                   dma_alloc_coherent_gfp_flags(dev, gfp));
+       debug_dma_alloc_coherent(dev, size, *dma_handle, memory);
+
+       return memory;
 }
 
 static inline void dma_free_coherent(struct device *dev, size_t size,
@@ -264,6 +296,7 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
        if (dma_release_from_coherent(dev, get_order(size), vaddr))
                return;
 
+       debug_dma_free_coherent(dev, size, vaddr, bus);
        if (ops->free_coherent)
                ops->free_coherent(dev, size, vaddr, bus);
 }
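
Note the pattern shared by all of the wrappers above: the real dma_map_ops call runs first, then the result is reported to the matching debug_dma_* hook. The final boolean passed to debug_dma_map_page() and debug_dma_unmap_page() records whether the mapping was created through the single-buffer interface, which is what lets dma-debug flag mismatched map/unmap pairs. A hypothetical example of the kind of misuse this catches (illustrative function, not from this patch):

#include <linux/dma-mapping.h>

/*
 * Hypothetical misuse: the buffer is mapped through the "single"
 * interface but released through the "page" interface. The true/false
 * flag recorded by debug_dma_map_page() lets dma-debug detect the
 * mismatched unmap and print a warning.
 */
static void example_bad_unmap(struct device *dev, void *buf, size_t len)
{
        dma_addr_t handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);

        dma_unmap_page(dev, handle, len, DMA_FROM_DEVICE); /* wrong pair */
}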
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index f293a8df6828..ebf7d454f210 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -1,4 +1,5 @@
 #include <linux/dma-mapping.h>
+#include <linux/dma-debug.h>
 #include <linux/dmar.h>
 #include <linux/bootmem.h>
 #include <linux/pci.h>
@@ -44,6 +45,9 @@ struct device x86_dma_fallback_dev = {
 };
 EXPORT_SYMBOL(x86_dma_fallback_dev);
 
+/* Number of entries preallocated for DMA-API debugging */
+#define PREALLOC_DMA_DEBUG_ENTRIES       32768
+
 int dma_set_mask(struct device *dev, u64 mask)
 {
        if (!dev->dma_mask || !dma_supported(dev, mask))
@@ -265,6 +269,8 @@ EXPORT_SYMBOL(dma_supported);
 
 static int __init pci_iommu_init(void)
 {
+       dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
+
        calgary_iommu_init();
 
        intel_iommu_init();
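
dma_debug_init() is called from pci_iommu_init() so the tracking structures exist before the IOMMU implementations are set up and devices start mapping. The entry pool is preallocated (32768 entries here) because the debug hooks can fire from atomic context, where allocating on demand is not an option; if the pool is exhausted, the dma-debug core disables itself and logs a warning. The pool size can also be overridden at boot, assuming the dma_debug_entries= parameter provided by the generic dma-debug code, e.g. on the kernel command line:

dma_debug_entries=65536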