author    | Ingo Molnar <mingo@elte.hu> | 2009-03-18 05:37:48 -0400
committer | Ingo Molnar <mingo@elte.hu> | 2009-03-18 05:37:48 -0400
commit    | 95f3c4ebffb5f5dd6c06bf48a8e24c7247b5139c (patch)
tree      | 4b2d80e82fb4fdcc0c0bde4292780c22392cd034
parent    | 04dfcfcb54b073133bcca2c8f25b55e904558931 (diff)
parent    | 86f319529372953e353dc998bc6a761949614903 (diff)
Merge branch 'dma-api/debug' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/linux-2.6-iommu into core/iommu
-rw-r--r-- | Documentation/DMA-API.txt           | 106
-rw-r--r-- | Documentation/kernel-parameters.txt |  10
-rw-r--r-- | arch/Kconfig                        |   2
-rw-r--r-- | arch/x86/Kconfig                    |   1
-rw-r--r-- | arch/x86/include/asm/dma-mapping.h  |  45
-rw-r--r-- | arch/x86/kernel/pci-dma.c           |  10
-rw-r--r-- | include/linux/dma-debug.h           | 174
-rw-r--r-- | lib/Kconfig.debug                   |  11
-rw-r--r-- | lib/Makefile                        |   2
-rw-r--r-- | lib/dma-debug.c                     | 949
10 files changed, 1304 insertions(+), 6 deletions(-)
diff --git a/Documentation/DMA-API.txt b/Documentation/DMA-API.txt
index 2a3fcc55e981..d9aa43d78bcc 100644
--- a/Documentation/DMA-API.txt
+++ b/Documentation/DMA-API.txt
@@ -609,3 +609,109 @@ size is the size (and should be a page-sized multiple). | |||
609 | The return value will be either a pointer to the processor virtual | 609 | The return value will be either a pointer to the processor virtual |
610 | address of the memory, or an error (via PTR_ERR()) if any part of the | 610 | address of the memory, or an error (via PTR_ERR()) if any part of the |
611 | region is occupied. | 611 | region is occupied. |
612 | |||
613 | Part III - Debugging driver use of the DMA-API | ||
614 | ---------------------------------------------- | ||
615 | |||
616 | The DMA-API as described above has some constraints. For example, DMA | ||
617 | addresses must be released with the corresponding function and with the same | ||
618 | size. With the advent of hardware IOMMUs it becomes more and more important | ||
619 | that drivers do not violate those constraints. In the worst case such a | ||
620 | violation can result in data corruption up to destroyed filesystems. | ||
621 | |||
622 | To debug drivers and find bugs in the usage of the DMA-API, checking code | ||
623 | can be compiled into the kernel which will tell the developer about those | ||
624 | violations. If your architecture supports it you can select the "Enable | ||
625 | debugging of DMA-API usage" option in your kernel configuration. Enabling | ||
626 | this option has a performance impact. Do not enable it in production kernels. | ||
627 | |||
628 | If you boot the resulting kernel it will contain code which does some | ||
629 | bookkeeping about what DMA memory was allocated for which device. If this | ||
630 | code detects an error it prints a warning message with some details into | ||
631 | your kernel log. An example warning message may look like this: | ||
632 | |||
633 | ------------[ cut here ]------------ | ||
634 | WARNING: at /data2/repos/linux-2.6-iommu/lib/dma-debug.c:448 | ||
635 | check_unmap+0x203/0x490() | ||
636 | Hardware name: | ||
637 | forcedeth 0000:00:08.0: DMA-API: device driver frees DMA memory with wrong | ||
638 | function [device address=0x00000000640444be] [size=66 bytes] [mapped as | ||
639 | single] [unmapped as page] | ||
640 | Modules linked in: nfsd exportfs bridge stp llc r8169 | ||
641 | Pid: 0, comm: swapper Tainted: G W 2.6.28-dmatest-09289-g8bb99c0 #1 | ||
642 | Call Trace: | ||
643 | <IRQ> [<ffffffff80240b22>] warn_slowpath+0xf2/0x130 | ||
644 | [<ffffffff80647b70>] _spin_unlock+0x10/0x30 | ||
645 | [<ffffffff80537e75>] usb_hcd_link_urb_to_ep+0x75/0xc0 | ||
646 | [<ffffffff80647c22>] _spin_unlock_irqrestore+0x12/0x40 | ||
647 | [<ffffffff8055347f>] ohci_urb_enqueue+0x19f/0x7c0 | ||
648 | [<ffffffff80252f96>] queue_work+0x56/0x60 | ||
649 | [<ffffffff80237e10>] enqueue_task_fair+0x20/0x50 | ||
650 | [<ffffffff80539279>] usb_hcd_submit_urb+0x379/0xbc0 | ||
651 | [<ffffffff803b78c3>] cpumask_next_and+0x23/0x40 | ||
652 | [<ffffffff80235177>] find_busiest_group+0x207/0x8a0 | ||
653 | [<ffffffff8064784f>] _spin_lock_irqsave+0x1f/0x50 | ||
654 | [<ffffffff803c7ea3>] check_unmap+0x203/0x490 | ||
655 | [<ffffffff803c8259>] debug_dma_unmap_page+0x49/0x50 | ||
656 | [<ffffffff80485f26>] nv_tx_done_optimized+0xc6/0x2c0 | ||
657 | [<ffffffff80486c13>] nv_nic_irq_optimized+0x73/0x2b0 | ||
658 | [<ffffffff8026df84>] handle_IRQ_event+0x34/0x70 | ||
659 | [<ffffffff8026ffe9>] handle_edge_irq+0xc9/0x150 | ||
660 | [<ffffffff8020e3ab>] do_IRQ+0xcb/0x1c0 | ||
661 | [<ffffffff8020c093>] ret_from_intr+0x0/0xa | ||
662 | <EOI> <4>---[ end trace f6435a98e2a38c0e ]--- | ||
663 | |||
664 | The driver developer can find the driver and the device in this warning, | ||
665 | together with a stacktrace of the DMA-API call which caused it. | ||
666 | |||
667 | By default only the first error will result in a warning message. All other | ||
668 | errors will only be silently counted. This limitation exists to prevent the | ||
669 | code from flooding your kernel log. To support debugging a device driver | ||
670 | this limit can be disabled via debugfs. See the debugfs interface | ||
671 | documentation below for details. | ||
672 | |||
673 | The debugfs directory for the DMA-API debugging code is called dma-api/. In | ||
674 | this directory the following files can currently be found: | ||
675 | |||
676 | dma-api/all_errors This file contains a numeric value. If this | ||
677 | value is not equal to zero the debugging code | ||
678 | will print a warning for every error it finds | ||
679 | into the kernel log. Be careful with this | ||
680 | option. It can easily flood your logs. | ||
681 | |||
682 | dma-api/disabled This read-only file contains the character 'Y' | ||
683 | if the debugging code is disabled. This can | ||
684 | happen when it runs out of memory or if it was | ||
685 | disabled at boot time. | ||
686 | |||
687 | dma-api/error_count This file is read-only and shows the total | ||
688 | number of errors found. | ||
689 | |||
690 | dma-api/num_errors The number in this file shows how many | ||
691 | warnings will be printed to the kernel log | ||
692 | before it stops. This number is initialized to | ||
693 | one at system boot and can be set by writing | ||
694 | into this file. | ||
695 | |||
696 | dma-api/min_free_entries | ||
697 | This read-only file can be read to get the | ||
698 | minimum number of free dma_debug_entries the | ||
699 | allocator has ever seen. If this value goes | ||
700 | down to zero the code will disable itself | ||
701 | because it is no longer reliable. | ||
702 | |||
703 | dma-api/num_free_entries | ||
704 | The current number of free dma_debug_entries | ||
705 | in the allocator. | ||
706 | |||
707 | If you have this code compiled into your kernel it will be enabled by default. | ||
708 | If you want to boot without the bookkeeping anyway you can provide | ||
709 | 'dma_debug=off' as a boot parameter. This will disable DMA-API debugging. | ||
710 | Notice that you cannot enable it again at runtime. You have to reboot to do | ||
711 | so. | ||
712 | |||
713 | When the code disables itself at runtime this is most likely because it ran | ||
714 | out of dma_debug_entries. These entries are preallocated at boot. The number | ||
715 | of preallocated entries is defined per architecture. If it is too low for | ||
716 | you, boot with 'dma_debug_entries=<your_desired_number>' to overwrite the | ||
717 | architectural default. | ||
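
To make the constraints described in Part III concrete: the following hypothetical driver fragment (illustration only, not part of this patch; buggy_tx is a made-up name) commits exactly the kind of violation shown in the example warning above. It maps a buffer with dma_map_single() but releases it with dma_unmap_page() and a different size; with CONFIG_DMA_API_DEBUG enabled, check_unmap() in lib/dma-debug.c would flag both mismatches.

```c
#include <linux/dma-mapping.h>

/* Hypothetical example of DMA-API misuse that the checking code reports.
 * The buffer is mapped as "single" but released as "page", and the unmap
 * size (128) differs from the map size (66).
 */
static int buggy_tx(struct device *dev, void *buf)
{
	dma_addr_t addr;

	addr = dma_map_single(dev, buf, 66, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;

	/* ... device performs the transfer ... */

	/* BUG: wrong unmap function and wrong size */
	dma_unmap_page(dev, addr, 128, DMA_TO_DEVICE);
	return 0;
}
```
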
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 54f21a5c262b..0fa3c0545994 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -491,6 +491,16 @@ and is between 256 and 4096 characters. It is defined in the file | |||
491 | Range: 0 - 8192 | 491 | Range: 0 - 8192 |
492 | Default: 64 | 492 | Default: 64 |
493 | 493 | ||
494 | dma_debug=off If the kernel is compiled with DMA_API_DEBUG support | ||
495 | this option disables the debugging code at boot. | ||
496 | |||
497 | dma_debug_entries=<number> | ||
498 | This option allows you to tune the number of preallocated | ||
499 | entries for DMA-API debugging code. One entry is | ||
500 | required per DMA-API allocation. Use this if the | ||
501 | DMA-API debugging code disables itself because the | ||
502 | architectural default is too low. | ||
503 | |||
494 | hpet= [X86-32,HPET] option to control HPET usage | 504 | hpet= [X86-32,HPET] option to control HPET usage |
495 | Format: { enable (default) | disable | force } | 505 | Format: { enable (default) | disable | force } |
496 | disable: disable HPET and use PIT instead | 506 | disable: disable HPET and use PIT instead |
diff --git a/arch/Kconfig b/arch/Kconfig
index 550dab22daa1..830c16a2b801 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -106,3 +106,5 @@ config HAVE_CLK | |||
106 | The <linux/clk.h> calls support software clock gating and | 106 | The <linux/clk.h> calls support software clock gating and |
107 | thus are a key power management tool on many systems. | 107 | thus are a key power management tool on many systems. |
108 | 108 | ||
109 | config HAVE_DMA_API_DEBUG | ||
110 | bool | ||
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index bc2fbadff9f9..f2cb677b263f 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -40,6 +40,7 @@ config X86 | |||
40 | select HAVE_GENERIC_DMA_COHERENT if X86_32 | 40 | select HAVE_GENERIC_DMA_COHERENT if X86_32 |
41 | select HAVE_EFFICIENT_UNALIGNED_ACCESS | 41 | select HAVE_EFFICIENT_UNALIGNED_ACCESS |
42 | select USER_STACKTRACE_SUPPORT | 42 | select USER_STACKTRACE_SUPPORT |
43 | select HAVE_DMA_API_DEBUG | ||
43 | 44 | ||
44 | config ARCH_DEFCONFIG | 45 | config ARCH_DEFCONFIG |
45 | string | 46 | string |
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index 9c78bd40ebec..cea7b74963e9 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -7,6 +7,7 @@ | |||
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <linux/scatterlist.h> | 9 | #include <linux/scatterlist.h> |
10 | #include <linux/dma-debug.h> | ||
10 | #include <linux/dma-attrs.h> | 11 | #include <linux/dma-attrs.h> |
11 | #include <asm/io.h> | 12 | #include <asm/io.h> |
12 | #include <asm/swiotlb.h> | 13 | #include <asm/swiotlb.h> |
@@ -56,11 +57,16 @@ dma_map_single(struct device *hwdev, void *ptr, size_t size, | |||
56 | enum dma_data_direction dir) | 57 | enum dma_data_direction dir) |
57 | { | 58 | { |
58 | struct dma_map_ops *ops = get_dma_ops(hwdev); | 59 | struct dma_map_ops *ops = get_dma_ops(hwdev); |
60 | dma_addr_t addr; | ||
59 | 61 | ||
60 | BUG_ON(!valid_dma_direction(dir)); | 62 | BUG_ON(!valid_dma_direction(dir)); |
61 | return ops->map_page(hwdev, virt_to_page(ptr), | 63 | addr = ops->map_page(hwdev, virt_to_page(ptr), |
62 | (unsigned long)ptr & ~PAGE_MASK, size, | 64 | (unsigned long)ptr & ~PAGE_MASK, size, |
63 | dir, NULL); | 65 | dir, NULL); |
66 | debug_dma_map_page(hwdev, virt_to_page(ptr), | ||
67 | (unsigned long)ptr & ~PAGE_MASK, size, | ||
68 | dir, addr, true); | ||
69 | return addr; | ||
64 | } | 70 | } |
65 | 71 | ||
66 | static inline void | 72 | static inline void |
@@ -72,6 +78,7 @@ dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size, | |||
72 | BUG_ON(!valid_dma_direction(dir)); | 78 | BUG_ON(!valid_dma_direction(dir)); |
73 | if (ops->unmap_page) | 79 | if (ops->unmap_page) |
74 | ops->unmap_page(dev, addr, size, dir, NULL); | 80 | ops->unmap_page(dev, addr, size, dir, NULL); |
81 | debug_dma_unmap_page(dev, addr, size, dir, true); | ||
75 | } | 82 | } |
76 | 83 | ||
77 | static inline int | 84 | static inline int |
@@ -79,9 +86,13 @@ dma_map_sg(struct device *hwdev, struct scatterlist *sg, | |||
79 | int nents, enum dma_data_direction dir) | 86 | int nents, enum dma_data_direction dir) |
80 | { | 87 | { |
81 | struct dma_map_ops *ops = get_dma_ops(hwdev); | 88 | struct dma_map_ops *ops = get_dma_ops(hwdev); |
89 | int ents; | ||
82 | 90 | ||
83 | BUG_ON(!valid_dma_direction(dir)); | 91 | BUG_ON(!valid_dma_direction(dir)); |
84 | return ops->map_sg(hwdev, sg, nents, dir, NULL); | 92 | ents = ops->map_sg(hwdev, sg, nents, dir, NULL); |
93 | debug_dma_map_sg(hwdev, sg, nents, ents, dir); | ||
94 | |||
95 | return ents; | ||
85 | } | 96 | } |
86 | 97 | ||
87 | static inline void | 98 | static inline void |
@@ -91,6 +102,7 @@ dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents, | |||
91 | struct dma_map_ops *ops = get_dma_ops(hwdev); | 102 | struct dma_map_ops *ops = get_dma_ops(hwdev); |
92 | 103 | ||
93 | BUG_ON(!valid_dma_direction(dir)); | 104 | BUG_ON(!valid_dma_direction(dir)); |
105 | debug_dma_unmap_sg(hwdev, sg, nents, dir); | ||
94 | if (ops->unmap_sg) | 106 | if (ops->unmap_sg) |
95 | ops->unmap_sg(hwdev, sg, nents, dir, NULL); | 107 | ops->unmap_sg(hwdev, sg, nents, dir, NULL); |
96 | } | 108 | } |
@@ -104,6 +116,7 @@ dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle, | |||
104 | BUG_ON(!valid_dma_direction(dir)); | 116 | BUG_ON(!valid_dma_direction(dir)); |
105 | if (ops->sync_single_for_cpu) | 117 | if (ops->sync_single_for_cpu) |
106 | ops->sync_single_for_cpu(hwdev, dma_handle, size, dir); | 118 | ops->sync_single_for_cpu(hwdev, dma_handle, size, dir); |
119 | debug_dma_sync_single_for_cpu(hwdev, dma_handle, size, dir); | ||
107 | flush_write_buffers(); | 120 | flush_write_buffers(); |
108 | } | 121 | } |
109 | 122 | ||
@@ -116,6 +129,7 @@ dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle, | |||
116 | BUG_ON(!valid_dma_direction(dir)); | 129 | BUG_ON(!valid_dma_direction(dir)); |
117 | if (ops->sync_single_for_device) | 130 | if (ops->sync_single_for_device) |
118 | ops->sync_single_for_device(hwdev, dma_handle, size, dir); | 131 | ops->sync_single_for_device(hwdev, dma_handle, size, dir); |
132 | debug_dma_sync_single_for_device(hwdev, dma_handle, size, dir); | ||
119 | flush_write_buffers(); | 133 | flush_write_buffers(); |
120 | } | 134 | } |
121 | 135 | ||
@@ -130,6 +144,8 @@ dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle, | |||
130 | if (ops->sync_single_range_for_cpu) | 144 | if (ops->sync_single_range_for_cpu) |
131 | ops->sync_single_range_for_cpu(hwdev, dma_handle, offset, | 145 | ops->sync_single_range_for_cpu(hwdev, dma_handle, offset, |
132 | size, dir); | 146 | size, dir); |
147 | debug_dma_sync_single_range_for_cpu(hwdev, dma_handle, | ||
148 | offset, size, dir); | ||
133 | flush_write_buffers(); | 149 | flush_write_buffers(); |
134 | } | 150 | } |
135 | 151 | ||
@@ -144,6 +160,8 @@ dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle, | |||
144 | if (ops->sync_single_range_for_device) | 160 | if (ops->sync_single_range_for_device) |
145 | ops->sync_single_range_for_device(hwdev, dma_handle, | 161 | ops->sync_single_range_for_device(hwdev, dma_handle, |
146 | offset, size, dir); | 162 | offset, size, dir); |
163 | debug_dma_sync_single_range_for_device(hwdev, dma_handle, | ||
164 | offset, size, dir); | ||
147 | flush_write_buffers(); | 165 | flush_write_buffers(); |
148 | } | 166 | } |
149 | 167 | ||
@@ -156,6 +174,7 @@ dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg, | |||
156 | BUG_ON(!valid_dma_direction(dir)); | 174 | BUG_ON(!valid_dma_direction(dir)); |
157 | if (ops->sync_sg_for_cpu) | 175 | if (ops->sync_sg_for_cpu) |
158 | ops->sync_sg_for_cpu(hwdev, sg, nelems, dir); | 176 | ops->sync_sg_for_cpu(hwdev, sg, nelems, dir); |
177 | debug_dma_sync_sg_for_cpu(hwdev, sg, nelems, dir); | ||
159 | flush_write_buffers(); | 178 | flush_write_buffers(); |
160 | } | 179 | } |
161 | 180 | ||
@@ -168,6 +187,7 @@ dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, | |||
168 | BUG_ON(!valid_dma_direction(dir)); | 187 | BUG_ON(!valid_dma_direction(dir)); |
169 | if (ops->sync_sg_for_device) | 188 | if (ops->sync_sg_for_device) |
170 | ops->sync_sg_for_device(hwdev, sg, nelems, dir); | 189 | ops->sync_sg_for_device(hwdev, sg, nelems, dir); |
190 | debug_dma_sync_sg_for_device(hwdev, sg, nelems, dir); | ||
171 | 191 | ||
172 | flush_write_buffers(); | 192 | flush_write_buffers(); |
173 | } | 193 | } |
@@ -177,15 +197,24 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, | |||
177 | enum dma_data_direction dir) | 197 | enum dma_data_direction dir) |
178 | { | 198 | { |
179 | struct dma_map_ops *ops = get_dma_ops(dev); | 199 | struct dma_map_ops *ops = get_dma_ops(dev); |
200 | dma_addr_t addr; | ||
180 | 201 | ||
181 | BUG_ON(!valid_dma_direction(dir)); | 202 | BUG_ON(!valid_dma_direction(dir)); |
182 | return ops->map_page(dev, page, offset, size, dir, NULL); | 203 | addr = ops->map_page(dev, page, offset, size, dir, NULL); |
204 | debug_dma_map_page(dev, page, offset, size, dir, addr, false); | ||
205 | |||
206 | return addr; | ||
183 | } | 207 | } |
184 | 208 | ||
185 | static inline void dma_unmap_page(struct device *dev, dma_addr_t addr, | 209 | static inline void dma_unmap_page(struct device *dev, dma_addr_t addr, |
186 | size_t size, enum dma_data_direction dir) | 210 | size_t size, enum dma_data_direction dir) |
187 | { | 211 | { |
188 | dma_unmap_single(dev, addr, size, dir); | 212 | struct dma_map_ops *ops = get_dma_ops(dev); |
213 | |||
214 | BUG_ON(!valid_dma_direction(dir)); | ||
215 | if (ops->unmap_page) | ||
216 | ops->unmap_page(dev, addr, size, dir, NULL); | ||
217 | debug_dma_unmap_page(dev, addr, size, dir, false); | ||
189 | } | 218 | } |
190 | 219 | ||
191 | static inline void | 220 | static inline void |
@@ -250,8 +279,11 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, | |||
250 | if (!ops->alloc_coherent) | 279 | if (!ops->alloc_coherent) |
251 | return NULL; | 280 | return NULL; |
252 | 281 | ||
253 | return ops->alloc_coherent(dev, size, dma_handle, | 282 | memory = ops->alloc_coherent(dev, size, dma_handle, |
254 | dma_alloc_coherent_gfp_flags(dev, gfp)); | 283 | dma_alloc_coherent_gfp_flags(dev, gfp)); |
284 | debug_dma_alloc_coherent(dev, size, *dma_handle, memory); | ||
285 | |||
286 | return memory; | ||
255 | } | 287 | } |
256 | 288 | ||
257 | static inline void dma_free_coherent(struct device *dev, size_t size, | 289 | static inline void dma_free_coherent(struct device *dev, size_t size, |
@@ -264,6 +296,7 @@ static inline void dma_free_coherent(struct device *dev, size_t size, | |||
264 | if (dma_release_from_coherent(dev, get_order(size), vaddr)) | 296 | if (dma_release_from_coherent(dev, get_order(size), vaddr)) |
265 | return; | 297 | return; |
266 | 298 | ||
299 | debug_dma_free_coherent(dev, size, vaddr, bus); | ||
267 | if (ops->free_coherent) | 300 | if (ops->free_coherent) |
268 | ops->free_coherent(dev, size, vaddr, bus); | 301 | ops->free_coherent(dev, size, vaddr, bus); |
269 | } | 302 | } |
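
For contrast with the buggy fragment shown after the DMA-API.txt hunk, a correctly paired sequence under the instrumented wrappers above might look like the following sketch (a generic, hypothetical receive path; rx_one_buffer is a made-up name). It unmaps with the same size, direction, and function flavor used at map time, which is exactly what check_unmap() and check_sync() in lib/dma-debug.c verify.

```c
#include <linux/dma-mapping.h>

/* Hypothetical RX path: every call below is mirrored by one of the
 * debug_dma_* hooks added to the wrappers above, so the bookkeeping can
 * match the sync and unmap calls against the original mapping.
 */
static int rx_one_buffer(struct device *dev, void *buf, size_t len)
{
	dma_addr_t addr;

	addr = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;

	/* ... device DMAs into the buffer ... */

	dma_sync_single_for_cpu(dev, addr, len, DMA_FROM_DEVICE);
	/* ... CPU reads the received data ... */

	dma_unmap_single(dev, addr, len, DMA_FROM_DEVICE);
	return 0;
}
```
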
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index f293a8df6828..c7c4776ff630 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -1,4 +1,5 @@ | |||
1 | #include <linux/dma-mapping.h> | 1 | #include <linux/dma-mapping.h> |
2 | #include <linux/dma-debug.h> | ||
2 | #include <linux/dmar.h> | 3 | #include <linux/dmar.h> |
3 | #include <linux/bootmem.h> | 4 | #include <linux/bootmem.h> |
4 | #include <linux/pci.h> | 5 | #include <linux/pci.h> |
@@ -44,6 +45,9 @@ struct device x86_dma_fallback_dev = { | |||
44 | }; | 45 | }; |
45 | EXPORT_SYMBOL(x86_dma_fallback_dev); | 46 | EXPORT_SYMBOL(x86_dma_fallback_dev); |
46 | 47 | ||
48 | /* Number of entries preallocated for DMA-API debugging */ | ||
49 | #define PREALLOC_DMA_DEBUG_ENTRIES 32768 | ||
50 | |||
47 | int dma_set_mask(struct device *dev, u64 mask) | 51 | int dma_set_mask(struct device *dev, u64 mask) |
48 | { | 52 | { |
49 | if (!dev->dma_mask || !dma_supported(dev, mask)) | 53 | if (!dev->dma_mask || !dma_supported(dev, mask)) |
@@ -265,6 +269,12 @@ EXPORT_SYMBOL(dma_supported); | |||
265 | 269 | ||
266 | static int __init pci_iommu_init(void) | 270 | static int __init pci_iommu_init(void) |
267 | { | 271 | { |
272 | dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); | ||
273 | |||
274 | #ifdef CONFIG_PCI | ||
275 | dma_debug_add_bus(&pci_bus_type); | ||
276 | #endif | ||
277 | |||
268 | calgary_iommu_init(); | 278 | calgary_iommu_init(); |
269 | 279 | ||
270 | intel_iommu_init(); | 280 | intel_iommu_init(); |
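
The pci-dma.c hunk above shows the two steps an architecture takes to enable the checks: call dma_debug_init() with an architecture-chosen preallocation count and, optionally, register interesting buses with dma_debug_add_bus(). The architecture also has to select HAVE_DMA_API_DEBUG in its Kconfig, as the x86 Kconfig hunk does. A sketch of what another architecture's init path could look like (MY_ARCH_DMA_DEBUG_ENTRIES and my_arch_dma_debug_init are made-up names for illustration):

```c
#include <linux/dma-debug.h>
#include <linux/init.h>

/* Architecture-specific preallocation count; 4096 is an arbitrary
 * illustration, not a recommendation. One entry is consumed per
 * outstanding DMA-API allocation.
 */
#define MY_ARCH_DMA_DEBUG_ENTRIES 4096

static int __init my_arch_dma_debug_init(void)
{
	dma_debug_init(MY_ARCH_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(my_arch_dma_debug_init);
```
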
diff --git a/include/linux/dma-debug.h b/include/linux/dma-debug.h
new file mode 100644
index 000000000000..e851d23e91eb
--- /dev/null
+++ b/include/linux/dma-debug.h
@@ -0,0 +1,174 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2008 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Author: Joerg Roedel <joerg.roedel@amd.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License version 2 as published | ||
8 | * by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the Free Software | ||
17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
18 | */ | ||
19 | |||
20 | #ifndef __DMA_DEBUG_H | ||
21 | #define __DMA_DEBUG_H | ||
22 | |||
23 | #include <linux/types.h> | ||
24 | |||
25 | struct device; | ||
26 | struct scatterlist; | ||
27 | struct bus_type; | ||
28 | |||
29 | #ifdef CONFIG_DMA_API_DEBUG | ||
30 | |||
31 | extern void dma_debug_add_bus(struct bus_type *bus); | ||
32 | |||
33 | extern void dma_debug_init(u32 num_entries); | ||
34 | |||
35 | extern void debug_dma_map_page(struct device *dev, struct page *page, | ||
36 | size_t offset, size_t size, | ||
37 | int direction, dma_addr_t dma_addr, | ||
38 | bool map_single); | ||
39 | |||
40 | extern void debug_dma_unmap_page(struct device *dev, dma_addr_t addr, | ||
41 | size_t size, int direction, bool map_single); | ||
42 | |||
43 | extern void debug_dma_map_sg(struct device *dev, struct scatterlist *sg, | ||
44 | int nents, int mapped_ents, int direction); | ||
45 | |||
46 | extern void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, | ||
47 | int nelems, int dir); | ||
48 | |||
49 | extern void debug_dma_alloc_coherent(struct device *dev, size_t size, | ||
50 | dma_addr_t dma_addr, void *virt); | ||
51 | |||
52 | extern void debug_dma_free_coherent(struct device *dev, size_t size, | ||
53 | void *virt, dma_addr_t addr); | ||
54 | |||
55 | extern void debug_dma_sync_single_for_cpu(struct device *dev, | ||
56 | dma_addr_t dma_handle, size_t size, | ||
57 | int direction); | ||
58 | |||
59 | extern void debug_dma_sync_single_for_device(struct device *dev, | ||
60 | dma_addr_t dma_handle, | ||
61 | size_t size, int direction); | ||
62 | |||
63 | extern void debug_dma_sync_single_range_for_cpu(struct device *dev, | ||
64 | dma_addr_t dma_handle, | ||
65 | unsigned long offset, | ||
66 | size_t size, | ||
67 | int direction); | ||
68 | |||
69 | extern void debug_dma_sync_single_range_for_device(struct device *dev, | ||
70 | dma_addr_t dma_handle, | ||
71 | unsigned long offset, | ||
72 | size_t size, int direction); | ||
73 | |||
74 | extern void debug_dma_sync_sg_for_cpu(struct device *dev, | ||
75 | struct scatterlist *sg, | ||
76 | int nelems, int direction); | ||
77 | |||
78 | extern void debug_dma_sync_sg_for_device(struct device *dev, | ||
79 | struct scatterlist *sg, | ||
80 | int nelems, int direction); | ||
81 | |||
82 | extern void debug_dma_dump_mappings(struct device *dev); | ||
83 | |||
84 | #else /* CONFIG_DMA_API_DEBUG */ | ||
85 | |||
86 | static inline void dma_debug_add_bus(struct bus_type *bus) | ||
87 | { | ||
88 | } | ||
89 | |||
90 | static inline void dma_debug_init(u32 num_entries) | ||
91 | { | ||
92 | } | ||
93 | |||
94 | static inline void debug_dma_map_page(struct device *dev, struct page *page, | ||
95 | size_t offset, size_t size, | ||
96 | int direction, dma_addr_t dma_addr, | ||
97 | bool map_single) | ||
98 | { | ||
99 | } | ||
100 | |||
101 | static inline void debug_dma_unmap_page(struct device *dev, dma_addr_t addr, | ||
102 | size_t size, int direction, | ||
103 | bool map_single) | ||
104 | { | ||
105 | } | ||
106 | |||
107 | static inline void debug_dma_map_sg(struct device *dev, struct scatterlist *sg, | ||
108 | int nents, int mapped_ents, int direction) | ||
109 | { | ||
110 | } | ||
111 | |||
112 | static inline void debug_dma_unmap_sg(struct device *dev, | ||
113 | struct scatterlist *sglist, | ||
114 | int nelems, int dir) | ||
115 | { | ||
116 | } | ||
117 | |||
118 | static inline void debug_dma_alloc_coherent(struct device *dev, size_t size, | ||
119 | dma_addr_t dma_addr, void *virt) | ||
120 | { | ||
121 | } | ||
122 | |||
123 | static inline void debug_dma_free_coherent(struct device *dev, size_t size, | ||
124 | void *virt, dma_addr_t addr) | ||
125 | { | ||
126 | } | ||
127 | |||
128 | static inline void debug_dma_sync_single_for_cpu(struct device *dev, | ||
129 | dma_addr_t dma_handle, | ||
130 | size_t size, int direction) | ||
131 | { | ||
132 | } | ||
133 | |||
134 | static inline void debug_dma_sync_single_for_device(struct device *dev, | ||
135 | dma_addr_t dma_handle, | ||
136 | size_t size, int direction) | ||
137 | { | ||
138 | } | ||
139 | |||
140 | static inline void debug_dma_sync_single_range_for_cpu(struct device *dev, | ||
141 | dma_addr_t dma_handle, | ||
142 | unsigned long offset, | ||
143 | size_t size, | ||
144 | int direction) | ||
145 | { | ||
146 | } | ||
147 | |||
148 | static inline void debug_dma_sync_single_range_for_device(struct device *dev, | ||
149 | dma_addr_t dma_handle, | ||
150 | unsigned long offset, | ||
151 | size_t size, | ||
152 | int direction) | ||
153 | { | ||
154 | } | ||
155 | |||
156 | static inline void debug_dma_sync_sg_for_cpu(struct device *dev, | ||
157 | struct scatterlist *sg, | ||
158 | int nelems, int direction) | ||
159 | { | ||
160 | } | ||
161 | |||
162 | static inline void debug_dma_sync_sg_for_device(struct device *dev, | ||
163 | struct scatterlist *sg, | ||
164 | int nelems, int direction) | ||
165 | { | ||
166 | } | ||
167 | |||
168 | static inline void debug_dma_dump_mappings(struct device *dev) | ||
169 | { | ||
170 | } | ||
171 | |||
172 | #endif /* CONFIG_DMA_API_DEBUG */ | ||
173 | |||
174 | #endif /* __DMA_DEBUG_H */ | ||
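
Note that the header also declares debug_dma_dump_mappings(), which is not called from the instrumented wrappers but is exported by lib/dma-debug.c below; debugging code can call it to list every mapping currently tracked for a device. A minimal sketch (my_dump_pending_dma is a hypothetical name):

```c
#include <linux/dma-debug.h>

/* Print all DMA mappings the debug code currently tracks for @dev;
 * passing NULL dumps the tracked mappings of every device. With
 * CONFIG_DMA_API_DEBUG disabled this compiles to an empty inline stub.
 */
static void my_dump_pending_dma(struct device *dev)
{
	debug_dma_dump_mappings(dev);
}
```
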
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 1bcf9cd4baa0..d9cbada7e2f8 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -902,6 +902,17 @@ config DYNAMIC_PRINTK_DEBUG | |||
902 | debugging for all modules. This mode can be turned off via the above | 902 | debugging for all modules. This mode can be turned off via the above |
903 | disable command. | 903 | disable command. |
904 | 904 | ||
905 | config DMA_API_DEBUG | ||
906 | bool "Enable debugging of DMA-API usage" | ||
907 | depends on HAVE_DMA_API_DEBUG | ||
908 | help | ||
909 | Enable this option to debug the use of the DMA API by device drivers. | ||
910 | With this option you will be able to detect common bugs in device | ||
911 | drivers like double-freeing of DMA mappings or freeing mappings that | ||
912 | were never allocated. | ||
913 | This option causes a performance degradation. Use only if you want | ||
914 | to debug device drivers. If unsure, say N. | ||
915 | |||
905 | source "samples/Kconfig" | 916 | source "samples/Kconfig" |
906 | 917 | ||
907 | source "lib/Kconfig.kgdb" | 918 | source "lib/Kconfig.kgdb" |
diff --git a/lib/Makefile b/lib/Makefile
index 32b0e64ded27..50b48cf63e4a 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -84,6 +84,8 @@ obj-$(CONFIG_HAVE_ARCH_TRACEHOOK) += syscall.o | |||
84 | 84 | ||
85 | obj-$(CONFIG_DYNAMIC_PRINTK_DEBUG) += dynamic_printk.o | 85 | obj-$(CONFIG_DYNAMIC_PRINTK_DEBUG) += dynamic_printk.o |
86 | 86 | ||
87 | obj-$(CONFIG_DMA_API_DEBUG) += dma-debug.o | ||
88 | |||
87 | hostprogs-y := gen_crc32table | 89 | hostprogs-y := gen_crc32table |
88 | clean-files := crc32table.h | 90 | clean-files := crc32table.h |
89 | 91 | ||
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
new file mode 100644
index 000000000000..9a350b414a50
--- /dev/null
+++ b/lib/dma-debug.c
@@ -0,0 +1,949 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2008 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Author: Joerg Roedel <joerg.roedel@amd.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License version 2 as published | ||
8 | * by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the Free Software | ||
17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
18 | */ | ||
19 | |||
20 | #include <linux/scatterlist.h> | ||
21 | #include <linux/dma-mapping.h> | ||
22 | #include <linux/stacktrace.h> | ||
23 | #include <linux/dma-debug.h> | ||
24 | #include <linux/spinlock.h> | ||
25 | #include <linux/debugfs.h> | ||
26 | #include <linux/device.h> | ||
27 | #include <linux/types.h> | ||
28 | #include <linux/sched.h> | ||
29 | #include <linux/list.h> | ||
30 | #include <linux/slab.h> | ||
31 | |||
32 | #include <asm/sections.h> | ||
33 | |||
34 | #define HASH_SIZE 1024ULL | ||
35 | #define HASH_FN_SHIFT 13 | ||
36 | #define HASH_FN_MASK (HASH_SIZE - 1) | ||
37 | |||
38 | enum { | ||
39 | dma_debug_single, | ||
40 | dma_debug_page, | ||
41 | dma_debug_sg, | ||
42 | dma_debug_coherent, | ||
43 | }; | ||
44 | |||
45 | #define DMA_DEBUG_STACKTRACE_ENTRIES 5 | ||
46 | |||
47 | struct dma_debug_entry { | ||
48 | struct list_head list; | ||
49 | struct device *dev; | ||
50 | int type; | ||
51 | phys_addr_t paddr; | ||
52 | u64 dev_addr; | ||
53 | u64 size; | ||
54 | int direction; | ||
55 | int sg_call_ents; | ||
56 | int sg_mapped_ents; | ||
57 | #ifdef CONFIG_STACKTRACE | ||
58 | struct stack_trace stacktrace; | ||
59 | unsigned long st_entries[DMA_DEBUG_STACKTRACE_ENTRIES]; | ||
60 | #endif | ||
61 | }; | ||
62 | |||
63 | struct hash_bucket { | ||
64 | struct list_head list; | ||
65 | spinlock_t lock; | ||
66 | } ____cacheline_aligned_in_smp; | ||
67 | |||
68 | /* Hash list to save the allocated dma addresses */ | ||
69 | static struct hash_bucket dma_entry_hash[HASH_SIZE]; | ||
70 | /* List of pre-allocated dma_debug_entry's */ | ||
71 | static LIST_HEAD(free_entries); | ||
72 | /* Lock for the list above */ | ||
73 | static DEFINE_SPINLOCK(free_entries_lock); | ||
74 | |||
75 | /* Global disable flag - will be set in case of an error */ | ||
76 | static bool global_disable __read_mostly; | ||
77 | |||
78 | /* Global error count */ | ||
79 | static u32 error_count; | ||
80 | |||
81 | /* Global error show enable*/ | ||
82 | static u32 show_all_errors __read_mostly; | ||
83 | /* Number of errors to show */ | ||
84 | static u32 show_num_errors = 1; | ||
85 | |||
86 | static u32 num_free_entries; | ||
87 | static u32 min_free_entries; | ||
88 | |||
89 | /* number of preallocated entries requested by kernel cmdline */ | ||
90 | static u32 req_entries; | ||
91 | |||
92 | /* debugfs dentry's for the stuff above */ | ||
93 | static struct dentry *dma_debug_dent __read_mostly; | ||
94 | static struct dentry *global_disable_dent __read_mostly; | ||
95 | static struct dentry *error_count_dent __read_mostly; | ||
96 | static struct dentry *show_all_errors_dent __read_mostly; | ||
97 | static struct dentry *show_num_errors_dent __read_mostly; | ||
98 | static struct dentry *num_free_entries_dent __read_mostly; | ||
99 | static struct dentry *min_free_entries_dent __read_mostly; | ||
100 | |||
101 | static const char *type2name[4] = { "single", "page", | ||
102 | "scather-gather", "coherent" }; | ||
103 | |||
104 | static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE", | ||
105 | "DMA_FROM_DEVICE", "DMA_NONE" }; | ||
106 | |||
107 | /* | ||
108 | * The access to some variables in this macro is racy. We can't use atomic_t | ||
109 | * here because all these variables are exported to debugfs. Some of them | ||
110 | * are even writeable. This is also the reason why a lock won't help much. | ||
111 | * But anyway, the races are no big deal. Here is why: | ||
112 | * | ||
113 | * error_count: the addition is racy, but the worst thing that can happen is | ||
114 | * that we don't count some errors | ||
115 | * show_num_errors: the subtraction is racy. Also no big deal because in | ||
116 | * worst case this will result in one warning more in the | ||
117 | * system log than the user configured. This variable is | ||
118 | * writeable via debugfs. | ||
119 | */ | ||
120 | static inline void dump_entry_trace(struct dma_debug_entry *entry) | ||
121 | { | ||
122 | #ifdef CONFIG_STACKTRACE | ||
123 | if (entry) { | ||
124 | printk(KERN_WARNING "Mapped at:\n"); | ||
125 | print_stack_trace(&entry->stacktrace, 0); | ||
126 | } | ||
127 | #endif | ||
128 | } | ||
129 | |||
130 | #define err_printk(dev, entry, format, arg...) do { \ | ||
131 | error_count += 1; \ | ||
132 | if (show_all_errors || show_num_errors > 0) { \ | ||
133 | WARN(1, "%s %s: " format, \ | ||
134 | dev_driver_string(dev), \ | ||
135 | dev_name(dev) , ## arg); \ | ||
136 | dump_entry_trace(entry); \ | ||
137 | } \ | ||
138 | if (!show_all_errors && show_num_errors > 0) \ | ||
139 | show_num_errors -= 1; \ | ||
140 | } while (0) | ||
141 | |||
142 | /* | ||
143 | * Hash related functions | ||
144 | * | ||
145 | * Every DMA-API request is saved into a struct dma_debug_entry. To | ||
146 | * have quick access to these structs they are stored into a hash. | ||
147 | */ | ||
148 | static int hash_fn(struct dma_debug_entry *entry) | ||
149 | { | ||
150 | /* | ||
151 | * Hash function is based on the dma address. | ||
152 | * We use bits 13-22 here as the index into the hash | ||
153 | */ | ||
154 | return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK; | ||
155 | } | ||
156 | |||
157 | /* | ||
158 | * Request exclusive access to a hash bucket for a given dma_debug_entry. | ||
159 | */ | ||
160 | static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry, | ||
161 | unsigned long *flags) | ||
162 | { | ||
163 | int idx = hash_fn(entry); | ||
164 | unsigned long __flags; | ||
165 | |||
166 | spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags); | ||
167 | *flags = __flags; | ||
168 | return &dma_entry_hash[idx]; | ||
169 | } | ||
170 | |||
171 | /* | ||
172 | * Give up exclusive access to the hash bucket | ||
173 | */ | ||
174 | static void put_hash_bucket(struct hash_bucket *bucket, | ||
175 | unsigned long *flags) | ||
176 | { | ||
177 | unsigned long __flags = *flags; | ||
178 | |||
179 | spin_unlock_irqrestore(&bucket->lock, __flags); | ||
180 | } | ||
181 | |||
182 | /* | ||
183 | * Search a given entry in the hash bucket list | ||
184 | */ | ||
185 | static struct dma_debug_entry *hash_bucket_find(struct hash_bucket *bucket, | ||
186 | struct dma_debug_entry *ref) | ||
187 | { | ||
188 | struct dma_debug_entry *entry; | ||
189 | |||
190 | list_for_each_entry(entry, &bucket->list, list) { | ||
191 | if ((entry->dev_addr == ref->dev_addr) && | ||
192 | (entry->dev == ref->dev)) | ||
193 | return entry; | ||
194 | } | ||
195 | |||
196 | return NULL; | ||
197 | } | ||
198 | |||
199 | /* | ||
200 | * Add an entry to a hash bucket | ||
201 | */ | ||
202 | static void hash_bucket_add(struct hash_bucket *bucket, | ||
203 | struct dma_debug_entry *entry) | ||
204 | { | ||
205 | list_add_tail(&entry->list, &bucket->list); | ||
206 | } | ||
207 | |||
208 | /* | ||
209 | * Remove entry from a hash bucket list | ||
210 | */ | ||
211 | static void hash_bucket_del(struct dma_debug_entry *entry) | ||
212 | { | ||
213 | list_del(&entry->list); | ||
214 | } | ||
215 | |||
216 | /* | ||
217 | * Dump mapping entries for debugging purposes | ||
218 | */ | ||
219 | void debug_dma_dump_mappings(struct device *dev) | ||
220 | { | ||
221 | int idx; | ||
222 | |||
223 | for (idx = 0; idx < HASH_SIZE; idx++) { | ||
224 | struct hash_bucket *bucket = &dma_entry_hash[idx]; | ||
225 | struct dma_debug_entry *entry; | ||
226 | unsigned long flags; | ||
227 | |||
228 | spin_lock_irqsave(&bucket->lock, flags); | ||
229 | |||
230 | list_for_each_entry(entry, &bucket->list, list) { | ||
231 | if (!dev || dev == entry->dev) { | ||
232 | dev_info(entry->dev, | ||
233 | "%s idx %d P=%Lx D=%Lx L=%Lx %s\n", | ||
234 | type2name[entry->type], idx, | ||
235 | (unsigned long long)entry->paddr, | ||
236 | entry->dev_addr, entry->size, | ||
237 | dir2name[entry->direction]); | ||
238 | } | ||
239 | } | ||
240 | |||
241 | spin_unlock_irqrestore(&bucket->lock, flags); | ||
242 | } | ||
243 | } | ||
244 | EXPORT_SYMBOL(debug_dma_dump_mappings); | ||
245 | |||
246 | /* | ||
247 | * Wrapper function for adding an entry to the hash. | ||
248 | * This function takes care of locking itself. | ||
249 | */ | ||
250 | static void add_dma_entry(struct dma_debug_entry *entry) | ||
251 | { | ||
252 | struct hash_bucket *bucket; | ||
253 | unsigned long flags; | ||
254 | |||
255 | bucket = get_hash_bucket(entry, &flags); | ||
256 | hash_bucket_add(bucket, entry); | ||
257 | put_hash_bucket(bucket, &flags); | ||
258 | } | ||
259 | |||
260 | /* struct dma_entry allocator | ||
261 | * | ||
262 | * The next two functions implement the allocator for | ||
263 | * struct dma_debug_entries. | ||
264 | */ | ||
265 | static struct dma_debug_entry *dma_entry_alloc(void) | ||
266 | { | ||
267 | struct dma_debug_entry *entry = NULL; | ||
268 | unsigned long flags; | ||
269 | |||
270 | spin_lock_irqsave(&free_entries_lock, flags); | ||
271 | |||
272 | if (list_empty(&free_entries)) { | ||
273 | printk(KERN_ERR "DMA-API: debugging out of memory " | ||
274 | "- disabling\n"); | ||
275 | global_disable = true; | ||
276 | goto out; | ||
277 | } | ||
278 | |||
279 | entry = list_entry(free_entries.next, struct dma_debug_entry, list); | ||
280 | list_del(&entry->list); | ||
281 | memset(entry, 0, sizeof(*entry)); | ||
282 | |||
283 | #ifdef CONFIG_STACKTRACE | ||
284 | entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES; | ||
285 | entry->stacktrace.entries = entry->st_entries; | ||
286 | entry->stacktrace.skip = 2; | ||
287 | save_stack_trace(&entry->stacktrace); | ||
288 | #endif | ||
289 | num_free_entries -= 1; | ||
290 | if (num_free_entries < min_free_entries) | ||
291 | min_free_entries = num_free_entries; | ||
292 | |||
293 | out: | ||
294 | spin_unlock_irqrestore(&free_entries_lock, flags); | ||
295 | |||
296 | return entry; | ||
297 | } | ||
298 | |||
299 | static void dma_entry_free(struct dma_debug_entry *entry) | ||
300 | { | ||
301 | unsigned long flags; | ||
302 | |||
303 | /* | ||
304 | * add to beginning of the list - this way the entries are | ||
305 | * more likely cache hot when they are reallocated. | ||
306 | */ | ||
307 | spin_lock_irqsave(&free_entries_lock, flags); | ||
308 | list_add(&entry->list, &free_entries); | ||
309 | num_free_entries += 1; | ||
310 | spin_unlock_irqrestore(&free_entries_lock, flags); | ||
311 | } | ||
312 | |||
313 | /* | ||
314 | * DMA-API debugging init code | ||
315 | * | ||
316 | * The init code does two things: | ||
317 | * 1. Initialize core data structures | ||
318 | * 2. Preallocate a given number of dma_debug_entry structs | ||
319 | */ | ||
320 | |||
321 | static int prealloc_memory(u32 num_entries) | ||
322 | { | ||
323 | struct dma_debug_entry *entry, *next_entry; | ||
324 | int i; | ||
325 | |||
326 | for (i = 0; i < num_entries; ++i) { | ||
327 | entry = kzalloc(sizeof(*entry), GFP_KERNEL); | ||
328 | if (!entry) | ||
329 | goto out_err; | ||
330 | |||
331 | list_add_tail(&entry->list, &free_entries); | ||
332 | } | ||
333 | |||
334 | num_free_entries = num_entries; | ||
335 | min_free_entries = num_entries; | ||
336 | |||
337 | printk(KERN_INFO "DMA-API: preallocated %d debug entries\n", | ||
338 | num_entries); | ||
339 | |||
340 | return 0; | ||
341 | |||
342 | out_err: | ||
343 | |||
344 | list_for_each_entry_safe(entry, next_entry, &free_entries, list) { | ||
345 | list_del(&entry->list); | ||
346 | kfree(entry); | ||
347 | } | ||
348 | |||
349 | return -ENOMEM; | ||
350 | } | ||
351 | |||
352 | static int dma_debug_fs_init(void) | ||
353 | { | ||
354 | dma_debug_dent = debugfs_create_dir("dma-api", NULL); | ||
355 | if (!dma_debug_dent) { | ||
356 | printk(KERN_ERR "DMA-API: can not create debugfs directory\n"); | ||
357 | return -ENOMEM; | ||
358 | } | ||
359 | |||
360 | global_disable_dent = debugfs_create_bool("disabled", 0444, | ||
361 | dma_debug_dent, | ||
362 | (u32 *)&global_disable); | ||
363 | if (!global_disable_dent) | ||
364 | goto out_err; | ||
365 | |||
366 | error_count_dent = debugfs_create_u32("error_count", 0444, | ||
367 | dma_debug_dent, &error_count); | ||
368 | if (!error_count_dent) | ||
369 | goto out_err; | ||
370 | |||
371 | show_all_errors_dent = debugfs_create_u32("all_errors", 0644, | ||
372 | dma_debug_dent, | ||
373 | &show_all_errors); | ||
374 | if (!show_all_errors_dent) | ||
375 | goto out_err; | ||
376 | |||
377 | show_num_errors_dent = debugfs_create_u32("num_errors", 0644, | ||
378 | dma_debug_dent, | ||
379 | &show_num_errors); | ||
380 | if (!show_num_errors_dent) | ||
381 | goto out_err; | ||
382 | |||
383 | num_free_entries_dent = debugfs_create_u32("num_free_entries", 0444, | ||
384 | dma_debug_dent, | ||
385 | &num_free_entries); | ||
386 | if (!num_free_entries_dent) | ||
387 | goto out_err; | ||
388 | |||
389 | min_free_entries_dent = debugfs_create_u32("min_free_entries", 0444, | ||
390 | dma_debug_dent, | ||
391 | &min_free_entries); | ||
392 | if (!min_free_entries_dent) | ||
393 | goto out_err; | ||
394 | |||
395 | return 0; | ||
396 | |||
397 | out_err: | ||
398 | debugfs_remove_recursive(dma_debug_dent); | ||
399 | |||
400 | return -ENOMEM; | ||
401 | } | ||
402 | |||
403 | static int device_dma_allocations(struct device *dev) | ||
404 | { | ||
405 | struct dma_debug_entry *entry; | ||
406 | unsigned long flags; | ||
407 | int count = 0, i; | ||
408 | |||
409 | for (i = 0; i < HASH_SIZE; ++i) { | ||
410 | spin_lock_irqsave(&dma_entry_hash[i].lock, flags); | ||
411 | list_for_each_entry(entry, &dma_entry_hash[i].list, list) { | ||
412 | if (entry->dev == dev) | ||
413 | count += 1; | ||
414 | } | ||
415 | spin_unlock_irqrestore(&dma_entry_hash[i].lock, flags); | ||
416 | } | ||
417 | |||
418 | return count; | ||
419 | } | ||
420 | |||
421 | static int dma_debug_device_change(struct notifier_block *nb, | ||
422 | unsigned long action, void *data) | ||
423 | { | ||
424 | struct device *dev = data; | ||
425 | int count; | ||
426 | |||
427 | |||
428 | switch (action) { | ||
429 | case BUS_NOTIFY_UNBIND_DRIVER: | ||
430 | count = device_dma_allocations(dev); | ||
431 | if (count == 0) | ||
432 | break; | ||
433 | err_printk(dev, NULL, "DMA-API: device driver has pending " | ||
434 | "DMA allocations while released from device " | ||
435 | "[count=%d]\n", count); | ||
436 | break; | ||
437 | default: | ||
438 | break; | ||
439 | } | ||
440 | |||
441 | return 0; | ||
442 | } | ||
443 | |||
444 | void dma_debug_add_bus(struct bus_type *bus) | ||
445 | { | ||
446 | struct notifier_block *nb; | ||
447 | |||
448 | nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL); | ||
449 | if (nb == NULL) { | ||
450 | printk(KERN_ERR "dma_debug_add_bus: out of memory\n"); | ||
451 | return; | ||
452 | } | ||
453 | |||
454 | nb->notifier_call = dma_debug_device_change; | ||
455 | |||
456 | bus_register_notifier(bus, nb); | ||
457 | } | ||
458 | |||
459 | /* | ||
460 | * Let the architectures decide how many entries should be preallocated. | ||
461 | */ | ||
462 | void dma_debug_init(u32 num_entries) | ||
463 | { | ||
464 | int i; | ||
465 | |||
466 | if (global_disable) | ||
467 | return; | ||
468 | |||
469 | for (i = 0; i < HASH_SIZE; ++i) { | ||
470 | INIT_LIST_HEAD(&dma_entry_hash[i].list); | ||
471 | spin_lock_init(&dma_entry_hash[i].lock); | ||
472 | } | ||
473 | |||
474 | if (dma_debug_fs_init() != 0) { | ||
475 | printk(KERN_ERR "DMA-API: error creating debugfs entries " | ||
476 | "- disabling\n"); | ||
477 | global_disable = true; | ||
478 | |||
479 | return; | ||
480 | } | ||
481 | |||
482 | if (req_entries) | ||
483 | num_entries = req_entries; | ||
484 | |||
485 | if (prealloc_memory(num_entries) != 0) { | ||
486 | printk(KERN_ERR "DMA-API: debugging out of memory error " | ||
487 | "- disabled\n"); | ||
488 | global_disable = true; | ||
489 | |||
490 | return; | ||
491 | } | ||
492 | |||
493 | printk(KERN_INFO "DMA-API: debugging enabled by kernel config\n"); | ||
494 | } | ||
495 | |||
496 | static __init int dma_debug_cmdline(char *str) | ||
497 | { | ||
498 | if (!str) | ||
499 | return -EINVAL; | ||
500 | |||
501 | if (strncmp(str, "off", 3) == 0) { | ||
502 | printk(KERN_INFO "DMA-API: debugging disabled on kernel " | ||
503 | "command line\n"); | ||
504 | global_disable = true; | ||
505 | } | ||
506 | |||
507 | return 0; | ||
508 | } | ||
509 | |||
510 | static __init int dma_debug_entries_cmdline(char *str) | ||
511 | { | ||
512 | int res; | ||
513 | |||
514 | if (!str) | ||
515 | return -EINVAL; | ||
516 | |||
517 | res = get_option(&str, &req_entries); | ||
518 | |||
519 | if (!res) | ||
520 | req_entries = 0; | ||
521 | |||
522 | return 0; | ||
523 | } | ||
524 | |||
525 | __setup("dma_debug=", dma_debug_cmdline); | ||
526 | __setup("dma_debug_entries=", dma_debug_entries_cmdline); | ||
527 | |||
528 | static void check_unmap(struct dma_debug_entry *ref) | ||
529 | { | ||
530 | struct dma_debug_entry *entry; | ||
531 | struct hash_bucket *bucket; | ||
532 | unsigned long flags; | ||
533 | |||
534 | if (dma_mapping_error(ref->dev, ref->dev_addr)) | ||
535 | return; | ||
536 | |||
537 | bucket = get_hash_bucket(ref, &flags); | ||
538 | entry = hash_bucket_find(bucket, ref); | ||
539 | |||
540 | if (!entry) { | ||
541 | err_printk(ref->dev, NULL, "DMA-API: device driver tries " | ||
542 | "to free DMA memory it has not allocated " | ||
543 | "[device address=0x%016llx] [size=%llu bytes]\n", | ||
544 | ref->dev_addr, ref->size); | ||
545 | goto out; | ||
546 | } | ||
547 | |||
548 | if (ref->size != entry->size) { | ||
549 | err_printk(ref->dev, entry, "DMA-API: device driver frees " | ||
550 | "DMA memory with different size " | ||
551 | "[device address=0x%016llx] [map size=%llu bytes] " | ||
552 | "[unmap size=%llu bytes]\n", | ||
553 | ref->dev_addr, entry->size, ref->size); | ||
554 | } | ||
555 | |||
556 | if (ref->type != entry->type) { | ||
557 | err_printk(ref->dev, entry, "DMA-API: device driver frees " | ||
558 | "DMA memory with wrong function " | ||
559 | "[device address=0x%016llx] [size=%llu bytes] " | ||
560 | "[mapped as %s] [unmapped as %s]\n", | ||
561 | ref->dev_addr, ref->size, | ||
562 | type2name[entry->type], type2name[ref->type]); | ||
563 | } else if ((entry->type == dma_debug_coherent) && | ||
564 | (ref->paddr != entry->paddr)) { | ||
565 | err_printk(ref->dev, entry, "DMA-API: device driver frees " | ||
566 | "DMA memory with different CPU address " | ||
567 | "[device address=0x%016llx] [size=%llu bytes] " | ||
568 | "[cpu alloc address=%p] [cpu free address=%p]", | ||
569 | ref->dev_addr, ref->size, | ||
570 | (void *)entry->paddr, (void *)ref->paddr); | ||
571 | } | ||
572 | |||
573 | if (ref->sg_call_ents && ref->type == dma_debug_sg && | ||
574 | ref->sg_call_ents != entry->sg_call_ents) { | ||
575 | err_printk(ref->dev, entry, "DMA-API: device driver frees " | ||
576 | "DMA sg list with different entry count " | ||
577 | "[map count=%d] [unmap count=%d]\n", | ||
578 | entry->sg_call_ents, ref->sg_call_ents); | ||
579 | } | ||
580 | |||
581 | /* | ||
582 | * This may be no bug in reality - but most implementations of the | ||
583 | * DMA API don't handle this properly, so check for it here | ||
584 | */ | ||
585 | if (ref->direction != entry->direction) { | ||
586 | err_printk(ref->dev, entry, "DMA-API: device driver frees " | ||
587 | "DMA memory with different direction " | ||
588 | "[device address=0x%016llx] [size=%llu bytes] " | ||
589 | "[mapped with %s] [unmapped with %s]\n", | ||
590 | ref->dev_addr, ref->size, | ||
591 | dir2name[entry->direction], | ||
592 | dir2name[ref->direction]); | ||
593 | } | ||
594 | |||
595 | hash_bucket_del(entry); | ||
596 | dma_entry_free(entry); | ||
597 | |||
598 | out: | ||
599 | put_hash_bucket(bucket, &flags); | ||
600 | } | ||
601 | |||
602 | static void check_for_stack(struct device *dev, void *addr) | ||
603 | { | ||
604 | if (object_is_on_stack(addr)) | ||
605 | err_printk(dev, NULL, "DMA-API: device driver maps memory from " | ||
606 | "stack [addr=%p]\n", addr); | ||
607 | } | ||
608 | |||
609 | static inline bool overlap(void *addr, u64 size, void *start, void *end) | ||
610 | { | ||
611 | void *addr2 = (char *)addr + size; | ||
612 | |||
613 | return ((addr >= start && addr < end) || | ||
614 | (addr2 >= start && addr2 < end) || | ||
615 | ((addr < start) && (addr2 >= end))); | ||
616 | } | ||
617 | |||
618 | static void check_for_illegal_area(struct device *dev, void *addr, u64 size) | ||
619 | { | ||
620 | if (overlap(addr, size, _text, _etext) || | ||
621 | overlap(addr, size, __start_rodata, __end_rodata)) | ||
622 | err_printk(dev, NULL, "DMA-API: device driver maps " | ||
623 | "memory from kernel text or rodata " | ||
624 | "[addr=%p] [size=%llu]\n", addr, size); | ||
625 | } | ||
626 | |||
627 | static void check_sync(struct device *dev, dma_addr_t addr, | ||
628 | u64 size, u64 offset, int direction, bool to_cpu) | ||
629 | { | ||
630 | struct dma_debug_entry ref = { | ||
631 | .dev = dev, | ||
632 | .dev_addr = addr, | ||
633 | .size = size, | ||
634 | .direction = direction, | ||
635 | }; | ||
636 | struct dma_debug_entry *entry; | ||
637 | struct hash_bucket *bucket; | ||
638 | unsigned long flags; | ||
639 | |||
640 | bucket = get_hash_bucket(&ref, &flags); | ||
641 | |||
642 | entry = hash_bucket_find(bucket, &ref); | ||
643 | |||
644 | if (!entry) { | ||
645 | err_printk(dev, NULL, "DMA-API: device driver tries " | ||
646 | "to sync DMA memory it has not allocated " | ||
647 | "[device address=0x%016llx] [size=%llu bytes]\n", | ||
648 | addr, size); | ||
649 | goto out; | ||
650 | } | ||
651 | |||
652 | if ((offset + size) > entry->size) { | ||
653 | err_printk(dev, entry, "DMA-API: device driver syncs" | ||
654 | " DMA memory outside allocated range " | ||
655 | "[device address=0x%016llx] " | ||
656 | "[allocation size=%llu bytes] [sync offset=%llu] " | ||
657 | "[sync size=%llu]\n", entry->dev_addr, entry->size, | ||
658 | offset, size); | ||
659 | } | ||
660 | |||
661 | if (direction != entry->direction) { | ||
662 | err_printk(dev, entry, "DMA-API: device driver syncs " | ||
663 | "DMA memory with different direction " | ||
664 | "[device address=0x%016llx] [size=%llu bytes] " | ||
665 | "[mapped with %s] [synced with %s]\n", | ||
666 | addr, entry->size, | ||
667 | dir2name[entry->direction], | ||
668 | dir2name[direction]); | ||
669 | } | ||
670 | |||
671 | if (entry->direction == DMA_BIDIRECTIONAL) | ||
672 | goto out; | ||
673 | |||
674 | if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) && | ||
675 | !(direction == DMA_TO_DEVICE)) | ||
676 | err_printk(dev, entry, "DMA-API: device driver syncs " | ||
677 | "device read-only DMA memory for cpu " | ||
678 | "[device address=0x%016llx] [size=%llu bytes] " | ||
679 | "[mapped with %s] [synced with %s]\n", | ||
680 | addr, entry->size, | ||
681 | dir2name[entry->direction], | ||
682 | dir2name[direction]); | ||
683 | |||
684 | if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) && | ||
685 | !(direction == DMA_FROM_DEVICE)) | ||
686 | err_printk(dev, entry, "DMA-API: device driver syncs " | ||
687 | "device write-only DMA memory to device " | ||
688 | "[device address=0x%016llx] [size=%llu bytes] " | ||
689 | "[mapped with %s] [synced with %s]\n", | ||
690 | addr, entry->size, | ||
691 | dir2name[entry->direction], | ||
692 | dir2name[direction]); | ||
693 | |||
694 | out: | ||
695 | put_hash_bucket(bucket, &flags); | ||
696 | |||
697 | } | ||
698 | |||
699 | void debug_dma_map_page(struct device *dev, struct page *page, size_t offset, | ||
700 | size_t size, int direction, dma_addr_t dma_addr, | ||
701 | bool map_single) | ||
702 | { | ||
703 | struct dma_debug_entry *entry; | ||
704 | |||
705 | if (unlikely(global_disable)) | ||
706 | return; | ||
707 | |||
708 | if (unlikely(dma_mapping_error(dev, dma_addr))) | ||
709 | return; | ||
710 | |||
711 | entry = dma_entry_alloc(); | ||
712 | if (!entry) | ||
713 | return; | ||
714 | |||
715 | entry->dev = dev; | ||
716 | entry->type = dma_debug_page; | ||
717 | entry->paddr = page_to_phys(page) + offset; | ||
718 | entry->dev_addr = dma_addr; | ||
719 | entry->size = size; | ||
720 | entry->direction = direction; | ||
721 | |||
722 | if (map_single) { | ||
723 | void *addr = ((char *)page_address(page)) + offset; | ||
724 | |||
725 | entry->type = dma_debug_single; | ||
726 | check_for_stack(dev, addr); | ||
727 | check_for_illegal_area(dev, addr, size); | ||
728 | } | ||
729 | |||
730 | add_dma_entry(entry); | ||
731 | } | ||
732 | EXPORT_SYMBOL(debug_dma_map_page); | ||
733 | |||
734 | void debug_dma_unmap_page(struct device *dev, dma_addr_t addr, | ||
735 | size_t size, int direction, bool map_single) | ||
736 | { | ||
737 | struct dma_debug_entry ref = { | ||
738 | .type = dma_debug_page, | ||
739 | .dev = dev, | ||
740 | .dev_addr = addr, | ||
741 | .size = size, | ||
742 | .direction = direction, | ||
743 | }; | ||
744 | |||
745 | if (unlikely(global_disable)) | ||
746 | return; | ||
747 | |||
748 | if (map_single) | ||
749 | ref.type = dma_debug_single; | ||
750 | |||
751 | check_unmap(&ref); | ||
752 | } | ||
753 | EXPORT_SYMBOL(debug_dma_unmap_page); | ||
754 | |||
755 | void debug_dma_map_sg(struct device *dev, struct scatterlist *sg, | ||
756 | int nents, int mapped_ents, int direction) | ||
757 | { | ||
758 | struct dma_debug_entry *entry; | ||
759 | struct scatterlist *s; | ||
760 | int i; | ||
761 | |||
762 | if (unlikely(global_disable)) | ||
763 | return; | ||
764 | |||
765 | for_each_sg(sg, s, mapped_ents, i) { | ||
766 | entry = dma_entry_alloc(); | ||
767 | if (!entry) | ||
768 | return; | ||
769 | |||
770 | entry->type = dma_debug_sg; | ||
771 | entry->dev = dev; | ||
772 | entry->paddr = sg_phys(s); | ||
773 | entry->size = s->length; | ||
774 | entry->dev_addr = s->dma_address; | ||
775 | entry->direction = direction; | ||
776 | entry->sg_call_ents = nents; | ||
777 | entry->sg_mapped_ents = mapped_ents; | ||
778 | |||
779 | check_for_stack(dev, sg_virt(s)); | ||
780 | check_for_illegal_area(dev, sg_virt(s), s->length); | ||
781 | |||
782 | add_dma_entry(entry); | ||
783 | } | ||
784 | } | ||
785 | EXPORT_SYMBOL(debug_dma_map_sg); | ||
786 | |||
787 | void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, | ||
788 | int nelems, int dir) | ||
789 | { | ||
790 | struct dma_debug_entry *entry; | ||
791 | struct scatterlist *s; | ||
792 | int mapped_ents = 0, i; | ||
793 | unsigned long flags; | ||
794 | |||
795 | if (unlikely(global_disable)) | ||
796 | return; | ||
797 | |||
798 | for_each_sg(sglist, s, nelems, i) { | ||
799 | |||
800 | struct dma_debug_entry ref = { | ||
801 | .type = dma_debug_sg, | ||
802 | .dev = dev, | ||
803 | .paddr = sg_phys(s), | ||
804 | .dev_addr = s->dma_address, | ||
805 | .size = s->length, | ||
806 | .direction = dir, | ||
807 | .sg_call_ents = 0, | ||
808 | }; | ||
809 | |||
810 | if (mapped_ents && i >= mapped_ents) | ||
811 | break; | ||
812 | |||
813 | if (mapped_ents == 0) { | ||
814 | struct hash_bucket *bucket; | ||
815 | ref.sg_call_ents = nelems; | ||
816 | bucket = get_hash_bucket(&ref, &flags); | ||
817 | entry = hash_bucket_find(bucket, &ref); | ||
818 | if (entry) | ||
819 | mapped_ents = entry->sg_mapped_ents; | ||
820 | put_hash_bucket(bucket, &flags); | ||
821 | } | ||
822 | |||
823 | check_unmap(&ref); | ||
824 | } | ||
825 | } | ||
826 | EXPORT_SYMBOL(debug_dma_unmap_sg); | ||
827 | |||
828 | void debug_dma_alloc_coherent(struct device *dev, size_t size, | ||
829 | dma_addr_t dma_addr, void *virt) | ||
830 | { | ||
831 | struct dma_debug_entry *entry; | ||
832 | |||
833 | if (unlikely(global_disable)) | ||
834 | return; | ||
835 | |||
836 | if (unlikely(virt == NULL)) | ||
837 | return; | ||
838 | |||
839 | entry = dma_entry_alloc(); | ||
840 | if (!entry) | ||
841 | return; | ||
842 | |||
843 | entry->type = dma_debug_coherent; | ||
844 | entry->dev = dev; | ||
845 | entry->paddr = virt_to_phys(virt); | ||
846 | entry->size = size; | ||
847 | entry->dev_addr = dma_addr; | ||
848 | entry->direction = DMA_BIDIRECTIONAL; | ||
849 | |||
850 | add_dma_entry(entry); | ||
851 | } | ||
852 | EXPORT_SYMBOL(debug_dma_alloc_coherent); | ||
853 | |||
854 | void debug_dma_free_coherent(struct device *dev, size_t size, | ||
855 | void *virt, dma_addr_t addr) | ||
856 | { | ||
857 | struct dma_debug_entry ref = { | ||
858 | .type = dma_debug_coherent, | ||
859 | .dev = dev, | ||
860 | .paddr = virt_to_phys(virt), | ||
861 | .dev_addr = addr, | ||
862 | .size = size, | ||
863 | .direction = DMA_BIDIRECTIONAL, | ||
864 | }; | ||
865 | |||
866 | if (unlikely(global_disable)) | ||
867 | return; | ||
868 | |||
869 | check_unmap(&ref); | ||
870 | } | ||
871 | EXPORT_SYMBOL(debug_dma_free_coherent); | ||
872 | |||
873 | void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, | ||
874 | size_t size, int direction) | ||
875 | { | ||
876 | if (unlikely(global_disable)) | ||
877 | return; | ||
878 | |||
879 | check_sync(dev, dma_handle, size, 0, direction, true); | ||
880 | } | ||
881 | EXPORT_SYMBOL(debug_dma_sync_single_for_cpu); | ||
882 | |||
883 | void debug_dma_sync_single_for_device(struct device *dev, | ||
884 | dma_addr_t dma_handle, size_t size, | ||
885 | int direction) | ||
886 | { | ||
887 | if (unlikely(global_disable)) | ||
888 | return; | ||
889 | |||
890 | check_sync(dev, dma_handle, size, 0, direction, false); | ||
891 | } | ||
892 | EXPORT_SYMBOL(debug_dma_sync_single_for_device); | ||
893 | |||
894 | void debug_dma_sync_single_range_for_cpu(struct device *dev, | ||
895 | dma_addr_t dma_handle, | ||
896 | unsigned long offset, size_t size, | ||
897 | int direction) | ||
898 | { | ||
899 | if (unlikely(global_disable)) | ||
900 | return; | ||
901 | |||
902 | check_sync(dev, dma_handle, size, offset, direction, true); | ||
903 | } | ||
904 | EXPORT_SYMBOL(debug_dma_sync_single_range_for_cpu); | ||
905 | |||
906 | void debug_dma_sync_single_range_for_device(struct device *dev, | ||
907 | dma_addr_t dma_handle, | ||
908 | unsigned long offset, | ||
909 | size_t size, int direction) | ||
910 | { | ||
911 | if (unlikely(global_disable)) | ||
912 | return; | ||
913 | |||
914 | check_sync(dev, dma_handle, size, offset, direction, false); | ||
915 | } | ||
916 | EXPORT_SYMBOL(debug_dma_sync_single_range_for_device); | ||
917 | |||
918 | void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, | ||
919 | int nelems, int direction) | ||
920 | { | ||
921 | struct scatterlist *s; | ||
922 | int i; | ||
923 | |||
924 | if (unlikely(global_disable)) | ||
925 | return; | ||
926 | |||
927 | for_each_sg(sg, s, nelems, i) { | ||
928 | check_sync(dev, s->dma_address, s->dma_length, 0, | ||
929 | direction, true); | ||
930 | } | ||
931 | } | ||
932 | EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu); | ||
933 | |||
934 | void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, | ||
935 | int nelems, int direction) | ||
936 | { | ||
937 | struct scatterlist *s; | ||
938 | int i; | ||
939 | |||
940 | if (unlikely(global_disable)) | ||
941 | return; | ||
942 | |||
943 | for_each_sg(sg, s, nelems, i) { | ||
944 | check_sync(dev, s->dma_address, s->dma_length, 0, | ||
945 | direction, false); | ||
946 | } | ||
947 | } | ||
948 | EXPORT_SYMBOL(debug_dma_sync_sg_for_device); | ||
949 | |||