diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2009-03-30 16:41:00 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-03-30 16:41:00 -0400 |
commit | 712b0006bf3a9ed0b14a56c3291975e582127766 (patch) | |
tree | aff33e947673137ae21734321e1f036600297223 | |
parent | e1c502482853f84606928f5a2f2eb6da1993cda1 (diff) | |
parent | b0d44c0dbbd52effb731b1c0af9afd56215c48de (diff) |
Merge branch 'iommu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'iommu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (60 commits)
dma-debug: make memory range checks more consistent
dma-debug: warn of unmapping an invalid dma address
dma-debug: fix dma_debug_add_bus() definition for !CONFIG_DMA_API_DEBUG
dma-debug/x86: register pci bus for dma-debug leak detection
dma-debug: add a check dma memory leaks
dma-debug: add checks for kernel text and rodata
dma-debug: print stacktrace of mapping path on unmap error
dma-debug: Documentation update
dma-debug: x86 architecture bindings
dma-debug: add function to dump dma mappings
dma-debug: add checks for sync_single_sg_*
dma-debug: add checks for sync_single_range_*
dma-debug: add checks for sync_single_*
dma-debug: add checking for [alloc|free]_coherent
dma-debug: add add checking for map/unmap_sg
dma-debug: add checking for map/unmap_page/single
dma-debug: add core checking functions
dma-debug: add debugfs interface
dma-debug: add kernel command line parameters
dma-debug: add initialization code
...
Fix trivial conflicts due to whitespace changes in arch/x86/kernel/pci-nommu.c
40 files changed, 1899 insertions, 844 deletions
diff --git a/Documentation/DMA-API.txt b/Documentation/DMA-API.txt index 2a3fcc55e981..d9aa43d78bcc 100644 --- a/Documentation/DMA-API.txt +++ b/Documentation/DMA-API.txt | |||
@@ -609,3 +609,109 @@ size is the size (and should be a page-sized multiple). | |||
609 | The return value will be either a pointer to the processor virtual | 609 | The return value will be either a pointer to the processor virtual |
610 | address of the memory, or an error (via PTR_ERR()) if any part of the | 610 | address of the memory, or an error (via PTR_ERR()) if any part of the |
611 | region is occupied. | 611 | region is occupied. |
612 | |||
613 | Part III - Debug drivers use of the DMA-API | ||
614 | ------------------------------------------- | ||
615 | |||
616 | The DMA-API as described above has some constraints. DMA addresses must be | ||
617 | released with the corresponding function with the same size for example. With | ||
618 | the advent of hardware IOMMUs it becomes more and more important that drivers | ||
619 | do not violate those constraints. In the worst case such a violation can | ||
620 | result in data corruption up to destroyed filesystems. | ||
621 | |||
622 | To debug drivers and find bugs in the usage of the DMA-API checking code can | ||
623 | be compiled into the kernel which will tell the developer about those | ||
624 | violations. If your architecture supports it you can select the "Enable | ||
625 | debugging of DMA-API usage" option in your kernel configuration. Enabling this | ||
626 | option has a performance impact. Do not enable it in production kernels. | ||
627 | |||
628 | If you boot the resulting kernel will contain code which does some bookkeeping | ||
629 | about what DMA memory was allocated for which device. If this code detects an | ||
630 | error it prints a warning message with some details into your kernel log. An | ||
631 | example warning message may look like this: | ||
632 | |||
633 | ------------[ cut here ]------------ | ||
634 | WARNING: at /data2/repos/linux-2.6-iommu/lib/dma-debug.c:448 | ||
635 | check_unmap+0x203/0x490() | ||
636 | Hardware name: | ||
637 | forcedeth 0000:00:08.0: DMA-API: device driver frees DMA memory with wrong | ||
638 | function [device address=0x00000000640444be] [size=66 bytes] [mapped as | ||
639 | single] [unmapped as page] | ||
640 | Modules linked in: nfsd exportfs bridge stp llc r8169 | ||
641 | Pid: 0, comm: swapper Tainted: G W 2.6.28-dmatest-09289-g8bb99c0 #1 | ||
642 | Call Trace: | ||
643 | <IRQ> [<ffffffff80240b22>] warn_slowpath+0xf2/0x130 | ||
644 | [<ffffffff80647b70>] _spin_unlock+0x10/0x30 | ||
645 | [<ffffffff80537e75>] usb_hcd_link_urb_to_ep+0x75/0xc0 | ||
646 | [<ffffffff80647c22>] _spin_unlock_irqrestore+0x12/0x40 | ||
647 | [<ffffffff8055347f>] ohci_urb_enqueue+0x19f/0x7c0 | ||
648 | [<ffffffff80252f96>] queue_work+0x56/0x60 | ||
649 | [<ffffffff80237e10>] enqueue_task_fair+0x20/0x50 | ||
650 | [<ffffffff80539279>] usb_hcd_submit_urb+0x379/0xbc0 | ||
651 | [<ffffffff803b78c3>] cpumask_next_and+0x23/0x40 | ||
652 | [<ffffffff80235177>] find_busiest_group+0x207/0x8a0 | ||
653 | [<ffffffff8064784f>] _spin_lock_irqsave+0x1f/0x50 | ||
654 | [<ffffffff803c7ea3>] check_unmap+0x203/0x490 | ||
655 | [<ffffffff803c8259>] debug_dma_unmap_page+0x49/0x50 | ||
656 | [<ffffffff80485f26>] nv_tx_done_optimized+0xc6/0x2c0 | ||
657 | [<ffffffff80486c13>] nv_nic_irq_optimized+0x73/0x2b0 | ||
658 | [<ffffffff8026df84>] handle_IRQ_event+0x34/0x70 | ||
659 | [<ffffffff8026ffe9>] handle_edge_irq+0xc9/0x150 | ||
660 | [<ffffffff8020e3ab>] do_IRQ+0xcb/0x1c0 | ||
661 | [<ffffffff8020c093>] ret_from_intr+0x0/0xa | ||
662 | <EOI> <4>---[ end trace f6435a98e2a38c0e ]--- | ||
663 | |||
664 | The driver developer can find the driver and the device including a stacktrace | ||
665 | of the DMA-API call which caused this warning. | ||
666 | |||
667 | Per default only the first error will result in a warning message. All other | ||
668 | errors will only be silently counted. This limitation exists to prevent the code | ||
669 | from flooding your kernel log. To support debugging a device driver this can | ||
670 | be disabled via debugfs. See the debugfs interface documentation below for | ||
671 | details. | ||
672 | |||
673 | The debugfs directory for the DMA-API debugging code is called dma-api/. In | ||
674 | this directory the following files can currently be found: | ||
675 | |||
676 | dma-api/all_errors This file contains a numeric value. If this | ||
677 | value is not equal to zero the debugging code | ||
678 | will print a warning for every error it finds | ||
679 | into the kernel log. Be careful with this | ||
680 | option. It can easily flood your logs. | ||
681 | |||
682 | dma-api/disabled This read-only file contains the character 'Y' | ||
683 | if the debugging code is disabled. This can | ||
684 | happen when it runs out of memory or if it was | ||
685 | disabled at boot time | ||
686 | |||
687 | dma-api/error_count This file is read-only and shows the total | ||
688 | numbers of errors found. | ||
689 | |||
690 | dma-api/num_errors The number in this file shows how many | ||
691 | warnings will be printed to the kernel log | ||
692 | before it stops. This number is initialized to | ||
693 | one at system boot and can be set by writing into | ||
694 | this file | ||
695 | |||
696 | dma-api/min_free_entries | ||
697 | This read-only file can be read to get the | ||
698 | minimum number of free dma_debug_entries the | ||
699 | allocator has ever seen. If this value goes | ||
700 | down to zero the code will disable itself | ||
701 | because it is no longer reliable. | ||
702 | |||
703 | dma-api/num_free_entries | ||
704 | The current number of free dma_debug_entries | ||
705 | in the allocator. | ||
706 | |||
707 | If you have this code compiled into your kernel it will be enabled by default. | ||
708 | If you want to boot without the bookkeeping anyway you can provide | ||
709 | 'dma_debug=off' as a boot parameter. This will disable DMA-API debugging. | ||
710 | Notice that you can not enable it again at runtime. You have to reboot to do | ||
711 | so. | ||
712 | |||
713 | When the code disables itself at runtime this is most likely because it ran | ||
714 | out of dma_debug_entries. These entries are preallocated at boot. The number | ||
715 | of preallocated entries is defined per architecture. If it is too low for you, | ||
716 | boot with 'dma_debug_entries=<your_desired_number>' to overwrite the | ||
717 | architectural default. | ||
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index be3bde51b564..aeedb89a307a 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt | |||
@@ -492,6 +492,16 @@ and is between 256 and 4096 characters. It is defined in the file | |||
492 | Range: 0 - 8192 | 492 | Range: 0 - 8192 |
493 | Default: 64 | 493 | Default: 64 |
494 | 494 | ||
495 | dma_debug=off If the kernel is compiled with DMA_API_DEBUG support | ||
496 | this option disables the debugging code at boot. | ||
497 | |||
498 | dma_debug_entries=<number> | ||
499 | This option allows tuning the number of preallocated | ||
500 | entries for DMA-API debugging code. One entry is | ||
501 | required per DMA-API allocation. Use this if the | ||
502 | DMA-API debugging code disables itself because the | ||
503 | architectural default is too low. | ||
504 | |||
495 | hpet= [X86-32,HPET] option to control HPET usage | 505 | hpet= [X86-32,HPET] option to control HPET usage |
496 | Format: { enable (default) | disable | force | | 506 | Format: { enable (default) | disable | force | |
497 | verbose } | 507 | verbose } |
diff --git a/arch/Kconfig b/arch/Kconfig index 550dab22daa1..830c16a2b801 100644 --- a/arch/Kconfig +++ b/arch/Kconfig | |||
@@ -106,3 +106,5 @@ config HAVE_CLK | |||
106 | The <linux/clk.h> calls support software clock gating and | 106 | The <linux/clk.h> calls support software clock gating and |
107 | thus are a key power management tool on many systems. | 107 | thus are a key power management tool on many systems. |
108 | 108 | ||
109 | config HAVE_DMA_API_DEBUG | ||
110 | bool | ||
diff --git a/arch/ia64/dig/Makefile b/arch/ia64/dig/Makefile index 5c0283830bd6..2f7caddf093e 100644 --- a/arch/ia64/dig/Makefile +++ b/arch/ia64/dig/Makefile | |||
@@ -7,8 +7,8 @@ | |||
7 | 7 | ||
8 | obj-y := setup.o | 8 | obj-y := setup.o |
9 | ifeq ($(CONFIG_DMAR), y) | 9 | ifeq ($(CONFIG_DMAR), y) |
10 | obj-$(CONFIG_IA64_GENERIC) += machvec.o machvec_vtd.o dig_vtd_iommu.o | 10 | obj-$(CONFIG_IA64_GENERIC) += machvec.o machvec_vtd.o |
11 | else | 11 | else |
12 | obj-$(CONFIG_IA64_GENERIC) += machvec.o | 12 | obj-$(CONFIG_IA64_GENERIC) += machvec.o |
13 | endif | 13 | endif |
14 | obj-$(CONFIG_IA64_DIG_VTD) += dig_vtd_iommu.o | 14 | |
diff --git a/arch/ia64/dig/dig_vtd_iommu.c b/arch/ia64/dig/dig_vtd_iommu.c deleted file mode 100644 index 1c8a079017a3..000000000000 --- a/arch/ia64/dig/dig_vtd_iommu.c +++ /dev/null | |||
@@ -1,59 +0,0 @@ | |||
1 | #include <linux/types.h> | ||
2 | #include <linux/kernel.h> | ||
3 | #include <linux/module.h> | ||
4 | #include <linux/intel-iommu.h> | ||
5 | |||
6 | void * | ||
7 | vtd_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, | ||
8 | gfp_t flags) | ||
9 | { | ||
10 | return intel_alloc_coherent(dev, size, dma_handle, flags); | ||
11 | } | ||
12 | EXPORT_SYMBOL_GPL(vtd_alloc_coherent); | ||
13 | |||
14 | void | ||
15 | vtd_free_coherent(struct device *dev, size_t size, void *vaddr, | ||
16 | dma_addr_t dma_handle) | ||
17 | { | ||
18 | intel_free_coherent(dev, size, vaddr, dma_handle); | ||
19 | } | ||
20 | EXPORT_SYMBOL_GPL(vtd_free_coherent); | ||
21 | |||
22 | dma_addr_t | ||
23 | vtd_map_single_attrs(struct device *dev, void *addr, size_t size, | ||
24 | int dir, struct dma_attrs *attrs) | ||
25 | { | ||
26 | return intel_map_single(dev, (phys_addr_t)addr, size, dir); | ||
27 | } | ||
28 | EXPORT_SYMBOL_GPL(vtd_map_single_attrs); | ||
29 | |||
30 | void | ||
31 | vtd_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size, | ||
32 | int dir, struct dma_attrs *attrs) | ||
33 | { | ||
34 | intel_unmap_single(dev, iova, size, dir); | ||
35 | } | ||
36 | EXPORT_SYMBOL_GPL(vtd_unmap_single_attrs); | ||
37 | |||
38 | int | ||
39 | vtd_map_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents, | ||
40 | int dir, struct dma_attrs *attrs) | ||
41 | { | ||
42 | return intel_map_sg(dev, sglist, nents, dir); | ||
43 | } | ||
44 | EXPORT_SYMBOL_GPL(vtd_map_sg_attrs); | ||
45 | |||
46 | void | ||
47 | vtd_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist, | ||
48 | int nents, int dir, struct dma_attrs *attrs) | ||
49 | { | ||
50 | intel_unmap_sg(dev, sglist, nents, dir); | ||
51 | } | ||
52 | EXPORT_SYMBOL_GPL(vtd_unmap_sg_attrs); | ||
53 | |||
54 | int | ||
55 | vtd_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) | ||
56 | { | ||
57 | return 0; | ||
58 | } | ||
59 | EXPORT_SYMBOL_GPL(vtd_dma_mapping_error); | ||
diff --git a/arch/ia64/hp/common/hwsw_iommu.c b/arch/ia64/hp/common/hwsw_iommu.c index 2769dbfd03bf..e4a80d82e3d8 100644 --- a/arch/ia64/hp/common/hwsw_iommu.c +++ b/arch/ia64/hp/common/hwsw_iommu.c | |||
@@ -13,49 +13,34 @@ | |||
13 | */ | 13 | */ |
14 | 14 | ||
15 | #include <linux/device.h> | 15 | #include <linux/device.h> |
16 | #include <linux/dma-mapping.h> | ||
16 | #include <linux/swiotlb.h> | 17 | #include <linux/swiotlb.h> |
17 | |||
18 | #include <asm/machvec.h> | 18 | #include <asm/machvec.h> |
19 | 19 | ||
20 | extern struct dma_map_ops sba_dma_ops, swiotlb_dma_ops; | ||
21 | |||
20 | /* swiotlb declarations & definitions: */ | 22 | /* swiotlb declarations & definitions: */ |
21 | extern int swiotlb_late_init_with_default_size (size_t size); | 23 | extern int swiotlb_late_init_with_default_size (size_t size); |
22 | 24 | ||
23 | /* hwiommu declarations & definitions: */ | ||
24 | |||
25 | extern ia64_mv_dma_alloc_coherent sba_alloc_coherent; | ||
26 | extern ia64_mv_dma_free_coherent sba_free_coherent; | ||
27 | extern ia64_mv_dma_map_single_attrs sba_map_single_attrs; | ||
28 | extern ia64_mv_dma_unmap_single_attrs sba_unmap_single_attrs; | ||
29 | extern ia64_mv_dma_map_sg_attrs sba_map_sg_attrs; | ||
30 | extern ia64_mv_dma_unmap_sg_attrs sba_unmap_sg_attrs; | ||
31 | extern ia64_mv_dma_supported sba_dma_supported; | ||
32 | extern ia64_mv_dma_mapping_error sba_dma_mapping_error; | ||
33 | |||
34 | #define hwiommu_alloc_coherent sba_alloc_coherent | ||
35 | #define hwiommu_free_coherent sba_free_coherent | ||
36 | #define hwiommu_map_single_attrs sba_map_single_attrs | ||
37 | #define hwiommu_unmap_single_attrs sba_unmap_single_attrs | ||
38 | #define hwiommu_map_sg_attrs sba_map_sg_attrs | ||
39 | #define hwiommu_unmap_sg_attrs sba_unmap_sg_attrs | ||
40 | #define hwiommu_dma_supported sba_dma_supported | ||
41 | #define hwiommu_dma_mapping_error sba_dma_mapping_error | ||
42 | #define hwiommu_sync_single_for_cpu machvec_dma_sync_single | ||
43 | #define hwiommu_sync_sg_for_cpu machvec_dma_sync_sg | ||
44 | #define hwiommu_sync_single_for_device machvec_dma_sync_single | ||
45 | #define hwiommu_sync_sg_for_device machvec_dma_sync_sg | ||
46 | |||
47 | |||
48 | /* | 25 | /* |
49 | * Note: we need to make the determination of whether or not to use | 26 | * Note: we need to make the determination of whether or not to use |
50 | * the sw I/O TLB based purely on the device structure. Anything else | 27 | * the sw I/O TLB based purely on the device structure. Anything else |
51 | * would be unreliable or would be too intrusive. | 28 | * would be unreliable or would be too intrusive. |
52 | */ | 29 | */ |
53 | static inline int | 30 | static inline int use_swiotlb(struct device *dev) |
54 | use_swiotlb (struct device *dev) | ||
55 | { | 31 | { |
56 | return dev && dev->dma_mask && !hwiommu_dma_supported(dev, *dev->dma_mask); | 32 | return dev && dev->dma_mask && |
33 | !sba_dma_ops.dma_supported(dev, *dev->dma_mask); | ||
57 | } | 34 | } |
58 | 35 | ||
36 | struct dma_map_ops *hwsw_dma_get_ops(struct device *dev) | ||
37 | { | ||
38 | if (use_swiotlb(dev)) | ||
39 | return &swiotlb_dma_ops; | ||
40 | return &sba_dma_ops; | ||
41 | } | ||
42 | EXPORT_SYMBOL(hwsw_dma_get_ops); | ||
43 | |||
59 | void __init | 44 | void __init |
60 | hwsw_init (void) | 45 | hwsw_init (void) |
61 | { | 46 | { |
@@ -71,125 +56,3 @@ hwsw_init (void) | |||
71 | #endif | 56 | #endif |
72 | } | 57 | } |
73 | } | 58 | } |
74 | |||
75 | void * | ||
76 | hwsw_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flags) | ||
77 | { | ||
78 | if (use_swiotlb(dev)) | ||
79 | return swiotlb_alloc_coherent(dev, size, dma_handle, flags); | ||
80 | else | ||
81 | return hwiommu_alloc_coherent(dev, size, dma_handle, flags); | ||
82 | } | ||
83 | |||
84 | void | ||
85 | hwsw_free_coherent (struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle) | ||
86 | { | ||
87 | if (use_swiotlb(dev)) | ||
88 | swiotlb_free_coherent(dev, size, vaddr, dma_handle); | ||
89 | else | ||
90 | hwiommu_free_coherent(dev, size, vaddr, dma_handle); | ||
91 | } | ||
92 | |||
93 | dma_addr_t | ||
94 | hwsw_map_single_attrs(struct device *dev, void *addr, size_t size, int dir, | ||
95 | struct dma_attrs *attrs) | ||
96 | { | ||
97 | if (use_swiotlb(dev)) | ||
98 | return swiotlb_map_single_attrs(dev, addr, size, dir, attrs); | ||
99 | else | ||
100 | return hwiommu_map_single_attrs(dev, addr, size, dir, attrs); | ||
101 | } | ||
102 | EXPORT_SYMBOL(hwsw_map_single_attrs); | ||
103 | |||
104 | void | ||
105 | hwsw_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size, | ||
106 | int dir, struct dma_attrs *attrs) | ||
107 | { | ||
108 | if (use_swiotlb(dev)) | ||
109 | return swiotlb_unmap_single_attrs(dev, iova, size, dir, attrs); | ||
110 | else | ||
111 | return hwiommu_unmap_single_attrs(dev, iova, size, dir, attrs); | ||
112 | } | ||
113 | EXPORT_SYMBOL(hwsw_unmap_single_attrs); | ||
114 | |||
115 | int | ||
116 | hwsw_map_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents, | ||
117 | int dir, struct dma_attrs *attrs) | ||
118 | { | ||
119 | if (use_swiotlb(dev)) | ||
120 | return swiotlb_map_sg_attrs(dev, sglist, nents, dir, attrs); | ||
121 | else | ||
122 | return hwiommu_map_sg_attrs(dev, sglist, nents, dir, attrs); | ||
123 | } | ||
124 | EXPORT_SYMBOL(hwsw_map_sg_attrs); | ||
125 | |||
126 | void | ||
127 | hwsw_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents, | ||
128 | int dir, struct dma_attrs *attrs) | ||
129 | { | ||
130 | if (use_swiotlb(dev)) | ||
131 | return swiotlb_unmap_sg_attrs(dev, sglist, nents, dir, attrs); | ||
132 | else | ||
133 | return hwiommu_unmap_sg_attrs(dev, sglist, nents, dir, attrs); | ||
134 | } | ||
135 | EXPORT_SYMBOL(hwsw_unmap_sg_attrs); | ||
136 | |||
137 | void | ||
138 | hwsw_sync_single_for_cpu (struct device *dev, dma_addr_t addr, size_t size, int dir) | ||
139 | { | ||
140 | if (use_swiotlb(dev)) | ||
141 | swiotlb_sync_single_for_cpu(dev, addr, size, dir); | ||
142 | else | ||
143 | hwiommu_sync_single_for_cpu(dev, addr, size, dir); | ||
144 | } | ||
145 | |||
146 | void | ||
147 | hwsw_sync_sg_for_cpu (struct device *dev, struct scatterlist *sg, int nelems, int dir) | ||
148 | { | ||
149 | if (use_swiotlb(dev)) | ||
150 | swiotlb_sync_sg_for_cpu(dev, sg, nelems, dir); | ||
151 | else | ||
152 | hwiommu_sync_sg_for_cpu(dev, sg, nelems, dir); | ||
153 | } | ||
154 | |||
155 | void | ||
156 | hwsw_sync_single_for_device (struct device *dev, dma_addr_t addr, size_t size, int dir) | ||
157 | { | ||
158 | if (use_swiotlb(dev)) | ||
159 | swiotlb_sync_single_for_device(dev, addr, size, dir); | ||
160 | else | ||
161 | hwiommu_sync_single_for_device(dev, addr, size, dir); | ||
162 | } | ||
163 | |||
164 | void | ||
165 | hwsw_sync_sg_for_device (struct device *dev, struct scatterlist *sg, int nelems, int dir) | ||
166 | { | ||
167 | if (use_swiotlb(dev)) | ||
168 | swiotlb_sync_sg_for_device(dev, sg, nelems, dir); | ||
169 | else | ||
170 | hwiommu_sync_sg_for_device(dev, sg, nelems, dir); | ||
171 | } | ||
172 | |||
173 | int | ||
174 | hwsw_dma_supported (struct device *dev, u64 mask) | ||
175 | { | ||
176 | if (hwiommu_dma_supported(dev, mask)) | ||
177 | return 1; | ||
178 | return swiotlb_dma_supported(dev, mask); | ||
179 | } | ||
180 | |||
181 | int | ||
182 | hwsw_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) | ||
183 | { | ||
184 | return hwiommu_dma_mapping_error(dev, dma_addr) || | ||
185 | swiotlb_dma_mapping_error(dev, dma_addr); | ||
186 | } | ||
187 | |||
188 | EXPORT_SYMBOL(hwsw_dma_mapping_error); | ||
189 | EXPORT_SYMBOL(hwsw_dma_supported); | ||
190 | EXPORT_SYMBOL(hwsw_alloc_coherent); | ||
191 | EXPORT_SYMBOL(hwsw_free_coherent); | ||
192 | EXPORT_SYMBOL(hwsw_sync_single_for_cpu); | ||
193 | EXPORT_SYMBOL(hwsw_sync_single_for_device); | ||
194 | EXPORT_SYMBOL(hwsw_sync_sg_for_cpu); | ||
195 | EXPORT_SYMBOL(hwsw_sync_sg_for_device); | ||
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c index 6d5e6c5630e3..56ceb68eb99d 100644 --- a/arch/ia64/hp/common/sba_iommu.c +++ b/arch/ia64/hp/common/sba_iommu.c | |||
@@ -36,6 +36,7 @@ | |||
36 | #include <linux/bitops.h> /* hweight64() */ | 36 | #include <linux/bitops.h> /* hweight64() */ |
37 | #include <linux/crash_dump.h> | 37 | #include <linux/crash_dump.h> |
38 | #include <linux/iommu-helper.h> | 38 | #include <linux/iommu-helper.h> |
39 | #include <linux/dma-mapping.h> | ||
39 | 40 | ||
40 | #include <asm/delay.h> /* ia64_get_itc() */ | 41 | #include <asm/delay.h> /* ia64_get_itc() */ |
41 | #include <asm/io.h> | 42 | #include <asm/io.h> |
@@ -908,11 +909,13 @@ sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt) | |||
908 | * | 909 | * |
909 | * See Documentation/PCI/PCI-DMA-mapping.txt | 910 | * See Documentation/PCI/PCI-DMA-mapping.txt |
910 | */ | 911 | */ |
911 | dma_addr_t | 912 | static dma_addr_t sba_map_page(struct device *dev, struct page *page, |
912 | sba_map_single_attrs(struct device *dev, void *addr, size_t size, int dir, | 913 | unsigned long poff, size_t size, |
913 | struct dma_attrs *attrs) | 914 | enum dma_data_direction dir, |
915 | struct dma_attrs *attrs) | ||
914 | { | 916 | { |
915 | struct ioc *ioc; | 917 | struct ioc *ioc; |
918 | void *addr = page_address(page) + poff; | ||
916 | dma_addr_t iovp; | 919 | dma_addr_t iovp; |
917 | dma_addr_t offset; | 920 | dma_addr_t offset; |
918 | u64 *pdir_start; | 921 | u64 *pdir_start; |
@@ -990,7 +993,14 @@ sba_map_single_attrs(struct device *dev, void *addr, size_t size, int dir, | |||
990 | #endif | 993 | #endif |
991 | return SBA_IOVA(ioc, iovp, offset); | 994 | return SBA_IOVA(ioc, iovp, offset); |
992 | } | 995 | } |
993 | EXPORT_SYMBOL(sba_map_single_attrs); | 996 | |
997 | static dma_addr_t sba_map_single_attrs(struct device *dev, void *addr, | ||
998 | size_t size, enum dma_data_direction dir, | ||
999 | struct dma_attrs *attrs) | ||
1000 | { | ||
1001 | return sba_map_page(dev, virt_to_page(addr), | ||
1002 | (unsigned long)addr & ~PAGE_MASK, size, dir, attrs); | ||
1003 | } | ||
994 | 1004 | ||
995 | #ifdef ENABLE_MARK_CLEAN | 1005 | #ifdef ENABLE_MARK_CLEAN |
996 | static SBA_INLINE void | 1006 | static SBA_INLINE void |
@@ -1026,8 +1036,8 @@ sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size) | |||
1026 | * | 1036 | * |
1027 | * See Documentation/PCI/PCI-DMA-mapping.txt | 1037 | * See Documentation/PCI/PCI-DMA-mapping.txt |
1028 | */ | 1038 | */ |
1029 | void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size, | 1039 | static void sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size, |
1030 | int dir, struct dma_attrs *attrs) | 1040 | enum dma_data_direction dir, struct dma_attrs *attrs) |
1031 | { | 1041 | { |
1032 | struct ioc *ioc; | 1042 | struct ioc *ioc; |
1033 | #if DELAYED_RESOURCE_CNT > 0 | 1043 | #if DELAYED_RESOURCE_CNT > 0 |
@@ -1094,7 +1104,12 @@ void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size, | |||
1094 | spin_unlock_irqrestore(&ioc->res_lock, flags); | 1104 | spin_unlock_irqrestore(&ioc->res_lock, flags); |
1095 | #endif /* DELAYED_RESOURCE_CNT == 0 */ | 1105 | #endif /* DELAYED_RESOURCE_CNT == 0 */ |
1096 | } | 1106 | } |
1097 | EXPORT_SYMBOL(sba_unmap_single_attrs); | 1107 | |
1108 | void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size, | ||
1109 | enum dma_data_direction dir, struct dma_attrs *attrs) | ||
1110 | { | ||
1111 | sba_unmap_page(dev, iova, size, dir, attrs); | ||
1112 | } | ||
1098 | 1113 | ||
1099 | /** | 1114 | /** |
1100 | * sba_alloc_coherent - allocate/map shared mem for DMA | 1115 | * sba_alloc_coherent - allocate/map shared mem for DMA |
@@ -1104,7 +1119,7 @@ EXPORT_SYMBOL(sba_unmap_single_attrs); | |||
1104 | * | 1119 | * |
1105 | * See Documentation/PCI/PCI-DMA-mapping.txt | 1120 | * See Documentation/PCI/PCI-DMA-mapping.txt |
1106 | */ | 1121 | */ |
1107 | void * | 1122 | static void * |
1108 | sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flags) | 1123 | sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flags) |
1109 | { | 1124 | { |
1110 | struct ioc *ioc; | 1125 | struct ioc *ioc; |
@@ -1167,7 +1182,8 @@ sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp | |||
1167 | * | 1182 | * |
1168 | * See Documentation/PCI/PCI-DMA-mapping.txt | 1183 | * See Documentation/PCI/PCI-DMA-mapping.txt |
1169 | */ | 1184 | */ |
1170 | void sba_free_coherent (struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle) | 1185 | static void sba_free_coherent (struct device *dev, size_t size, void *vaddr, |
1186 | dma_addr_t dma_handle) | ||
1171 | { | 1187 | { |
1172 | sba_unmap_single_attrs(dev, dma_handle, size, 0, NULL); | 1188 | sba_unmap_single_attrs(dev, dma_handle, size, 0, NULL); |
1173 | free_pages((unsigned long) vaddr, get_order(size)); | 1189 | free_pages((unsigned long) vaddr, get_order(size)); |
@@ -1422,8 +1438,9 @@ sba_coalesce_chunks(struct ioc *ioc, struct device *dev, | |||
1422 | * | 1438 | * |
1423 | * See Documentation/PCI/PCI-DMA-mapping.txt | 1439 | * See Documentation/PCI/PCI-DMA-mapping.txt |
1424 | */ | 1440 | */ |
1425 | int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents, | 1441 | static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist, |
1426 | int dir, struct dma_attrs *attrs) | 1442 | int nents, enum dma_data_direction dir, |
1443 | struct dma_attrs *attrs) | ||
1427 | { | 1444 | { |
1428 | struct ioc *ioc; | 1445 | struct ioc *ioc; |
1429 | int coalesced, filled = 0; | 1446 | int coalesced, filled = 0; |
@@ -1502,7 +1519,6 @@ int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents, | |||
1502 | 1519 | ||
1503 | return filled; | 1520 | return filled; |
1504 | } | 1521 | } |
1505 | EXPORT_SYMBOL(sba_map_sg_attrs); | ||
1506 | 1522 | ||
1507 | /** | 1523 | /** |
1508 | * sba_unmap_sg_attrs - unmap Scatter/Gather list | 1524 | * sba_unmap_sg_attrs - unmap Scatter/Gather list |
@@ -1514,8 +1530,9 @@ EXPORT_SYMBOL(sba_map_sg_attrs); | |||
1514 | * | 1530 | * |
1515 | * See Documentation/PCI/PCI-DMA-mapping.txt | 1531 | * See Documentation/PCI/PCI-DMA-mapping.txt |
1516 | */ | 1532 | */ |
1517 | void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist, | 1533 | static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist, |
1518 | int nents, int dir, struct dma_attrs *attrs) | 1534 | int nents, enum dma_data_direction dir, |
1535 | struct dma_attrs *attrs) | ||
1519 | { | 1536 | { |
1520 | #ifdef ASSERT_PDIR_SANITY | 1537 | #ifdef ASSERT_PDIR_SANITY |
1521 | struct ioc *ioc; | 1538 | struct ioc *ioc; |
@@ -1551,7 +1568,6 @@ void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist, | |||
1551 | #endif | 1568 | #endif |
1552 | 1569 | ||
1553 | } | 1570 | } |
1554 | EXPORT_SYMBOL(sba_unmap_sg_attrs); | ||
1555 | 1571 | ||
1556 | /************************************************************** | 1572 | /************************************************************** |
1557 | * | 1573 | * |
@@ -2064,6 +2080,8 @@ static struct acpi_driver acpi_sba_ioc_driver = { | |||
2064 | }, | 2080 | }, |
2065 | }; | 2081 | }; |
2066 | 2082 | ||
2083 | extern struct dma_map_ops swiotlb_dma_ops; | ||
2084 | |||
2067 | static int __init | 2085 | static int __init |
2068 | sba_init(void) | 2086 | sba_init(void) |
2069 | { | 2087 | { |
@@ -2077,6 +2095,7 @@ sba_init(void) | |||
2077 | * a successful kdump kernel boot is to use the swiotlb. | 2095 | * a successful kdump kernel boot is to use the swiotlb. |
2078 | */ | 2096 | */ |
2079 | if (is_kdump_kernel()) { | 2097 | if (is_kdump_kernel()) { |
2098 | dma_ops = &swiotlb_dma_ops; | ||
2080 | if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0) | 2099 | if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0) |
2081 | panic("Unable to initialize software I/O TLB:" | 2100 | panic("Unable to initialize software I/O TLB:" |
2082 | " Try machvec=dig boot option"); | 2101 | " Try machvec=dig boot option"); |
@@ -2092,6 +2111,7 @@ sba_init(void) | |||
2092 | * If we didn't find something sba_iommu can claim, we | 2111 | * If we didn't find something sba_iommu can claim, we |
2093 | * need to setup the swiotlb and switch to the dig machvec. | 2112 | * need to setup the swiotlb and switch to the dig machvec. |
2094 | */ | 2113 | */ |
2114 | dma_ops = &swiotlb_dma_ops; | ||
2095 | if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0) | 2115 | if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0) |
2096 | panic("Unable to find SBA IOMMU or initialize " | 2116 | panic("Unable to find SBA IOMMU or initialize " |
2097 | "software I/O TLB: Try machvec=dig boot option"); | 2117 | "software I/O TLB: Try machvec=dig boot option"); |
@@ -2138,15 +2158,13 @@ nosbagart(char *str) | |||
2138 | return 1; | 2158 | return 1; |
2139 | } | 2159 | } |
2140 | 2160 | ||
2141 | int | 2161 | static int sba_dma_supported (struct device *dev, u64 mask) |
2142 | sba_dma_supported (struct device *dev, u64 mask) | ||
2143 | { | 2162 | { |
2144 | /* make sure it's at least 32bit capable */ | 2163 | /* make sure it's at least 32bit capable */ |
2145 | return ((mask & 0xFFFFFFFFUL) == 0xFFFFFFFFUL); | 2164 | return ((mask & 0xFFFFFFFFUL) == 0xFFFFFFFFUL); |
2146 | } | 2165 | } |
2147 | 2166 | ||
2148 | int | 2167 | static int sba_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) |
2149 | sba_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) | ||
2150 | { | 2168 | { |
2151 | return 0; | 2169 | return 0; |
2152 | } | 2170 | } |
@@ -2176,7 +2194,22 @@ sba_page_override(char *str) | |||
2176 | 2194 | ||
2177 | __setup("sbapagesize=",sba_page_override); | 2195 | __setup("sbapagesize=",sba_page_override); |
2178 | 2196 | ||
2179 | EXPORT_SYMBOL(sba_dma_mapping_error); | 2197 | struct dma_map_ops sba_dma_ops = { |
2180 | EXPORT_SYMBOL(sba_dma_supported); | 2198 | .alloc_coherent = sba_alloc_coherent, |
2181 | EXPORT_SYMBOL(sba_alloc_coherent); | 2199 | .free_coherent = sba_free_coherent, |
2182 | EXPORT_SYMBOL(sba_free_coherent); | 2200 | .map_page = sba_map_page, |
2201 | .unmap_page = sba_unmap_page, | ||
2202 | .map_sg = sba_map_sg_attrs, | ||
2203 | .unmap_sg = sba_unmap_sg_attrs, | ||
2204 | .sync_single_for_cpu = machvec_dma_sync_single, | ||
2205 | .sync_sg_for_cpu = machvec_dma_sync_sg, | ||
2206 | .sync_single_for_device = machvec_dma_sync_single, | ||
2207 | .sync_sg_for_device = machvec_dma_sync_sg, | ||
2208 | .dma_supported = sba_dma_supported, | ||
2209 | .mapping_error = sba_dma_mapping_error, | ||
2210 | }; | ||
2211 | |||
2212 | void sba_dma_init(void) | ||
2213 | { | ||
2214 | dma_ops = &sba_dma_ops; | ||
2215 | } | ||
diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h index 1f912d927585..36c0009dbece 100644 --- a/arch/ia64/include/asm/dma-mapping.h +++ b/arch/ia64/include/asm/dma-mapping.h | |||
@@ -11,99 +11,128 @@ | |||
11 | 11 | ||
12 | #define ARCH_HAS_DMA_GET_REQUIRED_MASK | 12 | #define ARCH_HAS_DMA_GET_REQUIRED_MASK |
13 | 13 | ||
14 | struct dma_mapping_ops { | 14 | extern struct dma_map_ops *dma_ops; |
15 | int (*mapping_error)(struct device *dev, | ||
16 | dma_addr_t dma_addr); | ||
17 | void* (*alloc_coherent)(struct device *dev, size_t size, | ||
18 | dma_addr_t *dma_handle, gfp_t gfp); | ||
19 | void (*free_coherent)(struct device *dev, size_t size, | ||
20 | void *vaddr, dma_addr_t dma_handle); | ||
21 | dma_addr_t (*map_single)(struct device *hwdev, unsigned long ptr, | ||
22 | size_t size, int direction); | ||
23 | void (*unmap_single)(struct device *dev, dma_addr_t addr, | ||
24 | size_t size, int direction); | ||
25 | void (*sync_single_for_cpu)(struct device *hwdev, | ||
26 | dma_addr_t dma_handle, size_t size, | ||
27 | int direction); | ||
28 | void (*sync_single_for_device)(struct device *hwdev, | ||
29 | dma_addr_t dma_handle, size_t size, | ||
30 | int direction); | ||
31 | void (*sync_single_range_for_cpu)(struct device *hwdev, | ||
32 | dma_addr_t dma_handle, unsigned long offset, | ||
33 | size_t size, int direction); | ||
34 | void (*sync_single_range_for_device)(struct device *hwdev, | ||
35 | dma_addr_t dma_handle, unsigned long offset, | ||
36 | size_t size, int direction); | ||
37 | void (*sync_sg_for_cpu)(struct device *hwdev, | ||
38 | struct scatterlist *sg, int nelems, | ||
39 | int direction); | ||
40 | void (*sync_sg_for_device)(struct device *hwdev, | ||
41 | struct scatterlist *sg, int nelems, | ||
42 | int direction); | ||
43 | int (*map_sg)(struct device *hwdev, struct scatterlist *sg, | ||
44 | int nents, int direction); | ||
45 | void (*unmap_sg)(struct device *hwdev, | ||
46 | struct scatterlist *sg, int nents, | ||
47 | int direction); | ||
48 | int (*dma_supported_op)(struct device *hwdev, u64 mask); | ||
49 | int is_phys; | ||
50 | }; | ||
51 | |||
52 | extern struct dma_mapping_ops *dma_ops; | ||
53 | extern struct ia64_machine_vector ia64_mv; | 15 | extern struct ia64_machine_vector ia64_mv; |
54 | extern void set_iommu_machvec(void); | 16 | extern void set_iommu_machvec(void); |
55 | 17 | ||
56 | #define dma_alloc_coherent(dev, size, handle, gfp) \ | 18 | extern void machvec_dma_sync_single(struct device *, dma_addr_t, size_t, |
57 | platform_dma_alloc_coherent(dev, size, handle, (gfp) | GFP_DMA) | 19 | enum dma_data_direction); |
20 | extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int, | ||
21 | enum dma_data_direction); | ||
58 | 22 | ||
59 | /* coherent mem. is cheap */ | 23 | static inline void *dma_alloc_coherent(struct device *dev, size_t size, |
60 | static inline void * | 24 | dma_addr_t *daddr, gfp_t gfp) |
61 | dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t *dma_handle, | ||
62 | gfp_t flag) | ||
63 | { | 25 | { |
64 | return dma_alloc_coherent(dev, size, dma_handle, flag); | 26 | struct dma_map_ops *ops = platform_dma_get_ops(dev); |
27 | return ops->alloc_coherent(dev, size, daddr, gfp); | ||
65 | } | 28 | } |
66 | #define dma_free_coherent platform_dma_free_coherent | 29 | |
67 | static inline void | 30 | static inline void dma_free_coherent(struct device *dev, size_t size, |
68 | dma_free_noncoherent(struct device *dev, size_t size, void *cpu_addr, | 31 | void *caddr, dma_addr_t daddr) |
69 | dma_addr_t dma_handle) | 32 | { |
33 | struct dma_map_ops *ops = platform_dma_get_ops(dev); | ||
34 | ops->free_coherent(dev, size, caddr, daddr); | ||
35 | } | ||
36 | |||
37 | #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) | ||
38 | #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) | ||
39 | |||
40 | static inline dma_addr_t dma_map_single_attrs(struct device *dev, | ||
41 | void *caddr, size_t size, | ||
42 | enum dma_data_direction dir, | ||
43 | struct dma_attrs *attrs) | ||
44 | { | ||
45 | struct dma_map_ops *ops = platform_dma_get_ops(dev); | ||
46 | return ops->map_page(dev, virt_to_page(caddr), | ||
47 | (unsigned long)caddr & ~PAGE_MASK, size, | ||
48 | dir, attrs); | ||
49 | } | ||
50 | |||
51 | static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t daddr, | ||
52 | size_t size, | ||
53 | enum dma_data_direction dir, | ||
54 | struct dma_attrs *attrs) | ||
55 | { | ||
56 | struct dma_map_ops *ops = platform_dma_get_ops(dev); | ||
57 | ops->unmap_page(dev, daddr, size, dir, attrs); | ||
58 | } | ||
59 | |||
60 | #define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL) | ||
61 | #define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL) | ||
62 | |||
63 | static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl, | ||
64 | int nents, enum dma_data_direction dir, | ||
65 | struct dma_attrs *attrs) | ||
66 | { | ||
67 | struct dma_map_ops *ops = platform_dma_get_ops(dev); | ||
68 | return ops->map_sg(dev, sgl, nents, dir, attrs); | ||
69 | } | ||
70 | |||
71 | static inline void dma_unmap_sg_attrs(struct device *dev, | ||
72 | struct scatterlist *sgl, int nents, | ||
73 | enum dma_data_direction dir, | ||
74 | struct dma_attrs *attrs) | ||
75 | { | ||
76 | struct dma_map_ops *ops = platform_dma_get_ops(dev); | ||
77 | ops->unmap_sg(dev, sgl, nents, dir, attrs); | ||
78 | } | ||
79 | |||
80 | #define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL) | ||
81 | #define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL) | ||
82 | |||
83 | static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t daddr, | ||
84 | size_t size, | ||
85 | enum dma_data_direction dir) | ||
70 | { | 86 | { |
71 | dma_free_coherent(dev, size, cpu_addr, dma_handle); | 87 | struct dma_map_ops *ops = platform_dma_get_ops(dev); |
88 | ops->sync_single_for_cpu(dev, daddr, size, dir); | ||
72 | } | 89 | } |
73 | #define dma_map_single_attrs platform_dma_map_single_attrs | 90 | |
74 | static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, | 91 | static inline void dma_sync_sg_for_cpu(struct device *dev, |
75 | size_t size, int dir) | 92 | struct scatterlist *sgl, |
93 | int nents, enum dma_data_direction dir) | ||
76 | { | 94 | { |
77 | return dma_map_single_attrs(dev, cpu_addr, size, dir, NULL); | 95 | struct dma_map_ops *ops = platform_dma_get_ops(dev); |
96 | ops->sync_sg_for_cpu(dev, sgl, nents, dir); | ||
78 | } | 97 | } |
79 | #define dma_map_sg_attrs platform_dma_map_sg_attrs | 98 | |
80 | static inline int dma_map_sg(struct device *dev, struct scatterlist *sgl, | 99 | static inline void dma_sync_single_for_device(struct device *dev, |
81 | int nents, int dir) | 100 | dma_addr_t daddr, |
101 | size_t size, | ||
102 | enum dma_data_direction dir) | ||
82 | { | 103 | { |
83 | return dma_map_sg_attrs(dev, sgl, nents, dir, NULL); | 104 | struct dma_map_ops *ops = platform_dma_get_ops(dev); |
105 | ops->sync_single_for_device(dev, daddr, size, dir); | ||
84 | } | 106 | } |
85 | #define dma_unmap_single_attrs platform_dma_unmap_single_attrs | 107 | |
86 | static inline void dma_unmap_single(struct device *dev, dma_addr_t cpu_addr, | 108 | static inline void dma_sync_sg_for_device(struct device *dev, |
87 | size_t size, int dir) | 109 | struct scatterlist *sgl, |
110 | int nents, | ||
111 | enum dma_data_direction dir) | ||
88 | { | 112 | { |
89 | return dma_unmap_single_attrs(dev, cpu_addr, size, dir, NULL); | 113 | struct dma_map_ops *ops = platform_dma_get_ops(dev); |
114 | ops->sync_sg_for_device(dev, sgl, nents, dir); | ||
90 | } | 115 | } |
91 | #define dma_unmap_sg_attrs platform_dma_unmap_sg_attrs | 116 | |
92 | static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sgl, | 117 | static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr) |
93 | int nents, int dir) | 118 | { |
119 | struct dma_map_ops *ops = platform_dma_get_ops(dev); | ||
120 | return ops->mapping_error(dev, daddr); | ||
121 | } | ||
122 | |||
123 | static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, | ||
124 | size_t offset, size_t size, | ||
125 | enum dma_data_direction dir) | ||
94 | { | 126 | { |
95 | return dma_unmap_sg_attrs(dev, sgl, nents, dir, NULL); | 127 | struct dma_map_ops *ops = platform_dma_get_ops(dev); |
128 | return ops->map_page(dev, page, offset, size, dir, NULL); | ||
96 | } | 129 | } |
97 | #define dma_sync_single_for_cpu platform_dma_sync_single_for_cpu | ||
98 | #define dma_sync_sg_for_cpu platform_dma_sync_sg_for_cpu | ||
99 | #define dma_sync_single_for_device platform_dma_sync_single_for_device | ||
100 | #define dma_sync_sg_for_device platform_dma_sync_sg_for_device | ||
101 | #define dma_mapping_error platform_dma_mapping_error | ||
102 | 130 | ||
103 | #define dma_map_page(dev, pg, off, size, dir) \ | 131 | static inline void dma_unmap_page(struct device *dev, dma_addr_t addr, |
104 | dma_map_single(dev, page_address(pg) + (off), (size), (dir)) | 132 | size_t size, enum dma_data_direction dir) |
105 | #define dma_unmap_page(dev, dma_addr, size, dir) \ | 133 | { |
106 | dma_unmap_single(dev, dma_addr, size, dir) | 134 | dma_unmap_single(dev, addr, size, dir); |
135 | } | ||
107 | 136 | ||
108 | /* | 137 | /* |
109 | * Rest of this file is part of the "Advanced DMA API". Use at your own risk. | 138 | * Rest of this file is part of the "Advanced DMA API". Use at your own risk. |
@@ -115,7 +144,11 @@ static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sgl, | |||
115 | #define dma_sync_single_range_for_device(dev, dma_handle, offset, size, dir) \ | 144 | #define dma_sync_single_range_for_device(dev, dma_handle, offset, size, dir) \ |
116 | dma_sync_single_for_device(dev, dma_handle, size, dir) | 145 | dma_sync_single_for_device(dev, dma_handle, size, dir) |
117 | 146 | ||
118 | #define dma_supported platform_dma_supported | 147 | static inline int dma_supported(struct device *dev, u64 mask) |
148 | { | ||
149 | struct dma_map_ops *ops = platform_dma_get_ops(dev); | ||
150 | return ops->dma_supported(dev, mask); | ||
151 | } | ||
119 | 152 | ||
120 | static inline int | 153 | static inline int |
121 | dma_set_mask (struct device *dev, u64 mask) | 154 | dma_set_mask (struct device *dev, u64 mask) |
@@ -141,11 +174,4 @@ dma_cache_sync (struct device *dev, void *vaddr, size_t size, | |||
141 | 174 | ||
142 | #define dma_is_consistent(d, h) (1) /* all we do is coherent memory... */ | 175 | #define dma_is_consistent(d, h) (1) /* all we do is coherent memory... */ |
143 | 176 | ||
144 | static inline struct dma_mapping_ops *get_dma_ops(struct device *dev) | ||
145 | { | ||
146 | return dma_ops; | ||
147 | } | ||
148 | |||
149 | |||
150 | |||
151 | #endif /* _ASM_IA64_DMA_MAPPING_H */ | 177 | #endif /* _ASM_IA64_DMA_MAPPING_H */ |
diff --git a/arch/ia64/include/asm/machvec.h b/arch/ia64/include/asm/machvec.h index fe87b2121707..367d299d9938 100644 --- a/arch/ia64/include/asm/machvec.h +++ b/arch/ia64/include/asm/machvec.h | |||
@@ -11,7 +11,6 @@ | |||
11 | #define _ASM_IA64_MACHVEC_H | 11 | #define _ASM_IA64_MACHVEC_H |
12 | 12 | ||
13 | #include <linux/types.h> | 13 | #include <linux/types.h> |
14 | #include <linux/swiotlb.h> | ||
15 | 14 | ||
16 | /* forward declarations: */ | 15 | /* forward declarations: */ |
17 | struct device; | 16 | struct device; |
@@ -45,24 +44,8 @@ typedef void ia64_mv_kernel_launch_event_t(void); | |||
45 | 44 | ||
46 | /* DMA-mapping interface: */ | 45 | /* DMA-mapping interface: */ |
47 | typedef void ia64_mv_dma_init (void); | 46 | typedef void ia64_mv_dma_init (void); |
48 | typedef void *ia64_mv_dma_alloc_coherent (struct device *, size_t, dma_addr_t *, gfp_t); | ||
49 | typedef void ia64_mv_dma_free_coherent (struct device *, size_t, void *, dma_addr_t); | ||
50 | typedef dma_addr_t ia64_mv_dma_map_single (struct device *, void *, size_t, int); | ||
51 | typedef void ia64_mv_dma_unmap_single (struct device *, dma_addr_t, size_t, int); | ||
52 | typedef int ia64_mv_dma_map_sg (struct device *, struct scatterlist *, int, int); | ||
53 | typedef void ia64_mv_dma_unmap_sg (struct device *, struct scatterlist *, int, int); | ||
54 | typedef void ia64_mv_dma_sync_single_for_cpu (struct device *, dma_addr_t, size_t, int); | ||
55 | typedef void ia64_mv_dma_sync_sg_for_cpu (struct device *, struct scatterlist *, int, int); | ||
56 | typedef void ia64_mv_dma_sync_single_for_device (struct device *, dma_addr_t, size_t, int); | ||
57 | typedef void ia64_mv_dma_sync_sg_for_device (struct device *, struct scatterlist *, int, int); | ||
58 | typedef int ia64_mv_dma_mapping_error(struct device *, dma_addr_t dma_addr); | ||
59 | typedef int ia64_mv_dma_supported (struct device *, u64); | ||
60 | |||
61 | typedef dma_addr_t ia64_mv_dma_map_single_attrs (struct device *, void *, size_t, int, struct dma_attrs *); | ||
62 | typedef void ia64_mv_dma_unmap_single_attrs (struct device *, dma_addr_t, size_t, int, struct dma_attrs *); | ||
63 | typedef int ia64_mv_dma_map_sg_attrs (struct device *, struct scatterlist *, int, int, struct dma_attrs *); | ||
64 | typedef void ia64_mv_dma_unmap_sg_attrs (struct device *, struct scatterlist *, int, int, struct dma_attrs *); | ||
65 | typedef u64 ia64_mv_dma_get_required_mask (struct device *); | 47 | typedef u64 ia64_mv_dma_get_required_mask (struct device *); |
48 | typedef struct dma_map_ops *ia64_mv_dma_get_ops(struct device *); | ||
66 | 49 | ||
67 | /* | 50 | /* |
68 | * WARNING: The legacy I/O space is _architected_. Platforms are | 51 | * WARNING: The legacy I/O space is _architected_. Platforms are |
@@ -114,8 +97,6 @@ machvec_noop_bus (struct pci_bus *bus) | |||
114 | 97 | ||
115 | extern void machvec_setup (char **); | 98 | extern void machvec_setup (char **); |
116 | extern void machvec_timer_interrupt (int, void *); | 99 | extern void machvec_timer_interrupt (int, void *); |
117 | extern void machvec_dma_sync_single (struct device *, dma_addr_t, size_t, int); | ||
118 | extern void machvec_dma_sync_sg (struct device *, struct scatterlist *, int, int); | ||
119 | extern void machvec_tlb_migrate_finish (struct mm_struct *); | 100 | extern void machvec_tlb_migrate_finish (struct mm_struct *); |
120 | 101 | ||
121 | # if defined (CONFIG_IA64_HP_SIM) | 102 | # if defined (CONFIG_IA64_HP_SIM) |
@@ -148,19 +129,8 @@ extern void machvec_tlb_migrate_finish (struct mm_struct *); | |||
148 | # define platform_global_tlb_purge ia64_mv.global_tlb_purge | 129 | # define platform_global_tlb_purge ia64_mv.global_tlb_purge |
149 | # define platform_tlb_migrate_finish ia64_mv.tlb_migrate_finish | 130 | # define platform_tlb_migrate_finish ia64_mv.tlb_migrate_finish |
150 | # define platform_dma_init ia64_mv.dma_init | 131 | # define platform_dma_init ia64_mv.dma_init |
151 | # define platform_dma_alloc_coherent ia64_mv.dma_alloc_coherent | ||
152 | # define platform_dma_free_coherent ia64_mv.dma_free_coherent | ||
153 | # define platform_dma_map_single_attrs ia64_mv.dma_map_single_attrs | ||
154 | # define platform_dma_unmap_single_attrs ia64_mv.dma_unmap_single_attrs | ||
155 | # define platform_dma_map_sg_attrs ia64_mv.dma_map_sg_attrs | ||
156 | # define platform_dma_unmap_sg_attrs ia64_mv.dma_unmap_sg_attrs | ||
157 | # define platform_dma_sync_single_for_cpu ia64_mv.dma_sync_single_for_cpu | ||
158 | # define platform_dma_sync_sg_for_cpu ia64_mv.dma_sync_sg_for_cpu | ||
159 | # define platform_dma_sync_single_for_device ia64_mv.dma_sync_single_for_device | ||
160 | # define platform_dma_sync_sg_for_device ia64_mv.dma_sync_sg_for_device | ||
161 | # define platform_dma_mapping_error ia64_mv.dma_mapping_error | ||
162 | # define platform_dma_supported ia64_mv.dma_supported | ||
163 | # define platform_dma_get_required_mask ia64_mv.dma_get_required_mask | 132 | # define platform_dma_get_required_mask ia64_mv.dma_get_required_mask |
133 | # define platform_dma_get_ops ia64_mv.dma_get_ops | ||
164 | # define platform_irq_to_vector ia64_mv.irq_to_vector | 134 | # define platform_irq_to_vector ia64_mv.irq_to_vector |
165 | # define platform_local_vector_to_irq ia64_mv.local_vector_to_irq | 135 | # define platform_local_vector_to_irq ia64_mv.local_vector_to_irq |
166 | # define platform_pci_get_legacy_mem ia64_mv.pci_get_legacy_mem | 136 | # define platform_pci_get_legacy_mem ia64_mv.pci_get_legacy_mem |
@@ -203,19 +173,8 @@ struct ia64_machine_vector { | |||
203 | ia64_mv_global_tlb_purge_t *global_tlb_purge; | 173 | ia64_mv_global_tlb_purge_t *global_tlb_purge; |
204 | ia64_mv_tlb_migrate_finish_t *tlb_migrate_finish; | 174 | ia64_mv_tlb_migrate_finish_t *tlb_migrate_finish; |
205 | ia64_mv_dma_init *dma_init; | 175 | ia64_mv_dma_init *dma_init; |
206 | ia64_mv_dma_alloc_coherent *dma_alloc_coherent; | ||
207 | ia64_mv_dma_free_coherent *dma_free_coherent; | ||
208 | ia64_mv_dma_map_single_attrs *dma_map_single_attrs; | ||
209 | ia64_mv_dma_unmap_single_attrs *dma_unmap_single_attrs; | ||
210 | ia64_mv_dma_map_sg_attrs *dma_map_sg_attrs; | ||
211 | ia64_mv_dma_unmap_sg_attrs *dma_unmap_sg_attrs; | ||
212 | ia64_mv_dma_sync_single_for_cpu *dma_sync_single_for_cpu; | ||
213 | ia64_mv_dma_sync_sg_for_cpu *dma_sync_sg_for_cpu; | ||
214 | ia64_mv_dma_sync_single_for_device *dma_sync_single_for_device; | ||
215 | ia64_mv_dma_sync_sg_for_device *dma_sync_sg_for_device; | ||
216 | ia64_mv_dma_mapping_error *dma_mapping_error; | ||
217 | ia64_mv_dma_supported *dma_supported; | ||
218 | ia64_mv_dma_get_required_mask *dma_get_required_mask; | 176 | ia64_mv_dma_get_required_mask *dma_get_required_mask; |
177 | ia64_mv_dma_get_ops *dma_get_ops; | ||
219 | ia64_mv_irq_to_vector *irq_to_vector; | 178 | ia64_mv_irq_to_vector *irq_to_vector; |
220 | ia64_mv_local_vector_to_irq *local_vector_to_irq; | 179 | ia64_mv_local_vector_to_irq *local_vector_to_irq; |
221 | ia64_mv_pci_get_legacy_mem_t *pci_get_legacy_mem; | 180 | ia64_mv_pci_get_legacy_mem_t *pci_get_legacy_mem; |
@@ -254,19 +213,8 @@ struct ia64_machine_vector { | |||
254 | platform_global_tlb_purge, \ | 213 | platform_global_tlb_purge, \ |
255 | platform_tlb_migrate_finish, \ | 214 | platform_tlb_migrate_finish, \ |
256 | platform_dma_init, \ | 215 | platform_dma_init, \ |
257 | platform_dma_alloc_coherent, \ | ||
258 | platform_dma_free_coherent, \ | ||
259 | platform_dma_map_single_attrs, \ | ||
260 | platform_dma_unmap_single_attrs, \ | ||
261 | platform_dma_map_sg_attrs, \ | ||
262 | platform_dma_unmap_sg_attrs, \ | ||
263 | platform_dma_sync_single_for_cpu, \ | ||
264 | platform_dma_sync_sg_for_cpu, \ | ||
265 | platform_dma_sync_single_for_device, \ | ||
266 | platform_dma_sync_sg_for_device, \ | ||
267 | platform_dma_mapping_error, \ | ||
268 | platform_dma_supported, \ | ||
269 | platform_dma_get_required_mask, \ | 216 | platform_dma_get_required_mask, \ |
217 | platform_dma_get_ops, \ | ||
270 | platform_irq_to_vector, \ | 218 | platform_irq_to_vector, \ |
271 | platform_local_vector_to_irq, \ | 219 | platform_local_vector_to_irq, \ |
272 | platform_pci_get_legacy_mem, \ | 220 | platform_pci_get_legacy_mem, \ |
@@ -302,6 +250,9 @@ extern void machvec_init_from_cmdline(const char *cmdline); | |||
302 | # error Unknown configuration. Update arch/ia64/include/asm/machvec.h. | 250 | # error Unknown configuration. Update arch/ia64/include/asm/machvec.h. |
303 | # endif /* CONFIG_IA64_GENERIC */ | 251 | # endif /* CONFIG_IA64_GENERIC */ |
304 | 252 | ||
253 | extern void swiotlb_dma_init(void); | ||
254 | extern struct dma_map_ops *dma_get_ops(struct device *); | ||
255 | |||
305 | /* | 256 | /* |
306 | * Define default versions so we can extend machvec for new platforms without having | 257 | * Define default versions so we can extend machvec for new platforms without having |
307 | * to update the machvec files for all existing platforms. | 258 | * to update the machvec files for all existing platforms. |
@@ -332,43 +283,10 @@ extern void machvec_init_from_cmdline(const char *cmdline); | |||
332 | # define platform_kernel_launch_event machvec_noop | 283 | # define platform_kernel_launch_event machvec_noop |
333 | #endif | 284 | #endif |
334 | #ifndef platform_dma_init | 285 | #ifndef platform_dma_init |
335 | # define platform_dma_init swiotlb_init | 286 | # define platform_dma_init swiotlb_dma_init |
336 | #endif | ||
337 | #ifndef platform_dma_alloc_coherent | ||
338 | # define platform_dma_alloc_coherent swiotlb_alloc_coherent | ||
339 | #endif | ||
340 | #ifndef platform_dma_free_coherent | ||
341 | # define platform_dma_free_coherent swiotlb_free_coherent | ||
342 | #endif | ||
343 | #ifndef platform_dma_map_single_attrs | ||
344 | # define platform_dma_map_single_attrs swiotlb_map_single_attrs | ||
345 | #endif | ||
346 | #ifndef platform_dma_unmap_single_attrs | ||
347 | # define platform_dma_unmap_single_attrs swiotlb_unmap_single_attrs | ||
348 | #endif | ||
349 | #ifndef platform_dma_map_sg_attrs | ||
350 | # define platform_dma_map_sg_attrs swiotlb_map_sg_attrs | ||
351 | #endif | ||
352 | #ifndef platform_dma_unmap_sg_attrs | ||
353 | # define platform_dma_unmap_sg_attrs swiotlb_unmap_sg_attrs | ||
354 | #endif | ||
355 | #ifndef platform_dma_sync_single_for_cpu | ||
356 | # define platform_dma_sync_single_for_cpu swiotlb_sync_single_for_cpu | ||
357 | #endif | ||
358 | #ifndef platform_dma_sync_sg_for_cpu | ||
359 | # define platform_dma_sync_sg_for_cpu swiotlb_sync_sg_for_cpu | ||
360 | #endif | ||
361 | #ifndef platform_dma_sync_single_for_device | ||
362 | # define platform_dma_sync_single_for_device swiotlb_sync_single_for_device | ||
363 | #endif | ||
364 | #ifndef platform_dma_sync_sg_for_device | ||
365 | # define platform_dma_sync_sg_for_device swiotlb_sync_sg_for_device | ||
366 | #endif | ||
367 | #ifndef platform_dma_mapping_error | ||
368 | # define platform_dma_mapping_error swiotlb_dma_mapping_error | ||
369 | #endif | 287 | #endif |
370 | #ifndef platform_dma_supported | 288 | #ifndef platform_dma_get_ops |
371 | # define platform_dma_supported swiotlb_dma_supported | 289 | # define platform_dma_get_ops dma_get_ops |
372 | #endif | 290 | #endif |
373 | #ifndef platform_dma_get_required_mask | 291 | #ifndef platform_dma_get_required_mask |
374 | # define platform_dma_get_required_mask ia64_dma_get_required_mask | 292 | # define platform_dma_get_required_mask ia64_dma_get_required_mask |
diff --git a/arch/ia64/include/asm/machvec_dig_vtd.h b/arch/ia64/include/asm/machvec_dig_vtd.h index 3400b561e711..6ab1de5c45ef 100644 --- a/arch/ia64/include/asm/machvec_dig_vtd.h +++ b/arch/ia64/include/asm/machvec_dig_vtd.h | |||
@@ -2,14 +2,6 @@ | |||
2 | #define _ASM_IA64_MACHVEC_DIG_VTD_h | 2 | #define _ASM_IA64_MACHVEC_DIG_VTD_h |
3 | 3 | ||
4 | extern ia64_mv_setup_t dig_setup; | 4 | extern ia64_mv_setup_t dig_setup; |
5 | extern ia64_mv_dma_alloc_coherent vtd_alloc_coherent; | ||
6 | extern ia64_mv_dma_free_coherent vtd_free_coherent; | ||
7 | extern ia64_mv_dma_map_single_attrs vtd_map_single_attrs; | ||
8 | extern ia64_mv_dma_unmap_single_attrs vtd_unmap_single_attrs; | ||
9 | extern ia64_mv_dma_map_sg_attrs vtd_map_sg_attrs; | ||
10 | extern ia64_mv_dma_unmap_sg_attrs vtd_unmap_sg_attrs; | ||
11 | extern ia64_mv_dma_supported iommu_dma_supported; | ||
12 | extern ia64_mv_dma_mapping_error vtd_dma_mapping_error; | ||
13 | extern ia64_mv_dma_init pci_iommu_alloc; | 5 | extern ia64_mv_dma_init pci_iommu_alloc; |
14 | 6 | ||
15 | /* | 7 | /* |
@@ -22,17 +14,5 @@ extern ia64_mv_dma_init pci_iommu_alloc; | |||
22 | #define platform_name "dig_vtd" | 14 | #define platform_name "dig_vtd" |
23 | #define platform_setup dig_setup | 15 | #define platform_setup dig_setup |
24 | #define platform_dma_init pci_iommu_alloc | 16 | #define platform_dma_init pci_iommu_alloc |
25 | #define platform_dma_alloc_coherent vtd_alloc_coherent | ||
26 | #define platform_dma_free_coherent vtd_free_coherent | ||
27 | #define platform_dma_map_single_attrs vtd_map_single_attrs | ||
28 | #define platform_dma_unmap_single_attrs vtd_unmap_single_attrs | ||
29 | #define platform_dma_map_sg_attrs vtd_map_sg_attrs | ||
30 | #define platform_dma_unmap_sg_attrs vtd_unmap_sg_attrs | ||
31 | #define platform_dma_sync_single_for_cpu machvec_dma_sync_single | ||
32 | #define platform_dma_sync_sg_for_cpu machvec_dma_sync_sg | ||
33 | #define platform_dma_sync_single_for_device machvec_dma_sync_single | ||
34 | #define platform_dma_sync_sg_for_device machvec_dma_sync_sg | ||
35 | #define platform_dma_supported iommu_dma_supported | ||
36 | #define platform_dma_mapping_error vtd_dma_mapping_error | ||
37 | 17 | ||
38 | #endif /* _ASM_IA64_MACHVEC_DIG_VTD_h */ | 18 | #endif /* _ASM_IA64_MACHVEC_DIG_VTD_h */ |
diff --git a/arch/ia64/include/asm/machvec_hpzx1.h b/arch/ia64/include/asm/machvec_hpzx1.h index 2f57f5144b9f..3bd83d78a412 100644 --- a/arch/ia64/include/asm/machvec_hpzx1.h +++ b/arch/ia64/include/asm/machvec_hpzx1.h | |||
@@ -2,14 +2,7 @@ | |||
2 | #define _ASM_IA64_MACHVEC_HPZX1_h | 2 | #define _ASM_IA64_MACHVEC_HPZX1_h |
3 | 3 | ||
4 | extern ia64_mv_setup_t dig_setup; | 4 | extern ia64_mv_setup_t dig_setup; |
5 | extern ia64_mv_dma_alloc_coherent sba_alloc_coherent; | 5 | extern ia64_mv_dma_init sba_dma_init; |
6 | extern ia64_mv_dma_free_coherent sba_free_coherent; | ||
7 | extern ia64_mv_dma_map_single_attrs sba_map_single_attrs; | ||
8 | extern ia64_mv_dma_unmap_single_attrs sba_unmap_single_attrs; | ||
9 | extern ia64_mv_dma_map_sg_attrs sba_map_sg_attrs; | ||
10 | extern ia64_mv_dma_unmap_sg_attrs sba_unmap_sg_attrs; | ||
11 | extern ia64_mv_dma_supported sba_dma_supported; | ||
12 | extern ia64_mv_dma_mapping_error sba_dma_mapping_error; | ||
13 | 6 | ||
14 | /* | 7 | /* |
15 | * This stuff has dual use! | 8 | * This stuff has dual use! |
@@ -20,18 +13,6 @@ extern ia64_mv_dma_mapping_error sba_dma_mapping_error; | |||
20 | */ | 13 | */ |
21 | #define platform_name "hpzx1" | 14 | #define platform_name "hpzx1" |
22 | #define platform_setup dig_setup | 15 | #define platform_setup dig_setup |
23 | #define platform_dma_init machvec_noop | 16 | #define platform_dma_init sba_dma_init |
24 | #define platform_dma_alloc_coherent sba_alloc_coherent | ||
25 | #define platform_dma_free_coherent sba_free_coherent | ||
26 | #define platform_dma_map_single_attrs sba_map_single_attrs | ||
27 | #define platform_dma_unmap_single_attrs sba_unmap_single_attrs | ||
28 | #define platform_dma_map_sg_attrs sba_map_sg_attrs | ||
29 | #define platform_dma_unmap_sg_attrs sba_unmap_sg_attrs | ||
30 | #define platform_dma_sync_single_for_cpu machvec_dma_sync_single | ||
31 | #define platform_dma_sync_sg_for_cpu machvec_dma_sync_sg | ||
32 | #define platform_dma_sync_single_for_device machvec_dma_sync_single | ||
33 | #define platform_dma_sync_sg_for_device machvec_dma_sync_sg | ||
34 | #define platform_dma_supported sba_dma_supported | ||
35 | #define platform_dma_mapping_error sba_dma_mapping_error | ||
36 | 17 | ||
37 | #endif /* _ASM_IA64_MACHVEC_HPZX1_h */ | 18 | #endif /* _ASM_IA64_MACHVEC_HPZX1_h */ |
diff --git a/arch/ia64/include/asm/machvec_hpzx1_swiotlb.h b/arch/ia64/include/asm/machvec_hpzx1_swiotlb.h index a842cdda827b..1091ac39740c 100644 --- a/arch/ia64/include/asm/machvec_hpzx1_swiotlb.h +++ b/arch/ia64/include/asm/machvec_hpzx1_swiotlb.h | |||
@@ -2,18 +2,7 @@ | |||
2 | #define _ASM_IA64_MACHVEC_HPZX1_SWIOTLB_h | 2 | #define _ASM_IA64_MACHVEC_HPZX1_SWIOTLB_h |
3 | 3 | ||
4 | extern ia64_mv_setup_t dig_setup; | 4 | extern ia64_mv_setup_t dig_setup; |
5 | extern ia64_mv_dma_alloc_coherent hwsw_alloc_coherent; | 5 | extern ia64_mv_dma_get_ops hwsw_dma_get_ops; |
6 | extern ia64_mv_dma_free_coherent hwsw_free_coherent; | ||
7 | extern ia64_mv_dma_map_single_attrs hwsw_map_single_attrs; | ||
8 | extern ia64_mv_dma_unmap_single_attrs hwsw_unmap_single_attrs; | ||
9 | extern ia64_mv_dma_map_sg_attrs hwsw_map_sg_attrs; | ||
10 | extern ia64_mv_dma_unmap_sg_attrs hwsw_unmap_sg_attrs; | ||
11 | extern ia64_mv_dma_supported hwsw_dma_supported; | ||
12 | extern ia64_mv_dma_mapping_error hwsw_dma_mapping_error; | ||
13 | extern ia64_mv_dma_sync_single_for_cpu hwsw_sync_single_for_cpu; | ||
14 | extern ia64_mv_dma_sync_sg_for_cpu hwsw_sync_sg_for_cpu; | ||
15 | extern ia64_mv_dma_sync_single_for_device hwsw_sync_single_for_device; | ||
16 | extern ia64_mv_dma_sync_sg_for_device hwsw_sync_sg_for_device; | ||
17 | 6 | ||
18 | /* | 7 | /* |
19 | * This stuff has dual use! | 8 | * This stuff has dual use! |
@@ -23,20 +12,8 @@ extern ia64_mv_dma_sync_sg_for_device hwsw_sync_sg_for_device; | |||
23 | * the macros are used directly. | 12 | * the macros are used directly. |
24 | */ | 13 | */ |
25 | #define platform_name "hpzx1_swiotlb" | 14 | #define platform_name "hpzx1_swiotlb" |
26 | |||
27 | #define platform_setup dig_setup | 15 | #define platform_setup dig_setup |
28 | #define platform_dma_init machvec_noop | 16 | #define platform_dma_init machvec_noop |
29 | #define platform_dma_alloc_coherent hwsw_alloc_coherent | 17 | #define platform_dma_get_ops hwsw_dma_get_ops |
30 | #define platform_dma_free_coherent hwsw_free_coherent | ||
31 | #define platform_dma_map_single_attrs hwsw_map_single_attrs | ||
32 | #define platform_dma_unmap_single_attrs hwsw_unmap_single_attrs | ||
33 | #define platform_dma_map_sg_attrs hwsw_map_sg_attrs | ||
34 | #define platform_dma_unmap_sg_attrs hwsw_unmap_sg_attrs | ||
35 | #define platform_dma_supported hwsw_dma_supported | ||
36 | #define platform_dma_mapping_error hwsw_dma_mapping_error | ||
37 | #define platform_dma_sync_single_for_cpu hwsw_sync_single_for_cpu | ||
38 | #define platform_dma_sync_sg_for_cpu hwsw_sync_sg_for_cpu | ||
39 | #define platform_dma_sync_single_for_device hwsw_sync_single_for_device | ||
40 | #define platform_dma_sync_sg_for_device hwsw_sync_sg_for_device | ||
41 | 18 | ||
42 | #endif /* _ASM_IA64_MACHVEC_HPZX1_SWIOTLB_h */ | 19 | #endif /* _ASM_IA64_MACHVEC_HPZX1_SWIOTLB_h */ |
diff --git a/arch/ia64/include/asm/machvec_sn2.h b/arch/ia64/include/asm/machvec_sn2.h index f1a6e0d6dfa5..f061a30aac42 100644 --- a/arch/ia64/include/asm/machvec_sn2.h +++ b/arch/ia64/include/asm/machvec_sn2.h | |||
@@ -55,19 +55,8 @@ extern ia64_mv_readb_t __sn_readb_relaxed; | |||
55 | extern ia64_mv_readw_t __sn_readw_relaxed; | 55 | extern ia64_mv_readw_t __sn_readw_relaxed; |
56 | extern ia64_mv_readl_t __sn_readl_relaxed; | 56 | extern ia64_mv_readl_t __sn_readl_relaxed; |
57 | extern ia64_mv_readq_t __sn_readq_relaxed; | 57 | extern ia64_mv_readq_t __sn_readq_relaxed; |
58 | extern ia64_mv_dma_alloc_coherent sn_dma_alloc_coherent; | ||
59 | extern ia64_mv_dma_free_coherent sn_dma_free_coherent; | ||
60 | extern ia64_mv_dma_map_single_attrs sn_dma_map_single_attrs; | ||
61 | extern ia64_mv_dma_unmap_single_attrs sn_dma_unmap_single_attrs; | ||
62 | extern ia64_mv_dma_map_sg_attrs sn_dma_map_sg_attrs; | ||
63 | extern ia64_mv_dma_unmap_sg_attrs sn_dma_unmap_sg_attrs; | ||
64 | extern ia64_mv_dma_sync_single_for_cpu sn_dma_sync_single_for_cpu; | ||
65 | extern ia64_mv_dma_sync_sg_for_cpu sn_dma_sync_sg_for_cpu; | ||
66 | extern ia64_mv_dma_sync_single_for_device sn_dma_sync_single_for_device; | ||
67 | extern ia64_mv_dma_sync_sg_for_device sn_dma_sync_sg_for_device; | ||
68 | extern ia64_mv_dma_mapping_error sn_dma_mapping_error; | ||
69 | extern ia64_mv_dma_supported sn_dma_supported; | ||
70 | extern ia64_mv_dma_get_required_mask sn_dma_get_required_mask; | 58 | extern ia64_mv_dma_get_required_mask sn_dma_get_required_mask; |
59 | extern ia64_mv_dma_init sn_dma_init; | ||
71 | extern ia64_mv_migrate_t sn_migrate; | 60 | extern ia64_mv_migrate_t sn_migrate; |
72 | extern ia64_mv_kernel_launch_event_t sn_kernel_launch_event; | 61 | extern ia64_mv_kernel_launch_event_t sn_kernel_launch_event; |
73 | extern ia64_mv_setup_msi_irq_t sn_setup_msi_irq; | 62 | extern ia64_mv_setup_msi_irq_t sn_setup_msi_irq; |
@@ -111,20 +100,8 @@ extern ia64_mv_pci_fixup_bus_t sn_pci_fixup_bus; | |||
111 | #define platform_pci_get_legacy_mem sn_pci_get_legacy_mem | 100 | #define platform_pci_get_legacy_mem sn_pci_get_legacy_mem |
112 | #define platform_pci_legacy_read sn_pci_legacy_read | 101 | #define platform_pci_legacy_read sn_pci_legacy_read |
113 | #define platform_pci_legacy_write sn_pci_legacy_write | 102 | #define platform_pci_legacy_write sn_pci_legacy_write |
114 | #define platform_dma_init machvec_noop | ||
115 | #define platform_dma_alloc_coherent sn_dma_alloc_coherent | ||
116 | #define platform_dma_free_coherent sn_dma_free_coherent | ||
117 | #define platform_dma_map_single_attrs sn_dma_map_single_attrs | ||
118 | #define platform_dma_unmap_single_attrs sn_dma_unmap_single_attrs | ||
119 | #define platform_dma_map_sg_attrs sn_dma_map_sg_attrs | ||
120 | #define platform_dma_unmap_sg_attrs sn_dma_unmap_sg_attrs | ||
121 | #define platform_dma_sync_single_for_cpu sn_dma_sync_single_for_cpu | ||
122 | #define platform_dma_sync_sg_for_cpu sn_dma_sync_sg_for_cpu | ||
123 | #define platform_dma_sync_single_for_device sn_dma_sync_single_for_device | ||
124 | #define platform_dma_sync_sg_for_device sn_dma_sync_sg_for_device | ||
125 | #define platform_dma_mapping_error sn_dma_mapping_error | ||
126 | #define platform_dma_supported sn_dma_supported | ||
127 | #define platform_dma_get_required_mask sn_dma_get_required_mask | 103 | #define platform_dma_get_required_mask sn_dma_get_required_mask |
104 | #define platform_dma_init sn_dma_init | ||
128 | #define platform_migrate sn_migrate | 105 | #define platform_migrate sn_migrate |
129 | #define platform_kernel_launch_event sn_kernel_launch_event | 106 | #define platform_kernel_launch_event sn_kernel_launch_event |
130 | #ifdef CONFIG_PCI_MSI | 107 | #ifdef CONFIG_PCI_MSI |
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile index c381ea954892..f2778f2c4fd9 100644 --- a/arch/ia64/kernel/Makefile +++ b/arch/ia64/kernel/Makefile | |||
@@ -7,7 +7,7 @@ extra-y := head.o init_task.o vmlinux.lds | |||
7 | obj-y := acpi.o entry.o efi.o efi_stub.o gate-data.o fsys.o ia64_ksyms.o irq.o irq_ia64.o \ | 7 | obj-y := acpi.o entry.o efi.o efi_stub.o gate-data.o fsys.o ia64_ksyms.o irq.o irq_ia64.o \ |
8 | irq_lsapic.o ivt.o machvec.o pal.o patch.o process.o perfmon.o ptrace.o sal.o \ | 8 | irq_lsapic.o ivt.o machvec.o pal.o patch.o process.o perfmon.o ptrace.o sal.o \ |
9 | salinfo.o setup.o signal.o sys_ia64.o time.o traps.o unaligned.o \ | 9 | salinfo.o setup.o signal.o sys_ia64.o time.o traps.o unaligned.o \ |
10 | unwind.o mca.o mca_asm.o topology.o | 10 | unwind.o mca.o mca_asm.o topology.o dma-mapping.o |
11 | 11 | ||
12 | obj-$(CONFIG_IA64_BRL_EMU) += brl_emu.o | 12 | obj-$(CONFIG_IA64_BRL_EMU) += brl_emu.o |
13 | obj-$(CONFIG_IA64_GENERIC) += acpi-ext.o | 13 | obj-$(CONFIG_IA64_GENERIC) += acpi-ext.o |
@@ -43,9 +43,7 @@ ifneq ($(CONFIG_IA64_ESI),) | |||
43 | obj-y += esi_stub.o # must be in kernel proper | 43 | obj-y += esi_stub.o # must be in kernel proper |
44 | endif | 44 | endif |
45 | obj-$(CONFIG_DMAR) += pci-dma.o | 45 | obj-$(CONFIG_DMAR) += pci-dma.o |
46 | ifeq ($(CONFIG_DMAR), y) | ||
47 | obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o | 46 | obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o |
48 | endif | ||
49 | 47 | ||
50 | # The gate DSO image is built using a special linker script. | 48 | # The gate DSO image is built using a special linker script. |
51 | targets += gate.so gate-syms.o | 49 | targets += gate.so gate-syms.o |
diff --git a/arch/ia64/kernel/dma-mapping.c b/arch/ia64/kernel/dma-mapping.c new file mode 100644 index 000000000000..086a2aeb0404 --- /dev/null +++ b/arch/ia64/kernel/dma-mapping.c | |||
@@ -0,0 +1,13 @@ | |||
1 | #include <linux/dma-mapping.h> | ||
2 | |||
3 | /* Set this to 1 if there is a HW IOMMU in the system */ | ||
4 | int iommu_detected __read_mostly; | ||
5 | |||
6 | struct dma_map_ops *dma_ops; | ||
7 | EXPORT_SYMBOL(dma_ops); | ||
8 | |||
9 | struct dma_map_ops *dma_get_ops(struct device *dev) | ||
10 | { | ||
11 | return dma_ops; | ||
12 | } | ||
13 | EXPORT_SYMBOL(dma_get_ops); | ||
diff --git a/arch/ia64/kernel/machvec.c b/arch/ia64/kernel/machvec.c index 7ccb228ceedc..d41a40ef80c0 100644 --- a/arch/ia64/kernel/machvec.c +++ b/arch/ia64/kernel/machvec.c | |||
@@ -1,5 +1,5 @@ | |||
1 | #include <linux/module.h> | 1 | #include <linux/module.h> |
2 | 2 | #include <linux/dma-mapping.h> | |
3 | #include <asm/machvec.h> | 3 | #include <asm/machvec.h> |
4 | #include <asm/system.h> | 4 | #include <asm/system.h> |
5 | 5 | ||
@@ -75,14 +75,16 @@ machvec_timer_interrupt (int irq, void *dev_id) | |||
75 | EXPORT_SYMBOL(machvec_timer_interrupt); | 75 | EXPORT_SYMBOL(machvec_timer_interrupt); |
76 | 76 | ||
77 | void | 77 | void |
78 | machvec_dma_sync_single (struct device *hwdev, dma_addr_t dma_handle, size_t size, int dir) | 78 | machvec_dma_sync_single(struct device *hwdev, dma_addr_t dma_handle, size_t size, |
79 | enum dma_data_direction dir) | ||
79 | { | 80 | { |
80 | mb(); | 81 | mb(); |
81 | } | 82 | } |
82 | EXPORT_SYMBOL(machvec_dma_sync_single); | 83 | EXPORT_SYMBOL(machvec_dma_sync_single); |
83 | 84 | ||
84 | void | 85 | void |
85 | machvec_dma_sync_sg (struct device *hwdev, struct scatterlist *sg, int n, int dir) | 86 | machvec_dma_sync_sg(struct device *hwdev, struct scatterlist *sg, int n, |
87 | enum dma_data_direction dir) | ||
86 | { | 88 | { |
87 | mb(); | 89 | mb(); |
88 | } | 90 | } |
diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c index d0ada067a4af..e4cb443bb988 100644 --- a/arch/ia64/kernel/pci-dma.c +++ b/arch/ia64/kernel/pci-dma.c | |||
@@ -32,9 +32,6 @@ int force_iommu __read_mostly = 1; | |||
32 | int force_iommu __read_mostly; | 32 | int force_iommu __read_mostly; |
33 | #endif | 33 | #endif |
34 | 34 | ||
35 | /* Set this to 1 if there is a HW IOMMU in the system */ | ||
36 | int iommu_detected __read_mostly; | ||
37 | |||
38 | /* Dummy device used for NULL arguments (normally ISA). Better would | 35 | /* Dummy device used for NULL arguments (normally ISA). Better would |
39 | be probably a smaller DMA mask, but this is bug-to-bug compatible | 36 | be probably a smaller DMA mask, but this is bug-to-bug compatible |
40 | to i386. */ | 37 | to i386. */ |
@@ -44,18 +41,7 @@ struct device fallback_dev = { | |||
44 | .dma_mask = &fallback_dev.coherent_dma_mask, | 41 | .dma_mask = &fallback_dev.coherent_dma_mask, |
45 | }; | 42 | }; |
46 | 43 | ||
47 | void __init pci_iommu_alloc(void) | 44 | extern struct dma_map_ops intel_dma_ops; |
48 | { | ||
49 | /* | ||
50 | * The order of these functions is important for | ||
51 | * fall-back/fail-over reasons | ||
52 | */ | ||
53 | detect_intel_iommu(); | ||
54 | |||
55 | #ifdef CONFIG_SWIOTLB | ||
56 | pci_swiotlb_init(); | ||
57 | #endif | ||
58 | } | ||
59 | 45 | ||
60 | static int __init pci_iommu_init(void) | 46 | static int __init pci_iommu_init(void) |
61 | { | 47 | { |
@@ -79,15 +65,12 @@ iommu_dma_init(void) | |||
79 | return; | 65 | return; |
80 | } | 66 | } |
81 | 67 | ||
82 | struct dma_mapping_ops *dma_ops; | ||
83 | EXPORT_SYMBOL(dma_ops); | ||
84 | |||
85 | int iommu_dma_supported(struct device *dev, u64 mask) | 68 | int iommu_dma_supported(struct device *dev, u64 mask) |
86 | { | 69 | { |
87 | struct dma_mapping_ops *ops = get_dma_ops(dev); | 70 | struct dma_map_ops *ops = platform_dma_get_ops(dev); |
88 | 71 | ||
89 | if (ops->dma_supported_op) | 72 | if (ops->dma_supported) |
90 | return ops->dma_supported_op(dev, mask); | 73 | return ops->dma_supported(dev, mask); |
91 | 74 | ||
92 | /* Copied from i386. Doesn't make much sense, because it will | 75 | /* Copied from i386. Doesn't make much sense, because it will |
93 | only work for pci_alloc_coherent. | 76 | only work for pci_alloc_coherent. |
@@ -116,4 +99,25 @@ int iommu_dma_supported(struct device *dev, u64 mask) | |||
116 | } | 99 | } |
117 | EXPORT_SYMBOL(iommu_dma_supported); | 100 | EXPORT_SYMBOL(iommu_dma_supported); |
118 | 101 | ||
102 | void __init pci_iommu_alloc(void) | ||
103 | { | ||
104 | dma_ops = &intel_dma_ops; | ||
105 | |||
106 | dma_ops->sync_single_for_cpu = machvec_dma_sync_single; | ||
107 | dma_ops->sync_sg_for_cpu = machvec_dma_sync_sg; | ||
108 | dma_ops->sync_single_for_device = machvec_dma_sync_single; | ||
109 | dma_ops->sync_sg_for_device = machvec_dma_sync_sg; | ||
110 | dma_ops->dma_supported = iommu_dma_supported; | ||
111 | |||
112 | /* | ||
113 | * The order of these functions is important for | ||
114 | * fall-back/fail-over reasons | ||
115 | */ | ||
116 | detect_intel_iommu(); | ||
117 | |||
118 | #ifdef CONFIG_SWIOTLB | ||
119 | pci_swiotlb_init(); | ||
120 | #endif | ||
121 | } | ||
122 | |||
119 | #endif | 123 | #endif |
diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c index 16c50516dbc1..573f02c39a00 100644 --- a/arch/ia64/kernel/pci-swiotlb.c +++ b/arch/ia64/kernel/pci-swiotlb.c | |||
@@ -13,23 +13,37 @@ | |||
13 | int swiotlb __read_mostly; | 13 | int swiotlb __read_mostly; |
14 | EXPORT_SYMBOL(swiotlb); | 14 | EXPORT_SYMBOL(swiotlb); |
15 | 15 | ||
16 | struct dma_mapping_ops swiotlb_dma_ops = { | 16 | static void *ia64_swiotlb_alloc_coherent(struct device *dev, size_t size, |
17 | .mapping_error = swiotlb_dma_mapping_error, | 17 | dma_addr_t *dma_handle, gfp_t gfp) |
18 | .alloc_coherent = swiotlb_alloc_coherent, | 18 | { |
19 | if (dev->coherent_dma_mask != DMA_64BIT_MASK) | ||
20 | gfp |= GFP_DMA; | ||
21 | return swiotlb_alloc_coherent(dev, size, dma_handle, gfp); | ||
22 | } | ||
23 | |||
24 | struct dma_map_ops swiotlb_dma_ops = { | ||
25 | .alloc_coherent = ia64_swiotlb_alloc_coherent, | ||
19 | .free_coherent = swiotlb_free_coherent, | 26 | .free_coherent = swiotlb_free_coherent, |
20 | .map_single = swiotlb_map_single, | 27 | .map_page = swiotlb_map_page, |
21 | .unmap_single = swiotlb_unmap_single, | 28 | .unmap_page = swiotlb_unmap_page, |
29 | .map_sg = swiotlb_map_sg_attrs, | ||
30 | .unmap_sg = swiotlb_unmap_sg_attrs, | ||
22 | .sync_single_for_cpu = swiotlb_sync_single_for_cpu, | 31 | .sync_single_for_cpu = swiotlb_sync_single_for_cpu, |
23 | .sync_single_for_device = swiotlb_sync_single_for_device, | 32 | .sync_single_for_device = swiotlb_sync_single_for_device, |
24 | .sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu, | 33 | .sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu, |
25 | .sync_single_range_for_device = swiotlb_sync_single_range_for_device, | 34 | .sync_single_range_for_device = swiotlb_sync_single_range_for_device, |
26 | .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu, | 35 | .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu, |
27 | .sync_sg_for_device = swiotlb_sync_sg_for_device, | 36 | .sync_sg_for_device = swiotlb_sync_sg_for_device, |
28 | .map_sg = swiotlb_map_sg, | 37 | .dma_supported = swiotlb_dma_supported, |
29 | .unmap_sg = swiotlb_unmap_sg, | 38 | .mapping_error = swiotlb_dma_mapping_error, |
30 | .dma_supported_op = swiotlb_dma_supported, | ||
31 | }; | 39 | }; |
32 | 40 | ||
41 | void __init swiotlb_dma_init(void) | ||
42 | { | ||
43 | dma_ops = &swiotlb_dma_ops; | ||
44 | swiotlb_init(); | ||
45 | } | ||
46 | |||
33 | void __init pci_swiotlb_init(void) | 47 | void __init pci_swiotlb_init(void) |
34 | { | 48 | { |
35 | if (!iommu_detected) { | 49 | if (!iommu_detected) { |
diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c index 863f5017baae..8c130e8f00e1 100644 --- a/arch/ia64/sn/pci/pci_dma.c +++ b/arch/ia64/sn/pci/pci_dma.c | |||
@@ -10,7 +10,7 @@ | |||
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/module.h> | 12 | #include <linux/module.h> |
13 | #include <linux/dma-attrs.h> | 13 | #include <linux/dma-mapping.h> |
14 | #include <asm/dma.h> | 14 | #include <asm/dma.h> |
15 | #include <asm/sn/intr.h> | 15 | #include <asm/sn/intr.h> |
16 | #include <asm/sn/pcibus_provider_defs.h> | 16 | #include <asm/sn/pcibus_provider_defs.h> |
@@ -31,7 +31,7 @@ | |||
31 | * this function. Of course, SN only supports devices that have 32 or more | 31 | * this function. Of course, SN only supports devices that have 32 or more |
32 | * address bits when using the PMU. | 32 | * address bits when using the PMU. |
33 | */ | 33 | */ |
34 | int sn_dma_supported(struct device *dev, u64 mask) | 34 | static int sn_dma_supported(struct device *dev, u64 mask) |
35 | { | 35 | { |
36 | BUG_ON(dev->bus != &pci_bus_type); | 36 | BUG_ON(dev->bus != &pci_bus_type); |
37 | 37 | ||
@@ -39,7 +39,6 @@ int sn_dma_supported(struct device *dev, u64 mask) | |||
39 | return 0; | 39 | return 0; |
40 | return 1; | 40 | return 1; |
41 | } | 41 | } |
42 | EXPORT_SYMBOL(sn_dma_supported); | ||
43 | 42 | ||
44 | /** | 43 | /** |
45 | * sn_dma_set_mask - set the DMA mask | 44 | * sn_dma_set_mask - set the DMA mask |
@@ -75,8 +74,8 @@ EXPORT_SYMBOL(sn_dma_set_mask); | |||
75 | * queue for a SCSI controller). See Documentation/DMA-API.txt for | 74 | * queue for a SCSI controller). See Documentation/DMA-API.txt for |
76 | * more information. | 75 | * more information. |
77 | */ | 76 | */ |
78 | void *sn_dma_alloc_coherent(struct device *dev, size_t size, | 77 | static void *sn_dma_alloc_coherent(struct device *dev, size_t size, |
79 | dma_addr_t * dma_handle, gfp_t flags) | 78 | dma_addr_t * dma_handle, gfp_t flags) |
80 | { | 79 | { |
81 | void *cpuaddr; | 80 | void *cpuaddr; |
82 | unsigned long phys_addr; | 81 | unsigned long phys_addr; |
@@ -124,7 +123,6 @@ void *sn_dma_alloc_coherent(struct device *dev, size_t size, | |||
124 | 123 | ||
125 | return cpuaddr; | 124 | return cpuaddr; |
126 | } | 125 | } |
127 | EXPORT_SYMBOL(sn_dma_alloc_coherent); | ||
128 | 126 | ||
129 | /** | 127 | /** |
130 | * sn_pci_free_coherent - free memory associated with coherent DMAable region | 128 | * sn_pci_free_coherent - free memory associated with coherent DMAable region |
@@ -136,8 +134,8 @@ EXPORT_SYMBOL(sn_dma_alloc_coherent); | |||
136 | * Frees the memory allocated by dma_alloc_coherent(), potentially unmapping | 134 | * Frees the memory allocated by dma_alloc_coherent(), potentially unmapping |
137 | * any associated IOMMU mappings. | 135 | * any associated IOMMU mappings. |
138 | */ | 136 | */ |
139 | void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, | 137 | static void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, |
140 | dma_addr_t dma_handle) | 138 | dma_addr_t dma_handle) |
141 | { | 139 | { |
142 | struct pci_dev *pdev = to_pci_dev(dev); | 140 | struct pci_dev *pdev = to_pci_dev(dev); |
143 | struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); | 141 | struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); |
@@ -147,7 +145,6 @@ void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, | |||
147 | provider->dma_unmap(pdev, dma_handle, 0); | 145 | provider->dma_unmap(pdev, dma_handle, 0); |
148 | free_pages((unsigned long)cpu_addr, get_order(size)); | 146 | free_pages((unsigned long)cpu_addr, get_order(size)); |
149 | } | 147 | } |
150 | EXPORT_SYMBOL(sn_dma_free_coherent); | ||
151 | 148 | ||
152 | /** | 149 | /** |
153 | * sn_dma_map_single_attrs - map a single page for DMA | 150 | * sn_dma_map_single_attrs - map a single page for DMA |
@@ -173,10 +170,12 @@ EXPORT_SYMBOL(sn_dma_free_coherent); | |||
173 | * TODO: simplify our interface; | 170 | * TODO: simplify our interface; |
174 | * figure out how to save dmamap handle so can use two step. | 171 | * figure out how to save dmamap handle so can use two step. |
175 | */ | 172 | */ |
176 | dma_addr_t sn_dma_map_single_attrs(struct device *dev, void *cpu_addr, | 173 | static dma_addr_t sn_dma_map_page(struct device *dev, struct page *page, |
177 | size_t size, int direction, | 174 | unsigned long offset, size_t size, |
178 | struct dma_attrs *attrs) | 175 | enum dma_data_direction dir, |
176 | struct dma_attrs *attrs) | ||
179 | { | 177 | { |
178 | void *cpu_addr = page_address(page) + offset; | ||
180 | dma_addr_t dma_addr; | 179 | dma_addr_t dma_addr; |
181 | unsigned long phys_addr; | 180 | unsigned long phys_addr; |
182 | struct pci_dev *pdev = to_pci_dev(dev); | 181 | struct pci_dev *pdev = to_pci_dev(dev); |
@@ -201,7 +200,6 @@ dma_addr_t sn_dma_map_single_attrs(struct device *dev, void *cpu_addr, | |||
201 | } | 200 | } |
202 | return dma_addr; | 201 | return dma_addr; |
203 | } | 202 | } |
204 | EXPORT_SYMBOL(sn_dma_map_single_attrs); | ||
205 | 203 | ||
206 | /** | 204 | /** |
207 | * sn_dma_unmap_single_attrs - unamp a DMA mapped page | 205 | * sn_dma_unmap_single_attrs - unamp a DMA mapped page |
@@ -215,21 +213,20 @@ EXPORT_SYMBOL(sn_dma_map_single_attrs); | |||
215 | * by @dma_handle into the coherence domain. On SN, we're always cache | 213 | * by @dma_handle into the coherence domain. On SN, we're always cache |
216 | * coherent, so we just need to free any ATEs associated with this mapping. | 214 | * coherent, so we just need to free any ATEs associated with this mapping. |
217 | */ | 215 | */ |
218 | void sn_dma_unmap_single_attrs(struct device *dev, dma_addr_t dma_addr, | 216 | static void sn_dma_unmap_page(struct device *dev, dma_addr_t dma_addr, |
219 | size_t size, int direction, | 217 | size_t size, enum dma_data_direction dir, |
220 | struct dma_attrs *attrs) | 218 | struct dma_attrs *attrs) |
221 | { | 219 | { |
222 | struct pci_dev *pdev = to_pci_dev(dev); | 220 | struct pci_dev *pdev = to_pci_dev(dev); |
223 | struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); | 221 | struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); |
224 | 222 | ||
225 | BUG_ON(dev->bus != &pci_bus_type); | 223 | BUG_ON(dev->bus != &pci_bus_type); |
226 | 224 | ||
227 | provider->dma_unmap(pdev, dma_addr, direction); | 225 | provider->dma_unmap(pdev, dma_addr, dir); |
228 | } | 226 | } |
229 | EXPORT_SYMBOL(sn_dma_unmap_single_attrs); | ||
230 | 227 | ||
231 | /** | 228 | /** |
232 | * sn_dma_unmap_sg_attrs - unmap a DMA scatterlist | 229 | * sn_dma_unmap_sg - unmap a DMA scatterlist |
233 | * @dev: device to unmap | 230 | * @dev: device to unmap |
234 | * @sg: scatterlist to unmap | 231 | * @sg: scatterlist to unmap |
235 | * @nhwentries: number of scatterlist entries | 232 | * @nhwentries: number of scatterlist entries |
@@ -238,9 +235,9 @@ EXPORT_SYMBOL(sn_dma_unmap_single_attrs); | |||
238 | * | 235 | * |
239 | * Unmap a set of streaming mode DMA translations. | 236 | * Unmap a set of streaming mode DMA translations. |
240 | */ | 237 | */ |
241 | void sn_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl, | 238 | static void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sgl, |
242 | int nhwentries, int direction, | 239 | int nhwentries, enum dma_data_direction dir, |
243 | struct dma_attrs *attrs) | 240 | struct dma_attrs *attrs) |
244 | { | 241 | { |
245 | int i; | 242 | int i; |
246 | struct pci_dev *pdev = to_pci_dev(dev); | 243 | struct pci_dev *pdev = to_pci_dev(dev); |
@@ -250,15 +247,14 @@ void sn_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl, | |||
250 | BUG_ON(dev->bus != &pci_bus_type); | 247 | BUG_ON(dev->bus != &pci_bus_type); |
251 | 248 | ||
252 | for_each_sg(sgl, sg, nhwentries, i) { | 249 | for_each_sg(sgl, sg, nhwentries, i) { |
253 | provider->dma_unmap(pdev, sg->dma_address, direction); | 250 | provider->dma_unmap(pdev, sg->dma_address, dir); |
254 | sg->dma_address = (dma_addr_t) NULL; | 251 | sg->dma_address = (dma_addr_t) NULL; |
255 | sg->dma_length = 0; | 252 | sg->dma_length = 0; |
256 | } | 253 | } |
257 | } | 254 | } |
258 | EXPORT_SYMBOL(sn_dma_unmap_sg_attrs); | ||
259 | 255 | ||
260 | /** | 256 | /** |
261 | * sn_dma_map_sg_attrs - map a scatterlist for DMA | 257 | * sn_dma_map_sg - map a scatterlist for DMA |
262 | * @dev: device to map for | 258 | * @dev: device to map for |
263 | * @sg: scatterlist to map | 259 | * @sg: scatterlist to map |
264 | * @nhwentries: number of entries | 260 | * @nhwentries: number of entries |
@@ -272,8 +268,9 @@ EXPORT_SYMBOL(sn_dma_unmap_sg_attrs); | |||
272 | * | 268 | * |
273 | * Maps each entry of @sg for DMA. | 269 | * Maps each entry of @sg for DMA. |
274 | */ | 270 | */ |
275 | int sn_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl, | 271 | static int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl, |
276 | int nhwentries, int direction, struct dma_attrs *attrs) | 272 | int nhwentries, enum dma_data_direction dir, |
273 | struct dma_attrs *attrs) | ||
277 | { | 274 | { |
278 | unsigned long phys_addr; | 275 | unsigned long phys_addr; |
279 | struct scatterlist *saved_sg = sgl, *sg; | 276 | struct scatterlist *saved_sg = sgl, *sg; |
@@ -310,8 +307,7 @@ int sn_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl, | |||
310 | * Free any successfully allocated entries. | 307 | * Free any successfully allocated entries. |
311 | */ | 308 | */ |
312 | if (i > 0) | 309 | if (i > 0) |
313 | sn_dma_unmap_sg_attrs(dev, saved_sg, i, | 310 | sn_dma_unmap_sg(dev, saved_sg, i, dir, attrs); |
314 | direction, attrs); | ||
315 | return 0; | 311 | return 0; |
316 | } | 312 | } |
317 | 313 | ||
@@ -320,41 +316,36 @@ int sn_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl, | |||
320 | 316 | ||
321 | return nhwentries; | 317 | return nhwentries; |
322 | } | 318 | } |
323 | EXPORT_SYMBOL(sn_dma_map_sg_attrs); | ||
324 | 319 | ||
325 | void sn_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, | 320 | static void sn_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, |
326 | size_t size, int direction) | 321 | size_t size, enum dma_data_direction dir) |
327 | { | 322 | { |
328 | BUG_ON(dev->bus != &pci_bus_type); | 323 | BUG_ON(dev->bus != &pci_bus_type); |
329 | } | 324 | } |
330 | EXPORT_SYMBOL(sn_dma_sync_single_for_cpu); | ||
331 | 325 | ||
332 | void sn_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, | 326 | static void sn_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, |
333 | size_t size, int direction) | 327 | size_t size, |
328 | enum dma_data_direction dir) | ||
334 | { | 329 | { |
335 | BUG_ON(dev->bus != &pci_bus_type); | 330 | BUG_ON(dev->bus != &pci_bus_type); |
336 | } | 331 | } |
337 | EXPORT_SYMBOL(sn_dma_sync_single_for_device); | ||
338 | 332 | ||
339 | void sn_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, | 333 | static void sn_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, |
340 | int nelems, int direction) | 334 | int nelems, enum dma_data_direction dir) |
341 | { | 335 | { |
342 | BUG_ON(dev->bus != &pci_bus_type); | 336 | BUG_ON(dev->bus != &pci_bus_type); |
343 | } | 337 | } |
344 | EXPORT_SYMBOL(sn_dma_sync_sg_for_cpu); | ||
345 | 338 | ||
346 | void sn_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, | 339 | static void sn_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, |
347 | int nelems, int direction) | 340 | int nelems, enum dma_data_direction dir) |
348 | { | 341 | { |
349 | BUG_ON(dev->bus != &pci_bus_type); | 342 | BUG_ON(dev->bus != &pci_bus_type); |
350 | } | 343 | } |
351 | EXPORT_SYMBOL(sn_dma_sync_sg_for_device); | ||
352 | 344 | ||
353 | int sn_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) | 345 | static int sn_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) |
354 | { | 346 | { |
355 | return 0; | 347 | return 0; |
356 | } | 348 | } |
357 | EXPORT_SYMBOL(sn_dma_mapping_error); | ||
358 | 349 | ||
359 | u64 sn_dma_get_required_mask(struct device *dev) | 350 | u64 sn_dma_get_required_mask(struct device *dev) |
360 | { | 351 | { |
@@ -471,3 +462,23 @@ int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size) | |||
471 | out: | 462 | out: |
472 | return ret; | 463 | return ret; |
473 | } | 464 | } |
465 | |||
466 | static struct dma_map_ops sn_dma_ops = { | ||
467 | .alloc_coherent = sn_dma_alloc_coherent, | ||
468 | .free_coherent = sn_dma_free_coherent, | ||
469 | .map_page = sn_dma_map_page, | ||
470 | .unmap_page = sn_dma_unmap_page, | ||
471 | .map_sg = sn_dma_map_sg, | ||
472 | .unmap_sg = sn_dma_unmap_sg, | ||
473 | .sync_single_for_cpu = sn_dma_sync_single_for_cpu, | ||
474 | .sync_sg_for_cpu = sn_dma_sync_sg_for_cpu, | ||
475 | .sync_single_for_device = sn_dma_sync_single_for_device, | ||
476 | .sync_sg_for_device = sn_dma_sync_sg_for_device, | ||
477 | .mapping_error = sn_dma_mapping_error, | ||
478 | .dma_supported = sn_dma_supported, | ||
479 | }; | ||
480 | |||
481 | void sn_dma_init(void) | ||
482 | { | ||
483 | dma_ops = &sn_dma_ops; | ||
484 | } | ||
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 34bc3a89228b..45161b816313 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
@@ -40,6 +40,7 @@ config X86 | |||
40 | select HAVE_GENERIC_DMA_COHERENT if X86_32 | 40 | select HAVE_GENERIC_DMA_COHERENT if X86_32 |
41 | select HAVE_EFFICIENT_UNALIGNED_ACCESS | 41 | select HAVE_EFFICIENT_UNALIGNED_ACCESS |
42 | select USER_STACKTRACE_SUPPORT | 42 | select USER_STACKTRACE_SUPPORT |
43 | select HAVE_DMA_API_DEBUG | ||
43 | select HAVE_KERNEL_GZIP | 44 | select HAVE_KERNEL_GZIP |
44 | select HAVE_KERNEL_BZIP2 | 45 | select HAVE_KERNEL_BZIP2 |
45 | select HAVE_KERNEL_LZMA | 46 | select HAVE_KERNEL_LZMA |
diff --git a/arch/x86/include/asm/device.h b/arch/x86/include/asm/device.h index 3c034f48fdb0..4994a20acbcb 100644 --- a/arch/x86/include/asm/device.h +++ b/arch/x86/include/asm/device.h | |||
@@ -6,7 +6,7 @@ struct dev_archdata { | |||
6 | void *acpi_handle; | 6 | void *acpi_handle; |
7 | #endif | 7 | #endif |
8 | #ifdef CONFIG_X86_64 | 8 | #ifdef CONFIG_X86_64 |
9 | struct dma_mapping_ops *dma_ops; | 9 | struct dma_map_ops *dma_ops; |
10 | #endif | 10 | #endif |
11 | #ifdef CONFIG_DMAR | 11 | #ifdef CONFIG_DMAR |
12 | void *iommu; /* hook for IOMMU specific extension */ | 12 | void *iommu; /* hook for IOMMU specific extension */ |
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h index 132a134d12f2..cea7b74963e9 100644 --- a/arch/x86/include/asm/dma-mapping.h +++ b/arch/x86/include/asm/dma-mapping.h | |||
@@ -7,6 +7,8 @@ | |||
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <linux/scatterlist.h> | 9 | #include <linux/scatterlist.h> |
10 | #include <linux/dma-debug.h> | ||
11 | #include <linux/dma-attrs.h> | ||
10 | #include <asm/io.h> | 12 | #include <asm/io.h> |
11 | #include <asm/swiotlb.h> | 13 | #include <asm/swiotlb.h> |
12 | #include <asm-generic/dma-coherent.h> | 14 | #include <asm-generic/dma-coherent.h> |
@@ -16,47 +18,9 @@ extern int iommu_merge; | |||
16 | extern struct device x86_dma_fallback_dev; | 18 | extern struct device x86_dma_fallback_dev; |
17 | extern int panic_on_overflow; | 19 | extern int panic_on_overflow; |
18 | 20 | ||
19 | struct dma_mapping_ops { | 21 | extern struct dma_map_ops *dma_ops; |
20 | int (*mapping_error)(struct device *dev, | 22 | |
21 | dma_addr_t dma_addr); | 23 | static inline struct dma_map_ops *get_dma_ops(struct device *dev) |
22 | void* (*alloc_coherent)(struct device *dev, size_t size, | ||
23 | dma_addr_t *dma_handle, gfp_t gfp); | ||
24 | void (*free_coherent)(struct device *dev, size_t size, | ||
25 | void *vaddr, dma_addr_t dma_handle); | ||
26 | dma_addr_t (*map_single)(struct device *hwdev, phys_addr_t ptr, | ||
27 | size_t size, int direction); | ||
28 | void (*unmap_single)(struct device *dev, dma_addr_t addr, | ||
29 | size_t size, int direction); | ||
30 | void (*sync_single_for_cpu)(struct device *hwdev, | ||
31 | dma_addr_t dma_handle, size_t size, | ||
32 | int direction); | ||
33 | void (*sync_single_for_device)(struct device *hwdev, | ||
34 | dma_addr_t dma_handle, size_t size, | ||
35 | int direction); | ||
36 | void (*sync_single_range_for_cpu)(struct device *hwdev, | ||
37 | dma_addr_t dma_handle, unsigned long offset, | ||
38 | size_t size, int direction); | ||
39 | void (*sync_single_range_for_device)(struct device *hwdev, | ||
40 | dma_addr_t dma_handle, unsigned long offset, | ||
41 | size_t size, int direction); | ||
42 | void (*sync_sg_for_cpu)(struct device *hwdev, | ||
43 | struct scatterlist *sg, int nelems, | ||
44 | int direction); | ||
45 | void (*sync_sg_for_device)(struct device *hwdev, | ||
46 | struct scatterlist *sg, int nelems, | ||
47 | int direction); | ||
48 | int (*map_sg)(struct device *hwdev, struct scatterlist *sg, | ||
49 | int nents, int direction); | ||
50 | void (*unmap_sg)(struct device *hwdev, | ||
51 | struct scatterlist *sg, int nents, | ||
52 | int direction); | ||
53 | int (*dma_supported)(struct device *hwdev, u64 mask); | ||
54 | int is_phys; | ||
55 | }; | ||
56 | |||
57 | extern struct dma_mapping_ops *dma_ops; | ||
58 | |||
59 | static inline struct dma_mapping_ops *get_dma_ops(struct device *dev) | ||
60 | { | 24 | { |
61 | #ifdef CONFIG_X86_32 | 25 | #ifdef CONFIG_X86_32 |
62 | return dma_ops; | 26 | return dma_ops; |
@@ -71,7 +35,7 @@ static inline struct dma_mapping_ops *get_dma_ops(struct device *dev) | |||
71 | /* Make sure we keep the same behaviour */ | 35 | /* Make sure we keep the same behaviour */ |
72 | static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) | 36 | static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) |
73 | { | 37 | { |
74 | struct dma_mapping_ops *ops = get_dma_ops(dev); | 38 | struct dma_map_ops *ops = get_dma_ops(dev); |
75 | if (ops->mapping_error) | 39 | if (ops->mapping_error) |
76 | return ops->mapping_error(dev, dma_addr); | 40 | return ops->mapping_error(dev, dma_addr); |
77 | 41 | ||
@@ -90,137 +54,167 @@ extern void *dma_generic_alloc_coherent(struct device *dev, size_t size, | |||
90 | 54 | ||
91 | static inline dma_addr_t | 55 | static inline dma_addr_t |
92 | dma_map_single(struct device *hwdev, void *ptr, size_t size, | 56 | dma_map_single(struct device *hwdev, void *ptr, size_t size, |
93 | int direction) | 57 | enum dma_data_direction dir) |
94 | { | 58 | { |
95 | struct dma_mapping_ops *ops = get_dma_ops(hwdev); | 59 | struct dma_map_ops *ops = get_dma_ops(hwdev); |
96 | 60 | dma_addr_t addr; | |
97 | BUG_ON(!valid_dma_direction(direction)); | 61 | |
98 | return ops->map_single(hwdev, virt_to_phys(ptr), size, direction); | 62 | BUG_ON(!valid_dma_direction(dir)); |
63 | addr = ops->map_page(hwdev, virt_to_page(ptr), | ||
64 | (unsigned long)ptr & ~PAGE_MASK, size, | ||
65 | dir, NULL); | ||
66 | debug_dma_map_page(hwdev, virt_to_page(ptr), | ||
67 | (unsigned long)ptr & ~PAGE_MASK, size, | ||
68 | dir, addr, true); | ||
69 | return addr; | ||
99 | } | 70 | } |
100 | 71 | ||
101 | static inline void | 72 | static inline void |
102 | dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size, | 73 | dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size, |
103 | int direction) | 74 | enum dma_data_direction dir) |
104 | { | 75 | { |
105 | struct dma_mapping_ops *ops = get_dma_ops(dev); | 76 | struct dma_map_ops *ops = get_dma_ops(dev); |
106 | 77 | ||
107 | BUG_ON(!valid_dma_direction(direction)); | 78 | BUG_ON(!valid_dma_direction(dir)); |
108 | if (ops->unmap_single) | 79 | if (ops->unmap_page) |
109 | ops->unmap_single(dev, addr, size, direction); | 80 | ops->unmap_page(dev, addr, size, dir, NULL); |
81 | debug_dma_unmap_page(dev, addr, size, dir, true); | ||
110 | } | 82 | } |
111 | 83 | ||
112 | static inline int | 84 | static inline int |
113 | dma_map_sg(struct device *hwdev, struct scatterlist *sg, | 85 | dma_map_sg(struct device *hwdev, struct scatterlist *sg, |
114 | int nents, int direction) | 86 | int nents, enum dma_data_direction dir) |
115 | { | 87 | { |
116 | struct dma_mapping_ops *ops = get_dma_ops(hwdev); | 88 | struct dma_map_ops *ops = get_dma_ops(hwdev); |
89 | int ents; | ||
90 | |||
91 | BUG_ON(!valid_dma_direction(dir)); | ||
92 | ents = ops->map_sg(hwdev, sg, nents, dir, NULL); | ||
93 | debug_dma_map_sg(hwdev, sg, nents, ents, dir); | ||
117 | 94 | ||
118 | BUG_ON(!valid_dma_direction(direction)); | 95 | return ents; |
119 | return ops->map_sg(hwdev, sg, nents, direction); | ||
120 | } | 96 | } |
121 | 97 | ||
122 | static inline void | 98 | static inline void |
123 | dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents, | 99 | dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents, |
124 | int direction) | 100 | enum dma_data_direction dir) |
125 | { | 101 | { |
126 | struct dma_mapping_ops *ops = get_dma_ops(hwdev); | 102 | struct dma_map_ops *ops = get_dma_ops(hwdev); |
127 | 103 | ||
128 | BUG_ON(!valid_dma_direction(direction)); | 104 | BUG_ON(!valid_dma_direction(dir)); |
105 | debug_dma_unmap_sg(hwdev, sg, nents, dir); | ||
129 | if (ops->unmap_sg) | 106 | if (ops->unmap_sg) |
130 | ops->unmap_sg(hwdev, sg, nents, direction); | 107 | ops->unmap_sg(hwdev, sg, nents, dir, NULL); |
131 | } | 108 | } |
132 | 109 | ||
133 | static inline void | 110 | static inline void |
134 | dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle, | 111 | dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle, |
135 | size_t size, int direction) | 112 | size_t size, enum dma_data_direction dir) |
136 | { | 113 | { |
137 | struct dma_mapping_ops *ops = get_dma_ops(hwdev); | 114 | struct dma_map_ops *ops = get_dma_ops(hwdev); |
138 | 115 | ||
139 | BUG_ON(!valid_dma_direction(direction)); | 116 | BUG_ON(!valid_dma_direction(dir)); |
140 | if (ops->sync_single_for_cpu) | 117 | if (ops->sync_single_for_cpu) |
141 | ops->sync_single_for_cpu(hwdev, dma_handle, size, direction); | 118 | ops->sync_single_for_cpu(hwdev, dma_handle, size, dir); |
119 | debug_dma_sync_single_for_cpu(hwdev, dma_handle, size, dir); | ||
142 | flush_write_buffers(); | 120 | flush_write_buffers(); |
143 | } | 121 | } |
144 | 122 | ||
145 | static inline void | 123 | static inline void |
146 | dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle, | 124 | dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle, |
147 | size_t size, int direction) | 125 | size_t size, enum dma_data_direction dir) |
148 | { | 126 | { |
149 | struct dma_mapping_ops *ops = get_dma_ops(hwdev); | 127 | struct dma_map_ops *ops = get_dma_ops(hwdev); |
150 | 128 | ||
151 | BUG_ON(!valid_dma_direction(direction)); | 129 | BUG_ON(!valid_dma_direction(dir)); |
152 | if (ops->sync_single_for_device) | 130 | if (ops->sync_single_for_device) |
153 | ops->sync_single_for_device(hwdev, dma_handle, size, direction); | 131 | ops->sync_single_for_device(hwdev, dma_handle, size, dir); |
132 | debug_dma_sync_single_for_device(hwdev, dma_handle, size, dir); | ||
154 | flush_write_buffers(); | 133 | flush_write_buffers(); |
155 | } | 134 | } |
156 | 135 | ||
157 | static inline void | 136 | static inline void |
158 | dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle, | 137 | dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle, |
159 | unsigned long offset, size_t size, int direction) | 138 | unsigned long offset, size_t size, |
139 | enum dma_data_direction dir) | ||
160 | { | 140 | { |
161 | struct dma_mapping_ops *ops = get_dma_ops(hwdev); | 141 | struct dma_map_ops *ops = get_dma_ops(hwdev); |
162 | 142 | ||
163 | BUG_ON(!valid_dma_direction(direction)); | 143 | BUG_ON(!valid_dma_direction(dir)); |
164 | if (ops->sync_single_range_for_cpu) | 144 | if (ops->sync_single_range_for_cpu) |
165 | ops->sync_single_range_for_cpu(hwdev, dma_handle, offset, | 145 | ops->sync_single_range_for_cpu(hwdev, dma_handle, offset, |
166 | size, direction); | 146 | size, dir); |
147 | debug_dma_sync_single_range_for_cpu(hwdev, dma_handle, | ||
148 | offset, size, dir); | ||
167 | flush_write_buffers(); | 149 | flush_write_buffers(); |
168 | } | 150 | } |
169 | 151 | ||
170 | static inline void | 152 | static inline void |
171 | dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle, | 153 | dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle, |
172 | unsigned long offset, size_t size, | 154 | unsigned long offset, size_t size, |
173 | int direction) | 155 | enum dma_data_direction dir) |
174 | { | 156 | { |
175 | struct dma_mapping_ops *ops = get_dma_ops(hwdev); | 157 | struct dma_map_ops *ops = get_dma_ops(hwdev); |
176 | 158 | ||
177 | BUG_ON(!valid_dma_direction(direction)); | 159 | BUG_ON(!valid_dma_direction(dir)); |
178 | if (ops->sync_single_range_for_device) | 160 | if (ops->sync_single_range_for_device) |
179 | ops->sync_single_range_for_device(hwdev, dma_handle, | 161 | ops->sync_single_range_for_device(hwdev, dma_handle, |
180 | offset, size, direction); | 162 | offset, size, dir); |
163 | debug_dma_sync_single_range_for_device(hwdev, dma_handle, | ||
164 | offset, size, dir); | ||
181 | flush_write_buffers(); | 165 | flush_write_buffers(); |
182 | } | 166 | } |
183 | 167 | ||
184 | static inline void | 168 | static inline void |
185 | dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg, | 169 | dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg, |
186 | int nelems, int direction) | 170 | int nelems, enum dma_data_direction dir) |
187 | { | 171 | { |
188 | struct dma_mapping_ops *ops = get_dma_ops(hwdev); | 172 | struct dma_map_ops *ops = get_dma_ops(hwdev); |
189 | 173 | ||
190 | BUG_ON(!valid_dma_direction(direction)); | 174 | BUG_ON(!valid_dma_direction(dir)); |
191 | if (ops->sync_sg_for_cpu) | 175 | if (ops->sync_sg_for_cpu) |
192 | ops->sync_sg_for_cpu(hwdev, sg, nelems, direction); | 176 | ops->sync_sg_for_cpu(hwdev, sg, nelems, dir); |
177 | debug_dma_sync_sg_for_cpu(hwdev, sg, nelems, dir); | ||
193 | flush_write_buffers(); | 178 | flush_write_buffers(); |
194 | } | 179 | } |
195 | 180 | ||
196 | static inline void | 181 | static inline void |
197 | dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, | 182 | dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, |
198 | int nelems, int direction) | 183 | int nelems, enum dma_data_direction dir) |
199 | { | 184 | { |
200 | struct dma_mapping_ops *ops = get_dma_ops(hwdev); | 185 | struct dma_map_ops *ops = get_dma_ops(hwdev); |
201 | 186 | ||
202 | BUG_ON(!valid_dma_direction(direction)); | 187 | BUG_ON(!valid_dma_direction(dir)); |
203 | if (ops->sync_sg_for_device) | 188 | if (ops->sync_sg_for_device) |
204 | ops->sync_sg_for_device(hwdev, sg, nelems, direction); | 189 | ops->sync_sg_for_device(hwdev, sg, nelems, dir); |
190 | debug_dma_sync_sg_for_device(hwdev, sg, nelems, dir); | ||
205 | 191 | ||
206 | flush_write_buffers(); | 192 | flush_write_buffers(); |
207 | } | 193 | } |
208 | 194 | ||
209 | static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, | 195 | static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, |
210 | size_t offset, size_t size, | 196 | size_t offset, size_t size, |
211 | int direction) | 197 | enum dma_data_direction dir) |
212 | { | 198 | { |
213 | struct dma_mapping_ops *ops = get_dma_ops(dev); | 199 | struct dma_map_ops *ops = get_dma_ops(dev); |
200 | dma_addr_t addr; | ||
214 | 201 | ||
215 | BUG_ON(!valid_dma_direction(direction)); | 202 | BUG_ON(!valid_dma_direction(dir)); |
216 | return ops->map_single(dev, page_to_phys(page) + offset, | 203 | addr = ops->map_page(dev, page, offset, size, dir, NULL); |
217 | size, direction); | 204 | debug_dma_map_page(dev, page, offset, size, dir, addr, false); |
205 | |||
206 | return addr; | ||
218 | } | 207 | } |
219 | 208 | ||
220 | static inline void dma_unmap_page(struct device *dev, dma_addr_t addr, | 209 | static inline void dma_unmap_page(struct device *dev, dma_addr_t addr, |
221 | size_t size, int direction) | 210 | size_t size, enum dma_data_direction dir) |
222 | { | 211 | { |
223 | dma_unmap_single(dev, addr, size, direction); | 212 | struct dma_map_ops *ops = get_dma_ops(dev); |
213 | |||
214 | BUG_ON(!valid_dma_direction(dir)); | ||
215 | if (ops->unmap_page) | ||
216 | ops->unmap_page(dev, addr, size, dir, NULL); | ||
217 | debug_dma_unmap_page(dev, addr, size, dir, false); | ||
224 | } | 218 | } |
225 | 219 | ||
226 | static inline void | 220 | static inline void |
@@ -266,7 +260,7 @@ static inline void * | |||
266 | dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, | 260 | dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, |
267 | gfp_t gfp) | 261 | gfp_t gfp) |
268 | { | 262 | { |
269 | struct dma_mapping_ops *ops = get_dma_ops(dev); | 263 | struct dma_map_ops *ops = get_dma_ops(dev); |
270 | void *memory; | 264 | void *memory; |
271 | 265 | ||
272 | gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32); | 266 | gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32); |
@@ -285,20 +279,24 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, | |||
285 | if (!ops->alloc_coherent) | 279 | if (!ops->alloc_coherent) |
286 | return NULL; | 280 | return NULL; |
287 | 281 | ||
288 | return ops->alloc_coherent(dev, size, dma_handle, | 282 | memory = ops->alloc_coherent(dev, size, dma_handle, |
289 | dma_alloc_coherent_gfp_flags(dev, gfp)); | 283 | dma_alloc_coherent_gfp_flags(dev, gfp)); |
284 | debug_dma_alloc_coherent(dev, size, *dma_handle, memory); | ||
285 | |||
286 | return memory; | ||
290 | } | 287 | } |
291 | 288 | ||
292 | static inline void dma_free_coherent(struct device *dev, size_t size, | 289 | static inline void dma_free_coherent(struct device *dev, size_t size, |
293 | void *vaddr, dma_addr_t bus) | 290 | void *vaddr, dma_addr_t bus) |
294 | { | 291 | { |
295 | struct dma_mapping_ops *ops = get_dma_ops(dev); | 292 | struct dma_map_ops *ops = get_dma_ops(dev); |
296 | 293 | ||
297 | WARN_ON(irqs_disabled()); /* for portability */ | 294 | WARN_ON(irqs_disabled()); /* for portability */ |
298 | 295 | ||
299 | if (dma_release_from_coherent(dev, get_order(size), vaddr)) | 296 | if (dma_release_from_coherent(dev, get_order(size), vaddr)) |
300 | return; | 297 | return; |
301 | 298 | ||
299 | debug_dma_free_coherent(dev, size, vaddr, bus); | ||
302 | if (ops->free_coherent) | 300 | if (ops->free_coherent) |
303 | ops->free_coherent(dev, size, vaddr, bus); | 301 | ops->free_coherent(dev, size, vaddr, bus); |
304 | } | 302 | } |
diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h index a6ee9e6f530f..af326a2975b5 100644 --- a/arch/x86/include/asm/iommu.h +++ b/arch/x86/include/asm/iommu.h | |||
@@ -3,7 +3,7 @@ | |||
3 | 3 | ||
4 | extern void pci_iommu_shutdown(void); | 4 | extern void pci_iommu_shutdown(void); |
5 | extern void no_iommu_init(void); | 5 | extern void no_iommu_init(void); |
6 | extern struct dma_mapping_ops nommu_dma_ops; | 6 | extern struct dma_map_ops nommu_dma_ops; |
7 | extern int force_iommu, no_iommu; | 7 | extern int force_iommu, no_iommu; |
8 | extern int iommu_detected; | 8 | extern int iommu_detected; |
9 | 9 | ||
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 6e9c1f320acf..c611ad64137f 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile | |||
@@ -105,7 +105,7 @@ obj-$(CONFIG_MICROCODE) += microcode.o | |||
105 | 105 | ||
106 | obj-$(CONFIG_X86_CHECK_BIOS_CORRUPTION) += check.o | 106 | obj-$(CONFIG_X86_CHECK_BIOS_CORRUPTION) += check.o |
107 | 107 | ||
108 | obj-$(CONFIG_SWIOTLB) += pci-swiotlb_64.o # NB rename without _64 | 108 | obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o |
109 | 109 | ||
110 | ### | 110 | ### |
111 | # 64 bit specific files | 111 | # 64 bit specific files |
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index 5113c080f0c4..c5962fe3796f 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c | |||
@@ -22,10 +22,9 @@ | |||
22 | #include <linux/bitops.h> | 22 | #include <linux/bitops.h> |
23 | #include <linux/debugfs.h> | 23 | #include <linux/debugfs.h> |
24 | #include <linux/scatterlist.h> | 24 | #include <linux/scatterlist.h> |
25 | #include <linux/dma-mapping.h> | ||
25 | #include <linux/iommu-helper.h> | 26 | #include <linux/iommu-helper.h> |
26 | #ifdef CONFIG_IOMMU_API | ||
27 | #include <linux/iommu.h> | 27 | #include <linux/iommu.h> |
28 | #endif | ||
29 | #include <asm/proto.h> | 28 | #include <asm/proto.h> |
30 | #include <asm/iommu.h> | 29 | #include <asm/iommu.h> |
31 | #include <asm/gart.h> | 30 | #include <asm/gart.h> |
@@ -1297,8 +1296,10 @@ static void __unmap_single(struct amd_iommu *iommu, | |||
1297 | /* | 1296 | /* |
1298 | * The exported map_single function for dma_ops. | 1297 | * The exported map_single function for dma_ops. |
1299 | */ | 1298 | */ |
1300 | static dma_addr_t map_single(struct device *dev, phys_addr_t paddr, | 1299 | static dma_addr_t map_page(struct device *dev, struct page *page, |
1301 | size_t size, int dir) | 1300 | unsigned long offset, size_t size, |
1301 | enum dma_data_direction dir, | ||
1302 | struct dma_attrs *attrs) | ||
1302 | { | 1303 | { |
1303 | unsigned long flags; | 1304 | unsigned long flags; |
1304 | struct amd_iommu *iommu; | 1305 | struct amd_iommu *iommu; |
@@ -1306,6 +1307,7 @@ static dma_addr_t map_single(struct device *dev, phys_addr_t paddr, | |||
1306 | u16 devid; | 1307 | u16 devid; |
1307 | dma_addr_t addr; | 1308 | dma_addr_t addr; |
1308 | u64 dma_mask; | 1309 | u64 dma_mask; |
1310 | phys_addr_t paddr = page_to_phys(page) + offset; | ||
1309 | 1311 | ||
1310 | INC_STATS_COUNTER(cnt_map_single); | 1312 | INC_STATS_COUNTER(cnt_map_single); |
1311 | 1313 | ||
@@ -1340,8 +1342,8 @@ out: | |||
1340 | /* | 1342 | /* |
1341 | * The exported unmap_single function for dma_ops. | 1343 | * The exported unmap_single function for dma_ops. |
1342 | */ | 1344 | */ |
1343 | static void unmap_single(struct device *dev, dma_addr_t dma_addr, | 1345 | static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size, |
1344 | size_t size, int dir) | 1346 | enum dma_data_direction dir, struct dma_attrs *attrs) |
1345 | { | 1347 | { |
1346 | unsigned long flags; | 1348 | unsigned long flags; |
1347 | struct amd_iommu *iommu; | 1349 | struct amd_iommu *iommu; |
@@ -1390,7 +1392,8 @@ static int map_sg_no_iommu(struct device *dev, struct scatterlist *sglist, | |||
1390 | * lists). | 1392 | * lists). |
1391 | */ | 1393 | */ |
1392 | static int map_sg(struct device *dev, struct scatterlist *sglist, | 1394 | static int map_sg(struct device *dev, struct scatterlist *sglist, |
1393 | int nelems, int dir) | 1395 | int nelems, enum dma_data_direction dir, |
1396 | struct dma_attrs *attrs) | ||
1394 | { | 1397 | { |
1395 | unsigned long flags; | 1398 | unsigned long flags; |
1396 | struct amd_iommu *iommu; | 1399 | struct amd_iommu *iommu; |
@@ -1457,7 +1460,8 @@ unmap: | |||
1457 | * lists). | 1460 | * lists). |
1458 | */ | 1461 | */ |
1459 | static void unmap_sg(struct device *dev, struct scatterlist *sglist, | 1462 | static void unmap_sg(struct device *dev, struct scatterlist *sglist, |
1460 | int nelems, int dir) | 1463 | int nelems, enum dma_data_direction dir, |
1464 | struct dma_attrs *attrs) | ||
1461 | { | 1465 | { |
1462 | unsigned long flags; | 1466 | unsigned long flags; |
1463 | struct amd_iommu *iommu; | 1467 | struct amd_iommu *iommu; |
@@ -1644,11 +1648,11 @@ static void prealloc_protection_domains(void) | |||
1644 | } | 1648 | } |
1645 | } | 1649 | } |
1646 | 1650 | ||
1647 | static struct dma_mapping_ops amd_iommu_dma_ops = { | 1651 | static struct dma_map_ops amd_iommu_dma_ops = { |
1648 | .alloc_coherent = alloc_coherent, | 1652 | .alloc_coherent = alloc_coherent, |
1649 | .free_coherent = free_coherent, | 1653 | .free_coherent = free_coherent, |
1650 | .map_single = map_single, | 1654 | .map_page = map_page, |
1651 | .unmap_single = unmap_single, | 1655 | .unmap_page = unmap_page, |
1652 | .map_sg = map_sg, | 1656 | .map_sg = map_sg, |
1653 | .unmap_sg = unmap_sg, | 1657 | .unmap_sg = unmap_sg, |
1654 | .dma_supported = amd_iommu_dma_supported, | 1658 | .dma_supported = amd_iommu_dma_supported, |
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c index d28bbdc35e4e..755c21e906f3 100644 --- a/arch/x86/kernel/pci-calgary_64.c +++ b/arch/x86/kernel/pci-calgary_64.c | |||
@@ -380,8 +380,9 @@ static inline struct iommu_table *find_iommu_table(struct device *dev) | |||
380 | return tbl; | 380 | return tbl; |
381 | } | 381 | } |
382 | 382 | ||
383 | static void calgary_unmap_sg(struct device *dev, | 383 | static void calgary_unmap_sg(struct device *dev, struct scatterlist *sglist, |
384 | struct scatterlist *sglist, int nelems, int direction) | 384 | int nelems,enum dma_data_direction dir, |
385 | struct dma_attrs *attrs) | ||
385 | { | 386 | { |
386 | struct iommu_table *tbl = find_iommu_table(dev); | 387 | struct iommu_table *tbl = find_iommu_table(dev); |
387 | struct scatterlist *s; | 388 | struct scatterlist *s; |
@@ -404,7 +405,8 @@ static void calgary_unmap_sg(struct device *dev, | |||
404 | } | 405 | } |
405 | 406 | ||
406 | static int calgary_map_sg(struct device *dev, struct scatterlist *sg, | 407 | static int calgary_map_sg(struct device *dev, struct scatterlist *sg, |
407 | int nelems, int direction) | 408 | int nelems, enum dma_data_direction dir, |
409 | struct dma_attrs *attrs) | ||
408 | { | 410 | { |
409 | struct iommu_table *tbl = find_iommu_table(dev); | 411 | struct iommu_table *tbl = find_iommu_table(dev); |
410 | struct scatterlist *s; | 412 | struct scatterlist *s; |
@@ -429,15 +431,14 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg, | |||
429 | s->dma_address = (entry << PAGE_SHIFT) | s->offset; | 431 | s->dma_address = (entry << PAGE_SHIFT) | s->offset; |
430 | 432 | ||
431 | /* insert into HW table */ | 433 | /* insert into HW table */ |
432 | tce_build(tbl, entry, npages, vaddr & PAGE_MASK, | 434 | tce_build(tbl, entry, npages, vaddr & PAGE_MASK, dir); |
433 | direction); | ||
434 | 435 | ||
435 | s->dma_length = s->length; | 436 | s->dma_length = s->length; |
436 | } | 437 | } |
437 | 438 | ||
438 | return nelems; | 439 | return nelems; |
439 | error: | 440 | error: |
440 | calgary_unmap_sg(dev, sg, nelems, direction); | 441 | calgary_unmap_sg(dev, sg, nelems, dir, NULL); |
441 | for_each_sg(sg, s, nelems, i) { | 442 | for_each_sg(sg, s, nelems, i) { |
442 | sg->dma_address = bad_dma_address; | 443 | sg->dma_address = bad_dma_address; |
443 | sg->dma_length = 0; | 444 | sg->dma_length = 0; |
@@ -445,10 +446,12 @@ error: | |||
445 | return 0; | 446 | return 0; |
446 | } | 447 | } |
447 | 448 | ||
448 | static dma_addr_t calgary_map_single(struct device *dev, phys_addr_t paddr, | 449 | static dma_addr_t calgary_map_page(struct device *dev, struct page *page, |
449 | size_t size, int direction) | 450 | unsigned long offset, size_t size, |
451 | enum dma_data_direction dir, | ||
452 | struct dma_attrs *attrs) | ||
450 | { | 453 | { |
451 | void *vaddr = phys_to_virt(paddr); | 454 | void *vaddr = page_address(page) + offset; |
452 | unsigned long uaddr; | 455 | unsigned long uaddr; |
453 | unsigned int npages; | 456 | unsigned int npages; |
454 | struct iommu_table *tbl = find_iommu_table(dev); | 457 | struct iommu_table *tbl = find_iommu_table(dev); |
@@ -456,17 +459,18 @@ static dma_addr_t calgary_map_single(struct device *dev, phys_addr_t paddr, | |||
456 | uaddr = (unsigned long)vaddr; | 459 | uaddr = (unsigned long)vaddr; |
457 | npages = iommu_num_pages(uaddr, size, PAGE_SIZE); | 460 | npages = iommu_num_pages(uaddr, size, PAGE_SIZE); |
458 | 461 | ||
459 | return iommu_alloc(dev, tbl, vaddr, npages, direction); | 462 | return iommu_alloc(dev, tbl, vaddr, npages, dir); |
460 | } | 463 | } |
461 | 464 | ||
462 | static void calgary_unmap_single(struct device *dev, dma_addr_t dma_handle, | 465 | static void calgary_unmap_page(struct device *dev, dma_addr_t dma_addr, |
463 | size_t size, int direction) | 466 | size_t size, enum dma_data_direction dir, |
467 | struct dma_attrs *attrs) | ||
464 | { | 468 | { |
465 | struct iommu_table *tbl = find_iommu_table(dev); | 469 | struct iommu_table *tbl = find_iommu_table(dev); |
466 | unsigned int npages; | 470 | unsigned int npages; |
467 | 471 | ||
468 | npages = iommu_num_pages(dma_handle, size, PAGE_SIZE); | 472 | npages = iommu_num_pages(dma_addr, size, PAGE_SIZE); |
469 | iommu_free(tbl, dma_handle, npages); | 473 | iommu_free(tbl, dma_addr, npages); |
470 | } | 474 | } |
471 | 475 | ||
472 | static void* calgary_alloc_coherent(struct device *dev, size_t size, | 476 | static void* calgary_alloc_coherent(struct device *dev, size_t size, |
@@ -515,13 +519,13 @@ static void calgary_free_coherent(struct device *dev, size_t size, | |||
515 | free_pages((unsigned long)vaddr, get_order(size)); | 519 | free_pages((unsigned long)vaddr, get_order(size)); |
516 | } | 520 | } |
517 | 521 | ||
518 | static struct dma_mapping_ops calgary_dma_ops = { | 522 | static struct dma_map_ops calgary_dma_ops = { |
519 | .alloc_coherent = calgary_alloc_coherent, | 523 | .alloc_coherent = calgary_alloc_coherent, |
520 | .free_coherent = calgary_free_coherent, | 524 | .free_coherent = calgary_free_coherent, |
521 | .map_single = calgary_map_single, | ||
522 | .unmap_single = calgary_unmap_single, | ||
523 | .map_sg = calgary_map_sg, | 525 | .map_sg = calgary_map_sg, |
524 | .unmap_sg = calgary_unmap_sg, | 526 | .unmap_sg = calgary_unmap_sg, |
527 | .map_page = calgary_map_page, | ||
528 | .unmap_page = calgary_unmap_page, | ||
525 | }; | 529 | }; |
526 | 530 | ||
527 | static inline void __iomem * busno_to_bbar(unsigned char num) | 531 | static inline void __iomem * busno_to_bbar(unsigned char num) |
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c index b25428533141..c7c4776ff630 100644 --- a/arch/x86/kernel/pci-dma.c +++ b/arch/x86/kernel/pci-dma.c | |||
@@ -1,4 +1,5 @@ | |||
1 | #include <linux/dma-mapping.h> | 1 | #include <linux/dma-mapping.h> |
2 | #include <linux/dma-debug.h> | ||
2 | #include <linux/dmar.h> | 3 | #include <linux/dmar.h> |
3 | #include <linux/bootmem.h> | 4 | #include <linux/bootmem.h> |
4 | #include <linux/pci.h> | 5 | #include <linux/pci.h> |
@@ -12,7 +13,7 @@ | |||
12 | 13 | ||
13 | static int forbid_dac __read_mostly; | 14 | static int forbid_dac __read_mostly; |
14 | 15 | ||
15 | struct dma_mapping_ops *dma_ops; | 16 | struct dma_map_ops *dma_ops; |
16 | EXPORT_SYMBOL(dma_ops); | 17 | EXPORT_SYMBOL(dma_ops); |
17 | 18 | ||
18 | static int iommu_sac_force __read_mostly; | 19 | static int iommu_sac_force __read_mostly; |
@@ -44,6 +45,9 @@ struct device x86_dma_fallback_dev = { | |||
44 | }; | 45 | }; |
45 | EXPORT_SYMBOL(x86_dma_fallback_dev); | 46 | EXPORT_SYMBOL(x86_dma_fallback_dev); |
46 | 47 | ||
48 | /* Number of entries preallocated for DMA-API debugging */ | ||
49 | #define PREALLOC_DMA_DEBUG_ENTRIES 32768 | ||
50 | |||
47 | int dma_set_mask(struct device *dev, u64 mask) | 51 | int dma_set_mask(struct device *dev, u64 mask) |
48 | { | 52 | { |
49 | if (!dev->dma_mask || !dma_supported(dev, mask)) | 53 | if (!dev->dma_mask || !dma_supported(dev, mask)) |
@@ -224,7 +228,7 @@ early_param("iommu", iommu_setup); | |||
224 | 228 | ||
225 | int dma_supported(struct device *dev, u64 mask) | 229 | int dma_supported(struct device *dev, u64 mask) |
226 | { | 230 | { |
227 | struct dma_mapping_ops *ops = get_dma_ops(dev); | 231 | struct dma_map_ops *ops = get_dma_ops(dev); |
228 | 232 | ||
229 | #ifdef CONFIG_PCI | 233 | #ifdef CONFIG_PCI |
230 | if (mask > 0xffffffff && forbid_dac > 0) { | 234 | if (mask > 0xffffffff && forbid_dac > 0) { |
@@ -265,6 +269,12 @@ EXPORT_SYMBOL(dma_supported); | |||
265 | 269 | ||
266 | static int __init pci_iommu_init(void) | 270 | static int __init pci_iommu_init(void) |
267 | { | 271 | { |
272 | dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); | ||
273 | |||
274 | #ifdef CONFIG_PCI | ||
275 | dma_debug_add_bus(&pci_bus_type); | ||
276 | #endif | ||
277 | |||
268 | calgary_iommu_init(); | 278 | calgary_iommu_init(); |
269 | 279 | ||
270 | intel_iommu_init(); | 280 | intel_iommu_init(); |
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c index d5768b1af080..b284b58c035c 100644 --- a/arch/x86/kernel/pci-gart_64.c +++ b/arch/x86/kernel/pci-gart_64.c | |||
@@ -255,10 +255,13 @@ static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem, | |||
255 | } | 255 | } |
256 | 256 | ||
257 | /* Map a single area into the IOMMU */ | 257 | /* Map a single area into the IOMMU */ |
258 | static dma_addr_t | 258 | static dma_addr_t gart_map_page(struct device *dev, struct page *page, |
259 | gart_map_single(struct device *dev, phys_addr_t paddr, size_t size, int dir) | 259 | unsigned long offset, size_t size, |
260 | enum dma_data_direction dir, | ||
261 | struct dma_attrs *attrs) | ||
260 | { | 262 | { |
261 | unsigned long bus; | 263 | unsigned long bus; |
264 | phys_addr_t paddr = page_to_phys(page) + offset; | ||
262 | 265 | ||
263 | if (!dev) | 266 | if (!dev) |
264 | dev = &x86_dma_fallback_dev; | 267 | dev = &x86_dma_fallback_dev; |
@@ -275,8 +278,9 @@ gart_map_single(struct device *dev, phys_addr_t paddr, size_t size, int dir) | |||
275 | /* | 278 | /* |
276 | * Free a DMA mapping. | 279 | * Free a DMA mapping. |
277 | */ | 280 | */ |
278 | static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr, | 281 | static void gart_unmap_page(struct device *dev, dma_addr_t dma_addr, |
279 | size_t size, int direction) | 282 | size_t size, enum dma_data_direction dir, |
283 | struct dma_attrs *attrs) | ||
280 | { | 284 | { |
281 | unsigned long iommu_page; | 285 | unsigned long iommu_page; |
282 | int npages; | 286 | int npages; |
@@ -298,8 +302,8 @@ static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr, | |||
298 | /* | 302 | /* |
299 | * Wrapper for pci_unmap_single working with scatterlists. | 303 | * Wrapper for pci_unmap_single working with scatterlists. |
300 | */ | 304 | */ |
301 | static void | 305 | static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, |
302 | gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir) | 306 | enum dma_data_direction dir, struct dma_attrs *attrs) |
303 | { | 307 | { |
304 | struct scatterlist *s; | 308 | struct scatterlist *s; |
305 | int i; | 309 | int i; |
@@ -307,7 +311,7 @@ gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir) | |||
307 | for_each_sg(sg, s, nents, i) { | 311 | for_each_sg(sg, s, nents, i) { |
308 | if (!s->dma_length || !s->length) | 312 | if (!s->dma_length || !s->length) |
309 | break; | 313 | break; |
310 | gart_unmap_single(dev, s->dma_address, s->dma_length, dir); | 314 | gart_unmap_page(dev, s->dma_address, s->dma_length, dir, NULL); |
311 | } | 315 | } |
312 | } | 316 | } |
313 | 317 | ||
@@ -329,7 +333,7 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg, | |||
329 | addr = dma_map_area(dev, addr, s->length, dir, 0); | 333 | addr = dma_map_area(dev, addr, s->length, dir, 0); |
330 | if (addr == bad_dma_address) { | 334 | if (addr == bad_dma_address) { |
331 | if (i > 0) | 335 | if (i > 0) |
332 | gart_unmap_sg(dev, sg, i, dir); | 336 | gart_unmap_sg(dev, sg, i, dir, NULL); |
333 | nents = 0; | 337 | nents = 0; |
334 | sg[0].dma_length = 0; | 338 | sg[0].dma_length = 0; |
335 | break; | 339 | break; |
@@ -400,8 +404,8 @@ dma_map_cont(struct device *dev, struct scatterlist *start, int nelems, | |||
400 | * DMA map all entries in a scatterlist. | 404 | * DMA map all entries in a scatterlist. |
401 | * Merge chunks that have page aligned sizes into a continuous mapping. | 405 | * Merge chunks that have page aligned sizes into a continuous mapping. |
402 | */ | 406 | */ |
403 | static int | 407 | static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, |
404 | gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir) | 408 | enum dma_data_direction dir, struct dma_attrs *attrs) |
405 | { | 409 | { |
406 | struct scatterlist *s, *ps, *start_sg, *sgmap; | 410 | struct scatterlist *s, *ps, *start_sg, *sgmap; |
407 | int need = 0, nextneed, i, out, start; | 411 | int need = 0, nextneed, i, out, start; |
@@ -468,7 +472,7 @@ gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir) | |||
468 | 472 | ||
469 | error: | 473 | error: |
470 | flush_gart(); | 474 | flush_gart(); |
471 | gart_unmap_sg(dev, sg, out, dir); | 475 | gart_unmap_sg(dev, sg, out, dir, NULL); |
472 | 476 | ||
473 | /* When it was forced or merged try again in a dumb way */ | 477 | /* When it was forced or merged try again in a dumb way */ |
474 | if (force_iommu || iommu_merge) { | 478 | if (force_iommu || iommu_merge) { |
@@ -521,7 +525,7 @@ static void | |||
521 | gart_free_coherent(struct device *dev, size_t size, void *vaddr, | 525 | gart_free_coherent(struct device *dev, size_t size, void *vaddr, |
522 | dma_addr_t dma_addr) | 526 | dma_addr_t dma_addr) |
523 | { | 527 | { |
524 | gart_unmap_single(dev, dma_addr, size, DMA_BIDIRECTIONAL); | 528 | gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, NULL); |
525 | free_pages((unsigned long)vaddr, get_order(size)); | 529 | free_pages((unsigned long)vaddr, get_order(size)); |
526 | } | 530 | } |
527 | 531 | ||
@@ -707,11 +711,11 @@ static __init int init_k8_gatt(struct agp_kern_info *info) | |||
707 | return -1; | 711 | return -1; |
708 | } | 712 | } |
709 | 713 | ||
710 | static struct dma_mapping_ops gart_dma_ops = { | 714 | static struct dma_map_ops gart_dma_ops = { |
711 | .map_single = gart_map_single, | ||
712 | .unmap_single = gart_unmap_single, | ||
713 | .map_sg = gart_map_sg, | 715 | .map_sg = gart_map_sg, |
714 | .unmap_sg = gart_unmap_sg, | 716 | .unmap_sg = gart_unmap_sg, |
717 | .map_page = gart_map_page, | ||
718 | .unmap_page = gart_unmap_page, | ||
715 | .alloc_coherent = gart_alloc_coherent, | 719 | .alloc_coherent = gart_alloc_coherent, |
716 | .free_coherent = gart_free_coherent, | 720 | .free_coherent = gart_free_coherent, |
717 | }; | 721 | }; |
diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c index 8b02a3936d42..c6d703b39326 100644 --- a/arch/x86/kernel/pci-nommu.c +++ b/arch/x86/kernel/pci-nommu.c | |||
@@ -25,19 +25,19 @@ check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size) | |||
25 | return 1; | 25 | return 1; |
26 | } | 26 | } |
27 | 27 | ||
28 | static dma_addr_t | 28 | static dma_addr_t nommu_map_page(struct device *dev, struct page *page, |
29 | nommu_map_single(struct device *hwdev, phys_addr_t paddr, size_t size, | 29 | unsigned long offset, size_t size, |
30 | int direction) | 30 | enum dma_data_direction dir, |
31 | struct dma_attrs *attrs) | ||
31 | { | 32 | { |
32 | dma_addr_t bus = paddr; | 33 | dma_addr_t bus = page_to_phys(page) + offset; |
33 | WARN_ON(size == 0); | 34 | WARN_ON(size == 0); |
34 | if (!check_addr("map_single", hwdev, bus, size)) | 35 | if (!check_addr("map_single", dev, bus, size)) |
35 | return bad_dma_address; | 36 | return bad_dma_address; |
36 | flush_write_buffers(); | 37 | flush_write_buffers(); |
37 | return bus; | 38 | return bus; |
38 | } | 39 | } |
39 | 40 | ||
40 | |||
41 | /* Map a set of buffers described by scatterlist in streaming | 41 | /* Map a set of buffers described by scatterlist in streaming |
42 | * mode for DMA. This is the scatter-gather version of the | 42 | * mode for DMA. This is the scatter-gather version of the |
43 | * above pci_map_single interface. Here the scatter gather list | 43 | * above pci_map_single interface. Here the scatter gather list |
@@ -54,7 +54,8 @@ nommu_map_single(struct device *hwdev, phys_addr_t paddr, size_t size, | |||
54 | * the same here. | 54 | * the same here. |
55 | */ | 55 | */ |
56 | static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg, | 56 | static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg, |
57 | int nents, int direction) | 57 | int nents, enum dma_data_direction dir, |
58 | struct dma_attrs *attrs) | ||
58 | { | 59 | { |
59 | struct scatterlist *s; | 60 | struct scatterlist *s; |
60 | int i; | 61 | int i; |
@@ -78,11 +79,11 @@ static void nommu_free_coherent(struct device *dev, size_t size, void *vaddr, | |||
78 | free_pages((unsigned long)vaddr, get_order(size)); | 79 | free_pages((unsigned long)vaddr, get_order(size)); |
79 | } | 80 | } |
80 | 81 | ||
81 | struct dma_mapping_ops nommu_dma_ops = { | 82 | struct dma_map_ops nommu_dma_ops = { |
82 | .alloc_coherent = dma_generic_alloc_coherent, | 83 | .alloc_coherent = dma_generic_alloc_coherent, |
83 | .free_coherent = nommu_free_coherent, | 84 | .free_coherent = nommu_free_coherent, |
84 | .map_single = nommu_map_single, | ||
85 | .map_sg = nommu_map_sg, | 85 | .map_sg = nommu_map_sg, |
86 | .map_page = nommu_map_page, | ||
86 | .is_phys = 1, | 87 | .is_phys = 1, |
87 | }; | 88 | }; |
88 | 89 | ||
diff --git a/arch/x86/kernel/pci-swiotlb_64.c b/arch/x86/kernel/pci-swiotlb.c index d59c91747665..34f12e9996ed 100644 --- a/arch/x86/kernel/pci-swiotlb_64.c +++ b/arch/x86/kernel/pci-swiotlb.c | |||
@@ -33,18 +33,11 @@ phys_addr_t swiotlb_bus_to_phys(dma_addr_t baddr) | |||
33 | return baddr; | 33 | return baddr; |
34 | } | 34 | } |
35 | 35 | ||
36 | int __weak swiotlb_arch_range_needs_mapping(void *ptr, size_t size) | 36 | int __weak swiotlb_arch_range_needs_mapping(phys_addr_t paddr, size_t size) |
37 | { | 37 | { |
38 | return 0; | 38 | return 0; |
39 | } | 39 | } |
40 | 40 | ||
41 | static dma_addr_t | ||
42 | swiotlb_map_single_phys(struct device *hwdev, phys_addr_t paddr, size_t size, | ||
43 | int direction) | ||
44 | { | ||
45 | return swiotlb_map_single(hwdev, phys_to_virt(paddr), size, direction); | ||
46 | } | ||
47 | |||
48 | static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size, | 41 | static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size, |
49 | dma_addr_t *dma_handle, gfp_t flags) | 42 | dma_addr_t *dma_handle, gfp_t flags) |
50 | { | 43 | { |
@@ -57,20 +50,20 @@ static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size, | |||
57 | return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags); | 50 | return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags); |
58 | } | 51 | } |
59 | 52 | ||
60 | struct dma_mapping_ops swiotlb_dma_ops = { | 53 | struct dma_map_ops swiotlb_dma_ops = { |
61 | .mapping_error = swiotlb_dma_mapping_error, | 54 | .mapping_error = swiotlb_dma_mapping_error, |
62 | .alloc_coherent = x86_swiotlb_alloc_coherent, | 55 | .alloc_coherent = x86_swiotlb_alloc_coherent, |
63 | .free_coherent = swiotlb_free_coherent, | 56 | .free_coherent = swiotlb_free_coherent, |
64 | .map_single = swiotlb_map_single_phys, | ||
65 | .unmap_single = swiotlb_unmap_single, | ||
66 | .sync_single_for_cpu = swiotlb_sync_single_for_cpu, | 57 | .sync_single_for_cpu = swiotlb_sync_single_for_cpu, |
67 | .sync_single_for_device = swiotlb_sync_single_for_device, | 58 | .sync_single_for_device = swiotlb_sync_single_for_device, |
68 | .sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu, | 59 | .sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu, |
69 | .sync_single_range_for_device = swiotlb_sync_single_range_for_device, | 60 | .sync_single_range_for_device = swiotlb_sync_single_range_for_device, |
70 | .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu, | 61 | .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu, |
71 | .sync_sg_for_device = swiotlb_sync_sg_for_device, | 62 | .sync_sg_for_device = swiotlb_sync_sg_for_device, |
72 | .map_sg = swiotlb_map_sg, | 63 | .map_sg = swiotlb_map_sg_attrs, |
73 | .unmap_sg = swiotlb_unmap_sg, | 64 | .unmap_sg = swiotlb_unmap_sg_attrs, |
65 | .map_page = swiotlb_map_page, | ||
66 | .unmap_page = swiotlb_unmap_page, | ||
74 | .dma_supported = NULL, | 67 | .dma_supported = NULL, |
75 | }; | 68 | }; |
76 | 69 | ||
diff --git a/drivers/base/iommu.c b/drivers/base/iommu.c index 5e039d4f877c..c2d1eed90376 100644 --- a/drivers/base/iommu.c +++ b/drivers/base/iommu.c | |||
@@ -31,7 +31,7 @@ void register_iommu(struct iommu_ops *ops) | |||
31 | iommu_ops = ops; | 31 | iommu_ops = ops; |
32 | } | 32 | } |
33 | 33 | ||
34 | bool iommu_found() | 34 | bool iommu_found(void) |
35 | { | 35 | { |
36 | return iommu_ops != NULL; | 36 | return iommu_ops != NULL; |
37 | } | 37 | } |
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c index ef167b8b047d..49402c399232 100644 --- a/drivers/pci/intel-iommu.c +++ b/drivers/pci/intel-iommu.c | |||
@@ -2124,11 +2124,13 @@ error: | |||
2124 | return 0; | 2124 | return 0; |
2125 | } | 2125 | } |
2126 | 2126 | ||
2127 | dma_addr_t intel_map_single(struct device *hwdev, phys_addr_t paddr, | 2127 | static dma_addr_t intel_map_page(struct device *dev, struct page *page, |
2128 | size_t size, int dir) | 2128 | unsigned long offset, size_t size, |
2129 | enum dma_data_direction dir, | ||
2130 | struct dma_attrs *attrs) | ||
2129 | { | 2131 | { |
2130 | return __intel_map_single(hwdev, paddr, size, dir, | 2132 | return __intel_map_single(dev, page_to_phys(page) + offset, size, |
2131 | to_pci_dev(hwdev)->dma_mask); | 2133 | dir, to_pci_dev(dev)->dma_mask); |
2132 | } | 2134 | } |
2133 | 2135 | ||
2134 | static void flush_unmaps(void) | 2136 | static void flush_unmaps(void) |
@@ -2192,8 +2194,9 @@ static void add_unmap(struct dmar_domain *dom, struct iova *iova) | |||
2192 | spin_unlock_irqrestore(&async_umap_flush_lock, flags); | 2194 | spin_unlock_irqrestore(&async_umap_flush_lock, flags); |
2193 | } | 2195 | } |
2194 | 2196 | ||
2195 | void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size, | 2197 | static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, |
2196 | int dir) | 2198 | size_t size, enum dma_data_direction dir, |
2199 | struct dma_attrs *attrs) | ||
2197 | { | 2200 | { |
2198 | struct pci_dev *pdev = to_pci_dev(dev); | 2201 | struct pci_dev *pdev = to_pci_dev(dev); |
2199 | struct dmar_domain *domain; | 2202 | struct dmar_domain *domain; |
@@ -2237,8 +2240,14 @@ void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size, | |||
2237 | } | 2240 | } |
2238 | } | 2241 | } |
2239 | 2242 | ||
2240 | void *intel_alloc_coherent(struct device *hwdev, size_t size, | 2243 | static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size, |
2241 | dma_addr_t *dma_handle, gfp_t flags) | 2244 | int dir) |
2245 | { | ||
2246 | intel_unmap_page(dev, dev_addr, size, dir, NULL); | ||
2247 | } | ||
2248 | |||
2249 | static void *intel_alloc_coherent(struct device *hwdev, size_t size, | ||
2250 | dma_addr_t *dma_handle, gfp_t flags) | ||
2242 | { | 2251 | { |
2243 | void *vaddr; | 2252 | void *vaddr; |
2244 | int order; | 2253 | int order; |
@@ -2261,8 +2270,8 @@ void *intel_alloc_coherent(struct device *hwdev, size_t size, | |||
2261 | return NULL; | 2270 | return NULL; |
2262 | } | 2271 | } |
2263 | 2272 | ||
2264 | void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr, | 2273 | static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr, |
2265 | dma_addr_t dma_handle) | 2274 | dma_addr_t dma_handle) |
2266 | { | 2275 | { |
2267 | int order; | 2276 | int order; |
2268 | 2277 | ||
@@ -2275,8 +2284,9 @@ void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr, | |||
2275 | 2284 | ||
2276 | #define SG_ENT_VIRT_ADDRESS(sg) (sg_virt((sg))) | 2285 | #define SG_ENT_VIRT_ADDRESS(sg) (sg_virt((sg))) |
2277 | 2286 | ||
2278 | void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, | 2287 | static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, |
2279 | int nelems, int dir) | 2288 | int nelems, enum dma_data_direction dir, |
2289 | struct dma_attrs *attrs) | ||
2280 | { | 2290 | { |
2281 | int i; | 2291 | int i; |
2282 | struct pci_dev *pdev = to_pci_dev(hwdev); | 2292 | struct pci_dev *pdev = to_pci_dev(hwdev); |
@@ -2333,8 +2343,8 @@ static int intel_nontranslate_map_sg(struct device *hddev, | |||
2333 | return nelems; | 2343 | return nelems; |
2334 | } | 2344 | } |
2335 | 2345 | ||
2336 | int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, | 2346 | static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, |
2337 | int dir) | 2347 | enum dma_data_direction dir, struct dma_attrs *attrs) |
2338 | { | 2348 | { |
2339 | void *addr; | 2349 | void *addr; |
2340 | int i; | 2350 | int i; |
@@ -2414,13 +2424,19 @@ int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, | |||
2414 | return nelems; | 2424 | return nelems; |
2415 | } | 2425 | } |
2416 | 2426 | ||
2417 | static struct dma_mapping_ops intel_dma_ops = { | 2427 | static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr) |
2428 | { | ||
2429 | return !dma_addr; | ||
2430 | } | ||
2431 | |||
2432 | struct dma_map_ops intel_dma_ops = { | ||
2418 | .alloc_coherent = intel_alloc_coherent, | 2433 | .alloc_coherent = intel_alloc_coherent, |
2419 | .free_coherent = intel_free_coherent, | 2434 | .free_coherent = intel_free_coherent, |
2420 | .map_single = intel_map_single, | ||
2421 | .unmap_single = intel_unmap_single, | ||
2422 | .map_sg = intel_map_sg, | 2435 | .map_sg = intel_map_sg, |
2423 | .unmap_sg = intel_unmap_sg, | 2436 | .unmap_sg = intel_unmap_sg, |
2437 | .map_page = intel_map_page, | ||
2438 | .unmap_page = intel_unmap_page, | ||
2439 | .mapping_error = intel_mapping_error, | ||
2424 | }; | 2440 | }; |
2425 | 2441 | ||
2426 | static inline int iommu_domain_cache_init(void) | 2442 | static inline int iommu_domain_cache_init(void) |
diff --git a/include/linux/dma-debug.h b/include/linux/dma-debug.h new file mode 100644 index 000000000000..28d53cb7b5a2 --- /dev/null +++ b/include/linux/dma-debug.h | |||
@@ -0,0 +1,174 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2008 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Author: Joerg Roedel <joerg.roedel@amd.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License version 2 as published | ||
8 | * by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the Free Software | ||
17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
18 | */ | ||
19 | |||
20 | #ifndef __DMA_DEBUG_H | ||
21 | #define __DMA_DEBUG_H | ||
22 | |||
23 | #include <linux/types.h> | ||
24 | |||
25 | struct device; | ||
26 | struct scatterlist; | ||
27 | struct bus_type; | ||
28 | |||
29 | #ifdef CONFIG_DMA_API_DEBUG | ||
30 | |||
31 | extern void dma_debug_add_bus(struct bus_type *bus); | ||
32 | |||
33 | extern void dma_debug_init(u32 num_entries); | ||
34 | |||
35 | extern void debug_dma_map_page(struct device *dev, struct page *page, | ||
36 | size_t offset, size_t size, | ||
37 | int direction, dma_addr_t dma_addr, | ||
38 | bool map_single); | ||
39 | |||
40 | extern void debug_dma_unmap_page(struct device *dev, dma_addr_t addr, | ||
41 | size_t size, int direction, bool map_single); | ||
42 | |||
43 | extern void debug_dma_map_sg(struct device *dev, struct scatterlist *sg, | ||
44 | int nents, int mapped_ents, int direction); | ||
45 | |||
46 | extern void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, | ||
47 | int nelems, int dir); | ||
48 | |||
49 | extern void debug_dma_alloc_coherent(struct device *dev, size_t size, | ||
50 | dma_addr_t dma_addr, void *virt); | ||
51 | |||
52 | extern void debug_dma_free_coherent(struct device *dev, size_t size, | ||
53 | void *virt, dma_addr_t addr); | ||
54 | |||
55 | extern void debug_dma_sync_single_for_cpu(struct device *dev, | ||
56 | dma_addr_t dma_handle, size_t size, | ||
57 | int direction); | ||
58 | |||
59 | extern void debug_dma_sync_single_for_device(struct device *dev, | ||
60 | dma_addr_t dma_handle, | ||
61 | size_t size, int direction); | ||
62 | |||
63 | extern void debug_dma_sync_single_range_for_cpu(struct device *dev, | ||
64 | dma_addr_t dma_handle, | ||
65 | unsigned long offset, | ||
66 | size_t size, | ||
67 | int direction); | ||
68 | |||
69 | extern void debug_dma_sync_single_range_for_device(struct device *dev, | ||
70 | dma_addr_t dma_handle, | ||
71 | unsigned long offset, | ||
72 | size_t size, int direction); | ||
73 | |||
74 | extern void debug_dma_sync_sg_for_cpu(struct device *dev, | ||
75 | struct scatterlist *sg, | ||
76 | int nelems, int direction); | ||
77 | |||
78 | extern void debug_dma_sync_sg_for_device(struct device *dev, | ||
79 | struct scatterlist *sg, | ||
80 | int nelems, int direction); | ||
81 | |||
82 | extern void debug_dma_dump_mappings(struct device *dev); | ||
83 | |||
84 | #else /* CONFIG_DMA_API_DEBUG */ | ||
85 | |||
86 | static inline void dma_debug_add_bus(struct bus_type *bus) | ||
87 | { | ||
88 | } | ||
89 | |||
90 | static inline void dma_debug_init(u32 num_entries) | ||
91 | { | ||
92 | } | ||
93 | |||
94 | static inline void debug_dma_map_page(struct device *dev, struct page *page, | ||
95 | size_t offset, size_t size, | ||
96 | int direction, dma_addr_t dma_addr, | ||
97 | bool map_single) | ||
98 | { | ||
99 | } | ||
100 | |||
101 | static inline void debug_dma_unmap_page(struct device *dev, dma_addr_t addr, | ||
102 | size_t size, int direction, | ||
103 | bool map_single) | ||
104 | { | ||
105 | } | ||
106 | |||
107 | static inline void debug_dma_map_sg(struct device *dev, struct scatterlist *sg, | ||
108 | int nents, int mapped_ents, int direction) | ||
109 | { | ||
110 | } | ||
111 | |||
112 | static inline void debug_dma_unmap_sg(struct device *dev, | ||
113 | struct scatterlist *sglist, | ||
114 | int nelems, int dir) | ||
115 | { | ||
116 | } | ||
117 | |||
118 | static inline void debug_dma_alloc_coherent(struct device *dev, size_t size, | ||
119 | dma_addr_t dma_addr, void *virt) | ||
120 | { | ||
121 | } | ||
122 | |||
123 | static inline void debug_dma_free_coherent(struct device *dev, size_t size, | ||
124 | void *virt, dma_addr_t addr) | ||
125 | { | ||
126 | } | ||
127 | |||
128 | static inline void debug_dma_sync_single_for_cpu(struct device *dev, | ||
129 | dma_addr_t dma_handle, | ||
130 | size_t size, int direction) | ||
131 | { | ||
132 | } | ||
133 | |||
134 | static inline void debug_dma_sync_single_for_device(struct device *dev, | ||
135 | dma_addr_t dma_handle, | ||
136 | size_t size, int direction) | ||
137 | { | ||
138 | } | ||
139 | |||
140 | static inline void debug_dma_sync_single_range_for_cpu(struct device *dev, | ||
141 | dma_addr_t dma_handle, | ||
142 | unsigned long offset, | ||
143 | size_t size, | ||
144 | int direction) | ||
145 | { | ||
146 | } | ||
147 | |||
148 | static inline void debug_dma_sync_single_range_for_device(struct device *dev, | ||
149 | dma_addr_t dma_handle, | ||
150 | unsigned long offset, | ||
151 | size_t size, | ||
152 | int direction) | ||
153 | { | ||
154 | } | ||
155 | |||
156 | static inline void debug_dma_sync_sg_for_cpu(struct device *dev, | ||
157 | struct scatterlist *sg, | ||
158 | int nelems, int direction) | ||
159 | { | ||
160 | } | ||
161 | |||
162 | static inline void debug_dma_sync_sg_for_device(struct device *dev, | ||
163 | struct scatterlist *sg, | ||
164 | int nelems, int direction) | ||
165 | { | ||
166 | } | ||
167 | |||
168 | static inline void debug_dma_dump_mappings(struct device *dev) | ||
169 | { | ||
170 | } | ||
171 | |||
172 | #endif /* CONFIG_DMA_API_DEBUG */ | ||
173 | |||
174 | #endif /* __DMA_DEBUG_H */ | ||
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index ba9114ec5d3a..d7d090d21031 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h | |||
@@ -3,6 +3,8 @@ | |||
3 | 3 | ||
4 | #include <linux/device.h> | 4 | #include <linux/device.h> |
5 | #include <linux/err.h> | 5 | #include <linux/err.h> |
6 | #include <linux/dma-attrs.h> | ||
7 | #include <linux/scatterlist.h> | ||
6 | 8 | ||
7 | /* These definitions mirror those in pci.h, so they can be used | 9 | /* These definitions mirror those in pci.h, so they can be used |
8 | * interchangeably with their PCI_ counterparts */ | 10 | * interchangeably with their PCI_ counterparts */ |
@@ -13,6 +15,52 @@ enum dma_data_direction { | |||
13 | DMA_NONE = 3, | 15 | DMA_NONE = 3, |
14 | }; | 16 | }; |
15 | 17 | ||
18 | struct dma_map_ops { | ||
19 | void* (*alloc_coherent)(struct device *dev, size_t size, | ||
20 | dma_addr_t *dma_handle, gfp_t gfp); | ||
21 | void (*free_coherent)(struct device *dev, size_t size, | ||
22 | void *vaddr, dma_addr_t dma_handle); | ||
23 | dma_addr_t (*map_page)(struct device *dev, struct page *page, | ||
24 | unsigned long offset, size_t size, | ||
25 | enum dma_data_direction dir, | ||
26 | struct dma_attrs *attrs); | ||
27 | void (*unmap_page)(struct device *dev, dma_addr_t dma_handle, | ||
28 | size_t size, enum dma_data_direction dir, | ||
29 | struct dma_attrs *attrs); | ||
30 | int (*map_sg)(struct device *dev, struct scatterlist *sg, | ||
31 | int nents, enum dma_data_direction dir, | ||
32 | struct dma_attrs *attrs); | ||
33 | void (*unmap_sg)(struct device *dev, | ||
34 | struct scatterlist *sg, int nents, | ||
35 | enum dma_data_direction dir, | ||
36 | struct dma_attrs *attrs); | ||
37 | void (*sync_single_for_cpu)(struct device *dev, | ||
38 | dma_addr_t dma_handle, size_t size, | ||
39 | enum dma_data_direction dir); | ||
40 | void (*sync_single_for_device)(struct device *dev, | ||
41 | dma_addr_t dma_handle, size_t size, | ||
42 | enum dma_data_direction dir); | ||
43 | void (*sync_single_range_for_cpu)(struct device *dev, | ||
44 | dma_addr_t dma_handle, | ||
45 | unsigned long offset, | ||
46 | size_t size, | ||
47 | enum dma_data_direction dir); | ||
48 | void (*sync_single_range_for_device)(struct device *dev, | ||
49 | dma_addr_t dma_handle, | ||
50 | unsigned long offset, | ||
51 | size_t size, | ||
52 | enum dma_data_direction dir); | ||
53 | void (*sync_sg_for_cpu)(struct device *dev, | ||
54 | struct scatterlist *sg, int nents, | ||
55 | enum dma_data_direction dir); | ||
56 | void (*sync_sg_for_device)(struct device *dev, | ||
57 | struct scatterlist *sg, int nents, | ||
58 | enum dma_data_direction dir); | ||
59 | int (*mapping_error)(struct device *dev, dma_addr_t dma_addr); | ||
60 | int (*dma_supported)(struct device *dev, u64 mask); | ||
61 | int is_phys; | ||
62 | }; | ||
63 | |||
16 | #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1)) | 64 | #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1)) |
17 | 65 | ||
18 | /* | 66 | /* |
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 78c1262e8704..1d6c71d96ede 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h | |||
@@ -332,11 +332,4 @@ extern int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr, | |||
332 | 332 | ||
333 | extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu); | 333 | extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu); |
334 | 334 | ||
335 | extern void *intel_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t); | ||
336 | extern void intel_free_coherent(struct device *, size_t, void *, dma_addr_t); | ||
337 | extern dma_addr_t intel_map_single(struct device *, phys_addr_t, size_t, int); | ||
338 | extern void intel_unmap_single(struct device *, dma_addr_t, size_t, int); | ||
339 | extern int intel_map_sg(struct device *, struct scatterlist *, int, int); | ||
340 | extern void intel_unmap_sg(struct device *, struct scatterlist *, int, int); | ||
341 | |||
342 | #endif | 335 | #endif |
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index dedd3c0cfe30..ac9ff54f7cb3 100644 --- a/include/linux/swiotlb.h +++ b/include/linux/swiotlb.h | |||
@@ -31,7 +31,7 @@ extern dma_addr_t swiotlb_phys_to_bus(struct device *hwdev, | |||
31 | phys_addr_t address); | 31 | phys_addr_t address); |
32 | extern phys_addr_t swiotlb_bus_to_phys(dma_addr_t address); | 32 | extern phys_addr_t swiotlb_bus_to_phys(dma_addr_t address); |
33 | 33 | ||
34 | extern int swiotlb_arch_range_needs_mapping(void *ptr, size_t size); | 34 | extern int swiotlb_arch_range_needs_mapping(phys_addr_t paddr, size_t size); |
35 | 35 | ||
36 | extern void | 36 | extern void |
37 | *swiotlb_alloc_coherent(struct device *hwdev, size_t size, | 37 | *swiotlb_alloc_coherent(struct device *hwdev, size_t size, |
@@ -41,20 +41,13 @@ extern void | |||
41 | swiotlb_free_coherent(struct device *hwdev, size_t size, | 41 | swiotlb_free_coherent(struct device *hwdev, size_t size, |
42 | void *vaddr, dma_addr_t dma_handle); | 42 | void *vaddr, dma_addr_t dma_handle); |
43 | 43 | ||
44 | extern dma_addr_t | 44 | extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page, |
45 | swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir); | 45 | unsigned long offset, size_t size, |
46 | 46 | enum dma_data_direction dir, | |
47 | extern void | 47 | struct dma_attrs *attrs); |
48 | swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, | 48 | extern void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr, |
49 | size_t size, int dir); | 49 | size_t size, enum dma_data_direction dir, |
50 | 50 | struct dma_attrs *attrs); | |
51 | extern dma_addr_t | ||
52 | swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size, | ||
53 | int dir, struct dma_attrs *attrs); | ||
54 | |||
55 | extern void | ||
56 | swiotlb_unmap_single_attrs(struct device *hwdev, dma_addr_t dev_addr, | ||
57 | size_t size, int dir, struct dma_attrs *attrs); | ||
58 | 51 | ||
59 | extern int | 52 | extern int |
60 | swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nents, | 53 | swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nents, |
@@ -66,36 +59,38 @@ swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents, | |||
66 | 59 | ||
67 | extern int | 60 | extern int |
68 | swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems, | 61 | swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems, |
69 | int dir, struct dma_attrs *attrs); | 62 | enum dma_data_direction dir, struct dma_attrs *attrs); |
70 | 63 | ||
71 | extern void | 64 | extern void |
72 | swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl, | 65 | swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl, |
73 | int nelems, int dir, struct dma_attrs *attrs); | 66 | int nelems, enum dma_data_direction dir, |
67 | struct dma_attrs *attrs); | ||
74 | 68 | ||
75 | extern void | 69 | extern void |
76 | swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr, | 70 | swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr, |
77 | size_t size, int dir); | 71 | size_t size, enum dma_data_direction dir); |
78 | 72 | ||
79 | extern void | 73 | extern void |
80 | swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg, | 74 | swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg, |
81 | int nelems, int dir); | 75 | int nelems, enum dma_data_direction dir); |
82 | 76 | ||
83 | extern void | 77 | extern void |
84 | swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr, | 78 | swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr, |
85 | size_t size, int dir); | 79 | size_t size, enum dma_data_direction dir); |
86 | 80 | ||
87 | extern void | 81 | extern void |
88 | swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, | 82 | swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, |
89 | int nelems, int dir); | 83 | int nelems, enum dma_data_direction dir); |
90 | 84 | ||
91 | extern void | 85 | extern void |
92 | swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr, | 86 | swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr, |
93 | unsigned long offset, size_t size, int dir); | 87 | unsigned long offset, size_t size, |
88 | enum dma_data_direction dir); | ||
94 | 89 | ||
95 | extern void | 90 | extern void |
96 | swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr, | 91 | swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr, |
97 | unsigned long offset, size_t size, | 92 | unsigned long offset, size_t size, |
98 | int dir); | 93 | enum dma_data_direction dir); |
99 | 94 | ||
100 | extern int | 95 | extern int |
101 | swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr); | 96 | swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr); |
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 0626fa4856e6..58bfe7e8faba 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug | |||
@@ -912,6 +912,17 @@ config DYNAMIC_DEBUG | |||
912 | 912 | ||
913 | See Documentation/dynamic-debug-howto.txt for additional information. | 913 | See Documentation/dynamic-debug-howto.txt for additional information. |
914 | 914 | ||
915 | config DMA_API_DEBUG | ||
916 | bool "Enable debugging of DMA-API usage" | ||
917 | depends on HAVE_DMA_API_DEBUG | ||
918 | help | ||
919 | Enable this option to debug the use of the DMA API by device drivers. | ||
920 | With this option you will be able to detect common bugs in device | ||
921 | drivers like double-freeing of DMA mappings or freeing mappings that | ||
922 | were never allocated. | ||
923 | This option causes a performance degredation. Use only if you want | ||
924 | to debug device drivers. If unsure, say N. | ||
925 | |||
915 | source "samples/Kconfig" | 926 | source "samples/Kconfig" |
916 | 927 | ||
917 | source "lib/Kconfig.kgdb" | 928 | source "lib/Kconfig.kgdb" |
diff --git a/lib/Makefile b/lib/Makefile index 051a33a8e028..d6edd6753f40 100644 --- a/lib/Makefile +++ b/lib/Makefile | |||
@@ -90,6 +90,8 @@ obj-$(CONFIG_DYNAMIC_DEBUG) += dynamic_debug.o | |||
90 | 90 | ||
91 | obj-$(CONFIG_NLATTR) += nlattr.o | 91 | obj-$(CONFIG_NLATTR) += nlattr.o |
92 | 92 | ||
93 | obj-$(CONFIG_DMA_API_DEBUG) += dma-debug.o | ||
94 | |||
93 | hostprogs-y := gen_crc32table | 95 | hostprogs-y := gen_crc32table |
94 | clean-files := crc32table.h | 96 | clean-files := crc32table.h |
95 | 97 | ||
diff --git a/lib/dma-debug.c b/lib/dma-debug.c new file mode 100644 index 000000000000..1a992089486c --- /dev/null +++ b/lib/dma-debug.c | |||
@@ -0,0 +1,955 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2008 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Author: Joerg Roedel <joerg.roedel@amd.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License version 2 as published | ||
8 | * by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the Free Software | ||
17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
18 | */ | ||
19 | |||
20 | #include <linux/scatterlist.h> | ||
21 | #include <linux/dma-mapping.h> | ||
22 | #include <linux/stacktrace.h> | ||
23 | #include <linux/dma-debug.h> | ||
24 | #include <linux/spinlock.h> | ||
25 | #include <linux/debugfs.h> | ||
26 | #include <linux/device.h> | ||
27 | #include <linux/types.h> | ||
28 | #include <linux/sched.h> | ||
29 | #include <linux/list.h> | ||
30 | #include <linux/slab.h> | ||
31 | |||
32 | #include <asm/sections.h> | ||
33 | |||
/* Number of hash buckets; must stay a power of two for HASH_FN_MASK. */
#define HASH_SIZE 1024ULL
/* Entries are hashed on device-address bits [HASH_FN_SHIFT..HASH_FN_SHIFT+9]. */
#define HASH_FN_SHIFT 13
#define HASH_FN_MASK (HASH_SIZE - 1)

/* Kind of mapping an entry tracks; also indexes type2name[] below. */
enum {
	dma_debug_single,
	dma_debug_page,
	dma_debug_sg,
	dma_debug_coherent,
};

/* Depth of the allocation backtrace saved per mapping entry. */
#define DMA_DEBUG_STACKTRACE_ENTRIES 5

/*
 * One tracked DMA mapping.  Entries are preallocated at init time and
 * recycled through the free_entries list below.
 */
struct dma_debug_entry {
	struct list_head list;		/* hash-bucket or free-list linkage */
	struct device *dev;		/* device owning the mapping */
	int type;			/* dma_debug_* mapping kind */
	phys_addr_t paddr;		/* cpu physical address */
	u64 dev_addr;			/* device/bus address */
	u64 size;			/* mapping size in bytes */
	int direction;			/* DMA_* data direction */
	int sg_call_ents;		/* nents the driver passed to map_sg */
	int sg_mapped_ents;		/* nents map_sg actually mapped */
#ifdef CONFIG_STACKTRACE
	struct stack_trace stacktrace;	/* where the mapping was created */
	unsigned long st_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
#endif
};

/* A hash bucket: list of live entries plus the lock guarding it. */
struct hash_bucket {
	struct list_head list;
	spinlock_t lock;
} ____cacheline_aligned_in_smp;

/* Hash list to save the allocated dma addresses */
static struct hash_bucket dma_entry_hash[HASH_SIZE];
/* List of pre-allocated dma_debug_entry's */
static LIST_HEAD(free_entries);
/* Lock for the list above */
static DEFINE_SPINLOCK(free_entries_lock);

/* Global disable flag - will be set in case of an error */
static bool global_disable __read_mostly;

/* Global error count */
static u32 error_count;

/* Global error show enable */
static u32 show_all_errors __read_mostly;
/* Number of errors to show */
static u32 show_num_errors = 1;

/* Free-pool statistics exported via debugfs (current / low-water mark). */
static u32 num_free_entries;
static u32 min_free_entries;

/* number of preallocated entries requested by kernel cmdline */
static u32 req_entries;

/* debugfs dentry's for the stuff above */
static struct dentry *dma_debug_dent        __read_mostly;
static struct dentry *global_disable_dent   __read_mostly;
static struct dentry *error_count_dent      __read_mostly;
static struct dentry *show_all_errors_dent  __read_mostly;
static struct dentry *show_num_errors_dent  __read_mostly;
static struct dentry *num_free_entries_dent __read_mostly;
static struct dentry *min_free_entries_dent __read_mostly;

/*
 * NOTE(review): "scather-gather" is a typo for "scatter-gather" in the
 * user-visible debug output; left unchanged here to avoid altering a
 * runtime string that log parsers may already match on.
 */
static const char *type2name[4] = { "single", "page",
				    "scather-gather", "coherent" };

static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
				   "DMA_FROM_DEVICE", "DMA_NONE" };
106 | |||
107 | /* | ||
108 | * The access to some variables in this macro is racy. We can't use atomic_t | ||
109 | * here because all these variables are exported to debugfs. Some of them even | ||
110 | * writeable. This is also the reason why a lock won't help much. But anyway, | ||
111 | * the races are no big deal. Here is why: | ||
112 | * | ||
113 | * error_count: the addition is racy, but the worst thing that can happen is | ||
114 | * that we don't count some errors | ||
115 | * show_num_errors: the subtraction is racy. Also no big deal because in | ||
116 | * worst case this will result in one warning more in the | ||
117 | * system log than the user configured. This variable is | ||
118 | * writeable via debugfs. | ||
119 | */ | ||
/* Print the backtrace saved at map time, if stack tracing is compiled in. */
static inline void dump_entry_trace(struct dma_debug_entry *entry)
{
#ifdef CONFIG_STACKTRACE
	if (!entry)
		return;

	printk(KERN_WARNING "Mapped at:\n");
	print_stack_trace(&entry->stacktrace, 0);
#endif
}
129 | |||
/*
 * Report a DMA-API usage error.  Must stay a macro (not a function) so
 * that WARN() reports the caller's file and line.  The unlocked updates
 * of error_count/show_num_errors are intentionally racy - see the
 * comment block above dump_entry_trace().
 *
 * Fix: dropped the stray semicolon after "while (0)".  The trailing ';'
 * made "if (x) err_printk(...); else ..." a syntax error, defeating the
 * whole point of the do/while(0) idiom.
 */
#define err_printk(dev, entry, format, arg...) do {		\
		error_count += 1;				\
		if (show_all_errors || show_num_errors > 0) {	\
			WARN(1, "%s %s: " format,		\
			     dev_driver_string(dev),		\
			     dev_name(dev) , ## arg);		\
			dump_entry_trace(entry);		\
		}						\
		if (!show_all_errors && show_num_errors > 0)	\
			show_num_errors -= 1;			\
	} while (0)
141 | |||
/*
 * Hash related functions
 *
 * Every DMA-API request is saved into a struct dma_debug_entry. To
 * have quick access to these structs they are stored into a hash.
 */
static int hash_fn(struct dma_debug_entry *entry)
{
	/*
	 * Hash function is based on the dma address.
	 * With HASH_FN_SHIFT == 13 and a 10-bit HASH_FN_MASK this uses
	 * bits 13-22 of the device address as the index into the hash.
	 * (The original comment claimed bits 20-27, which did not match
	 * the constants.)
	 */
	return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
}
156 | |||
157 | /* | ||
158 | * Request exclusive access to a hash bucket for a given dma_debug_entry. | ||
159 | */ | ||
160 | static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry, | ||
161 | unsigned long *flags) | ||
162 | { | ||
163 | int idx = hash_fn(entry); | ||
164 | unsigned long __flags; | ||
165 | |||
166 | spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags); | ||
167 | *flags = __flags; | ||
168 | return &dma_entry_hash[idx]; | ||
169 | } | ||
170 | |||
171 | /* | ||
172 | * Give up exclusive access to the hash bucket | ||
173 | */ | ||
174 | static void put_hash_bucket(struct hash_bucket *bucket, | ||
175 | unsigned long *flags) | ||
176 | { | ||
177 | unsigned long __flags = *flags; | ||
178 | |||
179 | spin_unlock_irqrestore(&bucket->lock, __flags); | ||
180 | } | ||
181 | |||
182 | /* | ||
183 | * Search a given entry in the hash bucket list | ||
184 | */ | ||
185 | static struct dma_debug_entry *hash_bucket_find(struct hash_bucket *bucket, | ||
186 | struct dma_debug_entry *ref) | ||
187 | { | ||
188 | struct dma_debug_entry *entry; | ||
189 | |||
190 | list_for_each_entry(entry, &bucket->list, list) { | ||
191 | if ((entry->dev_addr == ref->dev_addr) && | ||
192 | (entry->dev == ref->dev)) | ||
193 | return entry; | ||
194 | } | ||
195 | |||
196 | return NULL; | ||
197 | } | ||
198 | |||
199 | /* | ||
200 | * Add an entry to a hash bucket | ||
201 | */ | ||
202 | static void hash_bucket_add(struct hash_bucket *bucket, | ||
203 | struct dma_debug_entry *entry) | ||
204 | { | ||
205 | list_add_tail(&entry->list, &bucket->list); | ||
206 | } | ||
207 | |||
/*
 * Remove entry from a hash bucket list
 *
 * Caller must hold the lock of the bucket the entry currently lives in.
 */
static void hash_bucket_del(struct dma_debug_entry *entry)
{
	list_del(&entry->list);
}
215 | |||
/*
 * Dump mapping entries for debugging purposes
 *
 * Prints every tracked mapping to the kernel log - or only those of
 * @dev when @dev is non-NULL.  Walks all hash buckets, taking each
 * bucket's lock in turn.
 */
void debug_dma_dump_mappings(struct device *dev)
{
	int idx;

	for (idx = 0; idx < HASH_SIZE; idx++) {
		struct hash_bucket *bucket = &dma_entry_hash[idx];
		struct dma_debug_entry *entry;
		unsigned long flags;

		spin_lock_irqsave(&bucket->lock, flags);

		list_for_each_entry(entry, &bucket->list, list) {
			/* NULL dev means "dump mappings of all devices" */
			if (!dev || dev == entry->dev) {
				dev_info(entry->dev,
					 "%s idx %d P=%Lx D=%Lx L=%Lx %s\n",
					 type2name[entry->type], idx,
					 (unsigned long long)entry->paddr,
					 entry->dev_addr, entry->size,
					 dir2name[entry->direction]);
			}
		}

		spin_unlock_irqrestore(&bucket->lock, flags);
	}
}
EXPORT_SYMBOL(debug_dma_dump_mappings);
245 | |||
/*
 * Insert a freshly filled-in entry into the hash; the bucket lock is
 * taken and released internally.
 */
static void add_dma_entry(struct dma_debug_entry *entry)
{
	unsigned long flags;
	struct hash_bucket *bucket = get_hash_bucket(entry, &flags);

	hash_bucket_add(bucket, entry);
	put_hash_bucket(bucket, &flags);
}
259 | |||
/* struct dma_entry allocator
 *
 * The next two functions implement the allocator for
 * struct dma_debug_entries.
 */

/*
 * Take one entry off the preallocated free list and return it zeroed,
 * with the caller's backtrace recorded when stack tracing is enabled.
 * Returns NULL - and permanently disables all further checking - when
 * the pool is exhausted.
 */
static struct dma_debug_entry *dma_entry_alloc(void)
{
	struct dma_debug_entry *entry = NULL;
	unsigned long flags;

	spin_lock_irqsave(&free_entries_lock, flags);

	if (list_empty(&free_entries)) {
		printk(KERN_ERR "DMA-API: debugging out of memory "
				"- disabling\n");
		global_disable = true;
		goto out;
	}

	entry = list_entry(free_entries.next, struct dma_debug_entry, list);
	list_del(&entry->list);
	memset(entry, 0, sizeof(*entry));

#ifdef CONFIG_STACKTRACE
	entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES;
	entry->stacktrace.entries = entry->st_entries;
	entry->stacktrace.skip = 2;	/* skip the allocator frames themselves */
	save_stack_trace(&entry->stacktrace);
#endif
	/* maintain the low-water mark exported via debugfs */
	num_free_entries -= 1;
	if (num_free_entries < min_free_entries)
		min_free_entries = num_free_entries;

out:
	spin_unlock_irqrestore(&free_entries_lock, flags);

	return entry;
}
298 | |||
299 | static void dma_entry_free(struct dma_debug_entry *entry) | ||
300 | { | ||
301 | unsigned long flags; | ||
302 | |||
303 | /* | ||
304 | * add to beginning of the list - this way the entries are | ||
305 | * more likely cache hot when they are reallocated. | ||
306 | */ | ||
307 | spin_lock_irqsave(&free_entries_lock, flags); | ||
308 | list_add(&entry->list, &free_entries); | ||
309 | num_free_entries += 1; | ||
310 | spin_unlock_irqrestore(&free_entries_lock, flags); | ||
311 | } | ||
312 | |||
313 | /* | ||
314 | * DMA-API debugging init code | ||
315 | * | ||
316 | * The init code does two things: | ||
317 | * 1. Initialize core data structures | ||
318 | * 2. Preallocate a given number of dma_debug_entry structs | ||
319 | */ | ||
320 | |||
/*
 * Preallocate @num_entries dma_debug_entry structs and put them on the
 * free list.  On allocation failure everything allocated so far is
 * released again and -ENOMEM is returned.
 */
static int prealloc_memory(u32 num_entries)
{
	struct dma_debug_entry *entry, *next_entry;
	int i;

	for (i = 0; i < num_entries; ++i) {
		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry)
			goto out_err;

		list_add_tail(&entry->list, &free_entries);
	}

	num_free_entries = num_entries;
	min_free_entries = num_entries;

	printk(KERN_INFO "DMA-API: preallocated %d debug entries\n",
			num_entries);

	return 0;

out_err:

	/*
	 * Undo the partial allocation.  free_entries is walked unlocked
	 * here - presumably safe because this runs only from
	 * dma_debug_init() at boot; TODO confirm no concurrent callers.
	 */
	list_for_each_entry_safe(entry, next_entry, &free_entries, list) {
		list_del(&entry->list);
		kfree(entry);
	}

	return -ENOMEM;
}
351 | |||
/*
 * Create the "dma-api" debugfs directory and populate it with files
 * exporting the error statistics and tuning knobs declared above.  On
 * any failure the partially created directory is removed again and
 * -ENOMEM is returned.
 */
static int dma_debug_fs_init(void)
{
	dma_debug_dent = debugfs_create_dir("dma-api", NULL);
	if (!dma_debug_dent) {
		printk(KERN_ERR "DMA-API: can not create debugfs directory\n");
		return -ENOMEM;
	}

	/*
	 * NOTE(review): global_disable is declared bool but exported
	 * through this (u32 *) cast - confirm this matches the pointer
	 * type debugfs_create_bool() expects; a size mismatch would read
	 * adjacent bytes.
	 */
	global_disable_dent = debugfs_create_bool("disabled", 0444,
			dma_debug_dent,
			(u32 *)&global_disable);
	if (!global_disable_dent)
		goto out_err;

	error_count_dent = debugfs_create_u32("error_count", 0444,
			dma_debug_dent, &error_count);
	if (!error_count_dent)
		goto out_err;

	show_all_errors_dent = debugfs_create_u32("all_errors", 0644,
			dma_debug_dent,
			&show_all_errors);
	if (!show_all_errors_dent)
		goto out_err;

	show_num_errors_dent = debugfs_create_u32("num_errors", 0644,
			dma_debug_dent,
			&show_num_errors);
	if (!show_num_errors_dent)
		goto out_err;

	num_free_entries_dent = debugfs_create_u32("num_free_entries", 0444,
			dma_debug_dent,
			&num_free_entries);
	if (!num_free_entries_dent)
		goto out_err;

	min_free_entries_dent = debugfs_create_u32("min_free_entries", 0444,
			dma_debug_dent,
			&min_free_entries);
	if (!min_free_entries_dent)
		goto out_err;

	return 0;

out_err:
	debugfs_remove_recursive(dma_debug_dent);

	return -ENOMEM;
}
402 | |||
/*
 * Count the mappings still tracked for @dev.  Used from the bus
 * notifier on driver unbind to detect leaked DMA mappings.
 */
static int device_dma_allocations(struct device *dev)
{
	struct dma_debug_entry *entry;
	unsigned long flags;
	int count = 0, i;

	for (i = 0; i < HASH_SIZE; ++i) {
		spin_lock_irqsave(&dma_entry_hash[i].lock, flags);
		list_for_each_entry(entry, &dma_entry_hash[i].list, list) {
			if (entry->dev == dev)
				count += 1;
		}
		spin_unlock_irqrestore(&dma_entry_hash[i].lock, flags);
	}

	return count;
}
420 | |||
/*
 * Bus notifier callback: when a driver is unbound from a device, warn
 * if that device still has DMA mappings tracked (i.e. the driver
 * leaked them).  Always returns 0 so notification continues.
 */
static int dma_debug_device_change(struct notifier_block *nb,
				    unsigned long action, void *data)
{
	struct device *dev = data;
	int count;


	switch (action) {
	case BUS_NOTIFY_UNBIND_DRIVER:
		count = device_dma_allocations(dev);
		if (count == 0)
			break;
		err_printk(dev, NULL, "DMA-API: device driver has pending "
				"DMA allocations while released from device "
				"[count=%d]\n", count);
		break;
	default:
		break;
	}

	return 0;
}
443 | |||
/*
 * Register the unbind notifier on @bus so that leaked mappings are
 * reported when a driver is detached from one of its devices.  The
 * notifier_block is allocated here and never freed - presumably fine
 * because buses live for the system lifetime (TODO confirm).
 */
void dma_debug_add_bus(struct bus_type *bus)
{
	struct notifier_block *nb;

	nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
	if (nb == NULL) {
		printk(KERN_ERR "dma_debug_add_bus: out of memory\n");
		return;
	}

	nb->notifier_call = dma_debug_device_change;

	bus_register_notifier(bus, nb);
}
458 | |||
459 | /* | ||
460 | * Let the architectures decide how many entries should be preallocated. | ||
461 | */ | ||
462 | void dma_debug_init(u32 num_entries) | ||
463 | { | ||
464 | int i; | ||
465 | |||
466 | if (global_disable) | ||
467 | return; | ||
468 | |||
469 | for (i = 0; i < HASH_SIZE; ++i) { | ||
470 | INIT_LIST_HEAD(&dma_entry_hash[i].list); | ||
471 | dma_entry_hash[i].lock = SPIN_LOCK_UNLOCKED; | ||
472 | } | ||
473 | |||
474 | if (dma_debug_fs_init() != 0) { | ||
475 | printk(KERN_ERR "DMA-API: error creating debugfs entries " | ||
476 | "- disabling\n"); | ||
477 | global_disable = true; | ||
478 | |||
479 | return; | ||
480 | } | ||
481 | |||
482 | if (req_entries) | ||
483 | num_entries = req_entries; | ||
484 | |||
485 | if (prealloc_memory(num_entries) != 0) { | ||
486 | printk(KERN_ERR "DMA-API: debugging out of memory error " | ||
487 | "- disabled\n"); | ||
488 | global_disable = true; | ||
489 | |||
490 | return; | ||
491 | } | ||
492 | |||
493 | printk(KERN_INFO "DMA-API: debugging enabled by kernel config\n"); | ||
494 | } | ||
495 | |||
496 | static __init int dma_debug_cmdline(char *str) | ||
497 | { | ||
498 | if (!str) | ||
499 | return -EINVAL; | ||
500 | |||
501 | if (strncmp(str, "off", 3) == 0) { | ||
502 | printk(KERN_INFO "DMA-API: debugging disabled on kernel " | ||
503 | "command line\n"); | ||
504 | global_disable = true; | ||
505 | } | ||
506 | |||
507 | return 0; | ||
508 | } | ||
509 | |||
510 | static __init int dma_debug_entries_cmdline(char *str) | ||
511 | { | ||
512 | int res; | ||
513 | |||
514 | if (!str) | ||
515 | return -EINVAL; | ||
516 | |||
517 | res = get_option(&str, &req_entries); | ||
518 | |||
519 | if (!res) | ||
520 | req_entries = 0; | ||
521 | |||
522 | return 0; | ||
523 | } | ||
524 | |||
525 | __setup("dma_debug=", dma_debug_cmdline); | ||
526 | __setup("dma_debug_entries=", dma_debug_entries_cmdline); | ||
527 | |||
/*
 * Core check for all unmap/free operations.  Looks up the tracking
 * entry matching @ref and verifies that size, mapping type, cpu
 * address (for coherent allocations), scatterlist entry count and
 * direction all agree with what was recorded at map time.  A matching
 * entry is removed from the hash and returned to the free pool.
 */
static void check_unmap(struct dma_debug_entry *ref)
{
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;

	/* driver is freeing an address that map_* reported as an error */
	if (dma_mapping_error(ref->dev, ref->dev_addr)) {
		err_printk(ref->dev, NULL, "DMA-API: device driver tries "
			   "to free an invalid DMA memory address\n");
		return;
	}

	bucket = get_hash_bucket(ref, &flags);
	entry = hash_bucket_find(bucket, ref);

	if (!entry) {
		err_printk(ref->dev, NULL, "DMA-API: device driver tries "
			   "to free DMA memory it has not allocated "
			   "[device address=0x%016llx] [size=%llu bytes]\n",
			   ref->dev_addr, ref->size);
		goto out;
	}

	if (ref->size != entry->size) {
		err_printk(ref->dev, entry, "DMA-API: device driver frees "
			   "DMA memory with different size "
			   "[device address=0x%016llx] [map size=%llu bytes] "
			   "[unmap size=%llu bytes]\n",
			   ref->dev_addr, entry->size, ref->size);
	}

	if (ref->type != entry->type) {
		err_printk(ref->dev, entry, "DMA-API: device driver frees "
			   "DMA memory with wrong function "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped as %s] [unmapped as %s]\n",
			   ref->dev_addr, ref->size,
			   type2name[entry->type], type2name[ref->type]);
	} else if ((entry->type == dma_debug_coherent) &&
		   (ref->paddr != entry->paddr)) {
		/* coherent frees must pass back the cpu address of the alloc */
		err_printk(ref->dev, entry, "DMA-API: device driver frees "
			   "DMA memory with different CPU address "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[cpu alloc address=%p] [cpu free address=%p]",
			   ref->dev_addr, ref->size,
			   (void *)entry->paddr, (void *)ref->paddr);
	}

	/* sg_call_ents == 0 means the caller opted out of this check */
	if (ref->sg_call_ents && ref->type == dma_debug_sg &&
	    ref->sg_call_ents != entry->sg_call_ents) {
		err_printk(ref->dev, entry, "DMA-API: device driver frees "
			   "DMA sg list with different entry count "
			   "[map count=%d] [unmap count=%d]\n",
			   entry->sg_call_ents, ref->sg_call_ents);
	}

	/*
	 * This may be no bug in reality - but most implementations of the
	 * DMA API don't handle this properly, so check for it here
	 */
	if (ref->direction != entry->direction) {
		err_printk(ref->dev, entry, "DMA-API: device driver frees "
			   "DMA memory with different direction "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [unmapped with %s]\n",
			   ref->dev_addr, ref->size,
			   dir2name[entry->direction],
			   dir2name[ref->direction]);
	}

	hash_bucket_del(entry);
	dma_entry_free(entry);

out:
	put_hash_bucket(bucket, &flags);
}
604 | |||
605 | static void check_for_stack(struct device *dev, void *addr) | ||
606 | { | ||
607 | if (object_is_on_stack(addr)) | ||
608 | err_printk(dev, NULL, "DMA-API: device driver maps memory from" | ||
609 | "stack [addr=%p]\n", addr); | ||
610 | } | ||
611 | |||
612 | static inline bool overlap(void *addr, u64 size, void *start, void *end) | ||
613 | { | ||
614 | void *addr2 = (char *)addr + size; | ||
615 | |||
616 | return ((addr >= start && addr < end) || | ||
617 | (addr2 >= start && addr2 < end) || | ||
618 | ((addr < start) && (addr2 >= end))); | ||
619 | } | ||
620 | |||
/*
 * Warn when a driver maps kernel .text or .rodata for DMA - a device
 * writing there would corrupt kernel code or constant data.
 */
static void check_for_illegal_area(struct device *dev, void *addr, u64 size)
{
	if (overlap(addr, size, _text, _etext) ||
	    overlap(addr, size, __start_rodata, __end_rodata))
		err_printk(dev, NULL, "DMA-API: device driver maps "
				"memory from kernel text or rodata "
				"[addr=%p] [size=%llu]\n", addr, size);
}
629 | |||
/*
 * Verify a dma_sync_* call: the synced region must lie inside a
 * tracked mapping, and the sync direction must be compatible with the
 * direction the memory was mapped with.
 *
 * @to_cpu: true for the sync_*_for_cpu variants, false for
 *          sync_*_for_device.
 */
static void check_sync(struct device *dev, dma_addr_t addr,
		       u64 size, u64 offset, int direction, bool to_cpu)
{
	struct dma_debug_entry ref = {
		.dev            = dev,
		.dev_addr       = addr,
		.size           = size,
		.direction      = direction,
	};
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;

	bucket = get_hash_bucket(&ref, &flags);

	entry = hash_bucket_find(bucket, &ref);

	if (!entry) {
		err_printk(dev, NULL, "DMA-API: device driver tries "
				"to sync DMA memory it has not allocated "
				"[device address=0x%016llx] [size=%llu bytes]\n",
				addr, size);
		goto out;
	}

	/* partial syncs must stay within the mapped region */
	if ((offset + size) > entry->size) {
		err_printk(dev, entry, "DMA-API: device driver syncs"
				" DMA memory outside allocated range "
				"[device address=0x%016llx] "
				"[allocation size=%llu bytes] [sync offset=%llu] "
				"[sync size=%llu]\n", entry->dev_addr, entry->size,
				offset, size);
	}

	if (direction != entry->direction) {
		err_printk(dev, entry, "DMA-API: device driver syncs "
				"DMA memory with different direction "
				"[device address=0x%016llx] [size=%llu bytes] "
				"[mapped with %s] [synced with %s]\n",
				addr, entry->size,
				dir2name[entry->direction],
				dir2name[direction]);
	}

	/* bidirectional mappings may legally be synced either way */
	if (entry->direction == DMA_BIDIRECTIONAL)
		goto out;

	/* syncing device-read-only (DMA_TO_DEVICE) memory for the cpu */
	if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
		      !(direction == DMA_TO_DEVICE))
		err_printk(dev, entry, "DMA-API: device driver syncs "
				"device read-only DMA memory for cpu "
				"[device address=0x%016llx] [size=%llu bytes] "
				"[mapped with %s] [synced with %s]\n",
				addr, entry->size,
				dir2name[entry->direction],
				dir2name[direction]);

	/* syncing device-write-only (DMA_FROM_DEVICE) memory to the device */
	if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
		       !(direction == DMA_FROM_DEVICE))
		err_printk(dev, entry, "DMA-API: device driver syncs "
				"device write-only DMA memory to device "
				"[device address=0x%016llx] [size=%llu bytes] "
				"[mapped with %s] [synced with %s]\n",
				addr, entry->size,
				dir2name[entry->direction],
				dir2name[direction]);

out:
	put_hash_bucket(bucket, &flags);

}
701 | |||
/*
 * Record a dma_map_page()/dma_map_single() mapping, called by the
 * DMA-API wrappers after the mapping was performed.
 *
 * @map_single marks the entry as a "single" mapping so the unmap side
 * can verify the matching API flavour is used.
 */
void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
			size_t size, int direction, dma_addr_t dma_addr,
			bool map_single)
{
	struct dma_debug_entry *entry;

	if (unlikely(global_disable))
		return;

	/* failed mappings are not tracked - there is nothing to unmap */
	if (unlikely(dma_mapping_error(dev, dma_addr)))
		return;

	entry = dma_entry_alloc();
	if (!entry)
		return;

	entry->dev = dev;
	entry->type = dma_debug_page;
	entry->paddr = page_to_phys(page) + offset;
	entry->dev_addr = dma_addr;
	entry->size = size;
	entry->direction = direction;

	if (map_single)
		entry->type = dma_debug_single;

	/* the stack/text checks need a kernel virtual address - skip highmem */
	if (!PageHighMem(page)) {
		void *addr = ((char *)page_address(page)) + offset;
		check_for_stack(dev, addr);
		check_for_illegal_area(dev, addr, size);
	}

	add_dma_entry(entry);
}
EXPORT_SYMBOL(debug_dma_map_page);
737 | |||
738 | void debug_dma_unmap_page(struct device *dev, dma_addr_t addr, | ||
739 | size_t size, int direction, bool map_single) | ||
740 | { | ||
741 | struct dma_debug_entry ref = { | ||
742 | .type = dma_debug_page, | ||
743 | .dev = dev, | ||
744 | .dev_addr = addr, | ||
745 | .size = size, | ||
746 | .direction = direction, | ||
747 | }; | ||
748 | |||
749 | if (unlikely(global_disable)) | ||
750 | return; | ||
751 | |||
752 | if (map_single) | ||
753 | ref.type = dma_debug_single; | ||
754 | |||
755 | check_unmap(&ref); | ||
756 | } | ||
757 | EXPORT_SYMBOL(debug_dma_unmap_page); | ||
758 | |||
/*
 * Record one tracking entry per mapped scatterlist element.  Both the
 * element count the driver asked for (@nents) and the count the
 * mapping produced (@mapped_ents) are stored so the unmap side can
 * cross-check them.
 */
void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
		      int nents, int mapped_ents, int direction)
{
	struct dma_debug_entry *entry;
	struct scatterlist *s;
	int i;

	if (unlikely(global_disable))
		return;

	for_each_sg(sg, s, mapped_ents, i) {
		/* a failed pool alloc disables checking; just stop here */
		entry = dma_entry_alloc();
		if (!entry)
			return;

		entry->type = dma_debug_sg;
		entry->dev = dev;
		entry->paddr = sg_phys(s);
		entry->size = s->length;
		entry->dev_addr = s->dma_address;
		entry->direction = direction;
		entry->sg_call_ents = nents;
		entry->sg_mapped_ents = mapped_ents;

		/* stack/text checks need a kernel virtual address - skip highmem */
		if (!PageHighMem(sg_page(s))) {
			check_for_stack(dev, sg_virt(s));
			check_for_illegal_area(dev, sg_virt(s), s->length);
		}

		add_dma_entry(entry);
	}
}
EXPORT_SYMBOL(debug_dma_map_sg);
792 | |||
/*
 * Check a dma_unmap_sg() call.  The number of elements that were
 * actually mapped is read from the first element's tracking entry, and
 * iteration stops there - elements beyond it were never mapped.
 */
void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
			int nelems, int dir)
{
	struct dma_debug_entry *entry;
	struct scatterlist *s;
	int mapped_ents = 0, i;
	unsigned long flags;

	if (unlikely(global_disable))
		return;

	for_each_sg(sglist, s, nelems, i) {

		struct dma_debug_entry ref = {
			.type           = dma_debug_sg,
			.dev            = dev,
			.paddr          = sg_phys(s),
			.dev_addr       = s->dma_address,
			.size           = s->length,
			.direction      = dir,
			.sg_call_ents   = 0,
		};

		if (mapped_ents && i >= mapped_ents)
			break;

		/*
		 * First iteration only: look up the entry to learn how
		 * many elements were really mapped.  The call-entry
		 * count is cross-checked only here, since check_unmap()
		 * skips that check when sg_call_ents is 0.
		 */
		if (mapped_ents == 0) {
			struct hash_bucket *bucket;
			ref.sg_call_ents = nelems;
			bucket = get_hash_bucket(&ref, &flags);
			entry = hash_bucket_find(bucket, &ref);
			if (entry)
				mapped_ents = entry->sg_mapped_ents;
			put_hash_bucket(bucket, &flags);
		}

		check_unmap(&ref);
	}
}
EXPORT_SYMBOL(debug_dma_unmap_sg);
833 | |||
834 | void debug_dma_alloc_coherent(struct device *dev, size_t size, | ||
835 | dma_addr_t dma_addr, void *virt) | ||
836 | { | ||
837 | struct dma_debug_entry *entry; | ||
838 | |||
839 | if (unlikely(global_disable)) | ||
840 | return; | ||
841 | |||
842 | if (unlikely(virt == NULL)) | ||
843 | return; | ||
844 | |||
845 | entry = dma_entry_alloc(); | ||
846 | if (!entry) | ||
847 | return; | ||
848 | |||
849 | entry->type = dma_debug_coherent; | ||
850 | entry->dev = dev; | ||
851 | entry->paddr = virt_to_phys(virt); | ||
852 | entry->size = size; | ||
853 | entry->dev_addr = dma_addr; | ||
854 | entry->direction = DMA_BIDIRECTIONAL; | ||
855 | |||
856 | add_dma_entry(entry); | ||
857 | } | ||
858 | EXPORT_SYMBOL(debug_dma_alloc_coherent); | ||
859 | |||
860 | void debug_dma_free_coherent(struct device *dev, size_t size, | ||
861 | void *virt, dma_addr_t addr) | ||
862 | { | ||
863 | struct dma_debug_entry ref = { | ||
864 | .type = dma_debug_coherent, | ||
865 | .dev = dev, | ||
866 | .paddr = virt_to_phys(virt), | ||
867 | .dev_addr = addr, | ||
868 | .size = size, | ||
869 | .direction = DMA_BIDIRECTIONAL, | ||
870 | }; | ||
871 | |||
872 | if (unlikely(global_disable)) | ||
873 | return; | ||
874 | |||
875 | check_unmap(&ref); | ||
876 | } | ||
877 | EXPORT_SYMBOL(debug_dma_free_coherent); | ||
878 | |||
879 | void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, | ||
880 | size_t size, int direction) | ||
881 | { | ||
882 | if (unlikely(global_disable)) | ||
883 | return; | ||
884 | |||
885 | check_sync(dev, dma_handle, size, 0, direction, true); | ||
886 | } | ||
887 | EXPORT_SYMBOL(debug_dma_sync_single_for_cpu); | ||
888 | |||
889 | void debug_dma_sync_single_for_device(struct device *dev, | ||
890 | dma_addr_t dma_handle, size_t size, | ||
891 | int direction) | ||
892 | { | ||
893 | if (unlikely(global_disable)) | ||
894 | return; | ||
895 | |||
896 | check_sync(dev, dma_handle, size, 0, direction, false); | ||
897 | } | ||
898 | EXPORT_SYMBOL(debug_dma_sync_single_for_device); | ||
899 | |||
900 | void debug_dma_sync_single_range_for_cpu(struct device *dev, | ||
901 | dma_addr_t dma_handle, | ||
902 | unsigned long offset, size_t size, | ||
903 | int direction) | ||
904 | { | ||
905 | if (unlikely(global_disable)) | ||
906 | return; | ||
907 | |||
908 | check_sync(dev, dma_handle, size, offset, direction, true); | ||
909 | } | ||
910 | EXPORT_SYMBOL(debug_dma_sync_single_range_for_cpu); | ||
911 | |||
912 | void debug_dma_sync_single_range_for_device(struct device *dev, | ||
913 | dma_addr_t dma_handle, | ||
914 | unsigned long offset, | ||
915 | size_t size, int direction) | ||
916 | { | ||
917 | if (unlikely(global_disable)) | ||
918 | return; | ||
919 | |||
920 | check_sync(dev, dma_handle, size, offset, direction, false); | ||
921 | } | ||
922 | EXPORT_SYMBOL(debug_dma_sync_single_range_for_device); | ||
923 | |||
924 | void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, | ||
925 | int nelems, int direction) | ||
926 | { | ||
927 | struct scatterlist *s; | ||
928 | int i; | ||
929 | |||
930 | if (unlikely(global_disable)) | ||
931 | return; | ||
932 | |||
933 | for_each_sg(sg, s, nelems, i) { | ||
934 | check_sync(dev, s->dma_address, s->dma_length, 0, | ||
935 | direction, true); | ||
936 | } | ||
937 | } | ||
938 | EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu); | ||
939 | |||
940 | void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, | ||
941 | int nelems, int direction) | ||
942 | { | ||
943 | struct scatterlist *s; | ||
944 | int i; | ||
945 | |||
946 | if (unlikely(global_disable)) | ||
947 | return; | ||
948 | |||
949 | for_each_sg(sg, s, nelems, i) { | ||
950 | check_sync(dev, s->dma_address, s->dma_length, 0, | ||
951 | direction, false); | ||
952 | } | ||
953 | } | ||
954 | EXPORT_SYMBOL(debug_dma_sync_sg_for_device); | ||
955 | |||
diff --git a/lib/swiotlb.c b/lib/swiotlb.c index 1f991acc2a05..32e2bd3b1142 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c | |||
@@ -145,7 +145,7 @@ static void *swiotlb_bus_to_virt(dma_addr_t address) | |||
145 | return phys_to_virt(swiotlb_bus_to_phys(address)); | 145 | return phys_to_virt(swiotlb_bus_to_phys(address)); |
146 | } | 146 | } |
147 | 147 | ||
148 | int __weak swiotlb_arch_range_needs_mapping(void *ptr, size_t size) | 148 | int __weak swiotlb_arch_range_needs_mapping(phys_addr_t paddr, size_t size) |
149 | { | 149 | { |
150 | return 0; | 150 | return 0; |
151 | } | 151 | } |
@@ -315,9 +315,9 @@ address_needs_mapping(struct device *hwdev, dma_addr_t addr, size_t size) | |||
315 | return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size); | 315 | return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size); |
316 | } | 316 | } |
317 | 317 | ||
318 | static inline int range_needs_mapping(void *ptr, size_t size) | 318 | static inline int range_needs_mapping(phys_addr_t paddr, size_t size) |
319 | { | 319 | { |
320 | return swiotlb_force || swiotlb_arch_range_needs_mapping(ptr, size); | 320 | return swiotlb_force || swiotlb_arch_range_needs_mapping(paddr, size); |
321 | } | 321 | } |
322 | 322 | ||
323 | static int is_swiotlb_buffer(char *addr) | 323 | static int is_swiotlb_buffer(char *addr) |
@@ -636,11 +636,14 @@ swiotlb_full(struct device *dev, size_t size, int dir, int do_panic) | |||
636 | * Once the device is given the dma address, the device owns this memory until | 636 | * Once the device is given the dma address, the device owns this memory until |
637 | * either swiotlb_unmap_single or swiotlb_dma_sync_single is performed. | 637 | * either swiotlb_unmap_single or swiotlb_dma_sync_single is performed. |
638 | */ | 638 | */ |
639 | dma_addr_t | 639 | dma_addr_t swiotlb_map_page(struct device *dev, struct page *page, |
640 | swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size, | 640 | unsigned long offset, size_t size, |
641 | int dir, struct dma_attrs *attrs) | 641 | enum dma_data_direction dir, |
642 | { | 642 | struct dma_attrs *attrs) |
643 | dma_addr_t dev_addr = swiotlb_virt_to_bus(hwdev, ptr); | 643 | { |
644 | phys_addr_t phys = page_to_phys(page) + offset; | ||
645 | void *ptr = page_address(page) + offset; | ||
646 | dma_addr_t dev_addr = swiotlb_phys_to_bus(dev, phys); | ||
644 | void *map; | 647 | void *map; |
645 | 648 | ||
646 | BUG_ON(dir == DMA_NONE); | 649 | BUG_ON(dir == DMA_NONE); |
@@ -649,37 +652,30 @@ swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size, | |||
649 | * we can safely return the device addr and not worry about bounce | 652 | * we can safely return the device addr and not worry about bounce |
650 | * buffering it. | 653 | * buffering it. |
651 | */ | 654 | */ |
652 | if (!address_needs_mapping(hwdev, dev_addr, size) && | 655 | if (!address_needs_mapping(dev, dev_addr, size) && |
653 | !range_needs_mapping(ptr, size)) | 656 | !range_needs_mapping(virt_to_phys(ptr), size)) |
654 | return dev_addr; | 657 | return dev_addr; |
655 | 658 | ||
656 | /* | 659 | /* |
657 | * Oh well, have to allocate and map a bounce buffer. | 660 | * Oh well, have to allocate and map a bounce buffer. |
658 | */ | 661 | */ |
659 | map = map_single(hwdev, virt_to_phys(ptr), size, dir); | 662 | map = map_single(dev, phys, size, dir); |
660 | if (!map) { | 663 | if (!map) { |
661 | swiotlb_full(hwdev, size, dir, 1); | 664 | swiotlb_full(dev, size, dir, 1); |
662 | map = io_tlb_overflow_buffer; | 665 | map = io_tlb_overflow_buffer; |
663 | } | 666 | } |
664 | 667 | ||
665 | dev_addr = swiotlb_virt_to_bus(hwdev, map); | 668 | dev_addr = swiotlb_virt_to_bus(dev, map); |
666 | 669 | ||
667 | /* | 670 | /* |
668 | * Ensure that the address returned is DMA'ble | 671 | * Ensure that the address returned is DMA'ble |
669 | */ | 672 | */ |
670 | if (address_needs_mapping(hwdev, dev_addr, size)) | 673 | if (address_needs_mapping(dev, dev_addr, size)) |
671 | panic("map_single: bounce buffer is not DMA'ble"); | 674 | panic("map_single: bounce buffer is not DMA'ble"); |
672 | 675 | ||
673 | return dev_addr; | 676 | return dev_addr; |
674 | } | 677 | } |
675 | EXPORT_SYMBOL(swiotlb_map_single_attrs); | 678 | EXPORT_SYMBOL_GPL(swiotlb_map_page); |
676 | |||
677 | dma_addr_t | ||
678 | swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir) | ||
679 | { | ||
680 | return swiotlb_map_single_attrs(hwdev, ptr, size, dir, NULL); | ||
681 | } | ||
682 | EXPORT_SYMBOL(swiotlb_map_single); | ||
683 | 679 | ||
684 | /* | 680 | /* |
685 | * Unmap a single streaming mode DMA translation. The dma_addr and size must | 681 | * Unmap a single streaming mode DMA translation. The dma_addr and size must |
@@ -689,9 +685,9 @@ EXPORT_SYMBOL(swiotlb_map_single); | |||
689 | * After this call, reads by the cpu to the buffer are guaranteed to see | 685 | * After this call, reads by the cpu to the buffer are guaranteed to see |
690 | * whatever the device wrote there. | 686 | * whatever the device wrote there. |
691 | */ | 687 | */ |
692 | void | 688 | void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr, |
693 | swiotlb_unmap_single_attrs(struct device *hwdev, dma_addr_t dev_addr, | 689 | size_t size, enum dma_data_direction dir, |
694 | size_t size, int dir, struct dma_attrs *attrs) | 690 | struct dma_attrs *attrs) |
695 | { | 691 | { |
696 | char *dma_addr = swiotlb_bus_to_virt(dev_addr); | 692 | char *dma_addr = swiotlb_bus_to_virt(dev_addr); |
697 | 693 | ||
@@ -701,15 +697,7 @@ swiotlb_unmap_single_attrs(struct device *hwdev, dma_addr_t dev_addr, | |||
701 | else if (dir == DMA_FROM_DEVICE) | 697 | else if (dir == DMA_FROM_DEVICE) |
702 | dma_mark_clean(dma_addr, size); | 698 | dma_mark_clean(dma_addr, size); |
703 | } | 699 | } |
704 | EXPORT_SYMBOL(swiotlb_unmap_single_attrs); | 700 | EXPORT_SYMBOL_GPL(swiotlb_unmap_page); |
705 | |||
706 | void | ||
707 | swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size, | ||
708 | int dir) | ||
709 | { | ||
710 | return swiotlb_unmap_single_attrs(hwdev, dev_addr, size, dir, NULL); | ||
711 | } | ||
712 | EXPORT_SYMBOL(swiotlb_unmap_single); | ||
713 | 701 | ||
714 | /* | 702 | /* |
715 | * Make physical memory consistent for a single streaming mode DMA translation | 703 | * Make physical memory consistent for a single streaming mode DMA translation |
@@ -736,7 +724,7 @@ swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr, | |||
736 | 724 | ||
737 | void | 725 | void |
738 | swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr, | 726 | swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr, |
739 | size_t size, int dir) | 727 | size_t size, enum dma_data_direction dir) |
740 | { | 728 | { |
741 | swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU); | 729 | swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU); |
742 | } | 730 | } |
@@ -744,7 +732,7 @@ EXPORT_SYMBOL(swiotlb_sync_single_for_cpu); | |||
744 | 732 | ||
745 | void | 733 | void |
746 | swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr, | 734 | swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr, |
747 | size_t size, int dir) | 735 | size_t size, enum dma_data_direction dir) |
748 | { | 736 | { |
749 | swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE); | 737 | swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE); |
750 | } | 738 | } |
@@ -769,7 +757,8 @@ swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr, | |||
769 | 757 | ||
770 | void | 758 | void |
771 | swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr, | 759 | swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr, |
772 | unsigned long offset, size_t size, int dir) | 760 | unsigned long offset, size_t size, |
761 | enum dma_data_direction dir) | ||
773 | { | 762 | { |
774 | swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir, | 763 | swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir, |
775 | SYNC_FOR_CPU); | 764 | SYNC_FOR_CPU); |
@@ -778,7 +767,8 @@ EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_cpu); | |||
778 | 767 | ||
779 | void | 768 | void |
780 | swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr, | 769 | swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr, |
781 | unsigned long offset, size_t size, int dir) | 770 | unsigned long offset, size_t size, |
771 | enum dma_data_direction dir) | ||
782 | { | 772 | { |
783 | swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir, | 773 | swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir, |
784 | SYNC_FOR_DEVICE); | 774 | SYNC_FOR_DEVICE); |
@@ -803,7 +793,7 @@ EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device); | |||
803 | */ | 793 | */ |
804 | int | 794 | int |
805 | swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems, | 795 | swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems, |
806 | int dir, struct dma_attrs *attrs) | 796 | enum dma_data_direction dir, struct dma_attrs *attrs) |
807 | { | 797 | { |
808 | struct scatterlist *sg; | 798 | struct scatterlist *sg; |
809 | int i; | 799 | int i; |
@@ -811,10 +801,10 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems, | |||
811 | BUG_ON(dir == DMA_NONE); | 801 | BUG_ON(dir == DMA_NONE); |
812 | 802 | ||
813 | for_each_sg(sgl, sg, nelems, i) { | 803 | for_each_sg(sgl, sg, nelems, i) { |
814 | void *addr = sg_virt(sg); | 804 | phys_addr_t paddr = sg_phys(sg); |
815 | dma_addr_t dev_addr = swiotlb_virt_to_bus(hwdev, addr); | 805 | dma_addr_t dev_addr = swiotlb_phys_to_bus(hwdev, paddr); |
816 | 806 | ||
817 | if (range_needs_mapping(addr, sg->length) || | 807 | if (range_needs_mapping(paddr, sg->length) || |
818 | address_needs_mapping(hwdev, dev_addr, sg->length)) { | 808 | address_needs_mapping(hwdev, dev_addr, sg->length)) { |
819 | void *map = map_single(hwdev, sg_phys(sg), | 809 | void *map = map_single(hwdev, sg_phys(sg), |
820 | sg->length, dir); | 810 | sg->length, dir); |
@@ -850,7 +840,7 @@ EXPORT_SYMBOL(swiotlb_map_sg); | |||
850 | */ | 840 | */ |
851 | void | 841 | void |
852 | swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl, | 842 | swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl, |
853 | int nelems, int dir, struct dma_attrs *attrs) | 843 | int nelems, enum dma_data_direction dir, struct dma_attrs *attrs) |
854 | { | 844 | { |
855 | struct scatterlist *sg; | 845 | struct scatterlist *sg; |
856 | int i; | 846 | int i; |
@@ -858,11 +848,11 @@ swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl, | |||
858 | BUG_ON(dir == DMA_NONE); | 848 | BUG_ON(dir == DMA_NONE); |
859 | 849 | ||
860 | for_each_sg(sgl, sg, nelems, i) { | 850 | for_each_sg(sgl, sg, nelems, i) { |
861 | if (sg->dma_address != swiotlb_virt_to_bus(hwdev, sg_virt(sg))) | 851 | if (sg->dma_address != swiotlb_phys_to_bus(hwdev, sg_phys(sg))) |
862 | unmap_single(hwdev, swiotlb_bus_to_virt(sg->dma_address), | 852 | unmap_single(hwdev, swiotlb_bus_to_virt(sg->dma_address), |
863 | sg->dma_length, dir); | 853 | sg->dma_length, dir); |
864 | else if (dir == DMA_FROM_DEVICE) | 854 | else if (dir == DMA_FROM_DEVICE) |
865 | dma_mark_clean(sg_virt(sg), sg->dma_length); | 855 | dma_mark_clean(swiotlb_bus_to_virt(sg->dma_address), sg->dma_length); |
866 | } | 856 | } |
867 | } | 857 | } |
868 | EXPORT_SYMBOL(swiotlb_unmap_sg_attrs); | 858 | EXPORT_SYMBOL(swiotlb_unmap_sg_attrs); |
@@ -892,17 +882,17 @@ swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl, | |||
892 | BUG_ON(dir == DMA_NONE); | 882 | BUG_ON(dir == DMA_NONE); |
893 | 883 | ||
894 | for_each_sg(sgl, sg, nelems, i) { | 884 | for_each_sg(sgl, sg, nelems, i) { |
895 | if (sg->dma_address != swiotlb_virt_to_bus(hwdev, sg_virt(sg))) | 885 | if (sg->dma_address != swiotlb_phys_to_bus(hwdev, sg_phys(sg))) |
896 | sync_single(hwdev, swiotlb_bus_to_virt(sg->dma_address), | 886 | sync_single(hwdev, swiotlb_bus_to_virt(sg->dma_address), |
897 | sg->dma_length, dir, target); | 887 | sg->dma_length, dir, target); |
898 | else if (dir == DMA_FROM_DEVICE) | 888 | else if (dir == DMA_FROM_DEVICE) |
899 | dma_mark_clean(sg_virt(sg), sg->dma_length); | 889 | dma_mark_clean(swiotlb_bus_to_virt(sg->dma_address), sg->dma_length); |
900 | } | 890 | } |
901 | } | 891 | } |
902 | 892 | ||
903 | void | 893 | void |
904 | swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg, | 894 | swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg, |
905 | int nelems, int dir) | 895 | int nelems, enum dma_data_direction dir) |
906 | { | 896 | { |
907 | swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU); | 897 | swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU); |
908 | } | 898 | } |
@@ -910,7 +900,7 @@ EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu); | |||
910 | 900 | ||
911 | void | 901 | void |
912 | swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, | 902 | swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, |
913 | int nelems, int dir) | 903 | int nelems, enum dma_data_direction dir) |
914 | { | 904 | { |
915 | swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE); | 905 | swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE); |
916 | } | 906 | } |